ia64/xen-unstable

changeset 17682:fd5b2ed9574a

x86 hvm: Xen interface and implementation for virtual S3

Signed-off-by: Tian Kevin <kevin.tian@intel.com>
Signed-off-by: Yu Ke <ke.yu@intel.com>
Signed-off-by: Ke Liping <liping.ke@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue May 20 14:17:15 2008 +0100 (2008-05-20)
parents 2757cf34d1ea
children 4b4b829e34a2
files xen/arch/ia64/xen/domain.c xen/arch/x86/domain.c xen/arch/x86/hvm/hpet.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/i8254.c xen/arch/x86/hvm/pmtimer.c xen/arch/x86/hvm/rtc.c xen/arch/x86/hvm/vioapic.c xen/arch/x86/hvm/vlapic.c xen/arch/x86/hvm/vpic.c xen/common/domain.c xen/common/domctl.c xen/include/asm-x86/hvm/domain.h xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/vioapic.h xen/include/asm-x86/hvm/vpic.h xen/include/asm-x86/hvm/vpt.h xen/include/public/hvm/params.h xen/include/xen/domain.h
line diff
     1.1 --- a/xen/arch/ia64/xen/domain.c	Tue May 20 09:55:50 2008 +0100
     1.2 +++ b/xen/arch/ia64/xen/domain.c	Tue May 20 14:17:15 2008 +0100
     1.3 @@ -644,10 +644,9 @@ void arch_domain_destroy(struct domain *
     1.4  	deallocate_rid_range(d);
     1.5  }
     1.6  
     1.7 -int arch_vcpu_reset(struct vcpu *v)
     1.8 +void arch_vcpu_reset(struct vcpu *v)
     1.9  {
    1.10  	/* FIXME: Stub for now */
    1.11 -	return 0;
    1.12  }
    1.13  
    1.14  /* Here it is assumed that all of the CPUs has same RSE.N_STACKED_PHYS */
     2.1 --- a/xen/arch/x86/domain.c	Tue May 20 09:55:50 2008 +0100
     2.2 +++ b/xen/arch/x86/domain.c	Tue May 20 14:17:15 2008 +0100
     2.3 @@ -823,11 +823,10 @@ int arch_set_info_guest(
     2.4  #undef c
     2.5  }
     2.6  
     2.7 -int arch_vcpu_reset(struct vcpu *v)
     2.8 +void arch_vcpu_reset(struct vcpu *v)
     2.9  {
    2.10      destroy_gdt(v);
    2.11      vcpu_destroy_pagetables(v);
    2.12 -    return 0;
    2.13  }
    2.14  
    2.15  /* 
     3.1 --- a/xen/arch/x86/hvm/hpet.c	Tue May 20 09:55:50 2008 +0100
     3.2 +++ b/xen/arch/x86/hvm/hpet.c	Tue May 20 14:17:15 2008 +0100
     3.3 @@ -591,3 +591,8 @@ void hpet_deinit(struct domain *d)
     3.4          kill_timer(&h->timers[i]);
     3.5  }
     3.6  
     3.7 +void hpet_reset(struct domain *d)
     3.8 +{
     3.9 +    hpet_deinit(d);
    3.10 +    hpet_init(d->vcpu[0]);
    3.11 +}
     4.1 --- a/xen/arch/x86/hvm/hvm.c	Tue May 20 09:55:50 2008 +0100
     4.2 +++ b/xen/arch/x86/hvm/hvm.c	Tue May 20 14:17:15 2008 +0100
     4.3 @@ -2058,6 +2058,118 @@ static int hvmop_set_pci_intx_level(
     4.4      return rc;
     4.5  }
     4.6  
     4.7 +void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
     4.8 +{
     4.9 +    struct domain *d = current->domain;
    4.10 +    struct vcpu_guest_context *ctxt;
    4.11 +    struct segment_register reg;
    4.12 +
    4.13 +    BUG_ON(vcpu_runnable(v));
    4.14 +
    4.15 +    domain_lock(d);
    4.16 +
    4.17 +    if ( v->is_initialised )
    4.18 +        goto out;
    4.19 +
    4.20 +    ctxt = &v->arch.guest_context;
    4.21 +    memset(ctxt, 0, sizeof(*ctxt));
    4.22 +    ctxt->flags = VGCF_online;
    4.23 +    ctxt->user_regs.eflags = 2;
    4.24 +    ctxt->user_regs.edx = 0x00000f00;
    4.25 +    ctxt->user_regs.eip = ip;
    4.26 +
    4.27 +    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
    4.28 +    hvm_update_guest_cr(v, 0);
    4.29 +
    4.30 +    v->arch.hvm_vcpu.guest_cr[2] = 0;
    4.31 +    hvm_update_guest_cr(v, 2);
    4.32 +
    4.33 +    v->arch.hvm_vcpu.guest_cr[3] = 0;
    4.34 +    hvm_update_guest_cr(v, 3);
    4.35 +
    4.36 +    v->arch.hvm_vcpu.guest_cr[4] = 0;
    4.37 +    hvm_update_guest_cr(v, 4);
    4.38 +
    4.39 +    v->arch.hvm_vcpu.guest_efer = 0;
    4.40 +    hvm_update_guest_efer(v);
    4.41 +
    4.42 +    reg.sel = cs;
    4.43 +    reg.base = (uint32_t)reg.sel << 4;
    4.44 +    reg.limit = 0xffff;
    4.45 +    reg.attr.bytes = 0x09b;
    4.46 +    hvm_set_segment_register(v, x86_seg_cs, &reg);
    4.47 +
    4.48 +    reg.sel = reg.base = 0;
    4.49 +    reg.limit = 0xffff;
    4.50 +    reg.attr.bytes = 0x093;
    4.51 +    hvm_set_segment_register(v, x86_seg_ds, &reg);
    4.52 +    hvm_set_segment_register(v, x86_seg_es, &reg);
    4.53 +    hvm_set_segment_register(v, x86_seg_fs, &reg);
    4.54 +    hvm_set_segment_register(v, x86_seg_gs, &reg);
    4.55 +    hvm_set_segment_register(v, x86_seg_ss, &reg);
    4.56 +
    4.57 +    reg.attr.bytes = 0x82; /* LDT */
    4.58 +    hvm_set_segment_register(v, x86_seg_ldtr, &reg);
    4.59 +
    4.60 +    reg.attr.bytes = 0x8b; /* 32-bit TSS (busy) */
    4.61 +    hvm_set_segment_register(v, x86_seg_tr, &reg);
    4.62 +
    4.63 +    reg.attr.bytes = 0;
    4.64 +    hvm_set_segment_register(v, x86_seg_gdtr, &reg);
    4.65 +    hvm_set_segment_register(v, x86_seg_idtr, &reg);
    4.66 +
    4.67 +    /* Sync AP's TSC with BSP's. */
    4.68 +    v->arch.hvm_vcpu.cache_tsc_offset =
    4.69 +        v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
    4.70 +    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
    4.71 +
    4.72 +    v->arch.flags |= TF_kernel_mode;
    4.73 +    v->is_initialised = 1;
    4.74 +    clear_bit(_VPF_down, &v->pause_flags);
    4.75 +
    4.76 + out:
    4.77 +    domain_unlock(d);
    4.78 +}
    4.79 +
    4.80 +static void hvm_s3_suspend(struct domain *d)
    4.81 +{
    4.82 +    struct vcpu *v;
    4.83 +
    4.84 +    domain_pause(d);
    4.85 +    domain_lock(d);
    4.86 +
    4.87 +    if ( (d->vcpu[0] == NULL) ||
    4.88 +         test_and_set_bool(d->arch.hvm_domain.is_s3_suspended) )
    4.89 +    {
    4.90 +        domain_unlock(d);
    4.91 +        domain_unpause(d);
    4.92 +        return;
    4.93 +    }
    4.94 +
    4.95 +    for_each_vcpu ( d, v )
    4.96 +    {
    4.97 +        vlapic_reset(vcpu_vlapic(v));
    4.98 +        vcpu_reset(v);
    4.99 +    }
   4.100 +
   4.101 +    vpic_reset(d);
   4.102 +    vioapic_reset(d);
   4.103 +    pit_reset(d);
   4.104 +    rtc_reset(d);	
   4.105 +    pmtimer_reset(d);
   4.106 +    hpet_reset(d);
   4.107 +
   4.108 +    hvm_vcpu_reset_state(d->vcpu[0], 0xf000, 0xfff0);
   4.109 +
   4.110 +    domain_unlock(d);
   4.111 +}
   4.112 +
   4.113 +static void hvm_s3_resume(struct domain *d)
   4.114 +{
   4.115 +    if ( test_and_clear_bool(d->arch.hvm_domain.is_s3_suspended) )
   4.116 +        domain_unpause(d);
   4.117 +}
   4.118 +
   4.119  static int hvmop_set_isa_irq_level(
   4.120      XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t) uop)
   4.121  {
   4.122 @@ -2314,6 +2426,21 @@ long do_hvm_op(unsigned long op, XEN_GUE
   4.123                  }
   4.124                  domain_unpause(d);
   4.125                  break;
   4.126 +            case HVM_PARAM_ACPI_S_STATE:
   4.127 +                /* Privileged domains only, as we must domain_pause(d). */
   4.128 +                rc = -EPERM;
   4.129 +                if ( !IS_PRIV_FOR(current->domain, d) )
   4.130 +                    break;
   4.131 +
   4.132 +                rc = 0;
   4.133 +                if ( a.value == 3 )
   4.134 +                    hvm_s3_suspend(d);
   4.135 +                else if ( a.value == 0 )
   4.136 +                    hvm_s3_resume(d);
   4.137 +                else
   4.138 +                    rc = -EINVAL;
   4.139 +
   4.140 +                break;
   4.141              }
   4.142  
   4.143              if ( rc == 0 )
   4.144 @@ -2321,7 +2448,15 @@ long do_hvm_op(unsigned long op, XEN_GUE
   4.145          }
   4.146          else
   4.147          {
   4.148 -            a.value = d->arch.hvm_domain.params[a.index];
   4.149 +            switch ( a.index )
   4.150 +            {
   4.151 +            case HVM_PARAM_ACPI_S_STATE:
   4.152 +                a.value = d->arch.hvm_domain.is_s3_suspended ? 3 : 0;
   4.153 +                break;
   4.154 +            default:
   4.155 +                a.value = d->arch.hvm_domain.params[a.index];
   4.156 +                break;
   4.157 +            }
   4.158              rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
   4.159          }
   4.160  
     5.1 --- a/xen/arch/x86/hvm/i8254.c	Tue May 20 09:55:50 2008 +0100
     5.2 +++ b/xen/arch/x86/hvm/i8254.c	Tue May 20 14:17:15 2008 +0100
     5.3 @@ -446,22 +446,16 @@ static int pit_load(struct domain *d, hv
     5.4  
     5.5  HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load, 1, HVMSR_PER_DOM);
     5.6  
     5.7 -void pit_init(struct vcpu *v, unsigned long cpu_khz)
     5.8 +void pit_reset(struct domain *d)
     5.9  {
    5.10 -    PITState *pit = vcpu_vpit(v);
    5.11 +    PITState *pit = domain_vpit(d);
    5.12      struct hvm_hw_pit_channel *s;
    5.13      int i;
    5.14  
    5.15 -    spin_lock_init(&pit->lock);
    5.16 -
    5.17 -    /* Some sub-functions assert that they are called with the lock held. */
    5.18 -    spin_lock(&pit->lock);
    5.19 -
    5.20 +    destroy_periodic_time(&pit->pt0);
    5.21      pit->pt0.source = PTSRC_isa;
    5.22  
    5.23 -    register_portio_handler(v->domain, PIT_BASE, 4, handle_pit_io);
    5.24 -    register_portio_handler(v->domain, 0x61, 1, handle_speaker_io);
    5.25 -    ticks_per_sec(v) = cpu_khz * (int64_t)1000;
    5.26 +    spin_lock(&pit->lock);
    5.27  
    5.28      for ( i = 0; i < 3; i++ )
    5.29      {
    5.30 @@ -474,6 +468,20 @@ void pit_init(struct vcpu *v, unsigned l
    5.31      spin_unlock(&pit->lock);
    5.32  }
    5.33  
    5.34 +void pit_init(struct vcpu *v, unsigned long cpu_khz)
    5.35 +{
    5.36 +    PITState *pit = vcpu_vpit(v);
    5.37 +
    5.38 +    spin_lock_init(&pit->lock);
    5.39 +
    5.40 +    register_portio_handler(v->domain, PIT_BASE, 4, handle_pit_io);
    5.41 +    register_portio_handler(v->domain, 0x61, 1, handle_speaker_io);
    5.42 +
    5.43 +    ticks_per_sec(v) = cpu_khz * (int64_t)1000;
    5.44 +
    5.45 +    pit_reset(v->domain);
    5.46 +}
    5.47 +
    5.48  void pit_deinit(struct domain *d)
    5.49  {
    5.50      PITState *pit = domain_vpit(d);
     6.1 --- a/xen/arch/x86/hvm/pmtimer.c	Tue May 20 09:55:50 2008 +0100
     6.2 +++ b/xen/arch/x86/hvm/pmtimer.c	Tue May 20 14:17:15 2008 +0100
     6.3 @@ -276,3 +276,9 @@ void pmtimer_deinit(struct domain *d)
     6.4      PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
     6.5      kill_timer(&s->timer);
     6.6  }
     6.7 +
     6.8 +void pmtimer_reset(struct domain *d)
     6.9 +{
    6.10 +    /* Reset the counter. */
    6.11 +    d->arch.hvm_domain.pl_time.vpmt.pm.tmr_val = 0;
    6.12 +}
     7.1 --- a/xen/arch/x86/hvm/rtc.c	Tue May 20 09:55:50 2008 +0100
     7.2 +++ b/xen/arch/x86/hvm/rtc.c	Tue May 20 14:17:15 2008 +0100
     7.3 @@ -511,3 +511,9 @@ void rtc_deinit(struct domain *d)
     7.4      kill_timer(&s->second_timer);
     7.5      kill_timer(&s->second_timer2);
     7.6  }
     7.7 +
     7.8 +void rtc_reset(struct domain *d)
     7.9 +{
    7.10 +    RTCState *s = domain_vrtc(d);
    7.11 +    destroy_periodic_time(&s->pt);
    7.12 +}
     8.1 --- a/xen/arch/x86/hvm/vioapic.c	Tue May 20 09:55:50 2008 +0100
     8.2 +++ b/xen/arch/x86/hvm/vioapic.c	Tue May 20 14:17:15 2008 +0100
     8.3 @@ -494,21 +494,25 @@ static int ioapic_load(struct domain *d,
     8.4  
     8.5  HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);
     8.6  
     8.7 -int vioapic_init(struct domain *d)
     8.8 +void vioapic_reset(struct domain *d)
     8.9  {
    8.10 -    struct hvm_vioapic *vioapic;
    8.11 +    struct hvm_vioapic *vioapic = d->arch.hvm_domain.vioapic;
    8.12      int i;
    8.13  
    8.14 -    vioapic = d->arch.hvm_domain.vioapic = xmalloc(struct hvm_vioapic);
    8.15 -    if ( vioapic == NULL )
    8.16 -        return -ENOMEM;
    8.17 -
    8.18 -    vioapic->domain = d;
    8.19 -
    8.20      memset(&vioapic->hvm_hw_vioapic, 0, sizeof(vioapic->hvm_hw_vioapic));
    8.21      for ( i = 0; i < VIOAPIC_NUM_PINS; i++ )
    8.22          vioapic->hvm_hw_vioapic.redirtbl[i].fields.mask = 1;
    8.23      vioapic->hvm_hw_vioapic.base_address = VIOAPIC_DEFAULT_BASE_ADDRESS;
    8.24 +}
    8.25 +
    8.26 +int vioapic_init(struct domain *d)
    8.27 +{
    8.28 +    if ( (d->arch.hvm_domain.vioapic == NULL) &&
    8.29 +         ((d->arch.hvm_domain.vioapic = xmalloc(struct hvm_vioapic)) == NULL) )
    8.30 +        return -ENOMEM;
    8.31 +
    8.32 +    d->arch.hvm_domain.vioapic->domain = d;
    8.33 +    vioapic_reset(d);
    8.34  
    8.35      return 0;
    8.36  }
     9.1 --- a/xen/arch/x86/hvm/vlapic.c	Tue May 20 09:55:50 2008 +0100
     9.2 +++ b/xen/arch/x86/hvm/vlapic.c	Tue May 20 14:17:15 2008 +0100
     9.3 @@ -298,10 +298,6 @@ static int vlapic_accept_init(struct vcp
     9.4  
     9.5  static int vlapic_accept_sipi(struct vcpu *v, int trampoline_vector)
     9.6  {
     9.7 -    struct domain *d = current->domain;
     9.8 -    struct vcpu_guest_context *ctxt;
     9.9 -    struct segment_register reg;
    9.10 -
    9.11      /* If the VCPU is not on its way down we have nothing to do. */
    9.12      if ( !test_bit(_VPF_down, &v->pause_flags) )
    9.13          return X86EMUL_OKAY;
    9.14 @@ -309,68 +305,10 @@ static int vlapic_accept_sipi(struct vcp
    9.15      if ( !vlapic_vcpu_pause_async(v) )
    9.16          return X86EMUL_RETRY;
    9.17  
    9.18 -    domain_lock(d);
    9.19 -
    9.20 -    if ( v->is_initialised )
    9.21 -        goto out;
    9.22 -
    9.23 -    ctxt = &v->arch.guest_context;
    9.24 -    memset(ctxt, 0, sizeof(*ctxt));
    9.25 -    ctxt->flags = VGCF_online;
    9.26 -    ctxt->user_regs.eflags = 2;
    9.27 -
    9.28 -    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
    9.29 -    hvm_update_guest_cr(v, 0);
    9.30 -
    9.31 -    v->arch.hvm_vcpu.guest_cr[2] = 0;
    9.32 -    hvm_update_guest_cr(v, 2);
    9.33 -
    9.34 -    v->arch.hvm_vcpu.guest_cr[3] = 0;
    9.35 -    hvm_update_guest_cr(v, 3);
    9.36 -
    9.37 -    v->arch.hvm_vcpu.guest_cr[4] = 0;
    9.38 -    hvm_update_guest_cr(v, 4);
    9.39 -
    9.40 -    v->arch.hvm_vcpu.guest_efer = 0;
    9.41 -    hvm_update_guest_efer(v);
    9.42 -
    9.43 -    reg.sel = trampoline_vector << 8;
    9.44 -    reg.base = (uint32_t)reg.sel << 4;
    9.45 -    reg.limit = 0xffff;
    9.46 -    reg.attr.bytes = 0x89b;
    9.47 -    hvm_set_segment_register(v, x86_seg_cs, &reg);
    9.48 +    hvm_vcpu_reset_state(v, trampoline_vector << 8, 0);
    9.49  
    9.50 -    reg.sel = reg.base = 0;
    9.51 -    reg.limit = 0xffff;
    9.52 -    reg.attr.bytes = 0x893;
    9.53 -    hvm_set_segment_register(v, x86_seg_ds, &reg);
    9.54 -    hvm_set_segment_register(v, x86_seg_es, &reg);
    9.55 -    hvm_set_segment_register(v, x86_seg_fs, &reg);
    9.56 -    hvm_set_segment_register(v, x86_seg_gs, &reg);
    9.57 -    hvm_set_segment_register(v, x86_seg_ss, &reg);
    9.58 -
    9.59 -    reg.attr.bytes = 0x82; /* LDT */
    9.60 -    hvm_set_segment_register(v, x86_seg_ldtr, &reg);
    9.61 -
    9.62 -    reg.attr.bytes = 0x8b; /* 32-bit TSS (busy) */
    9.63 -    hvm_set_segment_register(v, x86_seg_tr, &reg);
    9.64 +    vcpu_unpause(v);
    9.65  
    9.66 -    reg.attr.bytes = 0;
    9.67 -    hvm_set_segment_register(v, x86_seg_gdtr, &reg);
    9.68 -    hvm_set_segment_register(v, x86_seg_idtr, &reg);
    9.69 -
    9.70 -    /* Sync AP's TSC with BSP's. */
    9.71 -    v->arch.hvm_vcpu.cache_tsc_offset =
    9.72 -        v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
    9.73 -    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
    9.74 -
    9.75 -    v->arch.flags |= TF_kernel_mode;
    9.76 -    v->is_initialised = 1;
    9.77 -    clear_bit(_VPF_down, &v->pause_flags);
    9.78 -
    9.79 - out:
    9.80 -    domain_unlock(d);
    9.81 -    vcpu_unpause(v);
    9.82      return X86EMUL_OKAY;
    9.83  }
    9.84  
    9.85 @@ -1028,23 +966,26 @@ int vlapic_init(struct vcpu *v)
    9.86      if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
    9.87          memflags |= MEMF_bits(32);
    9.88  #endif
    9.89 -
    9.90 -    vlapic->regs_page = alloc_domheap_page(NULL, memflags);
    9.91 -    if ( vlapic->regs_page == NULL )
    9.92 +    if (vlapic->regs_page == NULL)
    9.93      {
    9.94 -        dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n",
    9.95 -                v->domain->domain_id, v->vcpu_id);
    9.96 -        return -ENOMEM;
    9.97 +        vlapic->regs_page = alloc_domheap_page(NULL, memflags);
    9.98 +        if ( vlapic->regs_page == NULL )
    9.99 +        {
   9.100 +            dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n",
   9.101 +                    v->domain->domain_id, v->vcpu_id);
   9.102 +            return -ENOMEM;
   9.103 +        }
   9.104      }
   9.105 -
   9.106 -    vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
   9.107 -    if ( vlapic->regs == NULL )
   9.108 +    if (vlapic->regs == NULL) 
   9.109      {
   9.110 -        dprintk(XENLOG_ERR, "map vlapic regs error: %d/%d\n",
   9.111 -                v->domain->domain_id, v->vcpu_id);
   9.112 -        return -ENOMEM;
   9.113 +        vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
   9.114 +        if ( vlapic->regs == NULL )
   9.115 +        {
   9.116 +            dprintk(XENLOG_ERR, "map vlapic regs error: %d/%d\n",
   9.117 +                    v->domain->domain_id, v->vcpu_id);
   9.118 +            return -ENOMEM;
   9.119 +        }
   9.120      }
   9.121 -
   9.122      clear_page(vlapic->regs);
   9.123  
   9.124      vlapic_reset(vlapic);
    10.1 --- a/xen/arch/x86/hvm/vpic.c	Tue May 20 09:55:50 2008 +0100
    10.2 +++ b/xen/arch/x86/hvm/vpic.c	Tue May 20 14:17:15 2008 +0100
    10.3 @@ -395,7 +395,7 @@ static int vpic_load(struct domain *d, h
    10.4  
    10.5  HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load, 2, HVMSR_PER_DOM);
    10.6  
    10.7 -void vpic_init(struct domain *d)
    10.8 +void vpic_reset(struct domain *d)
    10.9  {
   10.10      struct hvm_hw_vpic *vpic;
   10.11  
   10.12 @@ -404,13 +404,20 @@ void vpic_init(struct domain *d)
   10.13      memset(vpic, 0, sizeof(*vpic));
   10.14      vpic->is_master = 1;
   10.15      vpic->elcr      = 1 << 2;
   10.16 -    register_portio_handler(d, 0x20, 2, vpic_intercept_pic_io);
   10.17 -    register_portio_handler(d, 0x4d0, 1, vpic_intercept_elcr_io);
   10.18  
   10.19      /* Slave PIC. */
   10.20      vpic++;
   10.21      memset(vpic, 0, sizeof(*vpic));
   10.22 +}
   10.23 +
   10.24 +void vpic_init(struct domain *d)
   10.25 +{
   10.26 +    vpic_reset(d);
   10.27 +
   10.28 +    register_portio_handler(d, 0x20, 2, vpic_intercept_pic_io);
   10.29      register_portio_handler(d, 0xa0, 2, vpic_intercept_pic_io);
   10.30 +
   10.31 +    register_portio_handler(d, 0x4d0, 1, vpic_intercept_elcr_io);
   10.32      register_portio_handler(d, 0x4d1, 1, vpic_intercept_elcr_io);
   10.33  }
   10.34  
    11.1 --- a/xen/common/domain.c	Tue May 20 09:55:50 2008 +0100
    11.2 +++ b/xen/common/domain.c	Tue May 20 14:17:15 2008 +0100
    11.3 @@ -633,17 +633,14 @@ int boot_vcpu(struct domain *d, int vcpu
    11.4      return arch_set_info_guest(v, ctxt);
    11.5  }
    11.6  
    11.7 -int vcpu_reset(struct vcpu *v)
    11.8 +void vcpu_reset(struct vcpu *v)
    11.9  {
   11.10      struct domain *d = v->domain;
   11.11 -    int rc;
   11.12  
   11.13      domain_pause(d);
   11.14      domain_lock(d);
   11.15  
   11.16 -    rc = arch_vcpu_reset(v);
   11.17 -    if ( rc != 0 )
   11.18 -        goto out;
   11.19 +    arch_vcpu_reset(v);
   11.20  
   11.21      set_bit(_VPF_down, &v->pause_flags);
   11.22  
   11.23 @@ -655,11 +652,8 @@ int vcpu_reset(struct vcpu *v)
   11.24      v->nmi_masked      = 0;
   11.25      clear_bit(_VPF_blocked, &v->pause_flags);
   11.26  
   11.27 - out:
   11.28      domain_unlock(v->domain);
   11.29      domain_unpause(d);
   11.30 -
   11.31 -    return rc;
   11.32  }
   11.33  
   11.34  
    12.1 --- a/xen/common/domctl.c	Tue May 20 09:55:50 2008 +0100
    12.2 +++ b/xen/common/domctl.c	Tue May 20 14:17:15 2008 +0100
    12.3 @@ -218,7 +218,8 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
    12.4  
    12.5          if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
    12.6          {
    12.7 -            ret = vcpu_reset(v);
    12.8 +            vcpu_reset(v);
    12.9 +            ret = 0;
   12.10              goto svc_out;
   12.11          }
   12.12  
    13.1 --- a/xen/include/asm-x86/hvm/domain.h	Tue May 20 09:55:50 2008 +0100
    13.2 +++ b/xen/include/asm-x86/hvm/domain.h	Tue May 20 14:17:15 2008 +0100
    13.3 @@ -76,6 +76,7 @@ struct hvm_domain {
    13.4  
    13.5      bool_t                 hap_enabled;
    13.6      bool_t                 qemu_mapcache_invalidate;
    13.7 +    bool_t                 is_s3_suspended;
    13.8  
    13.9      union {
   13.10          struct vmx_domain vmx;
    14.1 --- a/xen/include/asm-x86/hvm/hvm.h	Tue May 20 09:55:50 2008 +0100
    14.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Tue May 20 14:17:15 2008 +0100
    14.3 @@ -141,6 +141,7 @@ void hvm_vcpu_destroy(struct vcpu *v);
    14.4  void hvm_vcpu_down(struct vcpu *v);
    14.5  int hvm_vcpu_cacheattr_init(struct vcpu *v);
    14.6  void hvm_vcpu_cacheattr_destroy(struct vcpu *v);
    14.7 +void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);
    14.8  
    14.9  void hvm_send_assist_req(struct vcpu *v);
   14.10  
    15.1 --- a/xen/include/asm-x86/hvm/vioapic.h	Tue May 20 09:55:50 2008 +0100
    15.2 +++ b/xen/include/asm-x86/hvm/vioapic.h	Tue May 20 14:17:15 2008 +0100
    15.3 @@ -63,6 +63,7 @@ struct hvm_vioapic {
    15.4  
    15.5  int vioapic_init(struct domain *d);
    15.6  void vioapic_deinit(struct domain *d);
    15.7 +void vioapic_reset(struct domain *d);
    15.8  void vioapic_irq_positive_edge(struct domain *d, unsigned int irq);
    15.9  void vioapic_update_EOI(struct domain *d, int vector);
   15.10  
    16.1 --- a/xen/include/asm-x86/hvm/vpic.h	Tue May 20 09:55:50 2008 +0100
    16.2 +++ b/xen/include/asm-x86/hvm/vpic.h	Tue May 20 14:17:15 2008 +0100
    16.3 @@ -32,6 +32,7 @@
    16.4  void vpic_irq_positive_edge(struct domain *d, int irq);
    16.5  void vpic_irq_negative_edge(struct domain *d, int irq);
    16.6  void vpic_init(struct domain *d);
    16.7 +void vpic_reset(struct domain *d);
    16.8  int vpic_ack_pending_irq(struct vcpu *v);
    16.9  int is_periodic_irq(struct vcpu *v, int irq, int type);
   16.10  
    17.1 --- a/xen/include/asm-x86/hvm/vpt.h	Tue May 20 09:55:50 2008 +0100
    17.2 +++ b/xen/include/asm-x86/hvm/vpt.h	Tue May 20 14:17:15 2008 +0100
    17.3 @@ -166,17 +166,23 @@ void create_periodic_time(
    17.4  void destroy_periodic_time(struct periodic_time *pt);
    17.5  
    17.6  int pv_pit_handler(int port, int data, int write);
    17.7 +void pit_reset(struct domain *d);
    17.8 +
    17.9  void pit_init(struct vcpu *v, unsigned long cpu_khz);
   17.10  void pit_stop_channel0_irq(PITState * pit);
   17.11  void pit_deinit(struct domain *d);
   17.12  void rtc_init(struct vcpu *v, int base);
   17.13  void rtc_migrate_timers(struct vcpu *v);
   17.14  void rtc_deinit(struct domain *d);
   17.15 +void rtc_reset(struct domain *d);
   17.16 +
   17.17  void pmtimer_init(struct vcpu *v);
   17.18  void pmtimer_deinit(struct domain *d);
   17.19 +void pmtimer_reset(struct domain *d);
   17.20  
   17.21  void hpet_migrate_timers(struct vcpu *v);
   17.22  void hpet_init(struct vcpu *v);
   17.23  void hpet_deinit(struct domain *d);
   17.24 +void hpet_reset(struct domain *d);
   17.25  
   17.26  #endif /* __ASM_X86_HVM_VPT_H__ */
    18.1 --- a/xen/include/public/hvm/params.h	Tue May 20 09:55:50 2008 +0100
    18.2 +++ b/xen/include/public/hvm/params.h	Tue May 20 14:17:15 2008 +0100
    18.3 @@ -90,6 +90,9 @@
    18.4  /* Device Model domain, defaults to 0. */
    18.5  #define HVM_PARAM_DM_DOMAIN    13
    18.6  
    18.7 -#define HVM_NR_PARAMS          14
    18.8 +/* ACPI S state: currently support S0 and S3 on x86. */
    18.9 +#define HVM_PARAM_ACPI_S_STATE 14
   18.10 +
   18.11 +#define HVM_NR_PARAMS          15
   18.12  
   18.13  #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
    19.1 --- a/xen/include/xen/domain.h	Tue May 20 09:55:50 2008 +0100
    19.2 +++ b/xen/include/xen/domain.h	Tue May 20 14:17:15 2008 +0100
    19.3 @@ -14,7 +14,7 @@ struct vcpu *alloc_vcpu(
    19.4  int boot_vcpu(
    19.5      struct domain *d, int vcpuid, vcpu_guest_context_u ctxt);
    19.6  struct vcpu *alloc_idle_vcpu(unsigned int cpu_id);
    19.7 -int vcpu_reset(struct vcpu *v);
    19.8 +void vcpu_reset(struct vcpu *v);
    19.9  
   19.10  struct domain *alloc_domain(domid_t domid);
   19.11  void free_domain(struct domain *d);
   19.12 @@ -55,7 +55,7 @@ void arch_dump_vcpu_info(struct vcpu *v)
   19.13  
   19.14  void arch_dump_domain_info(struct domain *d);
   19.15  
   19.16 -int arch_vcpu_reset(struct vcpu *v);
   19.17 +void arch_vcpu_reset(struct vcpu *v);
   19.18  
   19.19  extern unsigned int xen_processor_pmbits;
   19.20
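
For reference, a minimal toolstack-side sketch of the new interface. This is illustrative only and not part of the changeset: it assumes the libxc wrappers xc_set_hvm_param()/xc_get_hvm_param() (over HVMOP_set_param/HVMOP_get_param) and the classic integer handle from xc_interface_open(); the do_s3_cycle() helper is hypothetical. Per the hvm.c hunk above, only a domain privileged over the target may set HVM_PARAM_ACPI_S_STATE: writing 3 pauses the guest, resets the virtual platform devices (PIC, IOAPIC, LAPICs, PIT, RTC, PM timer, HPET) and parks VCPU0 at the real-mode reset vector 0xF000:0xFFF0; writing 0 unpauses it; any other value returns -EINVAL. Reading the parameter back yields 3 while suspended and 0 otherwise.

    /*
     * Sketch: drive virtual S3 for an HVM guest from a privileged toolstack.
     * Assumes libxc's xc_set_hvm_param()/xc_get_hvm_param() and the integer
     * xc_interface_open() handle of this era; do_s3_cycle() is hypothetical.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>
    #include <xen/hvm/params.h>

    static int do_s3_cycle(int xc_handle, domid_t domid)
    {
        unsigned long s_state;

        /* Value 3: pause the domain, reset the virtual platform devices and
         * park VCPU0 at the real-mode reset vector 0xF000:0xFFF0. */
        if ( xc_set_hvm_param(xc_handle, domid, HVM_PARAM_ACPI_S_STATE, 3) )
        {
            perror("enter S3");
            return -1;
        }

        /* Reading the parameter back reports 3 while suspended, 0 otherwise. */
        if ( xc_get_hvm_param(xc_handle, domid, HVM_PARAM_ACPI_S_STATE, &s_state) == 0 )
            printf("dom%u ACPI S-state: S%lu\n", (unsigned int)domid, s_state);

        /* Value 0: resume (unpause) the domain.  Anything else is -EINVAL. */
        if ( xc_set_hvm_param(xc_handle, domid, HVM_PARAM_ACPI_S_STATE, 0) )
        {
            perror("leave S3");
            return -1;
        }

        return 0;
    }

    int main(int argc, char *argv[])
    {
        int xc_handle, rc;

        if ( argc != 2 )
        {
            fprintf(stderr, "usage: %s <domid>\n", argv[0]);
            return 1;
        }

        xc_handle = xc_interface_open();
        if ( xc_handle < 0 )
        {
            perror("xc_interface_open");
            return 1;
        }

        rc = do_s3_cycle(xc_handle, (domid_t)atoi(argv[1]));
        xc_interface_close(xc_handle);
        return rc ? 1 : 0;
    }

Under these assumptions the program would be linked against libxenctrl (-lxenctrl). The hypervisor side shown in this changeset only models the platform reset half of S3; everything else about the guest's sleep entry and wake-up remains the device model's and firmware's business.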