ia64/xen-unstable
changeset 13543:2457741f4ec3
[HVM] Save/restore cleanups 03: IRQ
IRQ, PIC, IOAPIC and LAPIC
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
IRQ, PIC, IOAPIC and LAPIC
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author | Tim Deegan <Tim.Deegan@xensource.com> |
---|---|
date | Sat Jan 20 11:17:40 2007 +0000 (2007-01-20) |
parents | dccdc3ee0efc |
children | 1fd5f1754cea |
files | xen/arch/x86/hvm/hpet.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/irq.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vioapic.c xen/arch/x86/hvm/vlapic.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/hvm/vpic.c xen/include/asm-x86/hvm/domain.h xen/include/asm-x86/hvm/irq.h xen/include/asm-x86/hvm/vioapic.h xen/include/asm-x86/hvm/vlapic.h xen/include/asm-x86/hvm/vpic.h xen/include/public/hvm/save.h |
line diff
1.1 --- a/xen/arch/x86/hvm/hpet.c Sat Jan 20 11:17:39 2007 +0000 1.2 +++ b/xen/arch/x86/hvm/hpet.c Sat Jan 20 11:17:40 2007 +0000 1.3 @@ -314,7 +314,6 @@ static void hpet_route_interrupt(HPETSta 1.4 { 1.5 unsigned int tn_int_route = timer_int_route(h, tn); 1.6 struct domain *d = h->vcpu->domain; 1.7 - struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 1.8 1.9 if ( (tn <= 1) && (h->hpet.config & HPET_CFG_LEGACY) ) 1.10 { 1.11 @@ -336,9 +335,9 @@ static void hpet_route_interrupt(HPETSta 1.12 } 1.13 1.14 /* We only support edge-triggered interrupt now */ 1.15 - spin_lock(&hvm_irq->lock); 1.16 + spin_lock(&d->arch.hvm_domain.irq_lock); 1.17 vioapic_irq_positive_edge(d, tn_int_route); 1.18 - spin_unlock(&hvm_irq->lock); 1.19 + spin_unlock(&d->arch.hvm_domain.irq_lock); 1.20 } 1.21 1.22 static void hpet_timer_fn(void *opaque)
2.1 --- a/xen/arch/x86/hvm/hvm.c Sat Jan 20 11:17:39 2007 +0000 2.2 +++ b/xen/arch/x86/hvm/hvm.c Sat Jan 20 11:17:40 2007 +0000 2.3 @@ -135,7 +135,7 @@ int hvm_domain_initialise(struct domain 2.4 2.5 spin_lock_init(&d->arch.hvm_domain.pbuf_lock); 2.6 spin_lock_init(&d->arch.hvm_domain.buffered_io_lock); 2.7 - spin_lock_init(&d->arch.hvm_domain.irq.lock); 2.8 + spin_lock_init(&d->arch.hvm_domain.irq_lock); 2.9 2.10 rc = shadow_enable(d, SHM2_refcounts|SHM2_translate|SHM2_external); 2.11 if ( rc != 0 )
3.1 --- a/xen/arch/x86/hvm/irq.c Sat Jan 20 11:17:39 2007 +0000 3.2 +++ b/xen/arch/x86/hvm/irq.c Sat Jan 20 11:17:40 2007 +0000 3.3 @@ -28,7 +28,7 @@ 3.4 static void __hvm_pci_intx_assert( 3.5 struct domain *d, unsigned int device, unsigned int intx) 3.6 { 3.7 - struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.8 + struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.9 unsigned int gsi, link, isa_irq; 3.10 3.11 ASSERT((device <= 31) && (intx <= 3)); 3.12 @@ -53,17 +53,15 @@ static void __hvm_pci_intx_assert( 3.13 void hvm_pci_intx_assert( 3.14 struct domain *d, unsigned int device, unsigned int intx) 3.15 { 3.16 - struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.17 - 3.18 - spin_lock(&hvm_irq->lock); 3.19 + spin_lock(&d->arch.hvm_domain.irq_lock); 3.20 __hvm_pci_intx_assert(d, device, intx); 3.21 - spin_unlock(&hvm_irq->lock); 3.22 + spin_unlock(&d->arch.hvm_domain.irq_lock); 3.23 } 3.24 3.25 static void __hvm_pci_intx_deassert( 3.26 struct domain *d, unsigned int device, unsigned int intx) 3.27 { 3.28 - struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.29 + struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.30 unsigned int gsi, link, isa_irq; 3.31 3.32 ASSERT((device <= 31) && (intx <= 3)); 3.33 @@ -84,22 +82,20 @@ static void __hvm_pci_intx_deassert( 3.34 void hvm_pci_intx_deassert( 3.35 struct domain *d, unsigned int device, unsigned int intx) 3.36 { 3.37 - struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.38 - 3.39 - spin_lock(&hvm_irq->lock); 3.40 + spin_lock(&d->arch.hvm_domain.irq_lock); 3.41 __hvm_pci_intx_deassert(d, device, intx); 3.42 - spin_unlock(&hvm_irq->lock); 3.43 + spin_unlock(&d->arch.hvm_domain.irq_lock); 3.44 } 3.45 3.46 void hvm_isa_irq_assert( 3.47 struct domain *d, unsigned int isa_irq) 3.48 { 3.49 - struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.50 + struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.51 unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); 3.52 3.53 ASSERT(isa_irq <= 15); 3.54 3.55 - 
spin_lock(&hvm_irq->lock); 3.56 + spin_lock(&d->arch.hvm_domain.irq_lock); 3.57 3.58 if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq) && 3.59 (hvm_irq->gsi_assert_count[gsi]++ == 0) ) 3.60 @@ -108,31 +104,31 @@ void hvm_isa_irq_assert( 3.61 vpic_irq_positive_edge(d, isa_irq); 3.62 } 3.63 3.64 - spin_unlock(&hvm_irq->lock); 3.65 + spin_unlock(&d->arch.hvm_domain.irq_lock); 3.66 } 3.67 3.68 void hvm_isa_irq_deassert( 3.69 struct domain *d, unsigned int isa_irq) 3.70 { 3.71 - struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.72 + struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.73 unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); 3.74 3.75 ASSERT(isa_irq <= 15); 3.76 3.77 - spin_lock(&hvm_irq->lock); 3.78 + spin_lock(&d->arch.hvm_domain.irq_lock); 3.79 3.80 if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq) && 3.81 (--hvm_irq->gsi_assert_count[gsi] == 0) ) 3.82 vpic_irq_negative_edge(d, isa_irq); 3.83 3.84 - spin_unlock(&hvm_irq->lock); 3.85 + spin_unlock(&d->arch.hvm_domain.irq_lock); 3.86 } 3.87 3.88 void hvm_set_callback_irq_level(void) 3.89 { 3.90 struct vcpu *v = current; 3.91 struct domain *d = v->domain; 3.92 - struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.93 + struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.94 unsigned int gsi, pdev, pintx, asserted; 3.95 3.96 /* Fast lock-free tests. */ 3.97 @@ -140,7 +136,7 @@ void hvm_set_callback_irq_level(void) 3.98 (hvm_irq->callback_via_type == HVMIRQ_callback_none) ) 3.99 return; 3.100 3.101 - spin_lock(&hvm_irq->lock); 3.102 + spin_lock(&d->arch.hvm_domain.irq_lock); 3.103 3.104 /* NB. Do not check the evtchn_upcall_mask. It is not used in HVM mode. 
*/ 3.105 asserted = !!vcpu_info(v, evtchn_upcall_pending); 3.106 @@ -177,17 +173,17 @@ void hvm_set_callback_irq_level(void) 3.107 } 3.108 3.109 out: 3.110 - spin_unlock(&hvm_irq->lock); 3.111 + spin_unlock(&d->arch.hvm_domain.irq_lock); 3.112 } 3.113 3.114 void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq) 3.115 { 3.116 - struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.117 + struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.118 u8 old_isa_irq; 3.119 3.120 ASSERT((link <= 3) && (isa_irq <= 15)); 3.121 3.122 - spin_lock(&hvm_irq->lock); 3.123 + spin_lock(&d->arch.hvm_domain.irq_lock); 3.124 3.125 old_isa_irq = hvm_irq->pci_link_route[link]; 3.126 if ( old_isa_irq == isa_irq ) 3.127 @@ -207,7 +203,7 @@ void hvm_set_pci_link_route(struct domai 3.128 } 3.129 3.130 out: 3.131 - spin_unlock(&hvm_irq->lock); 3.132 + spin_unlock(&d->arch.hvm_domain.irq_lock); 3.133 3.134 dprintk(XENLOG_G_INFO, "Dom%u PCI link %u changed %u -> %u\n", 3.135 d->domain_id, link, old_isa_irq, isa_irq); 3.136 @@ -215,7 +211,7 @@ void hvm_set_pci_link_route(struct domai 3.137 3.138 void hvm_set_callback_via(struct domain *d, uint64_t via) 3.139 { 3.140 - struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.141 + struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.142 unsigned int gsi=0, pdev=0, pintx=0; 3.143 uint8_t via_type; 3.144 3.145 @@ -224,7 +220,7 @@ void hvm_set_callback_via(struct domain 3.146 (via_type > HVMIRQ_callback_pci_intx) ) 3.147 via_type = HVMIRQ_callback_none; 3.148 3.149 - spin_lock(&hvm_irq->lock); 3.150 + spin_lock(&d->arch.hvm_domain.irq_lock); 3.151 3.152 /* Tear down old callback via. 
*/ 3.153 if ( hvm_irq->callback_via_asserted ) 3.154 @@ -271,7 +267,7 @@ void hvm_set_callback_via(struct domain 3.155 break; 3.156 } 3.157 3.158 - spin_unlock(&hvm_irq->lock); 3.159 + spin_unlock(&d->arch.hvm_domain.irq_lock); 3.160 3.161 dprintk(XENLOG_G_INFO, "Dom%u callback via changed to ", d->domain_id); 3.162 switch ( via_type ) 3.163 @@ -300,7 +296,7 @@ int cpu_has_pending_irq(struct vcpu *v) 3.164 if ( !vlapic_accept_pic_intr(v) ) 3.165 return 0; 3.166 3.167 - return plat->irq.vpic[0].int_output; 3.168 + return plat->vpic[0].int_output; 3.169 } 3.170 3.171 int cpu_get_interrupt(struct vcpu *v, int *type) 3.172 @@ -322,7 +318,7 @@ int get_isa_irq_vector(struct vcpu *v, i 3.173 unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq); 3.174 3.175 if ( type == APIC_DM_EXTINT ) 3.176 - return (v->domain->arch.hvm_domain.irq.vpic[isa_irq >> 3].irq_base 3.177 + return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base 3.178 + (isa_irq & 7)); 3.179 3.180 return domain_vioapic(v->domain)->redirtbl[gsi].fields.vector; 3.181 @@ -335,7 +331,7 @@ int is_isa_irq_masked(struct vcpu *v, in 3.182 if ( is_lvtt(v, isa_irq) ) 3.183 return !is_lvtt_enabled(v); 3.184 3.185 - return ((v->domain->arch.hvm_domain.irq.vpic[isa_irq >> 3].imr & 3.186 + return ((v->domain->arch.hvm_domain.vpic[isa_irq >> 3].imr & 3.187 (1 << (isa_irq & 7))) && 3.188 domain_vioapic(v->domain)->redirtbl[gsi].fields.mask); 3.189 }
4.1 --- a/xen/arch/x86/hvm/svm/svm.c Sat Jan 20 11:17:39 2007 +0000 4.2 +++ b/xen/arch/x86/hvm/svm/svm.c Sat Jan 20 11:17:40 2007 +0000 4.3 @@ -1960,7 +1960,7 @@ static inline void svm_do_msr_access( 4.4 msr_content = vmcb->sysenter_eip; 4.5 break; 4.6 case MSR_IA32_APICBASE: 4.7 - msr_content = vcpu_vlapic(v)->apic_base_msr; 4.8 + msr_content = vcpu_vlapic(v)->hw.apic_base_msr; 4.9 break; 4.10 default: 4.11 if (long_mode_do_msr_read(regs))
5.1 --- a/xen/arch/x86/hvm/vioapic.c Sat Jan 20 11:17:39 2007 +0000 5.2 +++ b/xen/arch/x86/hvm/vioapic.c Sat Jan 20 11:17:40 2007 +0000 5.3 @@ -47,9 +47,9 @@ 5.4 #define opt_hvm_debug_level opt_vmx_debug_level 5.5 #endif 5.6 5.7 -static void vioapic_deliver(struct vioapic *vioapic, int irq); 5.8 +static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq); 5.9 5.10 -static unsigned long vioapic_read_indirect(struct vioapic *vioapic, 5.11 +static unsigned long vioapic_read_indirect(struct hvm_hw_vioapic *vioapic, 5.12 unsigned long addr, 5.13 unsigned long length) 5.14 { 5.15 @@ -96,7 +96,7 @@ static unsigned long vioapic_read(struct 5.16 unsigned long addr, 5.17 unsigned long length) 5.18 { 5.19 - struct vioapic *vioapic = domain_vioapic(v->domain); 5.20 + struct hvm_hw_vioapic *vioapic = domain_vioapic(v->domain); 5.21 uint32_t result; 5.22 5.23 HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "vioapic_read addr %lx\n", addr); 5.24 @@ -122,13 +122,13 @@ static unsigned long vioapic_read(struct 5.25 } 5.26 5.27 static void vioapic_write_redirent( 5.28 - struct vioapic *vioapic, unsigned int idx, int top_word, uint32_t val) 5.29 + struct hvm_hw_vioapic *vioapic, unsigned int idx, int top_word, uint32_t val) 5.30 { 5.31 struct domain *d = vioapic_domain(vioapic); 5.32 - struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 5.33 + struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq; 5.34 union vioapic_redir_entry *pent, ent; 5.35 5.36 - spin_lock(&hvm_irq->lock); 5.37 + spin_lock(&d->arch.hvm_domain.irq_lock); 5.38 5.39 pent = &vioapic->redirtbl[idx]; 5.40 ent = *pent; 5.41 @@ -157,11 +157,11 @@ static void vioapic_write_redirent( 5.42 vioapic_deliver(vioapic, idx); 5.43 } 5.44 5.45 - spin_unlock(&hvm_irq->lock); 5.46 + spin_unlock(&d->arch.hvm_domain.irq_lock); 5.47 } 5.48 5.49 static void vioapic_write_indirect( 5.50 - struct vioapic *vioapic, unsigned long addr, 5.51 + struct hvm_hw_vioapic *vioapic, unsigned long addr, 5.52 unsigned long length, unsigned long val) 5.53 { 5.54 
switch ( vioapic->ioregsel ) 5.55 @@ -206,7 +206,7 @@ static void vioapic_write(struct vcpu *v 5.56 unsigned long length, 5.57 unsigned long val) 5.58 { 5.59 - struct vioapic *vioapic = domain_vioapic(v->domain); 5.60 + struct hvm_hw_vioapic *vioapic = domain_vioapic(v->domain); 5.61 5.62 addr &= 0xff; 5.63 5.64 @@ -233,7 +233,7 @@ static void vioapic_write(struct vcpu *v 5.65 5.66 static int vioapic_range(struct vcpu *v, unsigned long addr) 5.67 { 5.68 - struct vioapic *vioapic = domain_vioapic(v->domain); 5.69 + struct hvm_hw_vioapic *vioapic = domain_vioapic(v->domain); 5.70 5.71 return ((addr >= vioapic->base_address && 5.72 (addr < vioapic->base_address + VIOAPIC_MEM_LENGTH))); 5.73 @@ -246,7 +246,7 @@ struct hvm_mmio_handler vioapic_mmio_han 5.74 }; 5.75 5.76 static void ioapic_inj_irq( 5.77 - struct vioapic *vioapic, 5.78 + struct hvm_hw_vioapic *vioapic, 5.79 struct vlapic *target, 5.80 uint8_t vector, 5.81 uint8_t trig_mode, 5.82 @@ -270,7 +270,7 @@ static void ioapic_inj_irq( 5.83 } 5.84 5.85 static uint32_t ioapic_get_delivery_bitmask( 5.86 - struct vioapic *vioapic, uint16_t dest, uint8_t dest_mode) 5.87 + struct hvm_hw_vioapic *vioapic, uint16_t dest, uint8_t dest_mode) 5.88 { 5.89 uint32_t mask = 0; 5.90 struct vcpu *v; 5.91 @@ -316,7 +316,7 @@ static inline int pit_channel0_enabled(v 5.92 return pt->enabled; 5.93 } 5.94 5.95 -static void vioapic_deliver(struct vioapic *vioapic, int irq) 5.96 +static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq) 5.97 { 5.98 uint16_t dest = vioapic->redirtbl[irq].fields.dest_id; 5.99 uint8_t dest_mode = vioapic->redirtbl[irq].fields.dest_mode; 5.100 @@ -327,7 +327,7 @@ static void vioapic_deliver(struct vioap 5.101 struct vlapic *target; 5.102 struct vcpu *v; 5.103 5.104 - ASSERT(spin_is_locked(&vioapic_domain(vioapic)->arch.hvm_domain.irq.lock)); 5.105 + ASSERT(spin_is_locked(&vioapic_domain(vioapic)->arch.hvm_domain.irq_lock)); 5.106 5.107 HVM_DBG_LOG(DBG_LEVEL_IOAPIC, 5.108 "dest=%x dest_mode=%x 
delivery_mode=%x " 5.109 @@ -409,13 +409,13 @@ static void vioapic_deliver(struct vioap 5.110 5.111 void vioapic_irq_positive_edge(struct domain *d, unsigned int irq) 5.112 { 5.113 - struct vioapic *vioapic = domain_vioapic(d); 5.114 + struct hvm_hw_vioapic *vioapic = domain_vioapic(d); 5.115 union vioapic_redir_entry *ent; 5.116 5.117 HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_irq_positive_edge irq %x", irq); 5.118 5.119 ASSERT(irq < VIOAPIC_NUM_PINS); 5.120 - ASSERT(spin_is_locked(&d->arch.hvm_domain.irq.lock)); 5.121 + ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock)); 5.122 5.123 ent = &vioapic->redirtbl[irq]; 5.124 if ( ent->fields.mask ) 5.125 @@ -432,7 +432,7 @@ void vioapic_irq_positive_edge(struct do 5.126 } 5.127 } 5.128 5.129 -static int get_eoi_gsi(struct vioapic *vioapic, int vector) 5.130 +static int get_eoi_gsi(struct hvm_hw_vioapic *vioapic, int vector) 5.131 { 5.132 int i; 5.133 5.134 @@ -445,12 +445,12 @@ static int get_eoi_gsi(struct vioapic *v 5.135 5.136 void vioapic_update_EOI(struct domain *d, int vector) 5.137 { 5.138 - struct vioapic *vioapic = domain_vioapic(d); 5.139 - struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 5.140 + struct hvm_hw_vioapic *vioapic = domain_vioapic(d); 5.141 + struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq; 5.142 union vioapic_redir_entry *ent; 5.143 int gsi; 5.144 5.145 - spin_lock(&hvm_irq->lock); 5.146 + spin_lock(&d->arch.hvm_domain.irq_lock); 5.147 5.148 if ( (gsi = get_eoi_gsi(vioapic, vector)) == -1 ) 5.149 { 5.150 @@ -470,11 +470,11 @@ void vioapic_update_EOI(struct domain *d 5.151 } 5.152 5.153 out: 5.154 - spin_unlock(&hvm_irq->lock); 5.155 + spin_unlock(&d->arch.hvm_domain.irq_lock); 5.156 } 5.157 5.158 #ifdef HVM_DEBUG_SUSPEND 5.159 -static void ioapic_info(struct vioapic *s) 5.160 +static void ioapic_info(struct hvm_hw_vioapic *s) 5.161 { 5.162 int i; 5.163 printk("*****ioapic state:*****\n"); 5.164 @@ -486,7 +486,7 @@ static void ioapic_info(struct vioapic * 5.165 } 5.166 5.167 } 5.168 -static 
void hvmirq_info(struct hvm_irq *hvm_irq) 5.169 +static void hvmirq_info(struct hvm_hw_irq *hvm_irq) 5.170 { 5.171 int i; 5.172 printk("*****hvmirq state:*****\n"); 5.173 @@ -515,87 +515,44 @@ static void hvmirq_info(struct hvm_irq * 5.174 printk("hvmirq round_robin_prev_vcpu:0x%"PRIx8".\n", hvm_irq->round_robin_prev_vcpu); 5.175 } 5.176 #else 5.177 -static void ioapic_info(struct vioapic *s) 5.178 +static void ioapic_info(struct hvm_hw_vioapic *s) 5.179 { 5.180 } 5.181 -static void hvmirq_info(struct hvm_irq *hvm_irq) 5.182 +static void hvmirq_info(struct hvm_hw_irq *hvm_irq) 5.183 { 5.184 } 5.185 #endif 5.186 5.187 static void ioapic_save(hvm_domain_context_t *h, void *opaque) 5.188 { 5.189 - int i; 5.190 struct domain *d = opaque; 5.191 - struct vioapic *s = domain_vioapic(d); 5.192 - struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 5.193 + struct hvm_hw_vioapic *s = domain_vioapic(d); 5.194 + struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq; 5.195 5.196 ioapic_info(s); 5.197 hvmirq_info(hvm_irq); 5.198 5.199 - /* save iopaic state*/ 5.200 - hvm_put_32u(h, s->ioregsel); 5.201 - hvm_put_32u(h, s->id); 5.202 - hvm_put_64u(h, s->base_address); 5.203 - for (i = 0; i < VIOAPIC_NUM_PINS; i++) { 5.204 - hvm_put_64u(h, s->redirtbl[i].bits); 5.205 - } 5.206 + /* save io-apic state*/ 5.207 + hvm_put_struct(h, s); 5.208 5.209 /* save hvm irq state */ 5.210 - hvm_put_buffer(h, (char*)hvm_irq->pci_intx, 16); 5.211 - hvm_put_buffer(h, (char*)hvm_irq->isa_irq, 2); 5.212 - hvm_put_32u(h, hvm_irq->callback_via_asserted); 5.213 - hvm_put_32u(h, hvm_irq->callback_via_type); 5.214 - hvm_put_32u(h, hvm_irq->callback_via.gsi); 5.215 - 5.216 - for (i = 0; i < 4; i++) 5.217 - hvm_put_8u(h, hvm_irq->pci_link_route[i]); 5.218 - 5.219 - for (i = 0; i < 4; i++) 5.220 - hvm_put_8u(h, hvm_irq->pci_link_assert_count[i]); 5.221 - 5.222 - for (i = 0; i < VIOAPIC_NUM_PINS; i++) 5.223 - hvm_put_8u(h, hvm_irq->gsi_assert_count[i]); 5.224 - 5.225 - hvm_put_8u(h, 
hvm_irq->round_robin_prev_vcpu); 5.226 - 5.227 + hvm_put_struct(h, hvm_irq); 5.228 } 5.229 5.230 static int ioapic_load(hvm_domain_context_t *h, void *opaque, int version_id) 5.231 { 5.232 - int i; 5.233 struct domain *d = opaque; 5.234 - struct vioapic *s = domain_vioapic(d); 5.235 - struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 5.236 + struct hvm_hw_vioapic *s = domain_vioapic(d); 5.237 + struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq; 5.238 5.239 if (version_id != 1) 5.240 return -EINVAL; 5.241 5.242 /* restore ioapic state */ 5.243 - s->ioregsel = hvm_get_32u(h); 5.244 - s->id = hvm_get_32u(h); 5.245 - s->base_address = hvm_get_64u(h); 5.246 - for (i = 0; i < VIOAPIC_NUM_PINS; i++) { 5.247 - s->redirtbl[i].bits = hvm_get_64u(h); 5.248 - } 5.249 + hvm_get_struct(h, s); 5.250 5.251 /* restore irq state */ 5.252 - hvm_get_buffer(h, (char*)hvm_irq->pci_intx, 16); 5.253 - hvm_get_buffer(h, (char*)hvm_irq->isa_irq, 2); 5.254 - hvm_irq->callback_via_asserted = hvm_get_32u(h); 5.255 - hvm_irq->callback_via_type = hvm_get_32u(h); 5.256 - hvm_irq->callback_via.gsi = hvm_get_32u(h); 5.257 - 5.258 - for (i = 0; i < 4; i++) 5.259 - hvm_irq->pci_link_route[i] = hvm_get_8u(h); 5.260 - 5.261 - for (i = 0; i < 4; i++) 5.262 - hvm_irq->pci_link_assert_count[i] = hvm_get_8u(h); 5.263 - 5.264 - for (i = 0; i < VIOAPIC_NUM_PINS; i++) 5.265 - hvm_irq->gsi_assert_count[i] = hvm_get_8u(h); 5.266 - 5.267 - hvm_irq->round_robin_prev_vcpu = hvm_get_8u(h); 5.268 + hvm_get_struct(h, hvm_irq); 5.269 5.270 ioapic_info(s); 5.271 hvmirq_info(hvm_irq); 5.272 @@ -605,7 +562,7 @@ static int ioapic_load(hvm_domain_contex 5.273 5.274 void vioapic_init(struct domain *d) 5.275 { 5.276 - struct vioapic *vioapic = domain_vioapic(d); 5.277 + struct hvm_hw_vioapic *vioapic = domain_vioapic(d); 5.278 int i; 5.279 5.280 hvm_register_savevm(d, "xen_hvm_ioapic", 0, 1, ioapic_save, ioapic_load, d);
6.1 --- a/xen/arch/x86/hvm/vlapic.c Sat Jan 20 11:17:39 2007 +0000 6.2 +++ b/xen/arch/x86/hvm/vlapic.c Sat Jan 20 11:17:40 2007 +0000 6.3 @@ -81,7 +81,7 @@ static unsigned int vlapic_lvt_mask[VLAP 6.4 (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC) 6.5 6.6 #define vlapic_base_address(vlapic) \ 6.7 - (vlapic->apic_base_msr & MSR_IA32_APICBASE_BASE) 6.8 + (vlapic->hw.apic_base_msr & MSR_IA32_APICBASE_BASE) 6.9 6.10 static int vlapic_reset(struct vlapic *vlapic); 6.11 6.12 @@ -100,15 +100,16 @@ static int vlapic_reset(struct vlapic *v 6.13 #define vlapic_clear_vector(vec, bitmap) \ 6.14 clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)) 6.15 6.16 -static int vlapic_find_highest_vector(u32 *bitmap) 6.17 +static int vlapic_find_highest_vector(void *bitmap) 6.18 { 6.19 + uint32_t *word = bitmap; 6.20 int word_offset = MAX_VECTOR / 32; 6.21 6.22 /* Work backwards through the bitmap (first 32-bit word in every four). */ 6.23 - while ( (word_offset != 0) && (bitmap[(--word_offset)*4] == 0) ) 6.24 + while ( (word_offset != 0) && (word[(--word_offset)*4] == 0) ) 6.25 continue; 6.26 6.27 - return (fls(bitmap[word_offset*4]) - 1) + (word_offset * 32); 6.28 + return (fls(word[word_offset*4]) - 1) + (word_offset * 32); 6.29 } 6.30 6.31 6.32 @@ -118,19 +119,19 @@ static int vlapic_find_highest_vector(u3 6.33 6.34 static int vlapic_test_and_set_irr(int vector, struct vlapic *vlapic) 6.35 { 6.36 - return vlapic_test_and_set_vector(vector, vlapic->regs + APIC_IRR); 6.37 + return vlapic_test_and_set_vector(vector, &vlapic->regs->data[APIC_IRR]); 6.38 } 6.39 6.40 static void vlapic_clear_irr(int vector, struct vlapic *vlapic) 6.41 { 6.42 - vlapic_clear_vector(vector, vlapic->regs + APIC_IRR); 6.43 + vlapic_clear_vector(vector, &vlapic->regs->data[APIC_IRR]); 6.44 } 6.45 6.46 int vlapic_find_highest_irr(struct vlapic *vlapic) 6.47 { 6.48 int result; 6.49 6.50 - result = vlapic_find_highest_vector(vlapic->regs + APIC_IRR); 6.51 + result = 
vlapic_find_highest_vector(&vlapic->regs->data[APIC_IRR]); 6.52 ASSERT((result == -1) || (result >= 16)); 6.53 6.54 return result; 6.55 @@ -142,7 +143,7 @@ int vlapic_set_irq(struct vlapic *vlapic 6.56 6.57 ret = !vlapic_test_and_set_irr(vec, vlapic); 6.58 if ( trig ) 6.59 - vlapic_set_vector(vec, vlapic->regs + APIC_TMR); 6.60 + vlapic_set_vector(vec, &vlapic->regs->data[APIC_TMR]); 6.61 6.62 /* We may need to wake up target vcpu, besides set pending bit here */ 6.63 return ret; 6.64 @@ -152,7 +153,7 @@ int vlapic_find_highest_isr(struct vlapi 6.65 { 6.66 int result; 6.67 6.68 - result = vlapic_find_highest_vector(vlapic->regs + APIC_ISR); 6.69 + result = vlapic_find_highest_vector(&vlapic->regs->data[APIC_ISR]); 6.70 ASSERT((result == -1) || (result >= 16)); 6.71 6.72 return result; 6.73 @@ -279,7 +280,7 @@ static int vlapic_accept_irq(struct vcpu 6.74 { 6.75 HVM_DBG_LOG(DBG_LEVEL_VLAPIC, 6.76 "level trig mode for vector %d\n", vector); 6.77 - vlapic_set_vector(vector, vlapic->regs + APIC_TMR); 6.78 + vlapic_set_vector(vector, &vlapic->regs->data[APIC_TMR]); 6.79 } 6.80 6.81 vcpu_kick(v); 6.82 @@ -375,9 +376,9 @@ void vlapic_EOI_set(struct vlapic *vlapi 6.83 if ( vector == -1 ) 6.84 return; 6.85 6.86 - vlapic_clear_vector(vector, vlapic->regs + APIC_ISR); 6.87 + vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]); 6.88 6.89 - if ( vlapic_test_and_clear_vector(vector, vlapic->regs + APIC_TMR) ) 6.90 + if ( vlapic_test_and_clear_vector(vector, &vlapic->regs->data[APIC_TMR]) ) 6.91 vioapic_update_EOI(vlapic_domain(vlapic), vector); 6.92 } 6.93 6.94 @@ -433,7 +434,7 @@ static uint32_t vlapic_get_tmcct(struct 6.95 6.96 counter_passed = (hvm_get_guest_time(v) - vlapic->pt.last_plt_gtime) // TSC 6.97 * 1000000000ULL / ticks_per_sec(v) // NS 6.98 - / APIC_BUS_CYCLE_NS / vlapic->timer_divisor; 6.99 + / APIC_BUS_CYCLE_NS / vlapic->hw.timer_divisor; 6.100 tmcct = tmict - counter_passed; 6.101 6.102 HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, 6.103 @@ -450,12 +451,12 @@ 
static void vlapic_set_tdcr(struct vlapi 6.104 val &= 0xb; 6.105 vlapic_set_reg(vlapic, APIC_TDCR, val); 6.106 6.107 - /* Update the demangled timer_divisor. */ 6.108 + /* Update the demangled hw.timer_divisor. */ 6.109 val = ((val & 3) | ((val & 8) >> 1)) + 1; 6.110 - vlapic->timer_divisor = 1 << (val & 7); 6.111 + vlapic->hw.timer_divisor = 1 << (val & 7); 6.112 6.113 HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, 6.114 - "vlapic_set_tdcr timer_divisor: %d.", vlapic->timer_divisor); 6.115 + "vlapic_set_tdcr timer_divisor: %d.", vlapic->hw.timer_divisor); 6.116 } 6.117 6.118 static void vlapic_read_aligned(struct vlapic *vlapic, unsigned int offset, 6.119 @@ -614,7 +615,7 @@ static void vlapic_write(struct vcpu *v, 6.120 int i; 6.121 uint32_t lvt_val; 6.122 6.123 - vlapic->disabled |= VLAPIC_SW_DISABLED; 6.124 + vlapic->hw.disabled |= VLAPIC_SW_DISABLED; 6.125 6.126 for ( i = 0; i < VLAPIC_LVT_NUM; i++ ) 6.127 { 6.128 @@ -624,7 +625,7 @@ static void vlapic_write(struct vcpu *v, 6.129 } 6.130 } 6.131 else 6.132 - vlapic->disabled &= ~VLAPIC_SW_DISABLED; 6.133 + vlapic->hw.disabled &= ~VLAPIC_SW_DISABLED; 6.134 break; 6.135 6.136 case APIC_ESR: 6.137 @@ -656,7 +657,7 @@ static void vlapic_write(struct vcpu *v, 6.138 6.139 case APIC_TMICT: 6.140 { 6.141 - uint64_t period = APIC_BUS_CYCLE_NS * (uint32_t)val * vlapic->timer_divisor; 6.142 + uint64_t period = APIC_BUS_CYCLE_NS * (uint32_t)val * vlapic->hw.timer_divisor; 6.143 6.144 vlapic_set_reg(vlapic, APIC_TMICT, val); 6.145 create_periodic_time(current, &vlapic->pt, period, vlapic->pt.irq, 6.146 @@ -672,7 +673,7 @@ static void vlapic_write(struct vcpu *v, 6.147 case APIC_TDCR: 6.148 vlapic_set_tdcr(vlapic, val & 0xb); 6.149 HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "timer divisor is 0x%x", 6.150 - vlapic->timer_divisor); 6.151 + vlapic->hw.timer_divisor); 6.152 break; 6.153 6.154 default: 6.155 @@ -697,23 +698,23 @@ struct hvm_mmio_handler vlapic_mmio_hand 6.156 6.157 void vlapic_msr_set(struct vlapic *vlapic, uint64_t value) 6.158 
{ 6.159 - if ( (vlapic->apic_base_msr ^ value) & MSR_IA32_APICBASE_ENABLE ) 6.160 + if ( (vlapic->hw.apic_base_msr ^ value) & MSR_IA32_APICBASE_ENABLE ) 6.161 { 6.162 if ( value & MSR_IA32_APICBASE_ENABLE ) 6.163 { 6.164 vlapic_reset(vlapic); 6.165 - vlapic->disabled &= ~VLAPIC_HW_DISABLED; 6.166 + vlapic->hw.disabled &= ~VLAPIC_HW_DISABLED; 6.167 } 6.168 else 6.169 { 6.170 - vlapic->disabled |= VLAPIC_HW_DISABLED; 6.171 + vlapic->hw.disabled |= VLAPIC_HW_DISABLED; 6.172 } 6.173 } 6.174 6.175 - vlapic->apic_base_msr = value; 6.176 + vlapic->hw.apic_base_msr = value; 6.177 6.178 HVM_DBG_LOG(DBG_LEVEL_VLAPIC, 6.179 - "apic base msr is 0x%016"PRIx64".", vlapic->apic_base_msr); 6.180 + "apic base msr is 0x%016"PRIx64".", vlapic->hw.apic_base_msr); 6.181 } 6.182 6.183 int vlapic_accept_pic_intr(struct vcpu *v) 6.184 @@ -754,7 +755,7 @@ int cpu_get_apic_interrupt(struct vcpu * 6.185 if ( vector == -1 ) 6.186 return -1; 6.187 6.188 - vlapic_set_vector(vector, vlapic->regs + APIC_ISR); 6.189 + vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]); 6.190 vlapic_clear_irr(vector, vlapic); 6.191 6.192 *mode = APIC_DM_FIXED; 6.193 @@ -790,7 +791,7 @@ static int vlapic_reset(struct vlapic *v 6.194 vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); 6.195 6.196 vlapic_set_reg(vlapic, APIC_SPIV, 0xff); 6.197 - vlapic->disabled |= VLAPIC_SW_DISABLED; 6.198 + vlapic->hw.disabled |= VLAPIC_SW_DISABLED; 6.199 6.200 return 1; 6.201 } 6.202 @@ -799,10 +800,9 @@ static int vlapic_reset(struct vlapic *v 6.203 static void lapic_info(struct vlapic *s) 6.204 { 6.205 printk("*****lapic state:*****\n"); 6.206 - printk("lapic 0x%"PRIx64".\n", s->apic_base_msr); 6.207 - printk("lapic 0x%x.\n", s->disabled); 6.208 - printk("lapic 0x%x.\n", s->timer_divisor); 6.209 - printk("lapic 0x%x.\n", s->timer_pending_count); 6.210 + printk("lapic 0x%"PRIx64".\n", s->hw.apic_base_msr); 6.211 + printk("lapic 0x%x.\n", s->hw.disabled); 6.212 + printk("lapic 0x%x.\n", s->hw.timer_divisor); 6.213 
} 6.214 #else 6.215 static void lapic_info(struct vlapic *s) 6.216 @@ -816,15 +816,8 @@ static void lapic_save(hvm_domain_contex 6.217 6.218 lapic_info(s); 6.219 6.220 - hvm_put_64u(h, s->apic_base_msr); 6.221 - hvm_put_32u(h, s->disabled); 6.222 - hvm_put_32u(h, s->timer_divisor); 6.223 - 6.224 - /*XXX: need this?*/ 6.225 - hvm_put_32u(h, s->timer_pending_count); 6.226 - 6.227 - hvm_put_buffer(h, (char*)s->regs, 0x3f0); 6.228 - 6.229 + hvm_put_struct(h, &s->hw); 6.230 + hvm_put_struct(h, s->regs); 6.231 } 6.232 6.233 static int lapic_load(hvm_domain_context_t *h, void *opaque, int version_id) 6.234 @@ -836,19 +829,13 @@ static int lapic_load(hvm_domain_context 6.235 if (version_id != 1) 6.236 return -EINVAL; 6.237 6.238 - s->apic_base_msr = hvm_get_64u(h); 6.239 - s->disabled = hvm_get_32u(h); 6.240 - s->timer_divisor = hvm_get_32u(h); 6.241 - 6.242 - /*XXX: need this?*/ 6.243 - s->timer_pending_count = hvm_get_32u(h); 6.244 - 6.245 - hvm_get_buffer(h, (char*)s->regs, 0x3f0); 6.246 + hvm_get_struct(h, &s->hw); 6.247 + hvm_get_struct(h, s->regs); 6.248 6.249 /* rearm the actiemr if needed */ 6.250 tmict = vlapic_get_reg(s, APIC_TMICT); 6.251 if (tmict > 0) { 6.252 - uint64_t period = APIC_BUS_CYCLE_NS * (uint32_t)tmict * s->timer_divisor; 6.253 + uint64_t period = APIC_BUS_CYCLE_NS * (uint32_t)tmict * s->hw.timer_divisor; 6.254 6.255 create_periodic_time(v, &s->pt, period, s->pt.irq, 6.256 vlapic_lvtt_period(s), NULL, s); 6.257 @@ -887,9 +874,9 @@ int vlapic_init(struct vcpu *v) 6.258 hvm_register_savevm(v->domain, "xen_hvm_lapic", v->vcpu_id, 1, lapic_save, lapic_load, vlapic); 6.259 vlapic_reset(vlapic); 6.260 6.261 - vlapic->apic_base_msr = MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE; 6.262 + vlapic->hw.apic_base_msr = MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE; 6.263 if ( v->vcpu_id == 0 ) 6.264 - vlapic->apic_base_msr |= MSR_IA32_APICBASE_BSP; 6.265 + vlapic->hw.apic_base_msr |= MSR_IA32_APICBASE_BSP; 6.266 6.267 init_timer(&vlapic->pt.timer, 
pt_timer_fn, &vlapic->pt, v->processor); 6.268
7.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Sat Jan 20 11:17:39 2007 +0000 7.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Sat Jan 20 11:17:40 2007 +0000 7.3 @@ -2309,7 +2309,7 @@ static inline int vmx_do_msr_read(struct 7.4 msr_content = __vmread(GUEST_SYSENTER_EIP); 7.5 break; 7.6 case MSR_IA32_APICBASE: 7.7 - msr_content = vcpu_vlapic(v)->apic_base_msr; 7.8 + msr_content = vcpu_vlapic(v)->hw.apic_base_msr; 7.9 break; 7.10 default: 7.11 if ( long_mode_do_msr_read(regs) )
8.1 --- a/xen/arch/x86/hvm/vpic.c Sat Jan 20 11:17:39 2007 +0000 8.2 +++ b/xen/arch/x86/hvm/vpic.c Sat Jan 20 11:17:40 2007 +0000 8.3 @@ -35,9 +35,9 @@ 8.4 #include <asm/hvm/support.h> 8.5 8.6 #define vpic_domain(v) (container_of((v), struct domain, \ 8.7 - arch.hvm_domain.irq.vpic[!vpic->is_master])) 8.8 -#define __vpic_lock(v) &container_of((v), struct hvm_irq, \ 8.9 - vpic[!(v)->is_master])->lock 8.10 + arch.hvm_domain.vpic[!vpic->is_master])) 8.11 +#define __vpic_lock(v) &container_of((v), struct hvm_domain, \ 8.12 + vpic[!(v)->is_master])->irq_lock 8.13 #define vpic_lock(v) spin_lock(__vpic_lock(v)) 8.14 #define vpic_unlock(v) spin_unlock(__vpic_lock(v)) 8.15 #define vpic_is_locked(v) spin_is_locked(__vpic_lock(v)) 8.16 @@ -45,7 +45,7 @@ 8.17 8.18 /* Return the highest priority found in mask. Return 8 if none. */ 8.19 #define VPIC_PRIO_NONE 8 8.20 -static int vpic_get_priority(struct vpic *vpic, uint8_t mask) 8.21 +static int vpic_get_priority(struct hvm_hw_vpic *vpic, uint8_t mask) 8.22 { 8.23 int prio; 8.24 8.25 @@ -61,7 +61,7 @@ static int vpic_get_priority(struct vpic 8.26 } 8.27 8.28 /* Return the PIC's highest priority pending interrupt. Return -1 if none. */ 8.29 -static int vpic_get_highest_priority_irq(struct vpic *vpic) 8.30 +static int vpic_get_highest_priority_irq(struct hvm_hw_vpic *vpic) 8.31 { 8.32 int cur_priority, priority, irq; 8.33 uint8_t mask; 8.34 @@ -92,7 +92,7 @@ static int vpic_get_highest_priority_irq 8.35 return (priority < cur_priority) ? 
irq : -1; 8.36 } 8.37 8.38 -static void vpic_update_int_output(struct vpic *vpic) 8.39 +static void vpic_update_int_output(struct hvm_hw_vpic *vpic) 8.40 { 8.41 int irq; 8.42 8.43 @@ -129,7 +129,7 @@ static void vpic_update_int_output(struc 8.44 } 8.45 } 8.46 8.47 -static void __vpic_intack(struct vpic *vpic, int irq) 8.48 +static void __vpic_intack(struct hvm_hw_vpic *vpic, int irq) 8.49 { 8.50 uint8_t mask = 1 << irq; 8.51 8.52 @@ -147,7 +147,7 @@ static void __vpic_intack(struct vpic *v 8.53 vpic_update_int_output(vpic); 8.54 } 8.55 8.56 -static int vpic_intack(struct vpic *vpic) 8.57 +static int vpic_intack(struct hvm_hw_vpic *vpic) 8.58 { 8.59 int irq = -1; 8.60 8.61 @@ -174,7 +174,7 @@ static int vpic_intack(struct vpic *vpic 8.62 return irq; 8.63 } 8.64 8.65 -static void vpic_ioport_write(struct vpic *vpic, uint32_t addr, uint32_t val) 8.66 +static void vpic_ioport_write(struct hvm_hw_vpic *vpic, uint32_t addr, uint32_t val) 8.67 { 8.68 int priority, cmd, irq; 8.69 uint8_t mask; 8.70 @@ -291,7 +291,7 @@ static void vpic_ioport_write(struct vpi 8.71 vpic_unlock(vpic); 8.72 } 8.73 8.74 -static uint32_t vpic_ioport_read(struct vpic *vpic, uint32_t addr) 8.75 +static uint32_t vpic_ioport_read(struct hvm_hw_vpic *vpic, uint32_t addr) 8.76 { 8.77 if ( vpic->poll ) 8.78 { 8.79 @@ -307,7 +307,7 @@ static uint32_t vpic_ioport_read(struct 8.80 8.81 static int vpic_intercept_pic_io(ioreq_t *p) 8.82 { 8.83 - struct vpic *vpic; 8.84 + struct hvm_hw_vpic *vpic; 8.85 uint32_t data; 8.86 8.87 if ( (p->size != 1) || (p->count != 1) ) 8.88 @@ -316,7 +316,7 @@ static int vpic_intercept_pic_io(ioreq_t 8.89 return 1; 8.90 } 8.91 8.92 - vpic = &current->domain->arch.hvm_domain.irq.vpic[p->addr >> 7]; 8.93 + vpic = &current->domain->arch.hvm_domain.vpic[p->addr >> 7]; 8.94 8.95 if ( p->dir == IOREQ_WRITE ) 8.96 { 8.97 @@ -340,7 +340,7 @@ static int vpic_intercept_pic_io(ioreq_t 8.98 8.99 static int vpic_intercept_elcr_io(ioreq_t *p) 8.100 { 8.101 - struct vpic *vpic; 8.102 + struct
hvm_hw_vpic *vpic; 8.103 uint32_t data; 8.104 8.105 if ( (p->size != 1) || (p->count != 1) ) 8.106 @@ -349,7 +349,7 @@ static int vpic_intercept_elcr_io(ioreq_ 8.107 return 1; 8.108 } 8.109 8.110 - vpic = &current->domain->arch.hvm_domain.irq.vpic[p->addr & 1]; 8.111 + vpic = &current->domain->arch.hvm_domain.vpic[p->addr & 1]; 8.112 8.113 if ( p->dir == IOREQ_WRITE ) 8.114 { 8.115 @@ -379,7 +379,7 @@ static int vpic_intercept_elcr_io(ioreq_ 8.116 } 8.117 8.118 #ifdef HVM_DEBUG_SUSPEND 8.119 -static void vpic_info(struct vpic *s) 8.120 +static void vpic_info(struct hvm_hw_vpic *s) 8.121 { 8.122 printk("*****pic state:*****\n"); 8.123 printk("pic 0x%x.\n", s->irr); 8.124 @@ -399,61 +399,27 @@ static void vpic_info(struct vpic *s) 8.125 printk("pic 0x%x.\n", s->is_master); 8.126 } 8.127 #else 8.128 -static void vpic_info(struct vpic *s) 8.129 +static void vpic_info(struct hvm_hw_vpic *s) 8.130 { 8.131 } 8.132 #endif 8.133 8.134 static void vpic_save(hvm_domain_context_t *h, void *opaque) 8.135 { 8.136 - struct vpic *s = opaque; 8.137 + struct hvm_hw_vpic *s = opaque; 8.138 8.139 vpic_info(s); 8.140 - 8.141 - hvm_put_8u(h, s->irr); 8.142 - hvm_put_8u(h, s->imr); 8.143 - hvm_put_8u(h, s->isr); 8.144 - hvm_put_8u(h, s->irq_base); 8.145 - hvm_put_8u(h, s->init_state); 8.146 - hvm_put_8u(h, s->priority_add); 8.147 - hvm_put_8u(h, s->readsel_isr); 8.148 - 8.149 - hvm_put_8u(h, s->poll); 8.150 - hvm_put_8u(h, s->auto_eoi); 8.151 - 8.152 - hvm_put_8u(h, s->rotate_on_auto_eoi); 8.153 - hvm_put_8u(h, s->special_fully_nested_mode); 8.154 - hvm_put_8u(h, s->special_mask_mode); 8.155 - 8.156 - hvm_put_8u(h, s->elcr); 8.157 - hvm_put_8u(h, s->int_output); 8.158 + hvm_put_struct(h, s); 8.159 } 8.160 8.161 static int vpic_load(hvm_domain_context_t *h, void *opaque, int version_id) 8.162 { 8.163 - struct vpic *s = opaque; 8.164 + struct hvm_hw_vpic *s = opaque; 8.165 8.166 if (version_id != 1) 8.167 return -EINVAL; 8.168 8.169 - s->irr = hvm_get_8u(h); 8.170 - s->imr = hvm_get_8u(h); 8.171 - 
s->isr = hvm_get_8u(h); 8.172 - s->irq_base = hvm_get_8u(h); 8.173 - s->init_state = hvm_get_8u(h); 8.174 - s->priority_add = hvm_get_8u(h); 8.175 - s->readsel_isr = hvm_get_8u(h); 8.176 - 8.177 - s->poll = hvm_get_8u(h); 8.178 - s->auto_eoi = hvm_get_8u(h); 8.179 - 8.180 - s->rotate_on_auto_eoi = hvm_get_8u(h); 8.181 - s->special_fully_nested_mode = hvm_get_8u(h); 8.182 - s->special_mask_mode = hvm_get_8u(h); 8.183 - 8.184 - s->elcr = hvm_get_8u(h); 8.185 - s->int_output = hvm_get_8u(h); 8.186 - 8.187 + hvm_get_struct(h, s); 8.188 vpic_info(s); 8.189 8.190 return 0; 8.191 @@ -461,10 +427,10 @@ static int vpic_load(hvm_domain_context_ 8.192 8.193 void vpic_init(struct domain *d) 8.194 { 8.195 - struct vpic *vpic; 8.196 + struct hvm_hw_vpic *vpic; 8.197 8.198 /* Master PIC. */ 8.199 - vpic = &d->arch.hvm_domain.irq.vpic[0]; 8.200 + vpic = &d->arch.hvm_domain.vpic[0]; 8.201 memset(vpic, 0, sizeof(*vpic)); 8.202 vpic->is_master = 1; 8.203 vpic->elcr = 1 << 2; 8.204 @@ -482,7 +448,7 @@ void vpic_init(struct domain *d) 8.205 8.206 void vpic_irq_positive_edge(struct domain *d, int irq) 8.207 { 8.208 - struct vpic *vpic = &d->arch.hvm_domain.irq.vpic[irq >> 3]; 8.209 + struct hvm_hw_vpic *vpic = &d->arch.hvm_domain.vpic[irq >> 3]; 8.210 uint8_t mask = 1 << (irq & 7); 8.211 8.212 ASSERT(irq <= 15); 8.213 @@ -498,7 +464,7 @@ void vpic_irq_positive_edge(struct domai 8.214 8.215 void vpic_irq_negative_edge(struct domain *d, int irq) 8.216 { 8.217 - struct vpic *vpic = &d->arch.hvm_domain.irq.vpic[irq >> 3]; 8.218 + struct hvm_hw_vpic *vpic = &d->arch.hvm_domain.vpic[irq >> 3]; 8.219 uint8_t mask = 1 << (irq & 7); 8.220 8.221 ASSERT(irq <= 15); 8.222 @@ -515,7 +481,7 @@ void vpic_irq_negative_edge(struct domai 8.223 int cpu_get_pic_interrupt(struct vcpu *v, int *type) 8.224 { 8.225 int irq, vector; 8.226 - struct vpic *vpic = &v->domain->arch.hvm_domain.irq.vpic[0]; 8.227 + struct hvm_hw_vpic *vpic = &v->domain->arch.hvm_domain.vpic[0]; 8.228 8.229 if ( 
!vlapic_accept_pic_intr(v) || !vpic->int_output ) 8.230 return -1;
9.1 --- a/xen/include/asm-x86/hvm/domain.h Sat Jan 20 11:17:39 2007 +0000 9.2 +++ b/xen/include/asm-x86/hvm/domain.h Sat Jan 20 11:17:40 2007 +0000 9.3 @@ -26,6 +26,7 @@ 9.4 #include <asm/hvm/vlapic.h> 9.5 #include <asm/hvm/io.h> 9.6 #include <public/hvm/params.h> 9.7 +#include <public/hvm/save.h> 9.8 9.9 typedef void SaveStateHandler(hvm_domain_context_t *h, void *opaque); 9.10 typedef int LoadStateHandler(hvm_domain_context_t *h, void *opaque, int version_id); 9.11 @@ -50,7 +51,11 @@ struct hvm_domain { 9.12 9.13 struct hvm_io_handler io_handler; 9.14 9.15 - struct hvm_irq irq; 9.16 + /* Lock protects access to irq, vpic and vioapic. */ 9.17 + spinlock_t irq_lock; 9.18 + struct hvm_hw_irq irq; 9.19 + struct hvm_hw_vpic vpic[2]; /* 0=master; 1=slave */ 9.20 + struct hvm_hw_vioapic vioapic; 9.21 9.22 /* hvm_print_line() logging. */ 9.23 char pbuf[80];
10.1 --- a/xen/include/asm-x86/hvm/irq.h Sat Jan 20 11:17:39 2007 +0000 10.2 +++ b/xen/include/asm-x86/hvm/irq.h Sat Jan 20 11:17:40 2007 +0000 10.3 @@ -26,70 +26,7 @@ 10.4 #include <xen/spinlock.h> 10.5 #include <asm/hvm/vpic.h> 10.6 #include <asm/hvm/vioapic.h> 10.7 - 10.8 -struct hvm_irq { 10.9 - /* Lock protects access to all other fields. */ 10.10 - spinlock_t lock; 10.11 - 10.12 - /* 10.13 - * Virtual interrupt wires for a single PCI bus. 10.14 - * Indexed by: device*4 + INTx#. 10.15 - */ 10.16 - DECLARE_BITMAP(pci_intx, 32*4); 10.17 - 10.18 - /* 10.19 - * Virtual interrupt wires for ISA devices. 10.20 - * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing). 10.21 - */ 10.22 - DECLARE_BITMAP(isa_irq, 16); 10.23 - 10.24 - /* Virtual interrupt and via-link for paravirtual platform driver. */ 10.25 - unsigned int callback_via_asserted; 10.26 - enum { 10.27 - HVMIRQ_callback_none, 10.28 - HVMIRQ_callback_gsi, 10.29 - HVMIRQ_callback_pci_intx 10.30 - } callback_via_type; 10.31 - union { 10.32 - unsigned int gsi; 10.33 - struct { uint8_t dev, intx; } pci; 10.34 - } callback_via; 10.35 - 10.36 - /* 10.37 - * PCI-ISA interrupt router. 10.38 - * Each PCI <device:INTx#> is 'wire-ORed' into one of four links using 10.39 - * the traditional 'barber's pole' mapping ((device + INTx#) & 3). 10.40 - * The router provides a programmable mapping from each link to a GSI. 10.41 - */ 10.42 - u8 pci_link_route[4]; 10.43 - 10.44 - /* Number of INTx wires asserting each PCI-ISA link. */ 10.45 - u8 pci_link_assert_count[4]; 10.46 - 10.47 - /* 10.48 - * Number of wires asserting each GSI. 10.49 - * 10.50 - * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space 10.51 - * except ISA IRQ 0, which is connected to GSI 2. 10.52 - * PCI links map into this space via the PCI-ISA bridge. 10.53 - * 10.54 - * GSIs 16+ are used only be PCI devices. 
The mapping from PCI device to 10.55 - * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16 10.56 - */ 10.57 - u8 gsi_assert_count[VIOAPIC_NUM_PINS]; 10.58 - 10.59 - /* 10.60 - * GSIs map onto PIC/IO-APIC in the usual way: 10.61 - * 0-7: Master 8259 PIC, IO-APIC pins 0-7 10.62 - * 8-15: Slave 8259 PIC, IO-APIC pins 8-15 10.63 - * 16+ : IO-APIC pins 16+ 10.64 - */ 10.65 - struct vpic vpic[2]; /* 0=master; 1=slave */ 10.66 - struct vioapic vioapic; 10.67 - 10.68 - /* Last VCPU that was delivered a LowestPrio interrupt. */ 10.69 - u8 round_robin_prev_vcpu; 10.70 -}; 10.71 +#include <public/hvm/save.h> 10.72 10.73 #define hvm_pci_intx_gsi(dev, intx) \ 10.74 (((((dev)<<2) + ((dev)>>3) + (intx)) & 31) + 16)
11.1 --- a/xen/include/asm-x86/hvm/vioapic.h Sat Jan 20 11:17:39 2007 +0000 11.2 +++ b/xen/include/asm-x86/hvm/vioapic.h Sat Jan 20 11:17:40 2007 +0000 11.3 @@ -28,13 +28,7 @@ 11.4 #include <xen/config.h> 11.5 #include <xen/types.h> 11.6 #include <xen/smp.h> 11.7 - 11.8 -#ifdef __ia64__ 11.9 -#define VIOAPIC_IS_IOSAPIC 1 11.10 -#define VIOAPIC_NUM_PINS 24 11.11 -#else 11.12 -#define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */ 11.13 -#endif 11.14 +#include <public/hvm/save.h> 11.15 11.16 #if !VIOAPIC_IS_IOSAPIC 11.17 #define VIOAPIC_VERSION_ID 0x11 /* IOAPIC version */ 11.18 @@ -58,38 +52,9 @@ 11.19 #define VIOAPIC_REG_VERSION 0x01 11.20 #define VIOAPIC_REG_ARB_ID 0x02 /* x86 IOAPIC only */ 11.21 11.22 -#define domain_vioapic(d) (&(d)->arch.hvm_domain.irq.vioapic) 11.23 +#define domain_vioapic(d) (&(d)->arch.hvm_domain.vioapic) 11.24 #define vioapic_domain(v) (container_of((v), struct domain, \ 11.25 - arch.hvm_domain.irq.vioapic)) 11.26 - 11.27 -union vioapic_redir_entry 11.28 -{ 11.29 - uint64_t bits; 11.30 - struct { 11.31 - uint8_t vector; 11.32 - uint8_t delivery_mode:3; 11.33 - uint8_t dest_mode:1; 11.34 - uint8_t delivery_status:1; 11.35 - uint8_t polarity:1; 11.36 - uint8_t remote_irr:1; 11.37 - uint8_t trig_mode:1; 11.38 - uint8_t mask:1; 11.39 - uint8_t reserve:7; 11.40 -#if !VIOAPIC_IS_IOSAPIC 11.41 - uint8_t reserved[4]; 11.42 - uint8_t dest_id; 11.43 -#else 11.44 - uint8_t reserved[3]; 11.45 - uint16_t dest_id; 11.46 -#endif 11.47 - } fields; 11.48 -}; 11.49 - 11.50 -struct vioapic { 11.51 - uint32_t ioregsel, id; 11.52 - unsigned long base_address; 11.53 - union vioapic_redir_entry redirtbl[VIOAPIC_NUM_PINS]; 11.54 -}; 11.55 + arch.hvm_domain.vioapic)) 11.56 11.57 void vioapic_init(struct domain *d); 11.58 void vioapic_irq_positive_edge(struct domain *d, unsigned int irq);
12.1 --- a/xen/include/asm-x86/hvm/vlapic.h Sat Jan 20 11:17:39 2007 +0000 12.2 +++ b/xen/include/asm-x86/hvm/vlapic.h Sat Jan 20 11:17:40 2007 +0000 12.3 @@ -44,31 +44,28 @@ 12.4 */ 12.5 #define VLAPIC_HW_DISABLED 0x1 12.6 #define VLAPIC_SW_DISABLED 0x2 12.7 -#define vlapic_sw_disabled(vlapic) ((vlapic)->disabled & VLAPIC_SW_DISABLED) 12.8 -#define vlapic_hw_disabled(vlapic) ((vlapic)->disabled & VLAPIC_HW_DISABLED) 12.9 -#define vlapic_disabled(vlapic) ((vlapic)->disabled) 12.10 -#define vlapic_enabled(vlapic) (!vlapic_disabled(vlapic)) 12.11 +#define vlapic_sw_disabled(vlapic) ((vlapic)->hw.disabled & VLAPIC_SW_DISABLED) 12.12 +#define vlapic_hw_disabled(vlapic) ((vlapic)->hw.disabled & VLAPIC_HW_DISABLED) 12.13 +#define vlapic_disabled(vlapic) ((vlapic)->hw.disabled) 12.14 +#define vlapic_enabled(vlapic) (!vlapic_disabled(vlapic)) 12.15 12.16 struct vlapic { 12.17 - uint64_t apic_base_msr; 12.18 - uint32_t disabled; /* VLAPIC_xx_DISABLED */ 12.19 - uint32_t timer_divisor; 12.20 - struct periodic_time pt; 12.21 - int timer_pending_count; 12.22 - s_time_t timer_last_update; 12.23 - struct page_info *regs_page; 12.24 - void *regs; 12.25 + struct hvm_hw_lapic hw; 12.26 + struct hvm_hw_lapic_regs *regs; 12.27 + struct periodic_time pt; 12.28 + s_time_t timer_last_update; 12.29 + struct page_info *regs_page; 12.30 }; 12.31 12.32 static inline uint32_t vlapic_get_reg(struct vlapic *vlapic, uint32_t reg) 12.33 { 12.34 - return *((uint32_t *)(vlapic->regs + reg)); 12.35 + return *((uint32_t *)(&vlapic->regs->data[reg])); 12.36 } 12.37 12.38 static inline void vlapic_set_reg( 12.39 struct vlapic *vlapic, uint32_t reg, uint32_t val) 12.40 { 12.41 - *((uint32_t *)(vlapic->regs + reg)) = val; 12.42 + *((uint32_t *)(&vlapic->regs->data[reg])) = val; 12.43 } 12.44 12.45 int vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig);
13.1 --- a/xen/include/asm-x86/hvm/vpic.h Sat Jan 20 11:17:39 2007 +0000 13.2 +++ b/xen/include/asm-x86/hvm/vpic.h Sat Jan 20 11:17:40 2007 +0000 13.3 @@ -27,51 +27,7 @@ 13.4 #ifndef __ASM_X86_HVM_VPIC_H__ 13.5 #define __ASM_X86_HVM_VPIC_H__ 13.6 13.7 -struct vpic { 13.8 - /* IR line bitmasks. */ 13.9 - uint8_t irr, imr, isr; 13.10 - 13.11 - /* Line IRx maps to IRQ irq_base+x */ 13.12 - uint8_t irq_base; 13.13 - 13.14 - /* 13.15 - * Where are we in ICW2-4 initialisation (0 means no init in progress)? 13.16 - * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1). 13.17 - * Bit 2: ICW1.IC4 (1 == ICW4 included in init sequence) 13.18 - * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence) 13.19 - */ 13.20 - uint8_t init_state:4; 13.21 - 13.22 - /* IR line with highest priority. */ 13.23 - uint8_t priority_add:4; 13.24 - 13.25 - /* Reads from A=0 obtain ISR or IRR? */ 13.26 - uint8_t readsel_isr:1; 13.27 - 13.28 - /* Reads perform a polling read? */ 13.29 - uint8_t poll:1; 13.30 - 13.31 - /* Automatically clear IRQs from the ISR during INTA? */ 13.32 - uint8_t auto_eoi:1; 13.33 - 13.34 - /* Automatically rotate IRQ priorities during AEOI? */ 13.35 - uint8_t rotate_on_auto_eoi:1; 13.36 - 13.37 - /* Exclude slave inputs when considering in-service IRQs? */ 13.38 - uint8_t special_fully_nested_mode:1; 13.39 - 13.40 - /* Special mask mode excludes masked IRs from AEOI and priority checks. */ 13.41 - uint8_t special_mask_mode:1; 13.42 - 13.43 - /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */ 13.44 - uint8_t is_master:1; 13.45 - 13.46 - /* Edge/trigger selection. */ 13.47 - uint8_t elcr; 13.48 - 13.49 - /* Virtual INT output. */ 13.50 - uint8_t int_output; 13.51 -}; 13.52 +#include <public/hvm/save.h> 13.53 13.54 void vpic_irq_positive_edge(struct domain *d, int irq); 13.55 void vpic_irq_negative_edge(struct domain *d, int irq);
14.1 --- a/xen/include/public/hvm/save.h Sat Jan 20 11:17:39 2007 +0000 14.2 +++ b/xen/include/public/hvm/save.h Sat Jan 20 11:17:40 2007 +0000 14.3 @@ -84,7 +84,6 @@ struct hvm_hw_cpu { 14.4 uint64_t idtr_base; 14.5 uint64_t gdtr_base; 14.6 14.7 - 14.8 uint32_t cs_arbytes; 14.9 uint32_t ds_arbytes; 14.10 uint32_t es_arbytes; 14.11 @@ -137,5 +136,182 @@ struct hvm_hw_pit { 14.12 }; 14.13 14.14 14.15 +/* 14.16 + * PIC 14.17 + */ 14.18 +#define HVM_SAVE_TYPE_PIC 3 14.19 +struct hvm_hw_vpic { 14.20 + /* IR line bitmasks. */ 14.21 + uint8_t irr; 14.22 + uint8_t imr; 14.23 + uint8_t isr; 14.24 + 14.25 + /* Line IRx maps to IRQ irq_base+x */ 14.26 + uint8_t irq_base; 14.27 + 14.28 + /* 14.29 + * Where are we in ICW2-4 initialisation (0 means no init in progress)? 14.30 + * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1). 14.31 + * Bit 2: ICW1.IC4 (1 == ICW4 included in init sequence) 14.32 + * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence) 14.33 + */ 14.34 + uint8_t init_state:4; 14.35 + 14.36 + /* IR line with highest priority. */ 14.37 + uint8_t priority_add:4; 14.38 + 14.39 + /* Reads from A=0 obtain ISR or IRR? */ 14.40 + uint8_t readsel_isr:1; 14.41 + 14.42 + /* Reads perform a polling read? */ 14.43 + uint8_t poll:1; 14.44 + 14.45 + /* Automatically clear IRQs from the ISR during INTA? */ 14.46 + uint8_t auto_eoi:1; 14.47 + 14.48 + /* Automatically rotate IRQ priorities during AEOI? */ 14.49 + uint8_t rotate_on_auto_eoi:1; 14.50 + 14.51 + /* Exclude slave inputs when considering in-service IRQs? */ 14.52 + uint8_t special_fully_nested_mode:1; 14.53 + 14.54 + /* Special mask mode excludes masked IRs from AEOI and priority checks. */ 14.55 + uint8_t special_mask_mode:1; 14.56 + 14.57 + /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */ 14.58 + uint8_t is_master:1; 14.59 + 14.60 + /* Edge/trigger selection. */ 14.61 + uint8_t elcr; 14.62 + 14.63 + /* Virtual INT output. 
*/ 14.64 + uint8_t int_output; 14.65 +}; 14.66 + 14.67 + 14.68 +/* 14.69 + * IO-APIC 14.70 + */ 14.71 +#define HVM_SAVE_TYPE_IOAPIC 4 14.72 + 14.73 +#ifdef __ia64__ 14.74 +#define VIOAPIC_IS_IOSAPIC 1 14.75 +#define VIOAPIC_NUM_PINS 24 14.76 +#else 14.77 +#define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */ 14.78 +#endif 14.79 + 14.80 +struct hvm_hw_vioapic { 14.81 + uint64_t base_address; 14.82 + uint32_t ioregsel; 14.83 + uint32_t id; 14.84 + union vioapic_redir_entry 14.85 + { 14.86 + uint64_t bits; 14.87 + struct { 14.88 + uint8_t vector; 14.89 + uint8_t delivery_mode:3; 14.90 + uint8_t dest_mode:1; 14.91 + uint8_t delivery_status:1; 14.92 + uint8_t polarity:1; 14.93 + uint8_t remote_irr:1; 14.94 + uint8_t trig_mode:1; 14.95 + uint8_t mask:1; 14.96 + uint8_t reserve:7; 14.97 +#if !VIOAPIC_IS_IOSAPIC 14.98 + uint8_t reserved[4]; 14.99 + uint8_t dest_id; 14.100 +#else 14.101 + uint8_t reserved[3]; 14.102 + uint16_t dest_id; 14.103 +#endif 14.104 + } fields; 14.105 + } redirtbl[VIOAPIC_NUM_PINS]; 14.106 +}; 14.107 + 14.108 + 14.109 +/* 14.110 + * IRQ 14.111 + */ 14.112 +#define HVM_SAVE_TYPE_IRQ 5 14.113 +struct hvm_hw_irq { 14.114 + /* 14.115 + * Virtual interrupt wires for a single PCI bus. 14.116 + * Indexed by: device*4 + INTx#. 14.117 + */ 14.118 + DECLARE_BITMAP(pci_intx, 32*4); 14.119 + 14.120 + /* 14.121 + * Virtual interrupt wires for ISA devices. 14.122 + * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing). 14.123 + */ 14.124 + DECLARE_BITMAP(isa_irq, 16); 14.125 + 14.126 + /* Virtual interrupt and via-link for paravirtual platform driver. 
*/ 14.127 + uint32_t callback_via_asserted; 14.128 + union { 14.129 + enum { 14.130 + HVMIRQ_callback_none, 14.131 + HVMIRQ_callback_gsi, 14.132 + HVMIRQ_callback_pci_intx 14.133 + } callback_via_type; 14.134 + uint32_t pad; /* So the next field will be aligned */ 14.135 + }; 14.136 + union { 14.137 + uint32_t gsi; 14.138 + struct { uint8_t dev, intx; } pci; 14.139 + } callback_via; 14.140 + 14.141 + /* 14.142 + * PCI-ISA interrupt router. 14.143 + * Each PCI <device:INTx#> is 'wire-ORed' into one of four links using 14.144 + * the traditional 'barber's pole' mapping ((device + INTx#) & 3). 14.145 + * The router provides a programmable mapping from each link to a GSI. 14.146 + */ 14.147 + u8 pci_link_route[4]; 14.148 + 14.149 + /* Number of INTx wires asserting each PCI-ISA link. */ 14.150 + u8 pci_link_assert_count[4]; 14.151 + 14.152 + /* 14.153 + * Number of wires asserting each GSI. 14.154 + * 14.155 + * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space 14.156 + * except ISA IRQ 0, which is connected to GSI 2. 14.157 + * PCI links map into this space via the PCI-ISA bridge. 14.158 + * 14.159 + * GSIs 16+ are used only be PCI devices. The mapping from PCI device to 14.160 + * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16 14.161 + */ 14.162 + u8 gsi_assert_count[VIOAPIC_NUM_PINS]; 14.163 + 14.164 + /* 14.165 + * GSIs map onto PIC/IO-APIC in the usual way: 14.166 + * 0-7: Master 8259 PIC, IO-APIC pins 0-7 14.167 + * 8-15: Slave 8259 PIC, IO-APIC pins 8-15 14.168 + * 16+ : IO-APIC pins 16+ 14.169 + */ 14.170 + 14.171 + /* Last VCPU that was delivered a LowestPrio interrupt. 
*/ 14.172 + u8 round_robin_prev_vcpu; 14.173 +}; 14.174 + 14.175 + 14.176 +/* 14.177 + * LAPIC 14.178 + */ 14.179 +#define HVM_SAVE_TYPE_LAPIC 6 14.180 +struct hvm_hw_lapic { 14.181 + uint64_t apic_base_msr; 14.182 + uint32_t disabled; /* VLAPIC_xx_DISABLED */ 14.183 + uint32_t timer_divisor; 14.184 +}; 14.185 + 14.186 +#define HVM_SAVE_TYPE_LAPIC_REGS 7 14.187 + 14.188 +struct hvm_hw_lapic_regs { 14.189 + /* A 4k page of register state */ 14.190 + uint8_t data[0x400]; 14.191 +}; 14.192 14.193 #endif /* __XEN_PUBLIC_HVM_SAVE_H__ */