ia64/xen-unstable

view xen/arch/x86/hvm/vmx/io.c @ 10908:a6cb8ba24a91

[HVM] Place all APIC registers into one page in native format.
With this change we can re-use code at include/asm-x86/apicdef.h,
making the code much cleaner. It also helps future enhancements.

This patch does not change any logic except the change to
CONTROL_REG_ACCESS_NUM, which should be 0xf for CR8 access.

Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
author kfraser@localhost.localdomain
date Wed Aug 02 10:07:03 2006 +0100 (2006-08-02)
parents a1c2cede77c7
children 415614d3a1ee
line source
1 /*
2 * io.c: handling I/O, interrupts related VMX entry/exit
3 * Copyright (c) 2004, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 */
20 #include <xen/config.h>
21 #include <xen/init.h>
22 #include <xen/mm.h>
23 #include <xen/lib.h>
24 #include <xen/errno.h>
25 #include <xen/trace.h>
26 #include <xen/event.h>
28 #include <asm/current.h>
29 #include <asm/cpufeature.h>
30 #include <asm/processor.h>
31 #include <asm/msr.h>
32 #include <asm/hvm/hvm.h>
33 #include <asm/hvm/io.h>
34 #include <asm/hvm/support.h>
35 #include <asm/hvm/vmx/vmx.h>
36 #include <asm/hvm/vmx/vmcs.h>
37 #include <asm/hvm/vpic.h>
38 #include <asm/hvm/vlapic.h>
39 #include <public/hvm/ioreq.h>
/* True iff @v is the bootstrap (id 0) VCPU of its domain.
 * The argument is parenthesised so the macro is safe for any
 * pointer-valued expression, not just a plain identifier. */
#define BSP_CPU(v) (!((v)->vcpu_id))
43 static inline
44 void __set_tsc_offset(u64 offset)
45 {
46 __vmwrite(TSC_OFFSET, offset);
47 #if defined (__i386__)
48 __vmwrite(TSC_OFFSET_HIGH, offset >> 32);
49 #endif
50 }
52 void set_guest_time(struct vcpu *v, u64 gtime)
53 {
54 u64 host_tsc;
56 rdtscll(host_tsc);
58 v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
59 __set_tsc_offset(v->arch.hvm_vcpu.cache_tsc_offset);
60 }
/*
 * Book-keeping performed after an interrupt has been injected into @v.
 *
 * For the emulated PIT's periodic interrupt this maintains the guest-time
 * accounting: on the first injection the timer is armed and the pending
 * counter reset; on subsequent injections one pending tick is consumed,
 * guest time is advanced by exactly one period, and the PIT callback is
 * notified.  For all other (non-ExtINT) vectors the local APIC emulation
 * is given its post-injection hook.
 */
static inline void
interrupt_post_injection(struct vcpu * v, int vector, int type)
{
    struct periodic_time *pt = &(v->domain->arch.hvm_domain.pl_time.periodic_tm);

    if ( is_pit_irq(v, vector, type) ) {
        if ( !pt->first_injected ) {
            /* First PIT tick delivered: start clean and arm the timer. */
            pt->pending_intr_nr = 0;
            pt->last_plt_gtime = hvm_get_guest_time(v);
            pt->scheduled = NOW() + pt->period;
            set_timer(&pt->timer, pt->scheduled);
            pt->first_injected = 1;
        } else {
            /* Consume one owed tick and step guest time by one period.
             * Order matters: guest time is updated before the PIT
             * callback is fired. */
            pt->pending_intr_nr--;
            pt->last_plt_gtime += pt->period_cycles;
            set_guest_time(v, pt->last_plt_gtime);
            pit_time_fired(v, pt->priv);
        }
    }

    switch(type)
    {
    case APIC_DM_EXTINT:
        /* ExtINT comes from the PIC; the vLAPIC has nothing to update. */
        break;

    default:
        vlapic_post_injection(v, vector, type);
        break;
    }
}
93 static inline void
94 enable_irq_window(struct vcpu *v)
95 {
96 u32 *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
98 if (!(*cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING)) {
99 *cpu_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
100 __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
101 }
102 }
104 static inline void
105 disable_irq_window(struct vcpu *v)
106 {
107 u32 *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
109 if ( *cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING ) {
110 *cpu_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
111 __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
112 }
113 }
115 static inline int is_interruptibility_state(void)
116 {
117 int interruptibility;
118 __vmread(GUEST_INTERRUPTIBILITY_INFO, &interruptibility);
119 return interruptibility;
120 }
122 /* check to see if there is pending interrupt */
123 int cpu_has_pending_irq(struct vcpu *v)
124 {
125 struct hvm_domain *plat = &v->domain->arch.hvm_domain;
127 /* APIC */
128 if ( cpu_has_apic_interrupt(v) ) return 1;
130 /* PIC */
131 if ( !vlapic_accept_pic_intr(v) ) return 0;
133 return plat->interrupt_request;
134 }
/*
 * Run on the VM-entry path: decide whether to inject an event into the
 * guest on this entry.  Priority order is (1) a vector already queued by
 * the emulator, (2) re-delivery of an event interrupted by the last VM
 * exit (IDT-vectoring info), (3) a fresh external interrupt — but only
 * if the guest can take one; otherwise an interrupt-window exit is
 * requested so injection can be retried later.
 */
asmlinkage void vmx_intr_assist(void)
{
    int intr_type = 0;
    int highest_vector;
    unsigned long eflags;
    struct vcpu *v = current;
    struct hvm_domain *plat=&v->domain->arch.hvm_domain;
    struct periodic_time *pt = &plat->pl_time.periodic_tm;
    struct hvm_virpic *pic= &plat->vpic;
    unsigned int idtv_info_field;
    unsigned long inst_len;
    int has_ext_irq;

    /* Only VCPU0 services the emulated PIC. */
    if ( v->vcpu_id == 0 )
        hvm_pic_assist(v);

    /* Pulse the PIT line while periodic-timer ticks are still owed. */
    if ( (v->vcpu_id == 0) && pt->enabled && pt->pending_intr_nr ) {
        pic_set_irq(pic, pt->irq, 0);
        pic_set_irq(pic, pt->irq, 1);
    }

    has_ext_irq = cpu_has_pending_irq(v);

    /* (1) The emulator already queued a vector for this entry: inject
     * nothing further; just request an IRQ window if more is pending. */
    if (unlikely(v->arch.hvm_vmx.vector_injected)) {
        v->arch.hvm_vmx.vector_injected=0;
        if (unlikely(has_ext_irq)) enable_irq_window(v);
        return;
    }

    /* (2) The last VM exit interrupted event delivery: re-inject that
     * event verbatim — interruption info, instruction length and, when
     * flagged, the error code — before considering anything new. */
    __vmread(IDT_VECTORING_INFO_FIELD, &idtv_info_field);
    if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
        __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);

        __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
        __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);

        if (unlikely(idtv_info_field & 0x800)) { /* valid error code */
            unsigned long error_code;
            __vmread(IDT_VECTORING_ERROR_CODE, &error_code);
            __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
        }
        if (unlikely(has_ext_irq))
            enable_irq_window(v);

        HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
        return;
    }

    /* (3) Nothing pending: done. */
    if (likely(!has_ext_irq)) return;

    /* Guest is in an interrupt shadow (STI / MOV SS): defer via window. */
    if (unlikely(is_interruptibility_state())) { /* pre-cleared for emulated instruction */
        enable_irq_window(v);
        HVM_DBG_LOG(DBG_LEVEL_1, "interruptibility");
        return;
    }

    /* Guest has EFLAGS.IF clear: defer via interrupt window. */
    __vmread(GUEST_RFLAGS, &eflags);
    if (irq_masked(eflags)) {
        enable_irq_window(v);
        return;
    }

    /* Guest can take an interrupt now: pick the highest-priority one. */
    highest_vector = cpu_get_interrupt(v, &intr_type);
    switch (intr_type) {
    case APIC_DM_EXTINT:
    case APIC_DM_FIXED:
    case APIC_DM_LOWEST:
        vmx_inject_extint(v, highest_vector, VMX_DELIVER_NO_ERROR_CODE);
        TRACE_3D(TRC_VMX_INT, v->domain->domain_id, highest_vector, 0);
        break;

    case APIC_DM_SMI:
    case APIC_DM_NMI:
    case APIC_DM_INIT:
    case APIC_DM_STARTUP:
    default:
        printk("Unsupported interrupt type\n");
        BUG();
        break;
    }

    /* Timekeeping / vLAPIC bookkeeping for the vector just injected. */
    interrupt_post_injection(v, highest_vector, intr_type);
    return;
}
/*
 * Per-VCPU work done when @v is rescheduled onto a CPU before
 * re-entering the guest: restore CR0.TS shadowing, catch up on PIT
 * ticks missed while descheduled, and block until any outstanding
 * emulated I/O request has completed.
 */
void vmx_do_resume(struct vcpu *v)
{
    struct domain *d = v->domain;
    struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;

    vmx_stts();

    /* pick up the elapsed PIT ticks and re-enable pit_timer */
    if ( pt->enabled && pt->first_injected ) {
        if ( v->arch.hvm_vcpu.guest_time ) {
            /* Re-apply the guest time saved at deschedule, then clear
             * the saved value so it is not applied twice. */
            set_guest_time(v, v->arch.hvm_vcpu.guest_time);
            v->arch.hvm_vcpu.guest_time = 0;
        }
        pickup_deactive_ticks(pt);
    }

    /* An I/O completion event or an in-flight I/O request means we must
     * wait for the device model before resuming the guest. */
    if ( test_bit(iopacket_port(v), &d->shared_info->evtchn_pending[0]) ||
         test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
        hvm_wait_io();

    /* We can't resume the guest if we're waiting on I/O */
    ASSERT(!test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags));
}
246 /*
247 * Local variables:
248 * mode: C
249 * c-set-style: "BSD"
250 * c-basic-offset: 4
251 * tab-width: 4
252 * indent-tabs-mode: nil
253 * End:
254 */