ia64/xen-unstable: xen/arch/x86/hvm/vmx/vmx.c @ 18612:cafbd83e2258

vmx: Update RIP past INT3 instruction on INT3 vmexit.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Author:   Keir Fraser <keir.fraser@citrix.com>
Date:     Fri Oct 10 10:11:34 2008 +0100
Parent:   583e45983aaa
Children: 8d993552673a
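(Editorial note: the functional change in this revision is confined to the TRAP_int3 arm of vmx_vmexit_handler() near the end of the file. On an INT3 vmexit the handler now reads the exiting instruction's length and advances the guest RIP, via inst_len = __get_instruction_length(); __update_guest_eip(inst_len);, before pausing the domain for the debugger, so the guest resumes past the breakpoint instruction.)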

/*
 * vmx.c: handling VMX architecture-related VM exits
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <xen/hypercall.h>
#include <xen/perfc.h>
#include <asm/current.h>
#include <asm/io.h>
#include <asm/regs.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/spinlock.h>
#include <asm/paging.h>
#include <asm/p2m.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vlapic.h>
#include <asm/x86_emulate.h>
#include <asm/hvm/vpt.h>
#include <public/hvm/save.h>
#include <asm/hvm/trace.h>

enum handler_return { HNDL_done, HNDL_unhandled, HNDL_exception_raised };

static void vmx_ctxt_switch_from(struct vcpu *v);
static void vmx_ctxt_switch_to(struct vcpu *v);

static int  vmx_alloc_vlapic_mapping(struct domain *d);
static void vmx_free_vlapic_mapping(struct domain *d);
static int  vmx_alloc_vpid(struct domain *d);
static void vmx_free_vpid(struct domain *d);
static void vmx_install_vlapic_mapping(struct vcpu *v);
static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr);
static void vmx_update_guest_efer(struct vcpu *v);
static void vmx_cpuid_intercept(
    unsigned int *eax, unsigned int *ebx,
    unsigned int *ecx, unsigned int *edx);
static void vmx_wbinvd_intercept(void);
static void vmx_fpu_dirty_intercept(void);
static int vmx_msr_read_intercept(struct cpu_user_regs *regs);
static int vmx_msr_write_intercept(struct cpu_user_regs *regs);
static void vmx_invlpg_intercept(unsigned long vaddr);

static int vmx_domain_initialise(struct domain *d)
{
    int rc;

    d->arch.hvm_domain.vmx.ept_control.etmt = EPT_DEFAULT_MT;
    d->arch.hvm_domain.vmx.ept_control.gaw  = EPT_DEFAULT_GAW;
    d->arch.hvm_domain.vmx.ept_control.asr  =
        pagetable_get_pfn(d->arch.phys_table);

    if ( (rc = vmx_alloc_vpid(d)) != 0 )
        return rc;

    if ( (rc = vmx_alloc_vlapic_mapping(d)) != 0 )
    {
        vmx_free_vpid(d);
        return rc;
    }

    return 0;
}

static void vmx_domain_destroy(struct domain *d)
{
    ept_sync_domain(d);
    vmx_free_vlapic_mapping(d);
    vmx_free_vpid(d);
}

static int vmx_vcpu_initialise(struct vcpu *v)
{
    int rc;

    spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);

    v->arch.schedule_tail    = vmx_do_resume;
    v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
    v->arch.ctxt_switch_to   = vmx_ctxt_switch_to;

    if ( (rc = vmx_create_vmcs(v)) != 0 )
    {
        dprintk(XENLOG_WARNING,
                "Failed to create VMCS for vcpu %d: err=%d.\n",
                v->vcpu_id, rc);
        return rc;
    }

    vpmu_initialise(v);

    vmx_install_vlapic_mapping(v);

    /* %eax == 1 signals full real-mode support to the guest loader. */
    if ( v->vcpu_id == 0 )
        v->arch.guest_context.user_regs.eax = 1;

    return 0;
}

static void vmx_vcpu_destroy(struct vcpu *v)
{
    vmx_destroy_vmcs(v);
    vpmu_destroy(v);
}

#ifdef __x86_64__

static DEFINE_PER_CPU(struct vmx_msr_state, host_msr_state);

static u32 msr_index[VMX_MSR_COUNT] =
{
    MSR_LSTAR, MSR_STAR, MSR_SYSCALL_MASK
};

static void vmx_save_host_msrs(void)
{
    struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
    int i;

    for ( i = 0; i < VMX_MSR_COUNT; i++ )
        rdmsrl(msr_index[i], host_msr_state->msrs[i]);
}

#define WRITE_MSR(address)                                              \
        guest_msr_state->msrs[VMX_INDEX_MSR_ ## address] = msr_content; \
        set_bit(VMX_INDEX_MSR_ ## address, &guest_msr_state->flags);    \
        wrmsrl(MSR_ ## address, msr_content);                           \
        set_bit(VMX_INDEX_MSR_ ## address, &host_msr_state->flags);     \
        break
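
/*
 * Illustrative expansion (editorial, not part of the build):
 * WRITE_MSR(STAR) in long_mode_do_msr_write() below becomes, roughly,
 *
 *     guest_msr_state->msrs[VMX_INDEX_MSR_STAR] = msr_content;
 *     set_bit(VMX_INDEX_MSR_STAR, &guest_msr_state->flags);
 *     wrmsrl(MSR_STAR, msr_content);
 *     set_bit(VMX_INDEX_MSR_STAR, &host_msr_state->flags);
 *     break;
 *
 * i.e. the guest value is cached, written to the real MSR immediately,
 * and both flag words are marked so the context-switch path knows which
 * host/guest values need restoring later.
 */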

static enum handler_return long_mode_do_msr_read(struct cpu_user_regs *regs)
{
    u64 msr_content = 0;
    u32 ecx = regs->ecx;
    struct vcpu *v = current;
    struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;

    switch ( ecx )
    {
    case MSR_EFER:
        msr_content = v->arch.hvm_vcpu.guest_efer;
        break;

    case MSR_FS_BASE:
        msr_content = __vmread(GUEST_FS_BASE);
        goto check_long_mode;

    case MSR_GS_BASE:
        msr_content = __vmread(GUEST_GS_BASE);
        goto check_long_mode;

    case MSR_SHADOW_GS_BASE:
        msr_content = v->arch.hvm_vmx.shadow_gs;
    check_long_mode:
        if ( !(hvm_long_mode_enabled(v)) )
        {
            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
            return HNDL_exception_raised;
        }
        break;

    case MSR_STAR:
        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_STAR];
        break;

    case MSR_LSTAR:
        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_LSTAR];
        break;

    case MSR_CSTAR:
        msr_content = v->arch.hvm_vmx.cstar;
        break;

    case MSR_SYSCALL_MASK:
        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
        break;

    default:
        return HNDL_unhandled;
    }

    HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, ecx, msr_content);

    regs->eax = (u32)(msr_content >>  0);
    regs->edx = (u32)(msr_content >> 32);

    return HNDL_done;
}

static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
{
    u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
    u32 ecx = regs->ecx;
    struct vcpu *v = current;
    struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
    struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);

    HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, ecx, msr_content);

    switch ( ecx )
    {
    case MSR_EFER:
        if ( hvm_set_efer(msr_content) )
            goto exception_raised;
        break;

    case MSR_FS_BASE:
    case MSR_GS_BASE:
    case MSR_SHADOW_GS_BASE:
        if ( !hvm_long_mode_enabled(v) )
            goto gp_fault;

        if ( !is_canonical_address(msr_content) )
            goto uncanonical_address;

        if ( ecx == MSR_FS_BASE )
            __vmwrite(GUEST_FS_BASE, msr_content);
        else if ( ecx == MSR_GS_BASE )
            __vmwrite(GUEST_GS_BASE, msr_content);
        else
        {
            v->arch.hvm_vmx.shadow_gs = msr_content;
            wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
        }

        break;

    case MSR_STAR:
        WRITE_MSR(STAR);

    case MSR_LSTAR:
        if ( !is_canonical_address(msr_content) )
            goto uncanonical_address;
        WRITE_MSR(LSTAR);

    case MSR_CSTAR:
        if ( !is_canonical_address(msr_content) )
            goto uncanonical_address;
        v->arch.hvm_vmx.cstar = msr_content;
        break;

    case MSR_SYSCALL_MASK:
        WRITE_MSR(SYSCALL_MASK);

    default:
        return HNDL_unhandled;
    }

    return HNDL_done;

 uncanonical_address:
    HVM_DBG_LOG(DBG_LEVEL_0, "Non-canonical address for msr write %x", ecx);
 gp_fault:
    vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
 exception_raised:
    return HNDL_exception_raised;
}

/*
 * To avoid MSR save/restore at every VM exit/entry time, we restore
 * the x86_64 specific MSRs at domain switch time. Since these MSRs
 * are not modified once set for para domains, we don't save them,
 * but simply reset them to values set in percpu_traps_init().
 */
static void vmx_restore_host_msrs(void)
{
    struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
    int i;

    while ( host_msr_state->flags )
    {
        i = find_first_set_bit(host_msr_state->flags);
        wrmsrl(msr_index[i], host_msr_state->msrs[i]);
        clear_bit(i, &host_msr_state->flags);
    }

    if ( cpu_has_nx && !(read_efer() & EFER_NX) )
        write_efer(read_efer() | EFER_NX);
}

static void vmx_save_guest_msrs(struct vcpu *v)
{
    /* MSR_SHADOW_GS_BASE may have been changed by swapgs instruction. */
    rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs);
}

static void vmx_restore_guest_msrs(struct vcpu *v)
{
    struct vmx_msr_state *guest_msr_state, *host_msr_state;
    unsigned long guest_flags;
    int i;

    guest_msr_state = &v->arch.hvm_vmx.msr_state;
    host_msr_state = &this_cpu(host_msr_state);

    wrmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs);

    guest_flags = guest_msr_state->flags;

    while ( guest_flags )
    {
        i = find_first_set_bit(guest_flags);

        HVM_DBG_LOG(DBG_LEVEL_2,
                    "restore guest's index %d msr %x with value %lx",
                    i, msr_index[i], guest_msr_state->msrs[i]);
        set_bit(i, &host_msr_state->flags);
        wrmsrl(msr_index[i], guest_msr_state->msrs[i]);
        clear_bit(i, &guest_flags);
    }

    if ( (v->arch.hvm_vcpu.guest_efer ^ read_efer()) & (EFER_NX | EFER_SCE) )
    {
        HVM_DBG_LOG(DBG_LEVEL_2,
                    "restore guest's EFER with value %lx",
                    v->arch.hvm_vcpu.guest_efer);
        write_efer((read_efer() & ~(EFER_NX | EFER_SCE)) |
                   (v->arch.hvm_vcpu.guest_efer & (EFER_NX | EFER_SCE)));
    }
}
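
/*
 * Worked example (editorial): assume the host EFER has NX set and SCE
 * clear while the guest wants both set. The merge above computes
 *     new_efer = (host_efer & ~(EFER_NX | EFER_SCE))
 *              | (guest_efer & (EFER_NX | EFER_SCE))
 * so only the NX and SCE bits follow the guest; all other EFER bits
 * (e.g. LME/LMA) keep their host values.
 */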

#else  /* __i386__ */

#define vmx_save_host_msrs()        ((void)0)

static void vmx_restore_host_msrs(void)
{
    if ( cpu_has_nx && !(read_efer() & EFER_NX) )
        write_efer(read_efer() | EFER_NX);
}

#define vmx_save_guest_msrs(v)      ((void)0)

static void vmx_restore_guest_msrs(struct vcpu *v)
{
    if ( (v->arch.hvm_vcpu.guest_efer ^ read_efer()) & EFER_NX )
    {
        HVM_DBG_LOG(DBG_LEVEL_2,
                    "restore guest's EFER with value %lx",
                    v->arch.hvm_vcpu.guest_efer);
        write_efer((read_efer() & ~EFER_NX) |
                   (v->arch.hvm_vcpu.guest_efer & EFER_NX));
    }
}

static enum handler_return long_mode_do_msr_read(struct cpu_user_regs *regs)
{
    u64 msr_content = 0;
    struct vcpu *v = current;

    switch ( regs->ecx )
    {
    case MSR_EFER:
        msr_content = v->arch.hvm_vcpu.guest_efer;
        break;

    default:
        return HNDL_unhandled;
    }

    regs->eax = msr_content >>  0;
    regs->edx = msr_content >> 32;

    return HNDL_done;
}

static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
{
    u64 msr_content = regs->eax | ((u64)regs->edx << 32);

    switch ( regs->ecx )
    {
    case MSR_EFER:
        if ( hvm_set_efer(msr_content) )
            return HNDL_exception_raised;
        break;

    default:
        return HNDL_unhandled;
    }

    return HNDL_done;
}

#endif /* __i386__ */

static int vmx_guest_x86_mode(struct vcpu *v)
{
    unsigned int cs_ar_bytes;

    if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
        return 0;
    if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
        return 1;
    cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
    if ( hvm_long_mode_enabled(v) &&
         likely(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
        return 8;
    return (likely(cs_ar_bytes & X86_SEG_AR_DEF_OP_SIZE) ? 4 : 2);
}
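
/*
 * Summary of the returned operand size (editorial note):
 *   0 -- CR0.PE clear (real mode; handled by the emulator)
 *   1 -- EFLAGS.VM set (virtual-8086 mode)
 *   8 -- long mode with CS.L set (64-bit code segment)
 *   4 -- protected mode, CS.D set (32-bit default operand size)
 *   2 -- protected mode, CS.D clear (16-bit default operand size)
 */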

static void vmx_save_dr(struct vcpu *v)
{
    if ( !v->arch.hvm_vcpu.flag_dr_dirty )
        return;

    /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
    v->arch.hvm_vcpu.flag_dr_dirty = 0;
    v->arch.hvm_vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);

    v->arch.guest_context.debugreg[0] = read_debugreg(0);
    v->arch.guest_context.debugreg[1] = read_debugreg(1);
    v->arch.guest_context.debugreg[2] = read_debugreg(2);
    v->arch.guest_context.debugreg[3] = read_debugreg(3);
    v->arch.guest_context.debugreg[6] = read_debugreg(6);
    /* DR7 must be saved as it is used by vmx_restore_dr(). */
    v->arch.guest_context.debugreg[7] = __vmread(GUEST_DR7);
}

static void __restore_debug_registers(struct vcpu *v)
{
    if ( v->arch.hvm_vcpu.flag_dr_dirty )
        return;

    v->arch.hvm_vcpu.flag_dr_dirty = 1;

    write_debugreg(0, v->arch.guest_context.debugreg[0]);
    write_debugreg(1, v->arch.guest_context.debugreg[1]);
    write_debugreg(2, v->arch.guest_context.debugreg[2]);
    write_debugreg(3, v->arch.guest_context.debugreg[3]);
    write_debugreg(6, v->arch.guest_context.debugreg[6]);
    /* DR7 is loaded from the VMCS. */
}

/*
 * DR7 is saved and restored on every vmexit.  Other debug registers only
 * need to be restored if their value is going to affect execution -- i.e.,
 * if one of the breakpoints is enabled.  So mask out all bits that don't
 * enable some breakpoint functionality.
 */
static void vmx_restore_dr(struct vcpu *v)
{
    /* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
    if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
        __restore_debug_registers(v);
}

static void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
{
    uint32_t ev;

    vmx_vmcs_enter(v);

    c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
    c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
    c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
    c->cr4 = v->arch.hvm_vcpu.guest_cr[4];

    c->msr_efer = v->arch.hvm_vcpu.guest_efer;

    c->sysenter_cs = __vmread(GUEST_SYSENTER_CS);
    c->sysenter_esp = __vmread(GUEST_SYSENTER_ESP);
    c->sysenter_eip = __vmread(GUEST_SYSENTER_EIP);

    c->pending_event = 0;
    c->error_code = 0;
    if ( ((ev = __vmread(VM_ENTRY_INTR_INFO)) & INTR_INFO_VALID_MASK) &&
         hvm_event_needs_reinjection((ev >> 8) & 7, ev & 0xff) )
    {
        c->pending_event = ev;
        c->error_code = __vmread(VM_ENTRY_EXCEPTION_ERROR_CODE);
    }

    vmx_vmcs_exit(v);
}

static int vmx_restore_cr0_cr3(
    struct vcpu *v, unsigned long cr0, unsigned long cr3)
{
    unsigned long mfn = 0;
    p2m_type_t p2mt;

    if ( paging_mode_shadow(v->domain) )
    {
        if ( cr0 & X86_CR0_PG )
        {
            mfn = mfn_x(gfn_to_mfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
            if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
            {
                gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%lx\n", cr3);
                return -EINVAL;
            }
        }

        if ( hvm_paging_enabled(v) )
            put_page(pagetable_get_page(v->arch.guest_table));

        v->arch.guest_table = pagetable_from_pfn(mfn);
    }

    v->arch.hvm_vcpu.guest_cr[0] = cr0 | X86_CR0_ET;
    v->arch.hvm_vcpu.guest_cr[3] = cr3;

    return 0;
}

static int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c)
{
    int rc;

    if ( c->pending_valid &&
         ((c->pending_type == 1) || (c->pending_type > 6) ||
          (c->pending_reserved != 0)) )
    {
        gdprintk(XENLOG_ERR, "Invalid pending event 0x%"PRIx32".\n",
                 c->pending_event);
        return -EINVAL;
    }

    rc = vmx_restore_cr0_cr3(v, c->cr0, c->cr3);
    if ( rc )
        return rc;

    vmx_vmcs_enter(v);

    v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
    v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
    vmx_update_guest_cr(v, 0);
    vmx_update_guest_cr(v, 2);
    vmx_update_guest_cr(v, 4);

    v->arch.hvm_vcpu.guest_efer = c->msr_efer;
    vmx_update_guest_efer(v);

    __vmwrite(GUEST_SYSENTER_CS, c->sysenter_cs);
    __vmwrite(GUEST_SYSENTER_ESP, c->sysenter_esp);
    __vmwrite(GUEST_SYSENTER_EIP, c->sysenter_eip);

    __vmwrite(GUEST_DR7, c->dr7);

    vmx_vmcs_exit(v);

    paging_update_paging_modes(v);

    if ( c->pending_valid )
    {
        gdprintk(XENLOG_INFO, "Re-injecting 0x%"PRIx32", 0x%"PRIx32"\n",
                 c->pending_event, c->error_code);

        if ( hvm_event_needs_reinjection(c->pending_type, c->pending_vector) )
        {
            vmx_vmcs_enter(v);
            __vmwrite(VM_ENTRY_INTR_INFO, c->pending_event);
            __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, c->error_code);
            vmx_vmcs_exit(v);
        }
    }

    return 0;
}

static void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
{
#ifdef __x86_64__
    struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
    unsigned long guest_flags = guest_state->flags;

    data->shadow_gs = v->arch.hvm_vmx.shadow_gs;
    data->msr_cstar = v->arch.hvm_vmx.cstar;

    /* save msrs */
    data->msr_flags        = guest_flags;
    data->msr_lstar        = guest_state->msrs[VMX_INDEX_MSR_LSTAR];
    data->msr_star         = guest_state->msrs[VMX_INDEX_MSR_STAR];
    data->msr_syscall_mask = guest_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
#endif

    data->tsc = hvm_get_guest_tsc(v);
}

static void vmx_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
{
#ifdef __x86_64__
    struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;

    /* restore msrs */
    guest_state->flags = data->msr_flags & 7;
    guest_state->msrs[VMX_INDEX_MSR_LSTAR]        = data->msr_lstar;
    guest_state->msrs[VMX_INDEX_MSR_STAR]         = data->msr_star;
    guest_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK] = data->msr_syscall_mask;

    v->arch.hvm_vmx.cstar     = data->msr_cstar;
    v->arch.hvm_vmx.shadow_gs = data->shadow_gs;
#endif

    hvm_set_guest_tsc(v, data->tsc);
}

static void vmx_save_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
{
    vmx_save_cpu_state(v, ctxt);
    vmx_vmcs_save(v, ctxt);
}

static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
{
    vmx_load_cpu_state(v, ctxt);

    if ( vmx_vmcs_restore(v, ctxt) )
    {
        gdprintk(XENLOG_ERR, "vmx_vmcs restore failed!\n");
        domain_crash(v->domain);
        return -EINVAL;
    }

    return 0;
}

static void vmx_fpu_enter(struct vcpu *v)
{
    setup_fpu(v);
    __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
    v->arch.hvm_vmx.host_cr0 &= ~X86_CR0_TS;
    __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
}

static void vmx_fpu_leave(struct vcpu *v)
{
    ASSERT(!v->fpu_dirtied);
    ASSERT(read_cr0() & X86_CR0_TS);

    if ( !(v->arch.hvm_vmx.host_cr0 & X86_CR0_TS) )
    {
        v->arch.hvm_vmx.host_cr0 |= X86_CR0_TS;
        __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
    }

    /*
     * If the guest does not have TS enabled then we must cause and handle an
     * exception on first use of the FPU. If the guest *does* have TS enabled
     * then this is not necessary: no FPU activity can occur until the guest
     * clears CR0.TS, and we will initialise the FPU when that happens.
     */
    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
    {
        v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
        __vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device);
    }
}

static void vmx_ctxt_switch_from(struct vcpu *v)
{
    vmx_fpu_leave(v);
    vmx_save_guest_msrs(v);
    vmx_restore_host_msrs();
    vmx_save_dr(v);
    vpmu_save(v);
}

static void vmx_ctxt_switch_to(struct vcpu *v)
{
    /* HOST_CR4 in VMCS is always mmu_cr4_features. Sync CR4 now. */
    if ( unlikely(read_cr4() != mmu_cr4_features) )
        write_cr4(mmu_cr4_features);

    vmx_restore_guest_msrs(v);
    vmx_restore_dr(v);
    vpmu_load(v);
}

static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
                                     struct segment_register *reg)
{
    uint32_t attr = 0;

    vmx_vmcs_enter(v);

    switch ( seg )
    {
    case x86_seg_cs:
        reg->sel   = __vmread(GUEST_CS_SELECTOR);
        reg->limit = __vmread(GUEST_CS_LIMIT);
        reg->base  = __vmread(GUEST_CS_BASE);
        attr       = __vmread(GUEST_CS_AR_BYTES);
        break;
    case x86_seg_ds:
        reg->sel   = __vmread(GUEST_DS_SELECTOR);
        reg->limit = __vmread(GUEST_DS_LIMIT);
        reg->base  = __vmread(GUEST_DS_BASE);
        attr       = __vmread(GUEST_DS_AR_BYTES);
        break;
    case x86_seg_es:
        reg->sel   = __vmread(GUEST_ES_SELECTOR);
        reg->limit = __vmread(GUEST_ES_LIMIT);
        reg->base  = __vmread(GUEST_ES_BASE);
        attr       = __vmread(GUEST_ES_AR_BYTES);
        break;
    case x86_seg_fs:
        reg->sel   = __vmread(GUEST_FS_SELECTOR);
        reg->limit = __vmread(GUEST_FS_LIMIT);
        reg->base  = __vmread(GUEST_FS_BASE);
        attr       = __vmread(GUEST_FS_AR_BYTES);
        break;
    case x86_seg_gs:
        reg->sel   = __vmread(GUEST_GS_SELECTOR);
        reg->limit = __vmread(GUEST_GS_LIMIT);
        reg->base  = __vmread(GUEST_GS_BASE);
        attr       = __vmread(GUEST_GS_AR_BYTES);
        break;
    case x86_seg_ss:
        reg->sel   = __vmread(GUEST_SS_SELECTOR);
        reg->limit = __vmread(GUEST_SS_LIMIT);
        reg->base  = __vmread(GUEST_SS_BASE);
        attr       = __vmread(GUEST_SS_AR_BYTES);
        break;
    case x86_seg_tr:
        reg->sel   = __vmread(GUEST_TR_SELECTOR);
        reg->limit = __vmread(GUEST_TR_LIMIT);
        reg->base  = __vmread(GUEST_TR_BASE);
        attr       = __vmread(GUEST_TR_AR_BYTES);
        break;
    case x86_seg_gdtr:
        reg->limit = __vmread(GUEST_GDTR_LIMIT);
        reg->base  = __vmread(GUEST_GDTR_BASE);
        break;
    case x86_seg_idtr:
        reg->limit = __vmread(GUEST_IDTR_LIMIT);
        reg->base  = __vmread(GUEST_IDTR_BASE);
        break;
    case x86_seg_ldtr:
        reg->sel   = __vmread(GUEST_LDTR_SELECTOR);
        reg->limit = __vmread(GUEST_LDTR_LIMIT);
        reg->base  = __vmread(GUEST_LDTR_BASE);
        attr       = __vmread(GUEST_LDTR_AR_BYTES);
        break;
    default:
        BUG();
    }

    vmx_vmcs_exit(v);

    reg->attr.bytes = (attr & 0xff) | ((attr >> 4) & 0xf00);
    /* Unusable flag is folded into Present flag. */
    if ( attr & (1u<<16) )
        reg->attr.fields.p = 0;
}
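
/*
 * Example of the attribute folding above (editorial): a flat 32-bit code
 * segment has VMCS AR bytes 0xc09b. (0xc09b & 0xff) keeps the
 * type/S/DPL/P byte 0x9b, and ((0xc09b >> 4) & 0xf00) yields 0xc00 for
 * the AVL/L/D/G bits, so reg->attr.bytes ends up as 0xc9b. The inverse
 * unfolding appears at the top of vmx_set_segment_register() below.
 */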

static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
                                     struct segment_register *reg)
{
    uint32_t attr;

    attr = reg->attr.bytes;
    attr = ((attr & 0xf00) << 4) | (attr & 0xff);

    /* Not-present must mean unusable. */
    if ( !reg->attr.fields.p )
        attr |= (1u << 16);

    /* VMX has strict consistency requirement for flag G. */
    attr |= !!(reg->limit >> 20) << 15;

    vmx_vmcs_enter(v);

    switch ( seg )
    {
    case x86_seg_cs:
        __vmwrite(GUEST_CS_SELECTOR, reg->sel);
        __vmwrite(GUEST_CS_LIMIT, reg->limit);
        __vmwrite(GUEST_CS_BASE, reg->base);
        __vmwrite(GUEST_CS_AR_BYTES, attr);
        break;
    case x86_seg_ds:
        __vmwrite(GUEST_DS_SELECTOR, reg->sel);
        __vmwrite(GUEST_DS_LIMIT, reg->limit);
        __vmwrite(GUEST_DS_BASE, reg->base);
        __vmwrite(GUEST_DS_AR_BYTES, attr);
        break;
    case x86_seg_es:
        __vmwrite(GUEST_ES_SELECTOR, reg->sel);
        __vmwrite(GUEST_ES_LIMIT, reg->limit);
        __vmwrite(GUEST_ES_BASE, reg->base);
        __vmwrite(GUEST_ES_AR_BYTES, attr);
        break;
    case x86_seg_fs:
        __vmwrite(GUEST_FS_SELECTOR, reg->sel);
        __vmwrite(GUEST_FS_LIMIT, reg->limit);
        __vmwrite(GUEST_FS_BASE, reg->base);
        __vmwrite(GUEST_FS_AR_BYTES, attr);
        break;
    case x86_seg_gs:
        __vmwrite(GUEST_GS_SELECTOR, reg->sel);
        __vmwrite(GUEST_GS_LIMIT, reg->limit);
        __vmwrite(GUEST_GS_BASE, reg->base);
        __vmwrite(GUEST_GS_AR_BYTES, attr);
        break;
    case x86_seg_ss:
        __vmwrite(GUEST_SS_SELECTOR, reg->sel);
        __vmwrite(GUEST_SS_LIMIT, reg->limit);
        __vmwrite(GUEST_SS_BASE, reg->base);
        __vmwrite(GUEST_SS_AR_BYTES, attr);
        break;
    case x86_seg_tr:
        __vmwrite(GUEST_TR_SELECTOR, reg->sel);
        __vmwrite(GUEST_TR_LIMIT, reg->limit);
        __vmwrite(GUEST_TR_BASE, reg->base);
        /* VMX checks that the busy flag (bit 1) is set. */
        __vmwrite(GUEST_TR_AR_BYTES, attr | 2);
        break;
    case x86_seg_gdtr:
        __vmwrite(GUEST_GDTR_LIMIT, reg->limit);
        __vmwrite(GUEST_GDTR_BASE, reg->base);
        break;
    case x86_seg_idtr:
        __vmwrite(GUEST_IDTR_LIMIT, reg->limit);
        __vmwrite(GUEST_IDTR_BASE, reg->base);
        break;
    case x86_seg_ldtr:
        __vmwrite(GUEST_LDTR_SELECTOR, reg->sel);
        __vmwrite(GUEST_LDTR_LIMIT, reg->limit);
        __vmwrite(GUEST_LDTR_BASE, reg->base);
        __vmwrite(GUEST_LDTR_AR_BYTES, attr);
        break;
    default:
        BUG();
    }

    vmx_vmcs_exit(v);
}

static void vmx_set_tsc_offset(struct vcpu *v, u64 offset)
{
    vmx_vmcs_enter(v);
    __vmwrite(TSC_OFFSET, offset);
#if defined (__i386__)
    __vmwrite(TSC_OFFSET_HIGH, offset >> 32);
#endif
    vmx_vmcs_exit(v);
}

void do_nmi(struct cpu_user_regs *);

static void vmx_init_hypercall_page(struct domain *d, void *hypercall_page)
{
    char *p;
    int i;

    for ( i = 0; i < (PAGE_SIZE / 32); i++ )
    {
        p = (char *)(hypercall_page + (i * 32));
        *(u8  *)(p + 0) = 0xb8; /* mov imm32, %eax */
        *(u32 *)(p + 1) = i;
        *(u8  *)(p + 5) = 0x0f; /* vmcall */
        *(u8  *)(p + 6) = 0x01;
        *(u8  *)(p + 7) = 0xc1;
        *(u8  *)(p + 8) = 0xc3; /* ret */
    }

    /* Don't support HYPERVISOR_iret at the moment */
    *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
}
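
/*
 * Each 32-byte stub written above disassembles as (illustrative):
 *     b8 NN 00 00 00    mov  $NN, %eax    ; hypercall number
 *     0f 01 c1          vmcall
 *     c3                ret
 * and the HYPERVISOR_iret slot is overwritten with 0f 0b (ud2), so a
 * caller faults instead of issuing an unsupported hypercall.
 */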

static unsigned int vmx_get_interrupt_shadow(struct vcpu *v)
{
    return __vmread(GUEST_INTERRUPTIBILITY_INFO);
}

static void vmx_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
{
    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
}

static void vmx_load_pdptrs(struct vcpu *v)
{
    unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3], mfn;
    uint64_t *guest_pdptrs;
    p2m_type_t p2mt;
    char *p;

    /* EPT needs to load PDPTRS into VMCS for PAE. */
    if ( !hvm_pae_enabled(v) || (v->arch.hvm_vcpu.guest_efer & EFER_LMA) )
        return;

    if ( cr3 & 0x1fUL )
        goto crash;

    mfn = mfn_x(gfn_to_mfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
    if ( !p2m_is_ram(p2mt) )
        goto crash;

    p = map_domain_page(mfn);

    guest_pdptrs = (uint64_t *)(p + (cr3 & ~PAGE_MASK));

    /*
     * We do not check the PDPTRs for validity. The CPU will do this during
     * vm entry, and we can handle the failure there and crash the guest.
     * The only thing we could do better here is #GP instead.
     */

    vmx_vmcs_enter(v);

    __vmwrite(GUEST_PDPTR0, guest_pdptrs[0]);
    __vmwrite(GUEST_PDPTR1, guest_pdptrs[1]);
    __vmwrite(GUEST_PDPTR2, guest_pdptrs[2]);
    __vmwrite(GUEST_PDPTR3, guest_pdptrs[3]);
#ifdef __i386__
    __vmwrite(GUEST_PDPTR0_HIGH, guest_pdptrs[0] >> 32);
    __vmwrite(GUEST_PDPTR1_HIGH, guest_pdptrs[1] >> 32);
    __vmwrite(GUEST_PDPTR2_HIGH, guest_pdptrs[2] >> 32);
    __vmwrite(GUEST_PDPTR3_HIGH, guest_pdptrs[3] >> 32);
#endif

    vmx_vmcs_exit(v);

    unmap_domain_page(p);
    return;

 crash:
    domain_crash(v->domain);
}

static void vmx_update_host_cr3(struct vcpu *v)
{
    vmx_vmcs_enter(v);
    __vmwrite(HOST_CR3, v->arch.cr3);
    vmx_vmcs_exit(v);
}

static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
{
    vmx_vmcs_enter(v);

    switch ( cr )
    {
    case 0: {
        unsigned long hw_cr0_mask =
            X86_CR0_NE | X86_CR0_PG | X86_CR0_PE;

        if ( paging_mode_shadow(v->domain) )
            hw_cr0_mask |= X86_CR0_WP;

        if ( paging_mode_hap(v->domain) )
        {
            /* We manage GUEST_CR3 when guest CR0.PE is zero. */
            uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING |
                                 CPU_BASED_CR3_STORE_EXITING);
            v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
            if ( !hvm_paging_enabled(v) )
                v->arch.hvm_vmx.exec_control |= cr3_ctls;
            __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);

            /* Changing CR0.PE can change some bits in real CR4. */
            vmx_update_guest_cr(v, 4);
        }

        if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
        {
            if ( v != current )
                hw_cr0_mask |= X86_CR0_TS;
            else if ( v->arch.hvm_vcpu.hw_cr[0] & X86_CR0_TS )
                vmx_fpu_enter(v);
        }

        v->arch.hvm_vmx.vmxemul &= ~VMXEMUL_REALMODE;
        if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
            v->arch.hvm_vmx.vmxemul |= VMXEMUL_REALMODE;

        v->arch.hvm_vcpu.hw_cr[0] =
            v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
        __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
        break;
    }
    case 2:
        /* CR2 is updated in exit stub. */
        break;
    case 3:
        if ( paging_mode_hap(v->domain) )
        {
            if ( !hvm_paging_enabled(v) )
                v->arch.hvm_vcpu.hw_cr[3] =
                    v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT];
            vmx_load_pdptrs(v);
        }

        __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
        vpid_sync_vcpu_all(v);
        break;
    case 4:
        v->arch.hvm_vcpu.hw_cr[4] = HVM_CR4_HOST_MASK;
        if ( paging_mode_hap(v->domain) )
            v->arch.hvm_vcpu.hw_cr[4] &= ~X86_CR4_PAE;
        v->arch.hvm_vcpu.hw_cr[4] |= v->arch.hvm_vcpu.guest_cr[4];
        if ( paging_mode_hap(v->domain) && !hvm_paging_enabled(v) )
        {
            v->arch.hvm_vcpu.hw_cr[4] |= X86_CR4_PSE;
            v->arch.hvm_vcpu.hw_cr[4] &= ~X86_CR4_PAE;
        }
        __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
        __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
        break;
    default:
        BUG();
    }

    vmx_vmcs_exit(v);
}

static void vmx_update_guest_efer(struct vcpu *v)
{
#ifdef __x86_64__
    unsigned long vm_entry_value;

    vmx_vmcs_enter(v);

    vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
    if ( v->arch.hvm_vcpu.guest_efer & EFER_LMA )
        vm_entry_value |= VM_ENTRY_IA32E_MODE;
    else
        vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
    __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);

    vmx_vmcs_exit(v);
#endif

    if ( v == current )
        write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
                   (v->arch.hvm_vcpu.guest_efer & (EFER_NX|EFER_SCE)));
}

static void vmx_flush_guest_tlbs(void)
{
    /*
     * If VPID (i.e. tagged TLB support) is not enabled, the fact that
     * we're in Xen at all means any guest will have a clean TLB when
     * it's next run, because VMRESUME will flush it for us.
     *
     * If enabled, we invalidate all translations associated with all
     * VPID values.
     */
    vpid_sync_all();
}

static void __ept_sync_domain(void *info)
{
    struct domain *d = info;
    __invept(1, d->arch.hvm_domain.vmx.ept_control.eptp, 0);
}

void ept_sync_domain(struct domain *d)
{
    /* Only if using EPT and this domain has some VCPUs to dirty. */
    if ( d->arch.hvm_domain.hap_enabled && d->vcpu[0] )
    {
        ASSERT(local_irq_is_enabled());
        on_each_cpu(__ept_sync_domain, d, 1, 1);
    }
}

static void __vmx_inject_exception(
    struct vcpu *v, int trap, int type, int error_code)
{
    unsigned long intr_fields;

    /*
     * NB. Callers do not need to worry about clearing STI/MOV-SS blocking:
     *  "If the VM entry is injecting, there is no blocking by STI or by
     *   MOV SS following the VM entry, regardless of the contents of the
     *   interruptibility-state field [in the guest-state area before the
     *   VM entry]", PRM Vol. 3, 22.6.1 (Interruptibility State).
     */

    intr_fields = (INTR_INFO_VALID_MASK | (type<<8) | trap);
    if ( error_code != HVM_DELIVER_NO_ERROR_CODE ) {
        __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
        intr_fields |= INTR_INFO_DELIVER_CODE_MASK;
    }

    __vmwrite(VM_ENTRY_INTR_INFO, intr_fields);

    if ( trap == TRAP_page_fault )
        HVMTRACE_LONG_2D(PF_INJECT, error_code,
                         TRC_PAR_LONG(v->arch.hvm_vcpu.guest_cr[2]));
    else
        HVMTRACE_2D(INJ_EXC, trap, error_code);
}

void vmx_inject_hw_exception(struct vcpu *v, int trap, int error_code)
{
    unsigned long intr_info = __vmread(VM_ENTRY_INTR_INFO);

    if ( unlikely(intr_info & INTR_INFO_VALID_MASK) &&
         (((intr_info >> 8) & 7) == X86_EVENTTYPE_HW_EXCEPTION) )
    {
        trap = hvm_combine_hw_exceptions((uint8_t)intr_info, trap);
        if ( trap == TRAP_double_fault )
            error_code = 0;
    }

    __vmx_inject_exception(v, trap, X86_EVENTTYPE_HW_EXCEPTION, error_code);
}

void vmx_inject_extint(struct vcpu *v, int trap)
{
    __vmx_inject_exception(v, trap, X86_EVENTTYPE_EXT_INTR,
                           HVM_DELIVER_NO_ERROR_CODE);
}

void vmx_inject_nmi(struct vcpu *v)
{
    __vmx_inject_exception(v, 2, X86_EVENTTYPE_NMI,
                           HVM_DELIVER_NO_ERROR_CODE);
}

static void vmx_inject_exception(
    unsigned int trapnr, int errcode, unsigned long cr2)
{
    struct vcpu *curr = current;

    vmx_inject_hw_exception(curr, trapnr, errcode);

    if ( trapnr == TRAP_page_fault )
        curr->arch.hvm_vcpu.guest_cr[2] = cr2;

    if ( (trapnr == TRAP_debug) &&
         (guest_cpu_user_regs()->eflags & X86_EFLAGS_TF) )
    {
        __restore_debug_registers(curr);
        write_debugreg(6, read_debugreg(6) | 0x4000);
    }
}

static int vmx_event_pending(struct vcpu *v)
{
    ASSERT(v == current);
    return (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK);
}

static int vmx_do_pmu_interrupt(struct cpu_user_regs *regs)
{
    return vpmu_do_interrupt(regs);
}

static void vmx_set_uc_mode(struct vcpu *v)
{
    if ( paging_mode_hap(v->domain) )
        ept_change_entry_emt_with_range(
            v->domain, 0, v->domain->arch.p2m->max_mapped_pfn);
    vpid_sync_all();
}

static struct hvm_function_table vmx_function_table = {
    .name                 = "VMX",
    .domain_initialise    = vmx_domain_initialise,
    .domain_destroy       = vmx_domain_destroy,
    .vcpu_initialise      = vmx_vcpu_initialise,
    .vcpu_destroy         = vmx_vcpu_destroy,
    .save_cpu_ctxt        = vmx_save_vmcs_ctxt,
    .load_cpu_ctxt        = vmx_load_vmcs_ctxt,
    .get_interrupt_shadow = vmx_get_interrupt_shadow,
    .set_interrupt_shadow = vmx_set_interrupt_shadow,
    .guest_x86_mode       = vmx_guest_x86_mode,
    .get_segment_register = vmx_get_segment_register,
    .set_segment_register = vmx_set_segment_register,
    .update_host_cr3      = vmx_update_host_cr3,
    .update_guest_cr      = vmx_update_guest_cr,
    .update_guest_efer    = vmx_update_guest_efer,
    .flush_guest_tlbs     = vmx_flush_guest_tlbs,
    .set_tsc_offset       = vmx_set_tsc_offset,
    .inject_exception     = vmx_inject_exception,
    .init_hypercall_page  = vmx_init_hypercall_page,
    .event_pending        = vmx_event_pending,
    .do_pmu_interrupt     = vmx_do_pmu_interrupt,
    .cpu_up               = vmx_cpu_up,
    .cpu_down             = vmx_cpu_down,
    .cpuid_intercept      = vmx_cpuid_intercept,
    .wbinvd_intercept     = vmx_wbinvd_intercept,
    .fpu_dirty_intercept  = vmx_fpu_dirty_intercept,
    .msr_read_intercept   = vmx_msr_read_intercept,
    .msr_write_intercept  = vmx_msr_write_intercept,
    .invlpg_intercept     = vmx_invlpg_intercept,
    .set_uc_mode          = vmx_set_uc_mode
};

static unsigned long *vpid_bitmap;
#define VPID_BITMAP_SIZE ((1u << VMCS_VPID_WIDTH) / MAX_VIRT_CPUS)
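
/*
 * Editorial note: VMX VPIDs are 16 bits wide, so assuming
 * VMCS_VPID_WIDTH == 16 this bitmap tracks 65536 / MAX_VIRT_CPUS
 * domain-sized slots. vmx_alloc_vpid() below hands each domain a
 * contiguous block of MAX_VIRT_CPUS VPIDs starting at
 * idx * MAX_VIRT_CPUS, with VPID 0 reserved for the hypervisor itself.
 */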

void start_vmx(void)
{
    static int bootstrapped;

    vmx_save_host_msrs();

    if ( bootstrapped )
    {
        if ( hvm_enabled && !vmx_cpu_up() )
        {
            printk("VMX: FATAL: failed to initialise CPU%d!\n",
                   smp_processor_id());
            BUG();
        }
        return;
    }

    bootstrapped = 1;

    /* Xen does not fill x86_capability words except 0. */
    boot_cpu_data.x86_capability[4] = cpuid_ecx(1);

    if ( !test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability) )
        return;

    set_in_cr4(X86_CR4_VMXE);

    if ( !vmx_cpu_up() )
    {
        printk("VMX: failed to initialise.\n");
        return;
    }

    if ( cpu_has_vmx_ept )
    {
        printk("VMX: EPT is available.\n");
        vmx_function_table.hap_supported = 1;
    }

    if ( cpu_has_vmx_vpid )
    {
        printk("VMX: VPID is available.\n");

        vpid_bitmap = xmalloc_array(
            unsigned long, BITS_TO_LONGS(VPID_BITMAP_SIZE));
        BUG_ON(vpid_bitmap == NULL);
        memset(vpid_bitmap, 0, BITS_TO_LONGS(VPID_BITMAP_SIZE) * sizeof(long));

        /* VPID 0 is used by VMX root mode (the hypervisor). */
        __set_bit(0, vpid_bitmap);
    }

    setup_vmcs_dump();

    hvm_enable(&vmx_function_table);
}

/*
 * Not all cases receive a valid value in the VM-exit instruction length
 * field. Callers must know what they're doing!
 */
static int __get_instruction_length(void)
{
    int len;
    len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe: callers audited */
    BUG_ON((len < 1) || (len > 15));
    return len;
}
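
/*
 * Editorial note: the VM-exit instruction-length field is architecturally
 * defined only for exits caused by executing an instruction (e.g. CPUID,
 * HLT, MSR accesses, VMCALL) and for software interrupts/exceptions such
 * as INT3 -- which is what lets the INT3 handler in vmx_vmexit_handler()
 * below step RIP past the breakpoint. Hence the "callers audited" caveat.
 */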

static void __update_guest_eip(unsigned long inst_len)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    unsigned long x;

    regs->eip += inst_len;
    regs->eflags &= ~X86_EFLAGS_RF;

    x = __vmread(GUEST_INTERRUPTIBILITY_INFO);
    if ( x & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) )
    {
        x &= ~(VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS);
        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, x);
    }

    if ( regs->eflags & X86_EFLAGS_TF )
        vmx_inject_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE, 0);
}

static void vmx_fpu_dirty_intercept(void)
{
    struct vcpu *curr = current;

    vmx_fpu_enter(curr);

    /* Disable TS in guest CR0 unless the guest wants the exception too. */
    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
    {
        curr->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS;
        __vmwrite(GUEST_CR0, curr->arch.hvm_vcpu.hw_cr[0]);
    }
}

#define bitmaskof(idx)  (1U << ((idx) & 31))
static void vmx_cpuid_intercept(
    unsigned int *eax, unsigned int *ebx,
    unsigned int *ecx, unsigned int *edx)
{
    unsigned int input = *eax;
    struct segment_register cs;
    struct vcpu *v = current;

    hvm_cpuid(input, eax, ebx, ecx, edx);

    switch ( input )
    {
    case 0x80000001:
        /* SYSCALL is visible iff running in long mode. */
        hvm_get_segment_register(v, x86_seg_cs, &cs);
        if ( cs.attr.fields.l )
            *edx |= bitmaskof(X86_FEATURE_SYSCALL);
        else
            *edx &= ~(bitmaskof(X86_FEATURE_SYSCALL));
        break;
    }

    HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx);
}

static void vmx_do_cpuid(struct cpu_user_regs *regs)
{
    unsigned int eax, ebx, ecx, edx;

    eax = regs->eax;
    ebx = regs->ebx;
    ecx = regs->ecx;
    edx = regs->edx;

    vmx_cpuid_intercept(&eax, &ebx, &ecx, &edx);

    regs->eax = eax;
    regs->ebx = ebx;
    regs->ecx = ecx;
    regs->edx = edx;
}

static void vmx_dr_access(unsigned long exit_qualification,
                          struct cpu_user_regs *regs)
{
    struct vcpu *v = current;

    HVMTRACE_0D(DR_WRITE);

    if ( !v->arch.hvm_vcpu.flag_dr_dirty )
        __restore_debug_registers(v);

    /* Allow guest direct access to DR registers */
    v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING;
    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
}

static void vmx_invlpg_intercept(unsigned long vaddr)
{
    struct vcpu *curr = current;
    HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
    if ( paging_invlpg(curr, vaddr) )
        vpid_sync_vcpu_gva(curr, vaddr);
}

#define CASE_SET_REG(REG, reg)      \
    case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: regs->reg = value; break
#define CASE_GET_REG(REG, reg)      \
    case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: value = regs->reg; break

#define CASE_EXTEND_SET_REG         \
    CASE_EXTEND_REG(S)
#define CASE_EXTEND_GET_REG         \
    CASE_EXTEND_REG(G)

#ifdef __i386__
#define CASE_EXTEND_REG(T)
#else
#define CASE_EXTEND_REG(T)          \
    CASE_ ## T ## ET_REG(R8, r8);   \
    CASE_ ## T ## ET_REG(R9, r9);   \
    CASE_ ## T ## ET_REG(R10, r10); \
    CASE_ ## T ## ET_REG(R11, r11); \
    CASE_ ## T ## ET_REG(R12, r12); \
    CASE_ ## T ## ET_REG(R13, r13); \
    CASE_ ## T ## ET_REG(R14, r14); \
    CASE_ ## T ## ET_REG(R15, r15)
#endif
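
/*
 * Illustrative expansion (editorial, not part of the build):
 *     CASE_GET_REG(EAX, eax)  =>
 *         case VMX_CONTROL_REG_ACCESS_GPR_EAX: value = regs->eax; break;
 * and on x86_64 CASE_EXTEND_GET_REG pulls in the same pattern for
 * r8..r15, so mov_to_cr()/mov_from_cr() below can marshal whichever
 * GPR the exit qualification names.
 */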

static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
{
    unsigned long value;
    struct vcpu *v = current;
    struct vlapic *vlapic = vcpu_vlapic(v);

    switch ( gp )
    {
    CASE_GET_REG(EAX, eax);
    CASE_GET_REG(ECX, ecx);
    CASE_GET_REG(EDX, edx);
    CASE_GET_REG(EBX, ebx);
    CASE_GET_REG(EBP, ebp);
    CASE_GET_REG(ESI, esi);
    CASE_GET_REG(EDI, edi);
    CASE_GET_REG(ESP, esp);
    CASE_EXTEND_GET_REG;
    default:
        gdprintk(XENLOG_ERR, "invalid gp: %d\n", gp);
        goto exit_and_crash;
    }

    HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(value));

    HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);

    switch ( cr )
    {
    case 0:
        return !hvm_set_cr0(value);

    case 3:
        return !hvm_set_cr3(value);

    case 4:
        return !hvm_set_cr4(value);

    case 8:
        vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
        break;

    default:
        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
        goto exit_and_crash;
    }

    return 1;

 exit_and_crash:
    domain_crash(v->domain);
    return 0;
}

/*
 * Read from control registers. CR0 and CR4 are read from the shadow.
 */
static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
{
    unsigned long value = 0;
    struct vcpu *v = current;
    struct vlapic *vlapic = vcpu_vlapic(v);

    switch ( cr )
    {
    case 3:
        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
        break;
    case 8:
        value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
        value = (value & 0xF0) >> 4;
        break;
    default:
        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
        domain_crash(v->domain);
        break;
    }

    switch ( gp ) {
    CASE_SET_REG(EAX, eax);
    CASE_SET_REG(ECX, ecx);
    CASE_SET_REG(EDX, edx);
    CASE_SET_REG(EBX, ebx);
    CASE_SET_REG(EBP, ebp);
    CASE_SET_REG(ESI, esi);
    CASE_SET_REG(EDI, edi);
    CASE_SET_REG(ESP, esp);
    CASE_EXTEND_SET_REG;
    default:
        printk("invalid gp: %d\n", gp);
        domain_crash(v->domain);
        break;
    }

    HVMTRACE_LONG_2D(CR_READ, cr, TRC_PAR_LONG(value));

    HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
}

static int vmx_cr_access(unsigned long exit_qualification,
                         struct cpu_user_regs *regs)
{
    unsigned int gp, cr;
    unsigned long value;
    struct vcpu *v = current;

    switch ( exit_qualification & VMX_CONTROL_REG_ACCESS_TYPE )
    {
    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR:
        gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
        cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
        return mov_to_cr(gp, cr, regs);
    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR:
        gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
        cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
        mov_from_cr(cr, gp, regs);
        break;
    case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
        vmx_update_guest_cr(v, 0);
        HVMTRACE_0D(CLTS);
        break;
    case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
        value = v->arch.hvm_vcpu.guest_cr[0];
        /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
        value = (value & ~0xe) | ((exit_qualification >> 16) & 0xf);
        HVMTRACE_LONG_1D(LMSW, value);
        return !hvm_set_cr0(value);
    default:
        BUG();
    }

    return 1;
}
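
/*
 * Worked LMSW example (editorial): the LMSW source operand lives in
 * bits 16-31 of the exit qualification, of which only the low four are
 * used here. With guest CR0 = 0x3b and an operand of 0x1,
 *     (0x3b & ~0xe) | (0x1 & 0xf) = 0x31 | 0x1 = 0x31
 * so PE stays set; because bit 0 is never masked out of the old value,
 * LMSW can set PE but can never clear it, matching the architecture.
 */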

static const struct lbr_info {
    u32 base, count;
} p4_lbr[] = {
    { MSR_P4_LER_FROM_LIP,          1 },
    { MSR_P4_LER_TO_LIP,            1 },
    { MSR_P4_LASTBRANCH_TOS,        1 },
    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
    { MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
    { 0, 0 }
}, c2_lbr[] = {
    { MSR_IA32_LASTINTFROMIP,       1 },
    { MSR_IA32_LASTINTTOIP,         1 },
    { MSR_C2_LASTBRANCH_TOS,        1 },
    { MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_C2_LASTBRANCH_FROM_TO },
    { MSR_C2_LASTBRANCH_0_TO_IP,    NUM_MSR_C2_LASTBRANCH_FROM_TO },
    { 0, 0 }
#ifdef __i386__
}, pm_lbr[] = {
    { MSR_IA32_LASTINTFROMIP,       1 },
    { MSR_IA32_LASTINTTOIP,         1 },
    { MSR_PM_LASTBRANCH_TOS,        1 },
    { MSR_PM_LASTBRANCH_0,          NUM_MSR_PM_LASTBRANCH },
    { 0, 0 }
#endif
};

static const struct lbr_info *last_branch_msr_get(void)
{
    switch ( boot_cpu_data.x86 )
    {
    case 6:
        switch ( boot_cpu_data.x86_model )
        {
#ifdef __i386__
        /* PentiumM */
        case 9: case 13:
        /* Core Solo/Duo */
        case 14:
            return pm_lbr;
            break;
#endif
        /* Core2 Duo */
        case 15:
            return c2_lbr;
            break;
        }
        break;

    case 15:
        switch ( boot_cpu_data.x86_model )
        {
        /* Pentium4/Xeon with em64t */
        case 3: case 4: case 6:
            return p4_lbr;
            break;
        }
        break;
    }

    return NULL;
}

static int is_last_branch_msr(u32 ecx)
{
    const struct lbr_info *lbr = last_branch_msr_get();

    if ( lbr == NULL )
        return 0;

    for ( ; lbr->count; lbr++ )
        if ( (ecx >= lbr->base) && (ecx < (lbr->base + lbr->count)) )
            return 1;

    return 0;
}

static int vmx_msr_read_intercept(struct cpu_user_regs *regs)
{
    u64 msr_content = 0;
    u32 ecx = regs->ecx, eax, edx;
    struct vcpu *v = current;

    HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x", ecx);

    switch ( ecx )
    {
    case MSR_IA32_SYSENTER_CS:
        msr_content = (u32)__vmread(GUEST_SYSENTER_CS);
        break;
    case MSR_IA32_SYSENTER_ESP:
        msr_content = __vmread(GUEST_SYSENTER_ESP);
        break;
    case MSR_IA32_SYSENTER_EIP:
        msr_content = __vmread(GUEST_SYSENTER_EIP);
        break;
    case MSR_IA32_DEBUGCTLMSR:
        msr_content = __vmread(GUEST_IA32_DEBUGCTL);
#ifdef __i386__
        msr_content |= (u64)__vmread(GUEST_IA32_DEBUGCTL_HIGH) << 32;
#endif
        break;
    case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:
        goto gp_fault;
    case MSR_IA32_MISC_ENABLE:
        rdmsrl(MSR_IA32_MISC_ENABLE, msr_content);
        /* Debug Trace Store is not supported. */
        msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
                       MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
        break;
    default:
        if ( vpmu_do_rdmsr(regs) )
            goto done;
        switch ( long_mode_do_msr_read(regs) )
        {
        case HNDL_unhandled:
            break;
        case HNDL_exception_raised:
            return X86EMUL_EXCEPTION;
        case HNDL_done:
            goto done;
        }

        if ( vmx_read_guest_msr(ecx, &msr_content) == 0 )
            break;

        if ( is_last_branch_msr(ecx) )
        {
            msr_content = 0;
            break;
        }

        if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
             rdmsr_safe(ecx, eax, edx) == 0 )
        {
            regs->eax = eax;
            regs->edx = edx;
            goto done;
        }

        goto gp_fault;
    }

    regs->eax = (uint32_t)msr_content;
    regs->edx = (uint32_t)(msr_content >> 32);

 done:
    HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx);
    HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
                ecx, (unsigned long)regs->eax,
                (unsigned long)regs->edx);
    return X86EMUL_OKAY;

 gp_fault:
    vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
    return X86EMUL_EXCEPTION;
}

static int vmx_alloc_vlapic_mapping(struct domain *d)
{
    void *apic_va;

    if ( !cpu_has_vmx_virtualize_apic_accesses )
        return 0;

    apic_va = alloc_xenheap_page();
    if ( apic_va == NULL )
        return -ENOMEM;
    share_xen_page_with_guest(virt_to_page(apic_va), d, XENSHARE_writable);
    set_mmio_p2m_entry(
        d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), _mfn(virt_to_mfn(apic_va)));
    d->arch.hvm_domain.vmx.apic_access_mfn = virt_to_mfn(apic_va);

    return 0;
}

static void vmx_free_vlapic_mapping(struct domain *d)
{
    unsigned long mfn = d->arch.hvm_domain.vmx.apic_access_mfn;
    if ( mfn != 0 )
        free_xenheap_page(mfn_to_virt(mfn));
}

static int vmx_alloc_vpid(struct domain *d)
{
    int idx;

    if ( !cpu_has_vmx_vpid )
        return 0;

    do {
        idx = find_first_zero_bit(vpid_bitmap, VPID_BITMAP_SIZE);
        if ( idx >= VPID_BITMAP_SIZE )
        {
            dprintk(XENLOG_WARNING, "VMX VPID space exhausted.\n");
            return -EBUSY;
        }
    }
    while ( test_and_set_bit(idx, vpid_bitmap) );

    d->arch.hvm_domain.vmx.vpid_base = idx * MAX_VIRT_CPUS;
    return 0;
}

static void vmx_free_vpid(struct domain *d)
{
    if ( !cpu_has_vmx_vpid )
        return;

    clear_bit(d->arch.hvm_domain.vmx.vpid_base / MAX_VIRT_CPUS, vpid_bitmap);
}

static void vmx_install_vlapic_mapping(struct vcpu *v)
{
    paddr_t virt_page_ma, apic_page_ma;

    if ( !cpu_has_vmx_virtualize_apic_accesses )
        return;

    virt_page_ma = page_to_maddr(vcpu_vlapic(v)->regs_page);
    apic_page_ma = v->domain->arch.hvm_domain.vmx.apic_access_mfn;
    apic_page_ma <<= PAGE_SHIFT;

    vmx_vmcs_enter(v);
    __vmwrite(VIRTUAL_APIC_PAGE_ADDR, virt_page_ma);
    __vmwrite(APIC_ACCESS_ADDR, apic_page_ma);
    vmx_vmcs_exit(v);
}

void vmx_vlapic_msr_changed(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    uint32_t ctl;

    if ( !cpu_has_vmx_virtualize_apic_accesses )
        return;

    vmx_vmcs_enter(v);
    ctl  = __vmread(SECONDARY_VM_EXEC_CONTROL);
    ctl &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
    if ( !vlapic_hw_disabled(vlapic) &&
         (vlapic_base_address(vlapic) == APIC_DEFAULT_PHYS_BASE) )
        ctl |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
    __vmwrite(SECONDARY_VM_EXEC_CONTROL, ctl);
    vmx_vmcs_exit(v);
}

static int vmx_msr_write_intercept(struct cpu_user_regs *regs)
{
    u32 ecx = regs->ecx;
    u64 msr_content;
    struct vcpu *v = current;

    HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, eax=%x, edx=%x",
                ecx, (u32)regs->eax, (u32)regs->edx);

    msr_content = (u32)regs->eax | ((u64)regs->edx << 32);

    HVMTRACE_3D (MSR_WRITE, ecx, regs->eax, regs->edx);

    switch ( ecx )
    {
    case MSR_IA32_SYSENTER_CS:
        __vmwrite(GUEST_SYSENTER_CS, msr_content);
        break;
    case MSR_IA32_SYSENTER_ESP:
        __vmwrite(GUEST_SYSENTER_ESP, msr_content);
        break;
    case MSR_IA32_SYSENTER_EIP:
        __vmwrite(GUEST_SYSENTER_EIP, msr_content);
        break;
    case MSR_IA32_DEBUGCTLMSR: {
        int i, rc = 0;

        if ( !msr_content || (msr_content & ~3) )
            break;

        if ( msr_content & 1 )
        {
            const struct lbr_info *lbr = last_branch_msr_get();
            if ( lbr == NULL )
                break;

            for ( ; (rc == 0) && lbr->count; lbr++ )
                for ( i = 0; (rc == 0) && (i < lbr->count); i++ )
                    if ( (rc = vmx_add_guest_msr(lbr->base + i)) == 0 )
                        vmx_disable_intercept_for_msr(v, lbr->base + i);
        }

        if ( (rc < 0) ||
             (vmx_add_host_load_msr(ecx) < 0) )
            vmx_inject_hw_exception(v, TRAP_machine_check, 0);
        else
        {
            __vmwrite(GUEST_IA32_DEBUGCTL, msr_content);
#ifdef __i386__
            __vmwrite(GUEST_IA32_DEBUGCTL_HIGH, msr_content >> 32);
#endif
        }

        break;
    }
    case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:
        goto gp_fault;
    default:
        if ( vpmu_do_wrmsr(regs) )
            return X86EMUL_OKAY;
        switch ( long_mode_do_msr_write(regs) )
        {
        case HNDL_unhandled:
            if ( (vmx_write_guest_msr(ecx, msr_content) != 0) &&
                 !is_last_branch_msr(ecx) )
                wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
            break;
        case HNDL_exception_raised:
            return X86EMUL_EXCEPTION;
        case HNDL_done:
            break;
        }
        break;
    }

    return X86EMUL_OKAY;

 gp_fault:
    vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
    return X86EMUL_EXCEPTION;
}

static void vmx_do_extint(struct cpu_user_regs *regs)
{
    unsigned int vector;

    asmlinkage void do_IRQ(struct cpu_user_regs *);
    fastcall void smp_apic_timer_interrupt(struct cpu_user_regs *);
    fastcall void smp_event_check_interrupt(void);
    fastcall void smp_invalidate_interrupt(void);
    fastcall void smp_call_function_interrupt(void);
    fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs);
    fastcall void smp_error_interrupt(struct cpu_user_regs *regs);
    fastcall void smp_pmu_apic_interrupt(struct cpu_user_regs *regs);
#ifdef CONFIG_X86_MCE_P4THERMAL
    fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs);
#endif

    vector = __vmread(VM_EXIT_INTR_INFO);
    BUG_ON(!(vector & INTR_INFO_VALID_MASK));

    vector &= INTR_INFO_VECTOR_MASK;
    HVMTRACE_1D(INTR, vector);

    switch ( vector )
    {
    case LOCAL_TIMER_VECTOR:
        smp_apic_timer_interrupt(regs);
        break;
    case EVENT_CHECK_VECTOR:
        smp_event_check_interrupt();
        break;
    case INVALIDATE_TLB_VECTOR:
        smp_invalidate_interrupt();
        break;
    case CALL_FUNCTION_VECTOR:
        smp_call_function_interrupt();
        break;
    case SPURIOUS_APIC_VECTOR:
        smp_spurious_interrupt(regs);
        break;
    case ERROR_APIC_VECTOR:
        smp_error_interrupt(regs);
        break;
    case PMU_APIC_VECTOR:
        smp_pmu_apic_interrupt(regs);
        break;
#ifdef CONFIG_X86_MCE_P4THERMAL
    case THERMAL_APIC_VECTOR:
        smp_thermal_interrupt(regs);
        break;
#endif
    default:
        regs->entry_vector = vector;
        do_IRQ(regs);
        break;
    }
}

static void wbinvd_ipi(void *info)
{
    wbinvd();
}

static void vmx_wbinvd_intercept(void)
{
    if ( !has_arch_pdevs(current->domain) )
        return;

    if ( cpu_has_wbinvd_exiting )
        on_each_cpu(wbinvd_ipi, NULL, 1, 1);
    else
        wbinvd();
}

static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
{
    unsigned long gla_validity = qualification & EPT_GLA_VALIDITY_MASK;
    struct domain *d = current->domain;
    unsigned long gfn = gpa >> PAGE_SHIFT;
    mfn_t mfn;
    p2m_type_t t;

    if ( unlikely(qualification & EPT_GAW_VIOLATION) )
    {
        gdprintk(XENLOG_ERR, "EPT violation: guest physical address %"PRIpaddr
                 " exceeded its width limit.\n", gpa);
        goto crash;
    }

    if ( unlikely(gla_validity == EPT_GLA_VALIDITY_RSVD) ||
         unlikely(gla_validity == EPT_GLA_VALIDITY_PDPTR_LOAD) )
    {
        gdprintk(XENLOG_ERR, "EPT violation: reserved bit or "
                 "pdptr load violation.\n");
        goto crash;
    }

    mfn = gfn_to_mfn(d, gfn, &t);
    if ( (t != p2m_ram_ro) && p2m_is_ram(t) && paging_mode_log_dirty(d) )
    {
        paging_mark_dirty(d, mfn_x(mfn));
        p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
        flush_tlb_mask(d->domain_dirty_cpumask);
        return;
    }

    /* This can only happen in log-dirty mode, writing back A/D bits. */
    if ( unlikely(gla_validity == EPT_GLA_VALIDITY_GPT_WALK) )
        goto crash;

    ASSERT(gla_validity == EPT_GLA_VALIDITY_MATCH);
    handle_mmio();

    return;

 crash:
    domain_crash(d);
}

static void vmx_failed_vmentry(unsigned int exit_reason,
                               struct cpu_user_regs *regs)
{
    unsigned int failed_vmentry_reason = (uint16_t)exit_reason;
    unsigned long exit_qualification = __vmread(EXIT_QUALIFICATION);
    struct vcpu *curr = current;

    printk("Failed vm entry (exit reason 0x%x) ", exit_reason);
    switch ( failed_vmentry_reason )
    {
    case EXIT_REASON_INVALID_GUEST_STATE:
        printk("caused by invalid guest state (%ld).\n", exit_qualification);
        break;
    case EXIT_REASON_MSR_LOADING:
        printk("caused by MSR entry %ld loading.\n", exit_qualification);
        break;
    case EXIT_REASON_MACHINE_CHECK:
        printk("caused by machine check.\n");
        HVMTRACE_0D(MCE);
        do_machine_check(regs);
        break;
    default:
        printk("reason not known yet!");
        break;
    }

    printk("************* VMCS Area **************\n");
    vmcs_dump_vcpu(curr);
    printk("**************************************\n");

    domain_crash(curr->domain);
}
2028 asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
2030 unsigned int exit_reason, idtv_info;
2031 unsigned long exit_qualification, inst_len = 0;
2032 struct vcpu *v = current;
2034 if ( paging_mode_hap(v->domain) && hvm_paging_enabled(v) )
2035 v->arch.hvm_vcpu.guest_cr[3] = v->arch.hvm_vcpu.hw_cr[3] =
2036 __vmread(GUEST_CR3);
2038 exit_reason = __vmread(VM_EXIT_REASON);
2040 HVMTRACE_ND(VMEXIT64, 1/*cycles*/, 3, exit_reason,
2041 (uint32_t)regs->eip, (uint32_t)((uint64_t)regs->eip >> 32),
2042 0, 0, 0);
2044 perfc_incra(vmexits, exit_reason);
2046 if ( exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT )
2047 local_irq_enable();
2049 if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
2050 return vmx_failed_vmentry(exit_reason, regs);
2052 hvm_maybe_deassert_evtchn_irq();
2054 /* Event delivery caused this intercept? Queue for redelivery. */
2055 idtv_info = __vmread(IDT_VECTORING_INFO);
2056 if ( unlikely(idtv_info & INTR_INFO_VALID_MASK) &&
2057 (exit_reason != EXIT_REASON_TASK_SWITCH) )
2059 if ( hvm_event_needs_reinjection((idtv_info>>8)&7, idtv_info&0xff) )
2061 /* See SDM 3B 25.7.1.1 and .2 for info about masking resvd bits. */
2062 __vmwrite(VM_ENTRY_INTR_INFO,
2063 idtv_info & ~INTR_INFO_RESVD_BITS_MASK);
2064 if ( idtv_info & INTR_INFO_DELIVER_CODE_MASK )
2065 __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
2066 __vmread(IDT_VECTORING_ERROR_CODE));
2069 /*
2070 * Clear NMI-blocking interruptibility info if an NMI delivery faulted.
2071 * Re-delivery will re-set it (see SDM 3B 25.7.1.2).
2072 */
2073 if ( (idtv_info & INTR_INFO_INTR_TYPE_MASK) == (X86_EVENTTYPE_NMI<<8) )
2074 __vmwrite(GUEST_INTERRUPTIBILITY_INFO,
2075 __vmread(GUEST_INTERRUPTIBILITY_INFO) &
2076 ~VMX_INTR_SHADOW_NMI);
    switch ( exit_reason )
    {
    case EXIT_REASON_EXCEPTION_NMI:
    {
        /*
         * We do not enable software-interrupt (INT n) exiting, so this
         * vmexit is either (1) an exception (e.g. #PF) raised in the
         * guest, or (2) an NMI.
         */
        unsigned int intr_info, vector;

        intr_info = __vmread(VM_EXIT_INTR_INFO);
        BUG_ON(!(intr_info & INTR_INFO_VALID_MASK));

        vector = intr_info & INTR_INFO_VECTOR_MASK;

        /*
         * Re-set the NMI shadow if vmexit caused by a guest IRET fault (see 3B
         * 25.7.1.2, "Resuming Guest Software after Handling an Exception").
         * (NB. If we emulate this IRET for any reason, we should re-clear!)
         */
        if ( unlikely(intr_info & INTR_INFO_NMI_UNBLOCKED_BY_IRET) &&
             !(__vmread(IDT_VECTORING_INFO) & INTR_INFO_VALID_MASK) &&
             (vector != TRAP_double_fault) )
            __vmwrite(GUEST_INTERRUPTIBILITY_INFO,
                      __vmread(GUEST_INTERRUPTIBILITY_INFO) |
                      VMX_INTR_SHADOW_NMI);

        perfc_incra(cause_vector, vector);

        switch ( vector )
        {
        case TRAP_debug:
            /*
             * Update DR6 where the debugger can peek (see SDM 3B 23.2.1,
             * Table 23-1, "Exit Qualification for Debug Exceptions").
             */
            exit_qualification = __vmread(EXIT_QUALIFICATION);
            write_debugreg(6, exit_qualification | 0xffff0ff0);
            if ( !v->domain->debugger_attached )
                goto exit_and_crash;
            domain_pause_for_debugger();
            break;
        case TRAP_int3:
            if ( !v->domain->debugger_attached )
                goto exit_and_crash;
            /* Step RIP past the INT3 so the guest does not re-execute it. */
            inst_len = __get_instruction_length(); /* Safe: INT3 */
            __update_guest_eip(inst_len);
            domain_pause_for_debugger();
            break;
        case TRAP_no_device:
            vmx_fpu_dirty_intercept();
            break;
        case TRAP_page_fault:
            exit_qualification = __vmread(EXIT_QUALIFICATION);
            regs->error_code = __vmread(VM_EXIT_INTR_ERROR_CODE);

            HVM_DBG_LOG(DBG_LEVEL_VMMU,
                        "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
                        (unsigned long)regs->eax, (unsigned long)regs->ebx,
                        (unsigned long)regs->ecx, (unsigned long)regs->edx,
                        (unsigned long)regs->esi, (unsigned long)regs->edi);

            /* Non-zero return: the paging code handled the fault itself. */
            if ( paging_fault(exit_qualification, regs) )
            {
                if ( trace_will_trace_event(TRC_SHADOW) )
                    break;
                if ( hvm_long_mode_enabled(v) )
                    HVMTRACE_LONG_2D(PF_XEN, regs->error_code,
                                     TRC_PAR_LONG(exit_qualification));
                else
                    HVMTRACE_2D(PF_XEN,
                                regs->error_code, exit_qualification);
                break;
            }

            /* Otherwise reflect the fault back into the guest. */
            v->arch.hvm_vcpu.guest_cr[2] = exit_qualification;
            vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);
            break;
        case TRAP_nmi:
            if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) !=
                 (X86_EVENTTYPE_NMI << 8) )
                goto exit_and_crash;
            HVMTRACE_0D(NMI);
            do_nmi(regs); /* Real NMI, vector 2: normal processing. */
            break;
        case TRAP_machine_check:
            HVMTRACE_0D(MCE);
            do_machine_check(regs);
            break;
        default:
            goto exit_and_crash;
        }
        break;
    }
    case EXIT_REASON_EXTERNAL_INTERRUPT:
        vmx_do_extint(regs);
        break;
    case EXIT_REASON_TRIPLE_FAULT:
        hvm_triple_fault();
        break;
    case EXIT_REASON_PENDING_VIRT_INTR:
        /* Disable the interrupt window. */
        v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                  v->arch.hvm_vmx.exec_control);
        break;
    case EXIT_REASON_PENDING_VIRT_NMI:
        /* Disable the NMI window. */
        v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                  v->arch.hvm_vmx.exec_control);
        break;
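
    /*
     * Task-switch exit qualification: bits 15:0 hold the target TSS
     * selector, and bits 31:30 encode the source of the switch (CALL or
     * INT, IRET, JMP, or an IDT task gate, handled like CALL/INT below).
     */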
    case EXIT_REASON_TASK_SWITCH: {
        const enum hvm_task_switch_reason reasons[] = {
            TSW_call_or_int, TSW_iret, TSW_jmp, TSW_call_or_int };
        int32_t errcode = -1;
        exit_qualification = __vmread(EXIT_QUALIFICATION);
        if ( (idtv_info & INTR_INFO_VALID_MASK) &&
             (idtv_info & INTR_INFO_DELIVER_CODE_MASK) )
            errcode = __vmread(IDT_VECTORING_ERROR_CODE);
        hvm_task_switch((uint16_t)exit_qualification,
                        reasons[(exit_qualification >> 30) & 3],
                        errcode);
        break;
    }
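
    /*
     * For the instruction intercepts below, the VMCS records the length
     * of the intercepted instruction; advancing RIP by that amount
     * resumes the guest at the next instruction.
     */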
    case EXIT_REASON_CPUID:
        inst_len = __get_instruction_length(); /* Safe: CPUID */
        __update_guest_eip(inst_len);
        vmx_do_cpuid(regs);
        break;
    case EXIT_REASON_HLT:
        inst_len = __get_instruction_length(); /* Safe: HLT */
        __update_guest_eip(inst_len);
        hvm_hlt(regs->eflags);
        break;
    case EXIT_REASON_INVLPG:
    {
        inst_len = __get_instruction_length(); /* Safe: INVLPG */
        __update_guest_eip(inst_len);
        exit_qualification = __vmread(EXIT_QUALIFICATION);
        vmx_invlpg_intercept(exit_qualification);
        break;
    }
    case EXIT_REASON_RDTSC:
        inst_len = __get_instruction_length(); /* Safe: RDTSC */
        __update_guest_eip(inst_len);
        hvm_rdtsc_intercept(regs);
        break;
    case EXIT_REASON_VMCALL:
    {
        int rc;
        HVMTRACE_1D(VMMCALL, regs->eax);
        inst_len = __get_instruction_length(); /* Safe: VMCALL */
        rc = hvm_do_hypercall(regs);
        /* A preempted hypercall restarts with RIP still at the VMCALL. */
        if ( rc != HVM_HCALL_preempted )
        {
            __update_guest_eip(inst_len);
            if ( rc == HVM_HCALL_invalidate )
                send_invalidate_req();
        }
        break;
    }
    case EXIT_REASON_CR_ACCESS:
    {
        exit_qualification = __vmread(EXIT_QUALIFICATION);
        inst_len = __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
        if ( vmx_cr_access(exit_qualification, regs) )
            __update_guest_eip(inst_len);
        break;
    }
    case EXIT_REASON_DR_ACCESS:
        exit_qualification = __vmread(EXIT_QUALIFICATION);
        vmx_dr_access(exit_qualification, regs);
        break;
    case EXIT_REASON_MSR_READ:
        inst_len = __get_instruction_length(); /* Safe: RDMSR */
        if ( hvm_msr_read_intercept(regs) == X86EMUL_OKAY )
            __update_guest_eip(inst_len);
        break;
    case EXIT_REASON_MSR_WRITE:
        inst_len = __get_instruction_length(); /* Safe: WRMSR */
        if ( hvm_msr_write_intercept(regs) == X86EMUL_OKAY )
            __update_guest_eip(inst_len);
        break;
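
    /*
     * Nested VMX is not supported, and MONITOR/MWAIT are not exposed to
     * HVM guests, so all of the following opcodes raise #UD in the guest.
     */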
    case EXIT_REASON_MWAIT_INSTRUCTION:
    case EXIT_REASON_MONITOR_INSTRUCTION:
    case EXIT_REASON_VMCLEAR:
    case EXIT_REASON_VMLAUNCH:
    case EXIT_REASON_VMPTRLD:
    case EXIT_REASON_VMPTRST:
    case EXIT_REASON_VMREAD:
    case EXIT_REASON_VMRESUME:
    case EXIT_REASON_VMWRITE:
    case EXIT_REASON_VMXOFF:
    case EXIT_REASON_VMXON:
        vmx_inject_hw_exception(v, TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
        break;
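
    /*
     * Nothing to do here: the exit itself is the point. Pending
     * interrupts are re-evaluated against the lowered TPR on the next
     * VM entry.
     */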
    case EXIT_REASON_TPR_BELOW_THRESHOLD:
        break;

    case EXIT_REASON_IO_INSTRUCTION:
    case EXIT_REASON_APIC_ACCESS:
        if ( !handle_mmio() )
            hvm_inject_exception(TRAP_gp_fault, 0, 0);
        break;

    case EXIT_REASON_INVD:
    case EXIT_REASON_WBINVD:
    {
        inst_len = __get_instruction_length(); /* Safe: INVD, WBINVD */
        __update_guest_eip(inst_len);
        vmx_wbinvd_intercept();
        break;
    }
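
    /*
     * The faulting guest-physical address is recorded in the VMCS; on
     * 32-bit builds its upper half lives in a separate field.
     */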
    case EXIT_REASON_EPT_VIOLATION:
    {
        paddr_t gpa = __vmread(GUEST_PHYSICAL_ADDRESS);
#ifdef __i386__
        gpa |= (paddr_t)__vmread(GUEST_PHYSICAL_ADDRESS_HIGH) << 32;
#endif
        exit_qualification = __vmread(EXIT_QUALIFICATION);
        ept_handle_violation(exit_qualification, gpa);
        break;
    }

    default:
    exit_and_crash:
        gdprintk(XENLOG_ERR, "Bad vmexit (reason %x)\n", exit_reason);
        domain_crash(v->domain);
        break;
    }
}

asmlinkage void vmx_trace_vmentry(void)
{
    HVMTRACE_ND(VMENTRY, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */