ia64/xen-unstable

view xen/arch/x86/hvm/svm/svm.c @ 12609:62b0b520ea53

[HVM] Fix MSR access code.

- rdmsr/wrmsr always use ECX (not RCX) as the register index.
- SVM still spelled out the function names in its HVM_DBG_LOG() output.
- the guest should (at the very minimum) see a #GP fault for MSR
accesses that fault even inside Xen itself.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author kfraser@localhost.localdomain
date Tue Nov 28 11:43:39 2006 +0000 (2006-11-28)
parents 93e657836d07
children 519a74928bd4
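In the handlers below, the first point shows up as the MSR index being read via (u32)regs->ecx (so the upper half of RCX is ignored), and the last point as the rdmsr_safe() fallback that reflects a #GP into the guest. A minimal sketch of that pattern, for illustration only (not the literal patch hunk; names as used in the file below):

    u32 msr = (u32)regs->ecx;                      /* index comes from ECX, not RCX */
    if ( !rdmsr_hypervisor_regs(msr, &eax, &edx) &&
         rdmsr_safe(msr, eax, edx) != 0 )          /* would fault in Xen itself...  */
        svm_inject_exception(v, TRAP_gp_fault, 1, 0);  /* ...so the guest sees #GP  */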
line source
1 /*
2 * svm.c: handling SVM architecture-related VM exits
3 * Copyright (c) 2004, Intel Corporation.
4 * Copyright (c) 2005, AMD Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 *
19 */
21 #include <xen/config.h>
22 #include <xen/init.h>
23 #include <xen/lib.h>
24 #include <xen/trace.h>
25 #include <xen/sched.h>
26 #include <xen/irq.h>
27 #include <xen/softirq.h>
28 #include <xen/hypercall.h>
29 #include <xen/domain_page.h>
30 #include <asm/current.h>
31 #include <asm/io.h>
32 #include <asm/shadow.h>
33 #include <asm/regs.h>
34 #include <asm/cpufeature.h>
35 #include <asm/processor.h>
36 #include <asm/types.h>
37 #include <asm/msr.h>
38 #include <asm/spinlock.h>
39 #include <asm/hvm/hvm.h>
40 #include <asm/hvm/support.h>
41 #include <asm/hvm/io.h>
42 #include <asm/hvm/svm/svm.h>
43 #include <asm/hvm/svm/vmcb.h>
44 #include <asm/hvm/svm/emulate.h>
45 #include <asm/hvm/svm/vmmcall.h>
46 #include <asm/hvm/svm/intr.h>
47 #include <asm/x86_emulate.h>
48 #include <public/sched.h>
50 #define SVM_EXTRA_DEBUG
52 #define set_segment_register(name, value) \
53 __asm__ __volatile__ ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
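/* For example, set_segment_register(ds, 0) expands (via STR()) to
 * "movw %ax,%ds" with 0 preloaded into AX by the "a" constraint. */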
55 /* External functions. We should move these to some suitable header file(s) */
57 extern int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip,
58 int inst_len);
59 extern asmlinkage void do_IRQ(struct cpu_user_regs *);
60 extern void svm_dump_inst(unsigned long eip);
61 extern int svm_dbg_on;
62 void svm_dump_regs(const char *from, struct cpu_user_regs *regs);
64 static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v,
65 struct cpu_user_regs *regs);
67 /* va of hardware host save area */
68 static void *hsa[NR_CPUS] __read_mostly;
70 /* vmcb used for extended host state */
71 static void *root_vmcb[NR_CPUS] __read_mostly;
73 /* physical address of above for host VMSAVE/VMLOAD */
74 u64 root_vmcb_pa[NR_CPUS] __read_mostly;
77 /* ASID API */
78 enum {
79 ASID_AVAILABLE = 0,
80 ASID_INUSE,
81 ASID_RETIRED
82 };
83 #define INITIAL_ASID 0
84 #define ASID_MAX 64
86 struct asid_pool {
87 spinlock_t asid_lock;
88 u32 asid[ASID_MAX];
89 };
91 static DEFINE_PER_CPU(struct asid_pool, asid_pool);
94 /*
95 * Initializes the pool of ASIDs used by the guests, one pool per core.
96 */
97 void asidpool_init(int core)
98 {
99 int i;
101 spin_lock_init(&per_cpu(asid_pool,core).asid_lock);
103 /* Host ASID is always in use */
104 per_cpu(asid_pool,core).asid[INITIAL_ASID] = ASID_INUSE;
105 for ( i = 1; i < ASID_MAX; i++ )
106 per_cpu(asid_pool,core).asid[i] = ASID_AVAILABLE;
107 }
110 /* internal function to get the next available ASID */
111 static int asidpool_fetch_next(struct vmcb_struct *vmcb, int core)
112 {
113 int i;
114 for ( i = 1; i < ASID_MAX; i++ )
115 {
116 if ( per_cpu(asid_pool,core).asid[i] == ASID_AVAILABLE )
117 {
118 vmcb->guest_asid = i;
119 per_cpu(asid_pool,core).asid[i] = ASID_INUSE;
120 return i;
121 }
122 }
123 return -1;
124 }
127 /*
128 * This function assigns to the passed VMCB the next
129 * available ASID number. If none are available, the
130 * TLB flush flag is set, and all retired ASIDs
131 * are made available again.
132 *
133 * Returns: 1 -- success;
134 * 0 -- failure -- no more ASID numbers
135 * available.
136 */
137 int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
138 int oldcore, int newcore )
139 {
140 int i;
141 int res = 1;
142 static unsigned long cnt=0;
144 spin_lock(&per_cpu(asid_pool,oldcore).asid_lock);
145 if( retire_current && vmcb->guest_asid ) {
146 per_cpu(asid_pool,oldcore).asid[vmcb->guest_asid & (ASID_MAX-1)] =
147 ASID_RETIRED;
148 }
149 spin_unlock(&per_cpu(asid_pool,oldcore).asid_lock);
150 spin_lock(&per_cpu(asid_pool,newcore).asid_lock);
151 if( asidpool_fetch_next( vmcb, newcore ) < 0 ) {
152 if (svm_dbg_on)
153 printk( "SVM: tlb(%ld)\n", cnt++ );
154 /* Flush the TLB and make all retired slots available again */
155 vmcb->tlb_control = 1;
156 for( i = 1; i < ASID_MAX; i++ ) {
157 if( per_cpu(asid_pool,newcore).asid[i] == ASID_RETIRED ) {
158 per_cpu(asid_pool,newcore).asid[i] = ASID_AVAILABLE;
159 }
160 }
161 /* Get the First slot available */
162 res = asidpool_fetch_next( vmcb, newcore ) > 0;
163 }
164 spin_unlock(&per_cpu(asid_pool,newcore).asid_lock);
165 return res;
166 }
168 void asidpool_retire( struct vmcb_struct *vmcb, int core )
169 {
170 spin_lock(&per_cpu(asid_pool,core).asid_lock);
171 if( vmcb->guest_asid ) {
172 per_cpu(asid_pool,core).asid[vmcb->guest_asid & (ASID_MAX-1)] =
173 ASID_RETIRED;
174 }
175 spin_unlock(&per_cpu(asid_pool,core).asid_lock);
176 }
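/*
 * Summary of the ASID life cycle implemented above: a slot starts
 * AVAILABLE, becomes INUSE once handed to a VMCB, and is marked RETIRED
 * when given up (e.g. when the vCPU moves to another core).  When no
 * AVAILABLE slot is left, asidpool_assign_next() sets vmcb->tlb_control
 * to force a TLB flush and recycles every RETIRED slot back to AVAILABLE.
 */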
178 static inline void svm_inject_exception(struct vcpu *v, int trap,
179 int ev, int error_code)
180 {
181 eventinj_t event;
182 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
184 event.bytes = 0;
185 event.fields.v = 1;
186 event.fields.type = EVENTTYPE_EXCEPTION;
187 event.fields.vector = trap;
188 event.fields.ev = ev;
189 event.fields.errorcode = error_code;
191 ASSERT(vmcb->eventinj.fields.v == 0);
193 vmcb->eventinj = event;
194 v->arch.hvm_svm.inject_event=1;
195 }
197 static void stop_svm(void)
198 {
199 u32 eax, edx;
200 int cpu = smp_processor_id();
202 /* We turn off the EFER_SVME bit. */
203 rdmsr(MSR_EFER, eax, edx);
204 eax &= ~EFER_SVME;
205 wrmsr(MSR_EFER, eax, edx);
207 /* release the HSA */
208 free_host_save_area(hsa[cpu]);
209 hsa[cpu] = NULL;
210 wrmsr(MSR_K8_VM_HSAVE_PA, 0, 0 );
212 /* free up the root vmcb */
213 free_vmcb(root_vmcb[cpu]);
214 root_vmcb[cpu] = NULL;
215 root_vmcb_pa[cpu] = 0;
216 }
218 static void svm_store_cpu_guest_regs(
219 struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
220 {
221 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
223 if ( regs != NULL )
224 {
225 regs->eip = vmcb->rip;
226 regs->esp = vmcb->rsp;
227 regs->eflags = vmcb->rflags;
228 regs->cs = vmcb->cs.sel;
229 regs->ds = vmcb->ds.sel;
230 regs->es = vmcb->es.sel;
231 regs->ss = vmcb->ss.sel;
232 regs->gs = vmcb->gs.sel;
233 regs->fs = vmcb->fs.sel;
234 }
236 if ( crs != NULL )
237 {
238 /* Returning the guest's regs */
239 crs[0] = v->arch.hvm_svm.cpu_shadow_cr0;
240 crs[2] = v->arch.hvm_svm.cpu_cr2;
241 crs[3] = v->arch.hvm_svm.cpu_cr3;
242 crs[4] = v->arch.hvm_svm.cpu_shadow_cr4;
243 }
244 }
246 static int svm_paging_enabled(struct vcpu *v)
247 {
248 unsigned long cr0;
250 cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
252 return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
253 }
255 static int svm_pae_enabled(struct vcpu *v)
256 {
257 unsigned long cr4;
259 if(!svm_paging_enabled(v))
260 return 0;
262 cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
264 return (cr4 & X86_CR4_PAE);
265 }
267 static int svm_long_mode_enabled(struct vcpu *v)
268 {
269 return test_bit(SVM_CPU_STATE_LMA_ENABLED, &v->arch.hvm_svm.cpu_state);
270 }
272 #define IS_CANO_ADDRESS(add) 1
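/*
 * Note: IS_CANO_ADDRESS() is a stub that accepts every address.  A real
 * canonical-address check (assuming 48-bit virtual addresses) would be
 * something like:
 *     ((u64)(((s64)(add) << 16) >> 16) == (u64)(add))
 * i.e. bits 63:48 must be copies of bit 47.
 */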
274 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
275 {
276 u64 msr_content = 0;
277 struct vcpu *vc = current;
278 struct vmcb_struct *vmcb = vc->arch.hvm_svm.vmcb;
280 switch ((u32)regs->ecx)
281 {
282 case MSR_EFER:
283 msr_content = vmcb->efer;
284 msr_content &= ~EFER_SVME;
285 break;
287 case MSR_FS_BASE:
288 msr_content = vmcb->fs.base;
289 break;
291 case MSR_GS_BASE:
292 msr_content = vmcb->gs.base;
293 break;
295 case MSR_SHADOW_GS_BASE:
296 msr_content = vmcb->kerngsbase;
297 break;
299 case MSR_STAR:
300 msr_content = vmcb->star;
301 break;
303 case MSR_LSTAR:
304 msr_content = vmcb->lstar;
305 break;
307 case MSR_CSTAR:
308 msr_content = vmcb->cstar;
309 break;
311 case MSR_SYSCALL_MASK:
312 msr_content = vmcb->sfmask;
313 break;
314 default:
315 return 0;
316 }
318 HVM_DBG_LOG(DBG_LEVEL_2, "msr_content: %"PRIx64"\n",
319 msr_content);
321 regs->eax = (u32)(msr_content >> 0);
322 regs->edx = (u32)(msr_content >> 32);
323 return 1;
324 }
326 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
327 {
328 u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
329 struct vcpu *v = current;
330 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
332 HVM_DBG_LOG(DBG_LEVEL_1, "msr %x msr_content %"PRIx64"\n",
333 (u32)regs->ecx, msr_content);
335 switch ( (u32)regs->ecx )
336 {
337 case MSR_EFER:
338 #ifdef __x86_64__
339 /* offending reserved bit will cause #GP */
340 if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
341 {
342 printk("Trying to set reserved bit in EFER: %"PRIx64"\n",
343 msr_content);
344 svm_inject_exception(v, TRAP_gp_fault, 1, 0);
345 return 0;
346 }
348 /* LME: 0 -> 1 */
349 if ( msr_content & EFER_LME &&
350 !test_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state))
351 {
352 if ( svm_paging_enabled(v) ||
353 !test_bit(SVM_CPU_STATE_PAE_ENABLED,
354 &v->arch.hvm_svm.cpu_state) )
355 {
356 printk("Trying to set LME bit when "
357 "in paging mode or PAE bit is not set\n");
358 svm_inject_exception(v, TRAP_gp_fault, 1, 0);
359 return 0;
360 }
361 set_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state);
362 }
364 /* We have already recorded that we want LME, so it will be set
365 * next time CR0 gets updated. So we clear that bit and continue.
366 */
367 if ((msr_content ^ vmcb->efer) & EFER_LME)
368 msr_content &= ~EFER_LME;
369 /* No update for LME/LMA since it has no effect */
370 #endif
371 vmcb->efer = msr_content | EFER_SVME;
372 break;
374 case MSR_FS_BASE:
375 case MSR_GS_BASE:
376 if ( !svm_long_mode_enabled(v) )
377 goto exit_and_crash;
379 if (!IS_CANO_ADDRESS(msr_content))
380 {
381 HVM_DBG_LOG(DBG_LEVEL_1, "Non-canonical address for MSR write\n");
382 svm_inject_exception(v, TRAP_gp_fault, 1, 0);
383 }
385 if (regs->ecx == MSR_FS_BASE)
386 vmcb->fs.base = msr_content;
387 else
388 vmcb->gs.base = msr_content;
389 break;
391 case MSR_SHADOW_GS_BASE:
392 vmcb->kerngsbase = msr_content;
393 break;
395 case MSR_STAR:
396 vmcb->star = msr_content;
397 break;
399 case MSR_LSTAR:
400 vmcb->lstar = msr_content;
401 break;
403 case MSR_CSTAR:
404 vmcb->cstar = msr_content;
405 break;
407 case MSR_SYSCALL_MASK:
408 vmcb->sfmask = msr_content;
409 break;
411 default:
412 return 0;
413 }
415 return 1;
417 exit_and_crash:
418 gdprintk(XENLOG_ERR, "Fatal error writing MSR %lx\n", (long)regs->ecx);
419 domain_crash(v->domain);
420 return 1; /* handled */
421 }
424 #define loaddebug(_v,_reg) \
425 __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
426 #define savedebug(_v,_reg) \
427 __asm__ __volatile__ ("mov %%db" #_reg ",%0" : "=r" ((_v)->debugreg[_reg]))
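/*
 * Example expansion: loaddebug(&v->arch.guest_context, 3) becomes
 * "mov %0,%%db3" with debugreg[3] as the input operand; savedebug() is
 * the reverse direction, reading %dbN back into debugreg[N].
 */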
429 static inline void svm_save_dr(struct vcpu *v)
430 {
431 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
433 if ( !v->arch.hvm_vcpu.flag_dr_dirty )
434 return;
436 /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
437 v->arch.hvm_vcpu.flag_dr_dirty = 0;
438 v->arch.hvm_svm.vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
440 savedebug(&v->arch.guest_context, 0);
441 savedebug(&v->arch.guest_context, 1);
442 savedebug(&v->arch.guest_context, 2);
443 savedebug(&v->arch.guest_context, 3);
444 v->arch.guest_context.debugreg[6] = vmcb->dr6;
445 v->arch.guest_context.debugreg[7] = vmcb->dr7;
446 }
449 static inline void __restore_debug_registers(struct vcpu *v)
450 {
451 loaddebug(&v->arch.guest_context, 0);
452 loaddebug(&v->arch.guest_context, 1);
453 loaddebug(&v->arch.guest_context, 2);
454 loaddebug(&v->arch.guest_context, 3);
455 /* DR6 and DR7 are loaded from the VMCB. */
456 }
459 static inline void svm_restore_dr(struct vcpu *v)
460 {
461 if ( unlikely(v->arch.guest_context.debugreg[7] & 0xFF) )
462 __restore_debug_registers(v);
463 }
466 static int svm_realmode(struct vcpu *v)
467 {
468 unsigned long cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
469 unsigned long eflags = v->arch.hvm_svm.vmcb->rflags;
471 return (eflags & X86_EFLAGS_VM) || !(cr0 & X86_CR0_PE);
472 }
474 static int svm_guest_x86_mode(struct vcpu *v)
475 {
476 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
478 if ( vmcb->efer & EFER_LMA )
479 return (vmcb->cs.attributes.fields.l ?
480 X86EMUL_MODE_PROT64 : X86EMUL_MODE_PROT32);
482 if ( svm_realmode(v) )
483 return X86EMUL_MODE_REAL;
485 return (vmcb->cs.attributes.fields.db ?
486 X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16);
487 }
489 void svm_update_host_cr3(struct vcpu *v)
490 {
491 /* SVM doesn't have a HOST_CR3 equivalent to update. */
492 }
494 unsigned long svm_get_ctrl_reg(struct vcpu *v, unsigned int num)
495 {
496 switch ( num )
497 {
498 case 0:
499 return v->arch.hvm_svm.cpu_shadow_cr0;
500 case 2:
501 return v->arch.hvm_svm.cpu_cr2;
502 case 3:
503 return v->arch.hvm_svm.cpu_cr3;
504 case 4:
505 return v->arch.hvm_svm.cpu_shadow_cr4;
506 default:
507 BUG();
508 }
509 return 0; /* dummy */
510 }
512 static unsigned long svm_get_segment_base(struct vcpu *v, enum segment seg)
513 {
514 switch ( seg )
515 {
516 case seg_cs: return v->arch.hvm_svm.vmcb->cs.base;
517 case seg_ds: return v->arch.hvm_svm.vmcb->ds.base;
518 case seg_es: return v->arch.hvm_svm.vmcb->es.base;
519 case seg_fs: return v->arch.hvm_svm.vmcb->fs.base;
520 case seg_gs: return v->arch.hvm_svm.vmcb->gs.base;
521 case seg_ss: return v->arch.hvm_svm.vmcb->ss.base;
522 case seg_tr: return v->arch.hvm_svm.vmcb->tr.base;
523 case seg_gdtr: return v->arch.hvm_svm.vmcb->gdtr.base;
524 case seg_idtr: return v->arch.hvm_svm.vmcb->idtr.base;
525 case seg_ldtr: return v->arch.hvm_svm.vmcb->ldtr.base;
526 }
527 BUG();
528 return 0;
529 }
531 /* Make sure that xen intercepts any FP accesses from current */
532 static void svm_stts(struct vcpu *v)
533 {
534 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
536 /*
537 * If the guest does not have TS enabled then we must cause and handle an
538 * exception on first use of the FPU. If the guest *does* have TS enabled
539 * then this is not necessary: no FPU activity can occur until the guest
540 * clears CR0.TS, and we will initialise the FPU when that happens.
541 */
542 if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
543 {
544 v->arch.hvm_svm.vmcb->exception_intercepts |= EXCEPTION_BITMAP_NM;
545 vmcb->cr0 |= X86_CR0_TS;
546 }
547 }
550 static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
551 {
552 v->arch.hvm_svm.vmcb->tsc_offset = offset;
553 }
556 static void svm_init_ap_context(
557 struct vcpu_guest_context *ctxt, int vcpuid, int trampoline_vector)
558 {
559 memset(ctxt, 0, sizeof(*ctxt));
561 /*
562 * We execute the trampoline code in real mode. The trampoline vector
563 * passed to us is page aligned and is the physical frame number of
564 * the code.
565 */
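/* Example: a trampoline placed in page 0x9F (physical 0x9F000) gives
 * CS = 0x9F00, so CS:IP = 0x9F00:0x0000 resolves to 0x9F000 in real mode. */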
566 ctxt->user_regs.eip = 0x0;
567 ctxt->user_regs.cs = (trampoline_vector << 8);
568 }
570 static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
571 {
572 char *p;
573 int i;
575 memset(hypercall_page, 0, PAGE_SIZE);
577 for ( i = 0; i < (PAGE_SIZE / 32); i++ )
578 {
579 p = (char *)(hypercall_page + (i * 32));
580 *(u8 *)(p + 0) = 0xb8; /* mov imm32, %eax */
581 *(u32 *)(p + 1) = i;
582 *(u8 *)(p + 5) = 0x0f; /* vmmcall */
583 *(u8 *)(p + 6) = 0x01;
584 *(u8 *)(p + 7) = 0xd9;
585 *(u8 *)(p + 8) = 0xc3; /* ret */
586 }
588 /* Don't support HYPERVISOR_iret at the moment */
589 *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
590 }
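/*
 * Each 32-byte stub written above disassembles to
 *     b8 NN 00 00 00    mov  $NN,%eax      ; hypercall number
 *     0f 01 d9          vmmcall
 *     c3                ret
 * and the __HYPERVISOR_iret slot is stamped with 0f 0b (ud2) because iret
 * is not supported through this page.
 */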
593 int svm_dbg_on = 0;
595 static inline int svm_do_debugout(unsigned long exit_code)
596 {
597 int i;
599 static unsigned long counter = 0;
600 static unsigned long works[] =
601 {
602 VMEXIT_IOIO,
603 VMEXIT_HLT,
604 VMEXIT_CPUID,
605 VMEXIT_DR0_READ,
606 VMEXIT_DR1_READ,
607 VMEXIT_DR2_READ,
608 VMEXIT_DR3_READ,
609 VMEXIT_DR6_READ,
610 VMEXIT_DR7_READ,
611 VMEXIT_DR0_WRITE,
612 VMEXIT_DR1_WRITE,
613 VMEXIT_DR2_WRITE,
614 VMEXIT_DR3_WRITE,
615 VMEXIT_CR0_READ,
616 VMEXIT_CR0_WRITE,
617 VMEXIT_CR3_READ,
618 VMEXIT_CR4_READ,
619 VMEXIT_MSR,
620 VMEXIT_CR0_WRITE,
621 VMEXIT_CR3_WRITE,
622 VMEXIT_CR4_WRITE,
623 VMEXIT_EXCEPTION_PF,
624 VMEXIT_INTR,
625 VMEXIT_INVLPG,
626 VMEXIT_EXCEPTION_NM
627 };
630 #if 0
631 if (svm_dbg_on && exit_code != 0x7B)
632 return 1;
633 #endif
635 counter++;
637 #if 0
638 if ((exit_code == 0x4E
639 || exit_code == VMEXIT_CR0_READ
640 || exit_code == VMEXIT_CR0_WRITE)
641 && counter < 200000)
642 return 0;
644 if ((exit_code == 0x4E) && counter < 500000)
645 return 0;
646 #endif
648 for (i = 0; i < sizeof(works) / sizeof(works[0]); i++)
649 if (exit_code == works[i])
650 return 0;
652 return 1;
653 }
655 static void save_svm_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *ctxt)
656 {
657 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
659 ASSERT(vmcb);
661 ctxt->eax = vmcb->rax;
662 ctxt->ss = vmcb->ss.sel;
663 ctxt->esp = vmcb->rsp;
664 ctxt->eflags = vmcb->rflags;
665 ctxt->cs = vmcb->cs.sel;
666 ctxt->eip = vmcb->rip;
668 ctxt->gs = vmcb->gs.sel;
669 ctxt->fs = vmcb->fs.sel;
670 ctxt->es = vmcb->es.sel;
671 ctxt->ds = vmcb->ds.sel;
672 }
674 static void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v)
675 {
676 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
678 regs->eip = vmcb->rip;
679 regs->esp = vmcb->rsp;
680 regs->eflags = vmcb->rflags;
681 regs->cs = vmcb->cs.sel;
682 regs->ds = vmcb->ds.sel;
683 regs->es = vmcb->es.sel;
684 regs->ss = vmcb->ss.sel;
685 }
687 /* XXX Use svm_load_cpu_guest_regs instead */
688 static void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs)
689 {
690 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
691 u32 *intercepts = &v->arch.hvm_svm.vmcb->exception_intercepts;
693 /* Write the guest register value into VMCB */
694 vmcb->rax = regs->eax;
695 vmcb->ss.sel = regs->ss;
696 vmcb->rsp = regs->esp;
697 vmcb->rflags = regs->eflags | 2UL;
698 vmcb->cs.sel = regs->cs;
699 vmcb->rip = regs->eip;
700 if (regs->eflags & EF_TF)
701 *intercepts |= EXCEPTION_BITMAP_DB;
702 else
703 *intercepts &= ~EXCEPTION_BITMAP_DB;
704 }
706 static void svm_load_cpu_guest_regs(
707 struct vcpu *v, struct cpu_user_regs *regs)
708 {
709 svm_load_cpu_user_regs(v, regs);
710 }
712 static void arch_svm_do_launch(struct vcpu *v)
713 {
714 svm_do_launch(v);
716 if ( v->vcpu_id != 0 )
717 {
718 cpu_user_regs_t *regs = &current->arch.guest_context.user_regs;
719 u16 cs_sel = regs->cs;
720 /*
721 * This is the launch of an AP; set state so that we begin executing
722 * the trampoline code in real-mode.
723 */
724 svm_do_vmmcall_reset_to_realmode(v, regs);
725 /* Adjust the state to execute the trampoline code.*/
726 v->arch.hvm_svm.vmcb->rip = 0;
727 v->arch.hvm_svm.vmcb->cs.sel= cs_sel;
728 v->arch.hvm_svm.vmcb->cs.base = (cs_sel << 4);
729 }
731 reset_stack_and_jump(svm_asm_do_launch);
732 }
734 static void svm_ctxt_switch_from(struct vcpu *v)
735 {
736 hvm_freeze_time(v);
737 svm_save_dr(v);
738 }
740 static void svm_ctxt_switch_to(struct vcpu *v)
741 {
742 #ifdef __x86_64__
743 /*
744 * This is required because VMRUN performs consistency checks,
745 * and some of the DOM0 selectors point to
746 * invalid GDT locations, which causes AMD processors
747 * to shut down.
748 */
749 set_segment_register(ds, 0);
750 set_segment_register(es, 0);
751 set_segment_register(ss, 0);
752 #endif
753 svm_restore_dr(v);
754 }
756 static int svm_vcpu_initialise(struct vcpu *v)
757 {
758 int rc;
760 v->arch.schedule_tail = arch_svm_do_launch;
761 v->arch.ctxt_switch_from = svm_ctxt_switch_from;
762 v->arch.ctxt_switch_to = svm_ctxt_switch_to;
764 v->arch.hvm_svm.saved_irq_vector = -1;
766 if ( (rc = svm_create_vmcb(v)) != 0 )
767 {
768 dprintk(XENLOG_WARNING,
769 "Failed to create VMCB for vcpu %d: err=%d.\n",
770 v->vcpu_id, rc);
771 return rc;
772 }
774 return 0;
775 }
777 static void svm_vcpu_destroy(struct vcpu *v)
778 {
779 svm_destroy_vmcb(v);
780 }
782 int start_svm(void)
783 {
784 u32 eax, ecx, edx;
785 u32 phys_hsa_lo, phys_hsa_hi;
786 u64 phys_hsa;
787 int cpu = smp_processor_id();
789 /* Xen does not fill x86_capability words except 0. */
790 ecx = cpuid_ecx(0x80000001);
791 boot_cpu_data.x86_capability[5] = ecx;
793 if (!(test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability)))
794 return 0;
796 /* check whether SVM feature is disabled in BIOS */
797 rdmsr(MSR_K8_VM_CR, eax, edx);
798 if ( eax & K8_VMCR_SVME_DISABLE )
799 {
800 printk("AMD SVM Extension is disabled in BIOS.\n");
801 return 0;
802 }
804 if (!(hsa[cpu] = alloc_host_save_area()))
805 return 0;
807 rdmsr(MSR_EFER, eax, edx);
808 eax |= EFER_SVME;
809 wrmsr(MSR_EFER, eax, edx);
810 asidpool_init( cpu );
811 printk("AMD SVM Extension is enabled for cpu %d.\n", cpu );
813 /* Initialize the HSA for this core */
814 phys_hsa = (u64) virt_to_maddr(hsa[cpu]);
815 phys_hsa_lo = (u32) phys_hsa;
816 phys_hsa_hi = (u32) (phys_hsa >> 32);
817 wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
819 if (!(root_vmcb[cpu] = alloc_vmcb()))
820 return 0;
821 root_vmcb_pa[cpu] = virt_to_maddr(root_vmcb[cpu]);
823 if (cpu == 0)
824 setup_vmcb_dump();
826 /* Setup HVM interfaces */
827 hvm_funcs.disable = stop_svm;
829 hvm_funcs.vcpu_initialise = svm_vcpu_initialise;
830 hvm_funcs.vcpu_destroy = svm_vcpu_destroy;
832 hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
833 hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs;
835 hvm_funcs.realmode = svm_realmode;
836 hvm_funcs.paging_enabled = svm_paging_enabled;
837 hvm_funcs.long_mode_enabled = svm_long_mode_enabled;
838 hvm_funcs.pae_enabled = svm_pae_enabled;
839 hvm_funcs.guest_x86_mode = svm_guest_x86_mode;
840 hvm_funcs.get_guest_ctrl_reg = svm_get_ctrl_reg;
841 hvm_funcs.get_segment_base = svm_get_segment_base;
843 hvm_funcs.update_host_cr3 = svm_update_host_cr3;
845 hvm_funcs.stts = svm_stts;
846 hvm_funcs.set_tsc_offset = svm_set_tsc_offset;
848 hvm_funcs.init_ap_context = svm_init_ap_context;
849 hvm_funcs.init_hypercall_page = svm_init_hypercall_page;
851 hvm_enabled = 1;
853 return 1;
854 }
856 void arch_svm_do_resume(struct vcpu *v)
857 {
858 /* pinning VCPU to a different core? */
859 if ( v->arch.hvm_svm.launch_core == smp_processor_id()) {
860 hvm_do_resume( v );
861 reset_stack_and_jump( svm_asm_do_resume );
862 }
863 else {
864 if (svm_dbg_on)
865 printk("VCPU core pinned: %d to %d\n",
866 v->arch.hvm_svm.launch_core, smp_processor_id() );
867 v->arch.hvm_svm.launch_core = smp_processor_id();
868 hvm_migrate_timers( v );
869 hvm_do_resume( v );
870 reset_stack_and_jump( svm_asm_do_resume );
871 }
872 }
874 static int svm_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
875 {
876 struct vcpu *v = current;
877 unsigned long eip;
878 int result;
879 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
881 ASSERT(vmcb);
883 //#if HVM_DEBUG
884 eip = vmcb->rip;
885 HVM_DBG_LOG(DBG_LEVEL_VMMU,
886 "svm_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
887 va, eip, (unsigned long)regs->error_code);
888 //#endif
890 result = shadow_fault(va, regs);
892 if( result ) {
893 /* Let's make sure that the Guest TLB is flushed */
894 set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
895 }
897 return result;
898 }
901 static void svm_do_no_device_fault(struct vmcb_struct *vmcb)
902 {
903 struct vcpu *v = current;
905 setup_fpu(v);
906 vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_NM;
908 if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
909 vmcb->cr0 &= ~X86_CR0_TS;
910 }
913 static void svm_do_general_protection_fault(struct vcpu *v,
914 struct cpu_user_regs *regs)
915 {
916 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
917 unsigned long eip, error_code;
919 ASSERT(vmcb);
921 eip = vmcb->rip;
922 error_code = vmcb->exitinfo1;
924 if (vmcb->idtr.limit == 0) {
925 printk("Huh? We got a GP Fault with an invalid IDTR!\n");
926 svm_dump_vmcb(__func__, vmcb);
927 svm_dump_regs(__func__, regs);
928 svm_dump_inst(vmcb->rip);
929 domain_crash(v->domain);
930 return;
931 }
933 HVM_DBG_LOG(DBG_LEVEL_1,
934 "svm_general_protection_fault: eip = %lx, erro_code = %lx",
935 eip, error_code);
937 HVM_DBG_LOG(DBG_LEVEL_1,
938 "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
939 (unsigned long)regs->eax, (unsigned long)regs->ebx,
940 (unsigned long)regs->ecx, (unsigned long)regs->edx,
941 (unsigned long)regs->esi, (unsigned long)regs->edi);
943 /* Reflect it back into the guest */
944 svm_inject_exception(v, TRAP_gp_fault, 1, error_code);
945 }
947 /* Reserved bits ECX: [31:14], [12:4], [2:1]*/
948 #define SVM_VCPU_CPUID_L1_ECX_RESERVED 0xffffdff6
949 /* Reserved bits EDX: [31:29], [27], [22:20], [18], [10] */
950 #define SVM_VCPU_CPUID_L1_EDX_RESERVED 0xe8740400
952 static void svm_vmexit_do_cpuid(struct vmcb_struct *vmcb, unsigned long input,
953 struct cpu_user_regs *regs)
954 {
955 unsigned int eax, ebx, ecx, edx;
956 unsigned long eip;
957 struct vcpu *v = current;
958 int inst_len;
960 ASSERT(vmcb);
962 eip = vmcb->rip;
964 HVM_DBG_LOG(DBG_LEVEL_1,
965 "do_cpuid: (eax) %lx, (ebx) %lx, (ecx) %lx, (edx) %lx,"
966 " (esi) %lx, (edi) %lx",
967 (unsigned long)regs->eax, (unsigned long)regs->ebx,
968 (unsigned long)regs->ecx, (unsigned long)regs->edx,
969 (unsigned long)regs->esi, (unsigned long)regs->edi);
971 if ( !cpuid_hypervisor_leaves(input, &eax, &ebx, &ecx, &edx) )
972 {
973 cpuid(input, &eax, &ebx, &ecx, &edx);
974 if (input == 0x00000001 || input == 0x80000001 )
975 {
976 if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
977 {
978 /* Since the apic is disabled, avoid any confusion
979 about SMP cpus being available */
980 clear_bit(X86_FEATURE_APIC, &edx);
981 }
982 #if CONFIG_PAGING_LEVELS >= 3
983 if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
984 #endif
985 {
986 clear_bit(X86_FEATURE_PAE, &edx);
987 if (input == 0x80000001 )
988 clear_bit(X86_FEATURE_NX & 31, &edx);
989 }
990 clear_bit(X86_FEATURE_PSE36, &edx);
991 if (input == 0x00000001 )
992 {
993 /* Clear out reserved bits. */
994 ecx &= ~SVM_VCPU_CPUID_L1_ECX_RESERVED;
995 edx &= ~SVM_VCPU_CPUID_L1_EDX_RESERVED;
997 clear_bit(X86_FEATURE_MWAIT & 31, &ecx);
999 /* Guest should only see one logical processor.
1000 * See details on page 23 of AMD CPUID Specification.
1001 */
1002 clear_bit(X86_FEATURE_HT, &edx); /* clear the hyperthread bit */
1003 ebx &= 0xFF00FFFF; /* clear the logical processor count when HTT=0 */
1004 ebx |= 0x00010000; /* set to 1 just as a precaution */
1006 else
1008 /* Clear the Cmp_Legacy bit
1009 * This bit is supposed to be zero when HTT = 0.
1010 * See details on page 23 of AMD CPUID Specification.
1011 */
1012 clear_bit(X86_FEATURE_CMP_LEGACY & 31, &ecx);
1013 /* Make SVM feature invisible to the guest. */
1014 clear_bit(X86_FEATURE_SVME & 31, &ecx);
1015 #ifdef __i386__
1016 /* Mask feature for Intel ia32e or AMD long mode. */
1017 clear_bit(X86_FEATURE_LAHF_LM & 31, &ecx);
1019 clear_bit(X86_FEATURE_LM & 31, &edx);
1020 clear_bit(X86_FEATURE_SYSCALL & 31, &edx);
1021 #endif
1022 /* So far, we do not support 3DNow for the guest. */
1023 clear_bit(X86_FEATURE_3DNOW & 31, &edx);
1024 clear_bit(X86_FEATURE_3DNOWEXT & 31, &edx);
1027 else if ( ( input == 0x80000007 ) || ( input == 0x8000000A ) )
1029 /* Mask out features of power management and SVM extension. */
1030 eax = ebx = ecx = edx = 0;
1032 else if ( input == 0x80000008 )
1034 /* Make sure the number of CPU cores is 1 when HTT=0 */
1035 ecx &= 0xFFFFFF00;
1039 regs->eax = (unsigned long)eax;
1040 regs->ebx = (unsigned long)ebx;
1041 regs->ecx = (unsigned long)ecx;
1042 regs->edx = (unsigned long)edx;
1044 HVM_DBG_LOG(DBG_LEVEL_1,
1045 "svm_vmexit_do_cpuid: eip: %lx, input: %lx, out:eax=%x, "
1046 "ebx=%x, ecx=%x, edx=%x",
1047 eip, input, eax, ebx, ecx, edx);
1049 inst_len = __get_instruction_length(vmcb, INSTR_CPUID, NULL);
1050 ASSERT(inst_len > 0);
1051 __update_guest_eip(vmcb, inst_len);
1055 static inline unsigned long *get_reg_p(unsigned int gpreg,
1056 struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
1058 unsigned long *reg_p = NULL;
1059 switch (gpreg)
1061 case SVM_REG_EAX:
1062 reg_p = (unsigned long *)&regs->eax;
1063 break;
1064 case SVM_REG_EBX:
1065 reg_p = (unsigned long *)&regs->ebx;
1066 break;
1067 case SVM_REG_ECX:
1068 reg_p = (unsigned long *)&regs->ecx;
1069 break;
1070 case SVM_REG_EDX:
1071 reg_p = (unsigned long *)&regs->edx;
1072 break;
1073 case SVM_REG_EDI:
1074 reg_p = (unsigned long *)&regs->edi;
1075 break;
1076 case SVM_REG_ESI:
1077 reg_p = (unsigned long *)&regs->esi;
1078 break;
1079 case SVM_REG_EBP:
1080 reg_p = (unsigned long *)&regs->ebp;
1081 break;
1082 case SVM_REG_ESP:
1083 reg_p = (unsigned long *)&vmcb->rsp;
1084 break;
1085 #ifdef __x86_64__
1086 case SVM_REG_R8:
1087 reg_p = (unsigned long *)&regs->r8;
1088 break;
1089 case SVM_REG_R9:
1090 reg_p = (unsigned long *)&regs->r9;
1091 break;
1092 case SVM_REG_R10:
1093 reg_p = (unsigned long *)&regs->r10;
1094 break;
1095 case SVM_REG_R11:
1096 reg_p = (unsigned long *)&regs->r11;
1097 break;
1098 case SVM_REG_R12:
1099 reg_p = (unsigned long *)&regs->r12;
1100 break;
1101 case SVM_REG_R13:
1102 reg_p = (unsigned long *)&regs->r13;
1103 break;
1104 case SVM_REG_R14:
1105 reg_p = (unsigned long *)&regs->r14;
1106 break;
1107 case SVM_REG_R15:
1108 reg_p = (unsigned long *)&regs->r15;
1109 break;
1110 #endif
1111 default:
1112 BUG();
1115 return reg_p;
1119 static inline unsigned long get_reg(unsigned int gpreg,
1120 struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
1122 unsigned long *gp;
1123 gp = get_reg_p(gpreg, regs, vmcb);
1124 return *gp;
1128 static inline void set_reg(unsigned int gpreg, unsigned long value,
1129 struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
1131 unsigned long *gp;
1132 gp = get_reg_p(gpreg, regs, vmcb);
1133 *gp = value;
1137 static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs)
1139 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
1141 v->arch.hvm_vcpu.flag_dr_dirty = 1;
1143 __restore_debug_registers(v);
1145 /* allow the guest full access to the debug registers */
1146 vmcb->dr_intercepts = 0;
1150 static void svm_get_prefix_info(
1151 struct vmcb_struct *vmcb,
1152 unsigned int dir, segment_selector_t **seg, unsigned int *asize)
1154 unsigned char inst[MAX_INST_LEN];
1155 int i;
1157 memset(inst, 0, MAX_INST_LEN);
1158 if (inst_copy_from_guest(inst, svm_rip2pointer(vmcb), sizeof(inst))
1159 != MAX_INST_LEN)
1161 gdprintk(XENLOG_ERR, "get guest instruction failed\n");
1162 domain_crash(current->domain);
1163 return;
1166 for (i = 0; i < MAX_INST_LEN; i++)
1168 switch (inst[i])
1170 case 0xf3: /* REPZ */
1171 case 0xf2: /* REPNZ */
1172 case 0xf0: /* LOCK */
1173 case 0x66: /* data32 */
1174 #ifdef __x86_64__
1175 /* REX prefixes */
1176 case 0x40:
1177 case 0x41:
1178 case 0x42:
1179 case 0x43:
1180 case 0x44:
1181 case 0x45:
1182 case 0x46:
1183 case 0x47:
1185 case 0x48:
1186 case 0x49:
1187 case 0x4a:
1188 case 0x4b:
1189 case 0x4c:
1190 case 0x4d:
1191 case 0x4e:
1192 case 0x4f:
1193 #endif
1194 continue;
1195 case 0x67: /* addr32 */
1196 *asize ^= 48; /* Switch 16/32 bits */
1197 continue;
1198 case 0x2e: /* CS */
1199 *seg = &vmcb->cs;
1200 continue;
1201 case 0x36: /* SS */
1202 *seg = &vmcb->ss;
1203 continue;
1204 case 0x26: /* ES */
1205 *seg = &vmcb->es;
1206 continue;
1207 case 0x64: /* FS */
1208 *seg = &vmcb->fs;
1209 continue;
1210 case 0x65: /* GS */
1211 *seg = &vmcb->gs;
1212 continue;
1213 case 0x3e: /* DS */
1214 *seg = &vmcb->ds;
1215 continue;
1216 default:
1217 break;
1219 return;
1224 /* Get the address of INS/OUTS instruction */
1225 static inline int svm_get_io_address(
1226 struct vcpu *v,
1227 struct cpu_user_regs *regs, unsigned int dir,
1228 unsigned long *count, unsigned long *addr)
1230 unsigned long reg;
1231 unsigned int asize = 0;
1232 unsigned int isize;
1233 int long_mode;
1234 ioio_info_t info;
1235 segment_selector_t *seg = NULL;
1236 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
1238 info.bytes = vmcb->exitinfo1;
1240 /* If we're in long mode, we shouldn't check the segment presence & limit */
1241 long_mode = vmcb->cs.attributes.fields.l && vmcb->efer & EFER_LMA;
1243 /* d field of cs.attributes is 1 for 32-bit, 0 for 16 or 64 bit.
1244 * l field combined with EFER_LMA -> longmode says whether it's 16 or 64 bit.
1245 */
1246 asize = (long_mode)?64:((vmcb->cs.attributes.fields.db)?32:16);
1249 /* The ins/outs instructions are single byte, so if we have got more
1250 * than one byte (+ maybe rep-prefix), we have some prefix so we need
1251 * to figure out what it is...
1252 */
1253 isize = vmcb->exitinfo2 - vmcb->rip;
1255 if (info.fields.rep)
1256 isize --;
1258 if (isize > 1)
1259 svm_get_prefix_info(vmcb, dir, &seg, &asize);
1261 ASSERT(dir == IOREQ_READ || dir == IOREQ_WRITE);
1263 if (dir == IOREQ_WRITE)
1265 reg = regs->esi;
1266 if (!seg) /* If no prefix, use DS. */
1267 seg = &vmcb->ds;
1269 else
1271 reg = regs->edi;
1272 seg = &vmcb->es; /* Note: This is ALWAYS ES. */
1275 /* If the segment isn't present, give GP fault! */
1276 if (!long_mode && !seg->attributes.fields.p)
1278 svm_inject_exception(v, TRAP_gp_fault, 1, seg->sel);
1279 return 0;
1282 if (asize == 16)
1284 *addr = (reg & 0xFFFF);
1285 *count = regs->ecx & 0xffff;
1287 else
1289 *addr = reg;
1290 *count = regs->ecx;
1293 if (!long_mode) {
1294 if (*addr > seg->limit)
1296 svm_inject_exception(v, TRAP_gp_fault, 1, seg->sel);
1297 return 0;
1299 else
1301 *addr += seg->base;
1306 return 1;
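/*
 * Worked example for the 16-bit case above: OUTSB with DS:SI =
 * 0x2000:0x0100 and no override prefix uses reg = ESI, so
 * *addr = 0x0100, *count = CX, and after the limit check the DS base
 * (0x20000 in real mode) is added, giving a final address of 0x20100.
 */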
1310 static void svm_io_instruction(struct vcpu *v)
1312 struct cpu_user_regs *regs;
1313 struct hvm_io_op *pio_opp;
1314 unsigned int port;
1315 unsigned int size, dir, df;
1316 ioio_info_t info;
1317 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
1319 ASSERT(vmcb);
1320 pio_opp = &current->arch.hvm_vcpu.io_op;
1321 pio_opp->instr = INSTR_PIO;
1322 pio_opp->flags = 0;
1324 regs = &pio_opp->io_context;
1326 /* Copy current guest state into io instruction state structure. */
1327 memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
1328 hvm_store_cpu_guest_regs(v, regs, NULL);
1330 info.bytes = vmcb->exitinfo1;
1332 port = info.fields.port; /* port used to be addr */
1333 dir = info.fields.type; /* direction */
1334 df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
1336 if (info.fields.sz32)
1337 size = 4;
1338 else if (info.fields.sz16)
1339 size = 2;
1340 else
1341 size = 1;
1343 HVM_DBG_LOG(DBG_LEVEL_IO,
1344 "svm_io_instruction: port 0x%x eip=%x:%"PRIx64", "
1345 "exit_qualification = %"PRIx64,
1346 port, vmcb->cs.sel, vmcb->rip, info.bytes);
1348 /* string instruction */
1349 if (info.fields.str)
1351 unsigned long addr, count;
1352 int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
1354 if (!svm_get_io_address(v, regs, dir, &count, &addr))
1356 /* We failed to get a valid address, so don't do the IO operation -
1357 * it would just get worse if we do! Hopefully the guest is handling
1358 * GP faults...
1359 */
1360 return;
1363 /* "rep" prefix */
1364 if (info.fields.rep)
1366 pio_opp->flags |= REPZ;
1368 else
1370 count = 1;
1373 /*
1374 * Handle string pio instructions that cross pages or that
1375 * are unaligned. See the comments in hvm_platform.c/handle_mmio()
1376 */
1377 if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK))
1379 unsigned long value = 0;
1381 pio_opp->flags |= OVERLAP;
1382 pio_opp->addr = addr;
1384 if (dir == IOREQ_WRITE) /* OUTS */
1386 if (hvm_paging_enabled(current))
1387 (void)hvm_copy_from_guest_virt(&value, addr, size);
1388 else
1389 (void)hvm_copy_from_guest_phys(&value, addr, size);
1392 if (count == 1)
1393 regs->eip = vmcb->exitinfo2;
1395 send_pio_req(port, 1, size, value, dir, df, 0);
1397 else
1399 unsigned long last_addr = sign > 0 ? addr + count * size - 1
1400 : addr - (count - 1) * size;
1402 if ((addr & PAGE_MASK) != (last_addr & PAGE_MASK))
1404 if (sign > 0)
1405 count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
1406 else
1407 count = (addr & ~PAGE_MASK) / size + 1;
1409 else
1410 regs->eip = vmcb->exitinfo2;
1412 send_pio_req(port, count, size, addr, dir, df, 1);
1415 else
1417 /*
1418 * On SVM, the RIP of the instruction following the IN/OUT is saved in
1419 * ExitInfo2
1420 */
1421 regs->eip = vmcb->exitinfo2;
1423 if (port == 0xe9 && dir == IOREQ_WRITE && size == 1)
1424 hvm_print_line(v, regs->eax); /* guest debug output */
1426 send_pio_req(port, 1, size, regs->eax, dir, df, 0);
1430 static int svm_set_cr0(unsigned long value)
1432 struct vcpu *v = current;
1433 unsigned long mfn;
1434 int paging_enabled;
1435 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
1436 unsigned long old_base_mfn;
1438 ASSERT(vmcb);
1440 /* We don't want to lose PG. ET is reserved and should always be 1 */
1441 paging_enabled = svm_paging_enabled(v);
1442 value |= X86_CR0_ET;
1443 vmcb->cr0 = value | X86_CR0_PG | X86_CR0_WP;
1444 v->arch.hvm_svm.cpu_shadow_cr0 = value;
1446 /* TS cleared? Then initialise FPU now. */
1447 if ( !(value & X86_CR0_TS) )
1449 setup_fpu(v);
1450 vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_NM;
1453 HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
1455 if ((value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled)
1457 /* The guest CR3 must be pointing to the guest physical. */
1458 mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
1459 if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain))
1461 gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n",
1462 v->arch.hvm_svm.cpu_cr3, mfn);
1463 domain_crash(v->domain);
1464 return 0;
1467 #if defined(__x86_64__)
1468 if (test_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state)
1469 && !test_bit(SVM_CPU_STATE_PAE_ENABLED,
1470 &v->arch.hvm_svm.cpu_state))
1472 HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n");
1473 svm_inject_exception(v, TRAP_gp_fault, 1, 0);
1476 if (test_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state))
1478 /* PAE must already be enabled here (checked above), so enable long mode */
1479 HVM_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode\n");
1480 set_bit(SVM_CPU_STATE_LMA_ENABLED, &v->arch.hvm_svm.cpu_state);
1481 vmcb->efer |= EFER_LMA;
1483 #endif /* __x86_64__ */
1485 /* Now arch.guest_table points to machine physical. */
1486 old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
1487 v->arch.guest_table = pagetable_from_pfn(mfn);
1488 if ( old_base_mfn )
1489 put_page(mfn_to_page(old_base_mfn));
1490 shadow_update_paging_modes(v);
1492 HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
1493 (unsigned long) (mfn << PAGE_SHIFT));
1495 vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
1496 set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
1499 if ( !((value & X86_CR0_PE) && (value & X86_CR0_PG)) && paging_enabled )
1500 if ( v->arch.hvm_svm.cpu_cr3 ) {
1501 put_page(mfn_to_page(get_mfn_from_gpfn(
1502 v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT)));
1503 v->arch.guest_table = pagetable_null();
1506 /*
1507 * SVM implements paged real-mode and when we return to real-mode
1508 * we revert back to the physical mappings that the domain builder
1509 * created.
1510 */
1511 if ((value & X86_CR0_PE) == 0) {
1512 if (value & X86_CR0_PG) {
1513 svm_inject_exception(v, TRAP_gp_fault, 1, 0);
1514 return 0;
1516 shadow_update_paging_modes(v);
1517 vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
1518 set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
1520 else if ( (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PE )
1522 if ( svm_long_mode_enabled(v) )
1524 vmcb->efer &= ~EFER_LMA;
1525 clear_bit(SVM_CPU_STATE_LMA_ENABLED, &v->arch.hvm_svm.cpu_state);
1527 /* we should take care of this kind of situation */
1528 shadow_update_paging_modes(v);
1529 vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
1530 set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
1533 return 1;
1536 /*
1537 * Read from control registers. CR0 and CR4 are read from the shadow.
1538 */
1539 static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
1541 unsigned long value = 0;
1542 struct vcpu *v = current;
1543 struct vlapic *vlapic = vcpu_vlapic(v);
1544 struct vmcb_struct *vmcb;
1546 vmcb = v->arch.hvm_svm.vmcb;
1547 ASSERT(vmcb);
1549 switch ( cr )
1551 case 0:
1552 value = v->arch.hvm_svm.cpu_shadow_cr0;
1553 if (svm_dbg_on)
1554 printk("CR0 read =%lx \n", value );
1555 break;
1556 case 2:
1557 value = vmcb->cr2;
1558 break;
1559 case 3:
1560 value = (unsigned long) v->arch.hvm_svm.cpu_cr3;
1561 if (svm_dbg_on)
1562 printk("CR3 read =%lx \n", value );
1563 break;
1564 case 4:
1565 value = (unsigned long) v->arch.hvm_svm.cpu_shadow_cr4;
1566 if (svm_dbg_on)
1567 printk("CR4 read=%lx\n", value);
1568 break;
1569 case 8:
1570 value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
1571 value = (value & 0xF0) >> 4;
1572 break;
1574 default:
1575 domain_crash(v->domain);
1576 return;
1579 set_reg(gp, value, regs, vmcb);
1581 HVM_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
1585 static inline int svm_pgbit_test(struct vcpu *v)
1587 return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
1591 /*
1592 * Write to control registers
1593 */
1594 static int mov_to_cr(int gpreg, int cr, struct cpu_user_regs *regs)
1596 unsigned long value, old_cr, old_base_mfn, mfn;
1597 struct vcpu *v = current;
1598 struct vlapic *vlapic = vcpu_vlapic(v);
1599 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
1601 value = get_reg(gpreg, regs, vmcb);
1603 HVM_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value);
1604 HVM_DBG_LOG(DBG_LEVEL_1, "current = %lx,", (unsigned long) current);
1606 switch (cr)
1608 case 0:
1609 if (svm_dbg_on)
1610 printk("CR0 write =%lx \n", value );
1611 return svm_set_cr0(value);
1613 case 3:
1614 if (svm_dbg_on)
1615 printk("CR3 write =%lx \n", value );
1616 /* If paging is not enabled yet, simply copy the value to CR3. */
1617 if (!svm_paging_enabled(v)) {
1618 v->arch.hvm_svm.cpu_cr3 = value;
1619 break;
1621 set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
1623 /* We make a new one if the shadow does not exist. */
1624 if (value == v->arch.hvm_svm.cpu_cr3)
1626 /*
1627 * This is simple TLB flush, implying the guest has
1628 * removed some translation or changed page attributes.
1629 * We simply invalidate the shadow.
1630 */
1631 mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
1632 if (mfn != pagetable_get_pfn(v->arch.guest_table))
1633 goto bad_cr3;
1634 shadow_update_cr3(v);
1636 else
1638 /*
1639 * If different, make a shadow. Check if the PDBR is valid
1640 * first.
1641 */
1642 HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
1643 mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
1644 if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain))
1645 goto bad_cr3;
1647 old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
1648 v->arch.guest_table = pagetable_from_pfn(mfn);
1650 if (old_base_mfn)
1651 put_page(mfn_to_page(old_base_mfn));
1653 v->arch.hvm_svm.cpu_cr3 = value;
1654 update_cr3(v);
1655 vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
1656 HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
1658 break;
1660 case 4: /* CR4 */
1661 if (svm_dbg_on)
1662 printk( "write cr4=%lx, cr0=%lx\n",
1663 value, v->arch.hvm_svm.cpu_shadow_cr0 );
1664 old_cr = v->arch.hvm_svm.cpu_shadow_cr4;
1665 if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
1667 set_bit(SVM_CPU_STATE_PAE_ENABLED, &v->arch.hvm_svm.cpu_state);
1668 if ( svm_pgbit_test(v) )
1670 /* The guest is a 32-bit PAE guest. */
1671 #if CONFIG_PAGING_LEVELS >= 3
1672 unsigned long mfn, old_base_mfn;
1673 mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
1674 if ( !mfn_valid(mfn) ||
1675 !get_page(mfn_to_page(mfn), v->domain) )
1676 goto bad_cr3;
1678 /*
1679 * Now arch.guest_table points to machine physical.
1680 */
1682 old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
1683 v->arch.guest_table = pagetable_from_pfn(mfn);
1684 if ( old_base_mfn )
1685 put_page(mfn_to_page(old_base_mfn));
1686 shadow_update_paging_modes(v);
1688 HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
1689 (unsigned long) (mfn << PAGE_SHIFT));
1691 vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
1693 HVM_DBG_LOG(DBG_LEVEL_VMMU,
1694 "Update CR3 value = %lx, mfn = %lx",
1695 v->arch.hvm_svm.cpu_cr3, mfn);
1696 #endif
1699 else if (value & X86_CR4_PAE) {
1700 set_bit(SVM_CPU_STATE_PAE_ENABLED, &v->arch.hvm_svm.cpu_state);
1701 } else {
1702 if (test_bit(SVM_CPU_STATE_LMA_ENABLED,
1703 &v->arch.hvm_svm.cpu_state)) {
1704 svm_inject_exception(v, TRAP_gp_fault, 1, 0);
1706 clear_bit(SVM_CPU_STATE_PAE_ENABLED, &v->arch.hvm_svm.cpu_state);
1709 v->arch.hvm_svm.cpu_shadow_cr4 = value;
1710 vmcb->cr4 = value | SVM_CR4_HOST_MASK;
1712 /*
1713 * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
1714 * all TLB entries except global entries.
1715 */
1716 if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE))
1718 set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
1719 shadow_update_paging_modes(v);
1721 break;
1723 case 8:
1724 vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
1725 break;
1727 default:
1728 gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
1729 domain_crash(v->domain);
1730 return 0;
1733 return 1;
1735 bad_cr3:
1736 gdprintk(XENLOG_ERR, "Invalid CR3\n");
1737 domain_crash(v->domain);
1738 return 0;
1742 #define ARR_SIZE(x) (sizeof(x) / sizeof(x[0]))
1745 static int svm_cr_access(struct vcpu *v, unsigned int cr, unsigned int type,
1746 struct cpu_user_regs *regs)
1748 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
1749 int inst_len = 0;
1750 int index;
1751 unsigned int gpreg;
1752 unsigned long value;
1753 u8 buffer[MAX_INST_LEN];
1754 u8 prefix = 0;
1755 int result = 1;
1756 enum instruction_index list_a[] = {INSTR_MOV2CR, INSTR_CLTS, INSTR_LMSW};
1757 enum instruction_index list_b[] = {INSTR_MOVCR2, INSTR_SMSW};
1758 enum instruction_index match;
1760 ASSERT(vmcb);
1762 inst_copy_from_guest(buffer, svm_rip2pointer(vmcb), sizeof(buffer));
1764 /* get index to first actual instruction byte - as we will need to know
1765 where the prefix lives later on */
1766 index = skip_prefix_bytes(buffer, sizeof(buffer));
1768 if ( type == TYPE_MOV_TO_CR )
1770 inst_len = __get_instruction_length_from_list(
1771 vmcb, list_a, ARR_SIZE(list_a), &buffer[index], &match);
1773 else /* type == TYPE_MOV_FROM_CR */
1775 inst_len = __get_instruction_length_from_list(
1776 vmcb, list_b, ARR_SIZE(list_b), &buffer[index], &match);
1779 ASSERT(inst_len > 0);
1781 inst_len += index;
1783 /* Check for REX prefix - it's ALWAYS the last byte of any prefix bytes */
1784 if (index > 0 && (buffer[index-1] & 0xF0) == 0x40)
1785 prefix = buffer[index-1];
1787 HVM_DBG_LOG(DBG_LEVEL_1, "eip = %lx", (unsigned long) vmcb->rip);
1789 switch (match)
1791 case INSTR_MOV2CR:
1792 gpreg = decode_src_reg(prefix, buffer[index+2]);
1793 result = mov_to_cr(gpreg, cr, regs);
1794 break;
1796 case INSTR_MOVCR2:
1797 gpreg = decode_src_reg(prefix, buffer[index+2]);
1798 mov_from_cr(cr, gpreg, regs);
1799 break;
1801 case INSTR_CLTS:
1802 /* TS being cleared means that it's time to restore fpu state. */
1803 setup_fpu(current);
1804 vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_NM;
1805 vmcb->cr0 &= ~X86_CR0_TS; /* clear TS */
1806 v->arch.hvm_svm.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
1807 break;
1809 case INSTR_LMSW:
1810 if (svm_dbg_on)
1811 svm_dump_inst(svm_rip2pointer(vmcb));
1813 gpreg = decode_src_reg(prefix, buffer[index+2]);
1814 value = get_reg(gpreg, regs, vmcb) & 0xF;
1816 if (svm_dbg_on)
1817 printk("CR0-LMSW value=%lx, reg=%d, inst_len=%d\n", value, gpreg,
1818 inst_len);
1820 value = (v->arch.hvm_svm.cpu_shadow_cr0 & ~0xF) | value;
1822 if (svm_dbg_on)
1823 printk("CR0-LMSW CR0 - New value=%lx\n", value);
1825 result = svm_set_cr0(value);
1826 break;
1828 case INSTR_SMSW:
1829 if (svm_dbg_on)
1830 svm_dump_inst(svm_rip2pointer(vmcb));
1831 value = v->arch.hvm_svm.cpu_shadow_cr0;
1832 gpreg = decode_src_reg(prefix, buffer[index+2]);
1833 set_reg(gpreg, value, regs, vmcb);
1835 if (svm_dbg_on)
1836 printk("CR0-SMSW value=%lx, reg=%d, inst_len=%d\n", value, gpreg,
1837 inst_len);
1838 break;
1840 default:
1841 BUG();
1844 ASSERT(inst_len);
1846 __update_guest_eip(vmcb, inst_len);
1848 return result;
1851 static inline void svm_do_msr_access(
1852 struct vcpu *v, struct cpu_user_regs *regs)
1854 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
1855 int inst_len;
1856 u64 msr_content=0;
1857 u32 ecx = regs->ecx, eax, edx;
1859 ASSERT(vmcb);
1861 HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, eax=%x, edx=%x, exitinfo = %lx",
1862 ecx, (u32)regs->eax, (u32)regs->edx,
1863 (unsigned long)vmcb->exitinfo1);
1865 /* is it a read? */
1866 if (vmcb->exitinfo1 == 0)
1868 switch (ecx) {
1869 case MSR_IA32_TIME_STAMP_COUNTER:
1870 msr_content = hvm_get_guest_time(v);
1871 break;
1872 case MSR_IA32_SYSENTER_CS:
1873 msr_content = vmcb->sysenter_cs;
1874 break;
1875 case MSR_IA32_SYSENTER_ESP:
1876 msr_content = vmcb->sysenter_esp;
1877 break;
1878 case MSR_IA32_SYSENTER_EIP:
1879 msr_content = vmcb->sysenter_eip;
1880 break;
1881 case MSR_IA32_APICBASE:
1882 msr_content = vcpu_vlapic(v)->apic_base_msr;
1883 break;
1884 default:
1885 if (long_mode_do_msr_read(regs))
1886 goto done;
1888 if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
1889 rdmsr_safe(ecx, eax, edx) == 0 )
1891 regs->eax = eax;
1892 regs->edx = edx;
1893 goto done;
1895 svm_inject_exception(v, TRAP_gp_fault, 1, 0);
1896 return;
1898 regs->eax = msr_content & 0xFFFFFFFF;
1899 regs->edx = msr_content >> 32;
1901 done:
1902 HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
1903 ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
1905 inst_len = __get_instruction_length(vmcb, INSTR_RDMSR, NULL);
1907 else
1909 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
1911 switch (ecx)
1913 case MSR_IA32_TIME_STAMP_COUNTER:
1914 hvm_set_guest_time(v, msr_content);
1915 break;
1916 case MSR_IA32_SYSENTER_CS:
1917 vmcb->sysenter_cs = msr_content;
1918 break;
1919 case MSR_IA32_SYSENTER_ESP:
1920 vmcb->sysenter_esp = msr_content;
1921 break;
1922 case MSR_IA32_SYSENTER_EIP:
1923 vmcb->sysenter_eip = msr_content;
1924 break;
1925 case MSR_IA32_APICBASE:
1926 vlapic_msr_set(vcpu_vlapic(v), msr_content);
1927 break;
1928 default:
1929 if ( !long_mode_do_msr_write(regs) )
1930 wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
1931 break;
1934 inst_len = __get_instruction_length(vmcb, INSTR_WRMSR, NULL);
1937 __update_guest_eip(vmcb, inst_len);
1941 static inline void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
1943 __update_guest_eip(vmcb, 1);
1945 /* Check for interrupt not handled or new interrupt. */
1946 if ( (vmcb->rflags & X86_EFLAGS_IF) &&
1947 (vmcb->vintr.fields.irq || cpu_has_pending_irq(current)) )
1948 return;
1950 hvm_hlt(vmcb->rflags);
1954 static void svm_vmexit_do_invd(struct vmcb_struct *vmcb)
1956 int inst_len;
1958 /* Invalidate the cache - we can't really do that safely - maybe we should
1959 * WBINVD, but I think it's just fine to completely ignore it - we should
1960 * have cache-snooping that solves it anyways. -- Mats P.
1961 */
1963 /* Tell the user that we did this - just in case someone runs some really
1964 * weird operating system and wants to know why it's not working...
1965 */
1966 printk("INVD instruction intercepted - ignored\n");
1968 inst_len = __get_instruction_length(vmcb, INSTR_INVD, NULL);
1969 __update_guest_eip(vmcb, inst_len);
1975 #ifdef XEN_DEBUGGER
1976 static void svm_debug_save_cpu_user_regs(struct vmcb_struct *vmcb,
1977 struct cpu_user_regs *regs)
1979 regs->eip = vmcb->rip;
1980 regs->esp = vmcb->rsp;
1981 regs->eflags = vmcb->rflags;
1983 regs->xcs = vmcb->cs.sel;
1984 regs->xds = vmcb->ds.sel;
1985 regs->xes = vmcb->es.sel;
1986 regs->xfs = vmcb->fs.sel;
1987 regs->xgs = vmcb->gs.sel;
1988 regs->xss = vmcb->ss.sel;
1992 static void svm_debug_restore_cpu_user_regs(struct cpu_user_regs *regs)
1994 vmcb->ss.sel = regs->xss;
1995 vmcb->rsp = regs->esp;
1996 vmcb->rflags = regs->eflags;
1997 vmcb->cs.sel = regs->xcs;
1998 vmcb->rip = regs->eip;
2000 vmcb->gs.sel = regs->xgs;
2001 vmcb->fs.sel = regs->xfs;
2002 vmcb->es.sel = regs->xes;
2003 vmcb->ds.sel = regs->xds;
2005 #endif
2008 void svm_handle_invlpg(const short invlpga, struct cpu_user_regs *regs)
2010 struct vcpu *v = current;
2011 u8 opcode[MAX_INST_LEN], prefix, length = MAX_INST_LEN;
2012 unsigned long g_vaddr;
2013 int inst_len;
2014 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
2016 /*
2017 * Unknown how many bytes the invlpg instruction will take. Use the
2018 * maximum instruction length here
2019 */
2020 if (inst_copy_from_guest(opcode, svm_rip2pointer(vmcb), length) < length)
2022 gdprintk(XENLOG_ERR, "Error reading memory %d bytes\n", length);
2023 domain_crash(v->domain);
2024 return;
2027 if (invlpga)
2029 inst_len = __get_instruction_length(vmcb, INSTR_INVLPGA, opcode);
2030 ASSERT(inst_len > 0);
2031 __update_guest_eip(vmcb, inst_len);
2033 /*
2034 * The address is implicit in this instruction. At the moment, we don't
2035 * use ecx (ASID) to identify individual guest pages
2036 */
2037 g_vaddr = regs->eax;
2039 else
2041 /* What about multiple prefix codes? */
2042 prefix = (is_prefix(opcode[0])?opcode[0]:0);
2043 inst_len = __get_instruction_length(vmcb, INSTR_INVLPG, opcode);
2044 ASSERT(inst_len > 0);
2046 inst_len--;
2047 length -= inst_len;
2049 /*
2050 * Decode memory operand of the instruction including ModRM, SIB, and
2051 * displacement to get effective address and length in bytes. Assume
2052 * the system in either 32- or 64-bit mode.
2053 */
2054 g_vaddr = get_effective_addr_modrm64(vmcb, regs, prefix, inst_len,
2055 &opcode[inst_len], &length);
2057 inst_len += length;
2058 __update_guest_eip (vmcb, inst_len);
2061 /* Overkill, we may not need this */
2062 set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
2063 shadow_invlpg(v, g_vaddr);
2067 /*
2068 * Reset to realmode causes execution to start at 0xF000:0xFFF0 in
2069 * 16-bit realmode. Basically, this mimics a processor reset.
2071 * returns 0 on success, non-zero otherwise
2072 */
2073 static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v,
2074 struct cpu_user_regs *regs)
2076 struct vmcb_struct *vmcb;
2078 ASSERT(v);
2079 ASSERT(regs);
2081 vmcb = v->arch.hvm_svm.vmcb;
2083 ASSERT(vmcb);
2085 /* clear the vmcb and user regs */
2086 memset(regs, 0, sizeof(struct cpu_user_regs));
2088 /* VMCB Control */
2089 vmcb->tsc_offset = 0;
2091 /* VMCB State */
2092 vmcb->cr0 = X86_CR0_ET | X86_CR0_PG | X86_CR0_WP;
2093 v->arch.hvm_svm.cpu_shadow_cr0 = X86_CR0_ET;
2095 vmcb->cr2 = 0;
2096 vmcb->efer = EFER_SVME;
2098 vmcb->cr4 = SVM_CR4_HOST_MASK;
2099 v->arch.hvm_svm.cpu_shadow_cr4 = 0;
2100 clear_bit(SVM_CPU_STATE_PAE_ENABLED, &v->arch.hvm_svm.cpu_state);
2102 /* This will jump to ROMBIOS */
2103 vmcb->rip = 0xFFF0;
2105 /* setup the segment registers and all their hidden states */
2106 vmcb->cs.sel = 0xF000;
2107 vmcb->cs.attributes.bytes = 0x089b;
2108 vmcb->cs.limit = 0xffff;
2109 vmcb->cs.base = 0x000F0000;
2111 vmcb->ss.sel = 0x00;
2112 vmcb->ss.attributes.bytes = 0x0893;
2113 vmcb->ss.limit = 0xffff;
2114 vmcb->ss.base = 0x00;
2116 vmcb->ds.sel = 0x00;
2117 vmcb->ds.attributes.bytes = 0x0893;
2118 vmcb->ds.limit = 0xffff;
2119 vmcb->ds.base = 0x00;
2121 vmcb->es.sel = 0x00;
2122 vmcb->es.attributes.bytes = 0x0893;
2123 vmcb->es.limit = 0xffff;
2124 vmcb->es.base = 0x00;
2126 vmcb->fs.sel = 0x00;
2127 vmcb->fs.attributes.bytes = 0x0893;
2128 vmcb->fs.limit = 0xffff;
2129 vmcb->fs.base = 0x00;
2131 vmcb->gs.sel = 0x00;
2132 vmcb->gs.attributes.bytes = 0x0893;
2133 vmcb->gs.limit = 0xffff;
2134 vmcb->gs.base = 0x00;
2136 vmcb->ldtr.sel = 0x00;
2137 vmcb->ldtr.attributes.bytes = 0x0000;
2138 vmcb->ldtr.limit = 0x0;
2139 vmcb->ldtr.base = 0x00;
2141 vmcb->gdtr.sel = 0x00;
2142 vmcb->gdtr.attributes.bytes = 0x0000;
2143 vmcb->gdtr.limit = 0x0;
2144 vmcb->gdtr.base = 0x00;
2146 vmcb->tr.sel = 0;
2147 vmcb->tr.attributes.bytes = 0;
2148 vmcb->tr.limit = 0x0;
2149 vmcb->tr.base = 0;
2151 vmcb->idtr.sel = 0x00;
2152 vmcb->idtr.attributes.bytes = 0x0000;
2153 vmcb->idtr.limit = 0x3ff;
2154 vmcb->idtr.base = 0x00;
2156 vmcb->rax = 0;
2157 vmcb->rsp = 0;
2159 return 0;
2163 /*
2164 * svm_do_vmmcall - SVM VMMCALL handler
2166 * returns 0 on success, non-zero otherwise
2167 */
2168 static int svm_do_vmmcall(struct vcpu *v, struct cpu_user_regs *regs)
2170 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
2171 int inst_len;
2173 ASSERT(vmcb);
2174 ASSERT(regs);
2176 inst_len = __get_instruction_length(vmcb, INSTR_VMCALL, NULL);
2177 ASSERT(inst_len > 0);
2179 if ( regs->eax & 0x80000000 )
2181 /* VMMCALL sanity check */
2182 if ( vmcb->cpl > get_vmmcall_cpl(regs->edi) )
2184 printk("VMMCALL CPL check failed\n");
2185 return -1;
2188 /* handle the request */
2189 switch ( regs->eax )
2191 case VMMCALL_RESET_TO_REALMODE:
2192 if ( svm_do_vmmcall_reset_to_realmode(v, regs) )
2194 printk("svm_do_vmmcall_reset_to_realmode() failed\n");
2195 return -1;
2197 /* since we just reset the VMCB, return without adjusting
2198 * the eip */
2199 return 0;
2201 case VMMCALL_DEBUG:
2202 printk("DEBUG features not implemented yet\n");
2203 break;
2204 default:
2205 break;
2208 hvm_print_line(v, regs->eax); /* provides the current domain */
2210 else
2212 hvm_do_hypercall(regs);
2215 __update_guest_eip(vmcb, inst_len);
2216 return 0;
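/*
 * Illustrative guest-side sketch of the native VMMCALL path handled above.
 * This helper is hypothetical: the command numbers (VMMCALL_RESET_TO_REALMODE
 * etc., with bit 31 set as tested above) and the EDI cookie inspected by
 * get_vmmcall_cpl() are defined in asm/hvm/svm/vmmcall.h, not here.  The raw
 * bytes 0f 01 d9 are the VMMCALL instruction, spelled out for assemblers that
 * lack the mnemonic.  Commands without bit 31 set take the hvm_do_hypercall()
 * path instead.
 */
static inline void guest_vmmcall(unsigned int cmd, unsigned int magic)
{
    __asm__ __volatile__ ( ".byte 0x0f,0x01,0xd9"   /* vmmcall */
                           : /* no outputs */
                           : "a" (cmd), "D" (magic)
                           : "memory" );
}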
2220 void svm_dump_inst(unsigned long eip)
2222 u8 opcode[256];
2223 unsigned long ptr;
2224 int len;
2225 int i;
2227 ptr = eip & ~0xff;
2228 len = 0;
2230 if (hvm_copy_from_guest_virt(opcode, ptr, sizeof(opcode)) == 0)
2231 len = sizeof(opcode);
2233 printk("Code bytes around(len=%d) %lx:", len, eip);
2234 for (i = 0; i < len; i++)
2236 if ((i & 0x0f) == 0)
2237 printk("\n%08lx:", ptr+i);
2239 printk("%02x ", opcode[i]);
2242 printk("\n");
2246 void svm_dump_regs(const char *from, struct cpu_user_regs *regs)
2248 struct vcpu *v = current;
2249 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
2250 unsigned long pt = v->arch.hvm_vcpu.hw_cr3;
2252 printk("%s: guest registers from %s:\n", __func__, from);
2253 #if defined (__x86_64__)
2254 printk("rax: %016lx rbx: %016lx rcx: %016lx\n",
2255 regs->rax, regs->rbx, regs->rcx);
2256 printk("rdx: %016lx rsi: %016lx rdi: %016lx\n",
2257 regs->rdx, regs->rsi, regs->rdi);
2258 printk("rbp: %016lx rsp: %016lx r8: %016lx\n",
2259 regs->rbp, regs->rsp, regs->r8);
2260 printk("r9: %016lx r10: %016lx r11: %016lx\n",
2261 regs->r9, regs->r10, regs->r11);
2262 printk("r12: %016lx r13: %016lx r14: %016lx\n",
2263 regs->r12, regs->r13, regs->r14);
2264 printk("r15: %016lx cr0: %016lx cr3: %016lx\n",
2265 regs->r15, v->arch.hvm_svm.cpu_shadow_cr0, vmcb->cr3);
2266 #else
2267 printk("eax: %08x, ebx: %08x, ecx: %08x, edx: %08x\n",
2268 regs->eax, regs->ebx, regs->ecx, regs->edx);
2269 printk("edi: %08x, esi: %08x, ebp: %08x, esp: %08x\n",
2270 regs->edi, regs->esi, regs->ebp, regs->esp);
2271 printk("%s: guest cr0: %lx\n", __func__,
2272 v->arch.hvm_svm.cpu_shadow_cr0);
2273 printk("guest CR3 = %llx\n", vmcb->cr3);
2274 #endif
2275 printk("%s: pt = %lx\n", __func__, pt);
2279 void svm_dump_host_regs(const char *from)
2281 struct vcpu *v = current;
2282 unsigned long pt = pagetable_get_paddr(v->arch.monitor_table);
2283 unsigned long cr3, cr0;
2284 printk("Host registers at %s\n", from);
2286 __asm__ __volatile__ ("\tmov %%cr0,%0\n"
2287 "\tmov %%cr3,%1\n"
2288 : "=r" (cr0), "=r"(cr3));
2289 printk("%s: pt = %lx, cr3 = %lx, cr0 = %lx\n", __func__, pt, cr3, cr0);
2292 #ifdef SVM_EXTRA_DEBUG
2293 static char *exit_reasons[] = {
2294 [VMEXIT_CR0_READ] = "CR0_READ",
2295 [VMEXIT_CR1_READ] = "CR1_READ",
2296 [VMEXIT_CR2_READ] = "CR2_READ",
2297 [VMEXIT_CR3_READ] = "CR3_READ",
2298 [VMEXIT_CR4_READ] = "CR4_READ",
2299 [VMEXIT_CR5_READ] = "CR5_READ",
2300 [VMEXIT_CR6_READ] = "CR6_READ",
2301 [VMEXIT_CR7_READ] = "CR7_READ",
2302 [VMEXIT_CR8_READ] = "CR8_READ",
2303 [VMEXIT_CR9_READ] = "CR9_READ",
2304 [VMEXIT_CR10_READ] = "CR10_READ",
2305 [VMEXIT_CR11_READ] = "CR11_READ",
2306 [VMEXIT_CR12_READ] = "CR12_READ",
2307 [VMEXIT_CR13_READ] = "CR13_READ",
2308 [VMEXIT_CR14_READ] = "CR14_READ",
2309 [VMEXIT_CR15_READ] = "CR15_READ",
2310 [VMEXIT_CR0_WRITE] = "CR0_WRITE",
2311 [VMEXIT_CR1_WRITE] = "CR1_WRITE",
2312 [VMEXIT_CR2_WRITE] = "CR2_WRITE",
2313 [VMEXIT_CR3_WRITE] = "CR3_WRITE",
2314 [VMEXIT_CR4_WRITE] = "CR4_WRITE",
2315 [VMEXIT_CR5_WRITE] = "CR5_WRITE",
2316 [VMEXIT_CR6_WRITE] = "CR6_WRITE",
2317 [VMEXIT_CR7_WRITE] = "CR7_WRITE",
2318 [VMEXIT_CR8_WRITE] = "CR8_WRITE",
2319 [VMEXIT_CR9_WRITE] = "CR9_WRITE",
2320 [VMEXIT_CR10_WRITE] = "CR10_WRITE",
2321 [VMEXIT_CR11_WRITE] = "CR11_WRITE",
2322 [VMEXIT_CR12_WRITE] = "CR12_WRITE",
2323 [VMEXIT_CR13_WRITE] = "CR13_WRITE",
2324 [VMEXIT_CR14_WRITE] = "CR14_WRITE",
2325 [VMEXIT_CR15_WRITE] = "CR15_WRITE",
2326 [VMEXIT_DR0_READ] = "DR0_READ",
2327 [VMEXIT_DR1_READ] = "DR1_READ",
2328 [VMEXIT_DR2_READ] = "DR2_READ",
2329 [VMEXIT_DR3_READ] = "DR3_READ",
2330 [VMEXIT_DR4_READ] = "DR4_READ",
2331 [VMEXIT_DR5_READ] = "DR5_READ",
2332 [VMEXIT_DR6_READ] = "DR6_READ",
2333 [VMEXIT_DR7_READ] = "DR7_READ",
2334 [VMEXIT_DR8_READ] = "DR8_READ",
2335 [VMEXIT_DR9_READ] = "DR9_READ",
2336 [VMEXIT_DR10_READ] = "DR10_READ",
2337 [VMEXIT_DR11_READ] = "DR11_READ",
2338 [VMEXIT_DR12_READ] = "DR12_READ",
2339 [VMEXIT_DR13_READ] = "DR13_READ",
2340 [VMEXIT_DR14_READ] = "DR14_READ",
2341 [VMEXIT_DR15_READ] = "DR15_READ",
2342 [VMEXIT_DR0_WRITE] = "DR0_WRITE",
2343 [VMEXIT_DR1_WRITE] = "DR1_WRITE",
2344 [VMEXIT_DR2_WRITE] = "DR2_WRITE",
2345 [VMEXIT_DR3_WRITE] = "DR3_WRITE",
2346 [VMEXIT_DR4_WRITE] = "DR4_WRITE",
2347 [VMEXIT_DR5_WRITE] = "DR5_WRITE",
2348 [VMEXIT_DR6_WRITE] = "DR6_WRITE",
2349 [VMEXIT_DR7_WRITE] = "DR7_WRITE",
2350 [VMEXIT_DR8_WRITE] = "DR8_WRITE",
2351 [VMEXIT_DR9_WRITE] = "DR9_WRITE",
2352 [VMEXIT_DR10_WRITE] = "DR10_WRITE",
2353 [VMEXIT_DR11_WRITE] = "DR11_WRITE",
2354 [VMEXIT_DR12_WRITE] = "DR12_WRITE",
2355 [VMEXIT_DR13_WRITE] = "DR13_WRITE",
2356 [VMEXIT_DR14_WRITE] = "DR14_WRITE",
2357 [VMEXIT_DR15_WRITE] = "DR15_WRITE",
2358 [VMEXIT_EXCEPTION_DE] = "EXCEPTION_DE",
2359 [VMEXIT_EXCEPTION_DB] = "EXCEPTION_DB",
2360 [VMEXIT_EXCEPTION_NMI] = "EXCEPTION_NMI",
2361 [VMEXIT_EXCEPTION_BP] = "EXCEPTION_BP",
2362 [VMEXIT_EXCEPTION_OF] = "EXCEPTION_OF",
2363 [VMEXIT_EXCEPTION_BR] = "EXCEPTION_BR",
2364 [VMEXIT_EXCEPTION_UD] = "EXCEPTION_UD",
2365 [VMEXIT_EXCEPTION_NM] = "EXCEPTION_NM",
2366 [VMEXIT_EXCEPTION_DF] = "EXCEPTION_DF",
2367 [VMEXIT_EXCEPTION_09] = "EXCEPTION_09",
2368 [VMEXIT_EXCEPTION_TS] = "EXCEPTION_TS",
2369 [VMEXIT_EXCEPTION_NP] = "EXCEPTION_NP",
2370 [VMEXIT_EXCEPTION_SS] = "EXCEPTION_SS",
2371 [VMEXIT_EXCEPTION_GP] = "EXCEPTION_GP",
2372 [VMEXIT_EXCEPTION_PF] = "EXCEPTION_PF",
2373 [VMEXIT_EXCEPTION_15] = "EXCEPTION_15",
2374 [VMEXIT_EXCEPTION_MF] = "EXCEPTION_MF",
2375 [VMEXIT_EXCEPTION_AC] = "EXCEPTION_AC",
2376 [VMEXIT_EXCEPTION_MC] = "EXCEPTION_MC",
2377 [VMEXIT_EXCEPTION_XF] = "EXCEPTION_XF",
2378 [VMEXIT_INTR] = "INTR",
2379 [VMEXIT_NMI] = "NMI",
2380 [VMEXIT_SMI] = "SMI",
2381 [VMEXIT_INIT] = "INIT",
2382 [VMEXIT_VINTR] = "VINTR",
2383 [VMEXIT_CR0_SEL_WRITE] = "CR0_SEL_WRITE",
2384 [VMEXIT_IDTR_READ] = "IDTR_READ",
2385 [VMEXIT_GDTR_READ] = "GDTR_READ",
2386 [VMEXIT_LDTR_READ] = "LDTR_READ",
2387 [VMEXIT_TR_READ] = "TR_READ",
2388 [VMEXIT_IDTR_WRITE] = "IDTR_WRITE",
2389 [VMEXIT_GDTR_WRITE] = "GDTR_WRITE",
2390 [VMEXIT_LDTR_WRITE] = "LDTR_WRITE",
2391 [VMEXIT_TR_WRITE] = "TR_WRITE",
2392 [VMEXIT_RDTSC] = "RDTSC",
2393 [VMEXIT_RDPMC] = "RDPMC",
2394 [VMEXIT_PUSHF] = "PUSHF",
2395 [VMEXIT_POPF] = "POPF",
2396 [VMEXIT_CPUID] = "CPUID",
2397 [VMEXIT_RSM] = "RSM",
2398 [VMEXIT_IRET] = "IRET",
2399 [VMEXIT_SWINT] = "SWINT",
2400 [VMEXIT_INVD] = "INVD",
2401 [VMEXIT_PAUSE] = "PAUSE",
2402 [VMEXIT_HLT] = "HLT",
2403 [VMEXIT_INVLPG] = "INVLPG",
2404 [VMEXIT_INVLPGA] = "INVLPGA",
2405 [VMEXIT_IOIO] = "IOIO",
2406 [VMEXIT_MSR] = "MSR",
2407 [VMEXIT_TASK_SWITCH] = "TASK_SWITCH",
2408 [VMEXIT_FERR_FREEZE] = "FERR_FREEZE",
2409 [VMEXIT_SHUTDOWN] = "SHUTDOWN",
2410 [VMEXIT_VMRUN] = "VMRUN",
2411 [VMEXIT_VMMCALL] = "VMMCALL",
2412 [VMEXIT_VMLOAD] = "VMLOAD",
2413 [VMEXIT_VMSAVE] = "VMSAVE",
2414 [VMEXIT_STGI] = "STGI",
2415 [VMEXIT_CLGI] = "CLGI",
2416 [VMEXIT_SKINIT] = "SKINIT",
2417 [VMEXIT_RDTSCP] = "RDTSCP",
2418 [VMEXIT_ICEBP] = "ICEBP",
2419 [VMEXIT_NPF] = "NPF"
2420 };
2421 #endif /* SVM_EXTRA_DEBUG */
2423 #ifdef SVM_WALK_GUEST_PAGES
2424 void walk_shadow_and_guest_pt(unsigned long gva)
2426 l2_pgentry_t gpde;
2427 l2_pgentry_t spde;
2428 l1_pgentry_t gpte;
2429 l1_pgentry_t spte;
2430 struct vcpu *v = current;
2431 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
2432 paddr_t gpa;
2434 gpa = shadow_gva_to_gpa(current, gva);
2435 printk("gva = %lx, gpa=%"PRIpaddr", gCR3=%x\n", gva, gpa, (u32)vmcb->cr3);
2436 if( !svm_paging_enabled(v) || mmio_space(gpa) )
2437 return;
2439 /* let's dump the guest and shadow page info */
2441 __guest_get_l2e(v, gva, &gpde);
2442 printk( "G-PDE = %x, flags=%x\n", gpde.l2, l2e_get_flags(gpde) );
2443 __shadow_get_l2e( v, gva, &spde );
2444 printk( "S-PDE = %x, flags=%x\n", spde.l2, l2e_get_flags(spde) );
2446 if ( unlikely(!(l2e_get_flags(gpde) & _PAGE_PRESENT)) )
2447 return;
2449 spte = l1e_empty();
2451 /* This is actually overkill - we only need to ensure the hl2 is in sync. */
2452 shadow_sync_va(v, gva);
2454 gpte.l1 = 0;
2455 __copy_from_user(&gpte, &linear_pg_table[ l1_linear_offset(gva) ],
2456 sizeof(gpte) );
2457 printk( "G-PTE = %x, flags=%x\n", gpte.l1, l1e_get_flags(gpte) );
2459 BUG(); // need to think about this, and convert usage of
2460 // phys_to_machine_mapping to use pagetable format...
2461 __copy_from_user( &spte, &phys_to_machine_mapping[ l1e_get_pfn( gpte ) ],
2462 sizeof(spte) );
2464 printk( "S-PTE = %x, flags=%x\n", spte.l1, l1e_get_flags(spte));
2466 #endif /* SVM_WALK_GUEST_PAGES */
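/*
 * A standalone sketch (not Xen code): the walk above is for a 2-level,
 * non-PAE guest, where a 32-bit virtual address splits into a 10-bit L2
 * (page-directory) index, a 10-bit L1 (page-table) index and a 12-bit page
 * offset.  The address below is an arbitrary example.
 */
#include <stdio.h>

int main(void)
{
    unsigned long gva    = 0xC0123456UL;
    unsigned long l2_idx = (gva >> 22) & 0x3ff;  /* PDE index: 768     */
    unsigned long l1_idx = (gva >> 12) & 0x3ff;  /* PTE index: 291     */
    unsigned long offset = gva & 0xfff;          /* byte offset: 0x456 */

    printf("gva %#lx -> PDE %lu, PTE %lu, offset %#lx\n",
           gva, l2_idx, l1_idx, offset);
    return 0;
}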
2469 asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
2471 unsigned int exit_reason;
2472 unsigned long eip;
2473 struct vcpu *v = current;
2474 int error;
2475 int do_debug = 0;
2476 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
2478 ASSERT(vmcb);
2480 exit_reason = vmcb->exitcode;
2481 save_svm_cpu_user_regs(v, regs);
2483 v->arch.hvm_svm.inject_event = 0;
2485 if (exit_reason == VMEXIT_INVALID)
2487 svm_dump_vmcb(__func__, vmcb);
2488 goto exit_and_crash;
2491 #ifdef SVM_EXTRA_DEBUG
2493 #if defined(__i386__)
2494 #define rip eip
2495 #endif
2497 static unsigned long intercepts_counter = 0;
2499 if (svm_dbg_on && exit_reason == VMEXIT_EXCEPTION_PF)
2501 if (svm_paging_enabled(v) &&
2502 !mmio_space(shadow_gva_to_gpa(current, vmcb->exitinfo2)))
2504 printk("I%08ld,ExC=%s(%d),IP=%x:%"PRIx64","
2505 "I1=%"PRIx64",I2=%"PRIx64",INT=%"PRIx64", "
2506 "gpa=%"PRIx64"\n", intercepts_counter,
2507 exit_reasons[exit_reason], exit_reason, regs->cs,
2508 (u64)regs->rip,
2509 (u64)vmcb->exitinfo1,
2510 (u64)vmcb->exitinfo2,
2511 (u64)vmcb->exitintinfo.bytes,
2512 (u64)shadow_gva_to_gpa(current, vmcb->exitinfo2));
2514 else
2516 printk("I%08ld,ExC=%s(%d),IP=%x:%"PRIx64","
2517 "I1=%"PRIx64",I2=%"PRIx64",INT=%"PRIx64"\n",
2518 intercepts_counter,
2519 exit_reasons[exit_reason], exit_reason, regs->cs,
2520 (u64)regs->rip,
2521 (u64)vmcb->exitinfo1,
2522 (u64)vmcb->exitinfo2,
2523 (u64)vmcb->exitintinfo.bytes );
2526 else if ( svm_dbg_on
2527 && exit_reason != VMEXIT_IOIO
2528 && exit_reason != VMEXIT_INTR)
2531 if (exit_reasons[exit_reason])
2533 printk("I%08ld,ExC=%s(%d),IP=%x:%"PRIx64","
2534 "I1=%"PRIx64",I2=%"PRIx64",INT=%"PRIx64"\n",
2535 intercepts_counter,
2536 exit_reasons[exit_reason], exit_reason, regs->cs,
2537 (u64)regs->rip,
2538 (u64)vmcb->exitinfo1,
2539 (u64)vmcb->exitinfo2,
2540 (u64)vmcb->exitintinfo.bytes);
2542 else
2544 printk("I%08ld,ExC=%d(0x%x),IP=%x:%"PRIx64","
2545 "I1=%"PRIx64",I2=%"PRIx64",INT=%"PRIx64"\n",
2546 intercepts_counter, exit_reason, exit_reason, regs->cs,
2547 (u64)regs->rip,
2548 (u64)vmcb->exitinfo1,
2549 (u64)vmcb->exitinfo2,
2550 (u64)vmcb->exitintinfo.bytes);
2554 #ifdef SVM_WALK_GUEST_PAGES
2555 if( exit_reason == VMEXIT_EXCEPTION_PF
2556 && ( ( vmcb->exitinfo2 == vmcb->rip )
2557 || vmcb->exitintinfo.bytes) )
2559 if ( svm_paging_enabled(v) &&
2560 !mmio_space(gva_to_gpa(vmcb->exitinfo2)) )
2561 walk_shadow_and_guest_pt(vmcb->exitinfo2);
2563 #endif
2565 intercepts_counter++;
2567 #if 0
2568 if (svm_dbg_on)
2569 do_debug = svm_do_debugout(exit_reason);
2570 #endif
2572 if (do_debug)
2574 printk("%s:+ guest_table = 0x%08x, monitor_table = 0x%08x, "
2575 "hw_cr3 = 0x%16lx\n",
2576 __func__,
2577 (int) v->arch.guest_table.pfn,
2578 (int) v->arch.monitor_table.pfn,
2579 (long unsigned int) v->arch.hvm_vcpu.hw_cr3);
2581 svm_dump_vmcb(__func__, vmcb);
2582 svm_dump_regs(__func__, regs);
2583 svm_dump_inst(svm_rip2pointer(vmcb));
2586 #if defined(__i386__)
2587 #undef rip
2588 #endif
2591 #endif /* SVM_EXTRA_DEBUG */
2594 perfc_incra(svmexits, exit_reason);
2595 eip = vmcb->rip;
2597 #ifdef SVM_EXTRA_DEBUG
2598 if (do_debug)
2600 printk("eip = %lx, exit_reason = %d (0x%x)\n",
2601 eip, exit_reason, exit_reason);
2603 #endif /* SVM_EXTRA_DEBUG */
2605 TRACE_3D(TRC_VMX_VMEXIT, v->domain->domain_id, eip, exit_reason);
2607 switch (exit_reason)
2609 case VMEXIT_EXCEPTION_DB:
2611 #ifdef XEN_DEBUGGER
2612 svm_debug_save_cpu_user_regs(regs);
2613 pdb_handle_exception(1, regs, 1);
2614 svm_debug_restore_cpu_user_regs(regs);
2615 #else
2616 svm_store_cpu_user_regs(regs, v);
2617 domain_pause_for_debugger();
2618 #endif
2620 break;
2622 case VMEXIT_NMI:
2623 break;
2625 case VMEXIT_SMI:
2626 /*
2627 * For asynchronous SMIs, we just need to allow global interrupts
2628 * so that the SMI is taken properly in the context of the host. The
2629 * standard code does an STGI after the VMEXIT, which should accomplish
2630 * this task. Continue as normal and restart the guest.
2631 */
2632 break;
2634 case VMEXIT_INIT:
2635 /*
2636 * Nothing to do; in fact, we should never get to this point.
2637 */
2638 break;
2640 case VMEXIT_EXCEPTION_BP:
2641 #ifdef XEN_DEBUGGER
2642 svm_debug_save_cpu_user_regs(regs);
2643 pdb_handle_exception(3, regs, 1);
2644 svm_debug_restore_cpu_user_regs(regs);
2645 #else
2646 if ( test_bit(_DOMF_debugging, &v->domain->domain_flags) )
2647 domain_pause_for_debugger();
2648 else
2649 svm_inject_exception(v, TRAP_int3, 0, 0);
2650 #endif
2651 break;
2653 case VMEXIT_EXCEPTION_NM:
2654 svm_do_no_device_fault(vmcb);
2655 break;
2657 case VMEXIT_EXCEPTION_GP:
2658 /* This should probably not be trapped in the future */
2659 regs->error_code = vmcb->exitinfo1;
2660 svm_do_general_protection_fault(v, regs);
2661 break;
2663 case VMEXIT_EXCEPTION_PF:
2665 unsigned long va;
2666 va = vmcb->exitinfo2;
2667 regs->error_code = vmcb->exitinfo1;
2668 HVM_DBG_LOG(DBG_LEVEL_VMMU,
2669 "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
2670 (unsigned long)regs->eax, (unsigned long)regs->ebx,
2671 (unsigned long)regs->ecx, (unsigned long)regs->edx,
2672 (unsigned long)regs->esi, (unsigned long)regs->edi);
2674 if (!(error = svm_do_page_fault(va, regs)))
2676 /* Inject #PF using the interruption-information fields. */
2677 svm_inject_exception(v, TRAP_page_fault, 1, regs->error_code);
2679 v->arch.hvm_svm.cpu_cr2 = va;
2680 vmcb->cr2 = va;
2681 TRACE_3D(TRC_VMX_INTR, v->domain->domain_id,
2682 VMEXIT_EXCEPTION_PF, va);
2684 break;
2687 case VMEXIT_EXCEPTION_DF:
2688 /* Debug info to hopefully help debug WHY the guest double-faulted. */
2689 svm_dump_vmcb(__func__, vmcb);
2690 svm_dump_regs(__func__, regs);
2691 svm_dump_inst(svm_rip2pointer(vmcb));
2692 svm_inject_exception(v, TRAP_double_fault, 1, 0);
2693 break;
2695 case VMEXIT_VINTR:
2696 vmcb->vintr.fields.irq = 0;
2697 vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR;
2698 break;
2700 case VMEXIT_INTR:
2701 break;
2703 case VMEXIT_INVD:
2704 svm_vmexit_do_invd(vmcb);
2705 break;
2707 case VMEXIT_GDTR_WRITE:
2708 printk("WRITE to GDTR\n");
2709 break;
2711 case VMEXIT_TASK_SWITCH:
2712 goto exit_and_crash;
2714 case VMEXIT_CPUID:
2715 svm_vmexit_do_cpuid(vmcb, regs->eax, regs);
2716 break;
2718 case VMEXIT_HLT:
2719 svm_vmexit_do_hlt(vmcb);
2720 break;
2722 case VMEXIT_INVLPG:
2723 svm_handle_invlpg(0, regs);
2724 break;
2726 case VMEXIT_INVLPGA:
2727 svm_handle_invlpg(1, regs);
2728 break;
2730 case VMEXIT_VMMCALL:
2731 svm_do_vmmcall(v, regs);
2732 break;
2734 case VMEXIT_CR0_READ:
2735 svm_cr_access(v, 0, TYPE_MOV_FROM_CR, regs);
2736 break;
2738 case VMEXIT_CR2_READ:
2739 svm_cr_access(v, 2, TYPE_MOV_FROM_CR, regs);
2740 break;
2742 case VMEXIT_CR3_READ:
2743 svm_cr_access(v, 3, TYPE_MOV_FROM_CR, regs);
2744 break;
2746 case VMEXIT_CR4_READ:
2747 svm_cr_access(v, 4, TYPE_MOV_FROM_CR, regs);
2748 break;
2750 case VMEXIT_CR8_READ:
2751 svm_cr_access(v, 8, TYPE_MOV_FROM_CR, regs);
2752 break;
2754 case VMEXIT_CR0_WRITE:
2755 svm_cr_access(v, 0, TYPE_MOV_TO_CR, regs);
2756 break;
2758 case VMEXIT_CR2_WRITE:
2759 svm_cr_access(v, 2, TYPE_MOV_TO_CR, regs);
2760 break;
2762 case VMEXIT_CR3_WRITE:
2763 svm_cr_access(v, 3, TYPE_MOV_TO_CR, regs);
2764 local_flush_tlb();
2765 break;
2767 case VMEXIT_CR4_WRITE:
2768 svm_cr_access(v, 4, TYPE_MOV_TO_CR, regs);
2769 break;
2771 case VMEXIT_CR8_WRITE:
2772 svm_cr_access(v, 8, TYPE_MOV_TO_CR, regs);
2773 break;
2775 case VMEXIT_DR0_WRITE ... VMEXIT_DR7_WRITE:
2776 svm_dr_access(v, regs);
2777 break;
2779 case VMEXIT_IOIO:
2780 svm_io_instruction(v);
2781 break;
2783 case VMEXIT_MSR:
2784 svm_do_msr_access(v, regs);
2785 break;
2787 case VMEXIT_SHUTDOWN:
2788 gdprintk(XENLOG_ERR, "Guest shutdown exit\n");
2789 goto exit_and_crash;
2791 default:
2792 exit_and_crash:
2793 gdprintk(XENLOG_ERR, "unexpected VMEXIT: exit reason = 0x%x, "
2794 "exitinfo1 = %"PRIx64", exitinfo2 = %"PRIx64"\n",
2795 exit_reason,
2796 (u64)vmcb->exitinfo1, (u64)vmcb->exitinfo2);
2797 domain_crash(v->domain);
2798 break;
2801 #ifdef SVM_EXTRA_DEBUG
2802 if (do_debug)
2804 printk("%s: Done switch on vmexit_code\n", __func__);
2805 svm_dump_regs(__func__, regs);
2808 if (do_debug)
2810 printk("vmexit_handler():- guest_table = 0x%08x, "
2811 "monitor_table = 0x%08x, hw_cr3 = 0x%16x\n",
2812 (int)v->arch.guest_table.pfn,
2813 (int)v->arch.monitor_table.pfn,
2814 (unsigned long)v->arch.hvm_vcpu.hw_cr3);
2815 printk("svm_vmexit_handler: Returning\n");
2817 #endif
2820 asmlinkage void svm_load_cr2(void)
2822 struct vcpu *v = current;
2824 local_irq_disable();
2825 asm volatile("mov %0,%%cr2": :"r" (v->arch.hvm_svm.cpu_cr2));
2828 asmlinkage void svm_asid(void)
2830 struct vcpu *v = current;
2831 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
2833 /*
2834 * If we need to assign a new ASID, or if we are switching cores, retire
2835 * the ASID used on the old core and assign a new ASID on the current core.
2836 */
2837 if ( test_bit( ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags ) ||
2838 ( v->arch.hvm_svm.asid_core != v->arch.hvm_svm.launch_core )) {
2839 /* recycle asid */
2840 if ( !asidpool_assign_next(vmcb, 1,
2841 v->arch.hvm_svm.asid_core,
2842 v->arch.hvm_svm.launch_core) )
2844 /* If we get here, we have a major problem */
2845 domain_crash_synchronous();
2848 v->arch.hvm_svm.asid_core = v->arch.hvm_svm.launch_core;
2849 clear_bit( ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags );
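/*
 * Illustrative restatement (this helper does not exist in Xen): a vcpu needs
 * a fresh ASID either when the VMCB has been flagged for reassignment (e.g.
 * by the INVLPG handling earlier in this file) or when it has been
 * rescheduled onto a different physical core since launch, in which case the
 * ASID held on the old core is retired via asidpool_assign_next().
 */
static inline int need_new_asid(struct vcpu *v)
{
    return test_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags) ||
           (v->arch.hvm_svm.asid_core != v->arch.hvm_svm.launch_core);
}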
2853 /*
2854 * Local variables:
2855 * mode: C
2856 * c-set-style: "BSD"
2857 * c-basic-offset: 4
2858 * tab-width: 4
2859 * indent-tabs-mode: nil
2860 * End:
2861 */