ia64/xen-unstable

annotate xen/arch/x86/hvm/vmx/vmx.c @ 10908:a6cb8ba24a91

[HVM] Place all APIC registers into one page in native format.
With this change we can re-use the code in include/asm-x86/apicdef.h,
making the code much cleaner. It also helps with future enhancements.

This patch does not change any logic except the change to
CONTROL_REG_ACCESS_NUM, which should be 0xf: the control-register
number field in the exit qualification is four bits wide, so a 0x7
mask would truncate CR8 accesses.
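As an illustration, keeping the registers in one page in their native
layout means a vLAPIC register access reduces to a load or store at the
architectural offset from apicdef.h. A minimal sketch (assuming a mapped
"regs" pointer like the VLAPIC(v)->regs field freed later in this file;
vlapic_get_reg is a hypothetical helper name, not part of this patch):

    #include <asm/apicdef.h>    /* APIC_ID, APIC_TASKPRI, ... offsets */

    static inline u32 vlapic_get_reg(struct vlapic *vlapic, u32 offset)
    {
        /* All registers live in one page, at their native offsets. */
        return *(volatile u32 *)((char *)vlapic->regs + offset);
    }

    /* e.g. reading the guest's task-priority register: */
    u32 tpr = vlapic_get_reg(vlapic, APIC_TASKPRI);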

Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
author kfraser@localhost.localdomain
date Wed Aug 02 10:07:03 2006 +0100 (2006-08-02)
parents 822c39808e62
children 7e7552112954
rev   line source
kaf24@8708 1 /*
kaf24@8708 2 * vmx.c: handling VMX architecture-related VM exits
kaf24@8708 3 * Copyright (c) 2004, Intel Corporation.
kaf24@8708 4 *
kaf24@8708 5 * This program is free software; you can redistribute it and/or modify it
kaf24@8708 6 * under the terms and conditions of the GNU General Public License,
kaf24@8708 7 * version 2, as published by the Free Software Foundation.
kaf24@8708 8 *
kaf24@8708 9 * This program is distributed in the hope it will be useful, but WITHOUT
kaf24@8708 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
kaf24@8708 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
kaf24@8708 12 * more details.
kaf24@8708 13 *
kaf24@8708 14 * You should have received a copy of the GNU General Public License along with
kaf24@8708 15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
kaf24@8708 16 * Place - Suite 330, Boston, MA 02111-1307 USA.
kaf24@8708 17 *
kaf24@8708 18 */
kaf24@8708 19
kaf24@8708 20 #include <xen/config.h>
kaf24@8708 21 #include <xen/init.h>
kaf24@8708 22 #include <xen/lib.h>
kaf24@8708 23 #include <xen/trace.h>
kaf24@8708 24 #include <xen/sched.h>
kaf24@8708 25 #include <xen/irq.h>
kaf24@8708 26 #include <xen/softirq.h>
kaf24@8708 27 #include <xen/domain_page.h>
kaf24@8708 28 #include <asm/current.h>
kaf24@8708 29 #include <asm/io.h>
kaf24@8708 30 #include <asm/shadow.h>
kaf24@8708 31 #include <asm/regs.h>
kaf24@8708 32 #include <asm/cpufeature.h>
kaf24@8708 33 #include <asm/processor.h>
kaf24@8708 34 #include <asm/types.h>
kaf24@8708 35 #include <asm/msr.h>
kaf24@8708 36 #include <asm/spinlock.h>
kaf24@8708 37 #include <asm/hvm/hvm.h>
kaf24@8708 38 #include <asm/hvm/support.h>
kaf24@8708 39 #include <asm/hvm/vmx/vmx.h>
kaf24@8708 40 #include <asm/hvm/vmx/vmcs.h>
kaf24@10360 41 #include <asm/hvm/vmx/cpu.h>
kaf24@8708 42 #include <asm/shadow.h>
kaf24@8708 43 #if CONFIG_PAGING_LEVELS >= 3
kaf24@8708 44 #include <asm/shadow_64.h>
kaf24@8708 45 #endif
kaf24@8708 46 #include <public/sched.h>
kaf24@8708 47 #include <public/hvm/ioreq.h>
kaf24@8708 48 #include <asm/hvm/vpic.h>
kaf24@8708 49 #include <asm/hvm/vlapic.h>
kaf24@8708 50
kaf24@10183 51 static unsigned long trace_values[NR_CPUS][5];
kaf24@8708 52 #define TRACE_VMEXIT(index,value) trace_values[smp_processor_id()][index]=value
kaf24@8708 53
kaf24@9333 54 static void vmx_ctxt_switch_from(struct vcpu *v);
kaf24@9333 55 static void vmx_ctxt_switch_to(struct vcpu *v);
kaf24@9333 56
kfraser@10648 57 static int vmx_initialize_guest_resources(struct vcpu *v)
kaf24@8708 58 {
kfraser@10648 59 struct domain *d = v->domain;
kfraser@10648 60 struct vcpu *vc;
kfraser@10648 61 void *io_bitmap_a, *io_bitmap_b;
kfraser@10648 62 int rc;
kfraser@10648 63
kaf24@9333 64 v->arch.schedule_tail = arch_vmx_do_launch;
kaf24@9333 65 v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
kaf24@9333 66 v->arch.ctxt_switch_to = vmx_ctxt_switch_to;
kaf24@8708 67
kfraser@10648 68 if ( v->vcpu_id != 0 )
kfraser@10648 69 return 1;
kaf24@8708 70
kfraser@10648 71 for_each_vcpu ( d, vc )
kfraser@10648 72 {
kfraser@10648 73 /* Initialize monitor page table */
kfraser@10648 74 vc->arch.monitor_table = pagetable_null();
kaf24@8708 75
kfraser@10648 76 memset(&vc->arch.hvm_vmx, 0, sizeof(struct arch_vmx_struct));
kfraser@10648 77
kfraser@10648 78 if ( (rc = vmx_create_vmcs(vc)) != 0 )
kfraser@10648 79 {
kfraser@10648 80 DPRINTK("Failed to create VMCS for vcpu %d: err=%d.\n",
kfraser@10648 81 vc->vcpu_id, rc);
kfraser@10648 82 return 0;
kfraser@10648 83 }
kfraser@10648 84
kfraser@10648 85 spin_lock_init(&vc->arch.hvm_vmx.vmcs_lock);
kfraser@10648 86
kfraser@10648 87 if ( (io_bitmap_a = alloc_xenheap_pages(IO_BITMAP_ORDER)) == NULL )
kfraser@10648 88 {
kfraser@10648 89 DPRINTK("Failed to allocate io bitmap a for vcpu %d.\n",
kfraser@10648 90 vc->vcpu_id);
kfraser@10648 91 return 0;
kfraser@10648 92 }
kfraser@10648 93
kfraser@10648 94 if ( (io_bitmap_b = alloc_xenheap_pages(IO_BITMAP_ORDER)) == NULL )
kfraser@10648 95 {
kfraser@10648 96 DPRINTK("Failed to allocate io bitmap b for vcpu %d.\n",
kfraser@10648 97 vc->vcpu_id);
kfraser@10648 98 return 0;
kfraser@10648 99 }
kfraser@10648 100
kfraser@10648 101 memset(io_bitmap_a, 0xff, 0x1000);
kfraser@10648 102 memset(io_bitmap_b, 0xff, 0x1000);
kfraser@10648 103
kfraser@10648 104 /* Don't intercept accesses to the debug port. */
kfraser@10648 105 clear_bit(PC_DEBUG_PORT, io_bitmap_a);
kfraser@10648 106
kfraser@10648 107 vc->arch.hvm_vmx.io_bitmap_a = io_bitmap_a;
kfraser@10648 108 vc->arch.hvm_vmx.io_bitmap_b = io_bitmap_b;
kaf24@8708 109 }
kfraser@10648 110
kfraser@10648 111 /*
kfraser@10648 112 * This only needs to be done once per domain. XXX todo: add a separate
kfraser@10648 113 * function for it.
kfraser@10648 114 */
kfraser@10648 115 memset(&d->shared_info->evtchn_mask[0], 0xff,
kfraser@10648 116 sizeof(d->shared_info->evtchn_mask));
kfraser@10648 117
kfraser@10648 118 /* Put the domain in shadow mode even though we're going to be using
kfraser@10648 119 * the shared 1:1 page table initially. It shouldn't hurt. */
kfraser@10648 120 shadow_mode_enable(
kfraser@10648 121 d, SHM_enable|SHM_refcounts|SHM_translate|SHM_external|SHM_wr_pt_pte);
kfraser@10648 122
kfraser@10648 123 return 1;
kaf24@8708 124 }
kaf24@8708 125
kaf24@9304 126 static void vmx_relinquish_guest_resources(struct domain *d)
kaf24@8708 127 {
kaf24@9304 128 struct vcpu *v;
kaf24@8965 129
kaf24@9304 130 for_each_vcpu ( d, v )
kaf24@9304 131 {
kfraser@10648 132 vmx_destroy_vmcs(v);
kaf24@9457 133 if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
kaf24@9457 134 continue;
kaf24@9304 135 free_monitor_pagetable(v);
kaf24@9304 136 kill_timer(&v->arch.hvm_vmx.hlt_timer);
kaf24@9304 137 if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
kaf24@9304 138 {
kaf24@9304 139 kill_timer(&VLAPIC(v)->vlapic_timer);
kfraser@10908 140 unmap_domain_page_global(VLAPIC(v)->regs);
kfraser@10908 141 free_domheap_page(VLAPIC(v)->regs_page);
kaf24@9304 142 xfree(VLAPIC(v));
kaf24@9304 143 }
kaf24@8708 144 }
kaf24@8708 145
kaf24@10182 146 kill_timer(&d->arch.hvm_domain.pl_time.periodic_tm.timer);
kaf24@9304 147
kaf24@9304 148 if ( d->arch.hvm_domain.shared_page_va )
kaf24@9304 149 unmap_domain_page_global(
kaf24@9304 150 (void *)d->arch.hvm_domain.shared_page_va);
kaf24@9304 151
kaf24@9304 152 shadow_direct_map_clean(d);
kaf24@8708 153 }
kaf24@8708 154
kaf24@8708 155 #ifdef __x86_64__
kaf24@9333 156
kaf24@8708 157 static struct vmx_msr_state percpu_msr[NR_CPUS];
kaf24@8708 158
kaf24@8708 159 static u32 msr_data_index[VMX_MSR_COUNT] =
kaf24@8708 160 {
kaf24@8708 161 MSR_LSTAR, MSR_STAR, MSR_CSTAR,
kaf24@8708 162 MSR_SYSCALL_MASK, MSR_EFER,
kaf24@8708 163 };
kaf24@8708 164
kaf24@9333 165 static void vmx_save_segments(struct vcpu *v)
kaf24@8708 166 {
kaf24@8708 167 rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.msr_content.shadow_gs);
kaf24@8708 168 }
kaf24@8708 169
kaf24@8708 170 /*
kaf24@8708 171 * To avoid MSR save/restore at every VM exit/entry time, we restore
kaf24@8708 172 * the x86_64-specific MSRs at domain switch time. Since those MSRs
kaf24@8708 173 * are not modified once set for generic domains, we don't save them,
kaf24@8708 174 * but simply reset them to the values set at percpu_traps_init().
kaf24@8708 175 */
kaf24@9333 176 static void vmx_load_msrs(void)
kaf24@8708 177 {
kaf24@8708 178 struct vmx_msr_state *host_state = &percpu_msr[smp_processor_id()];
kaf24@8708 179 int i;
kaf24@8708 180
kaf24@8708 181 while ( host_state->flags )
kaf24@8708 182 {
kaf24@8708 183 i = find_first_set_bit(host_state->flags);
kaf24@8708 184 wrmsrl(msr_data_index[i], host_state->msr_items[i]);
kaf24@8708 185 clear_bit(i, &host_state->flags);
kaf24@8708 186 }
kaf24@8708 187 }
kaf24@8708 188
kaf24@8708 189 static void vmx_save_init_msrs(void)
kaf24@8708 190 {
kaf24@8708 191 struct vmx_msr_state *host_state = &percpu_msr[smp_processor_id()];
kaf24@8708 192 int i;
kaf24@8708 193
kaf24@8708 194 for ( i = 0; i < VMX_MSR_COUNT; i++ )
kaf24@8708 195 rdmsrl(msr_data_index[i], host_state->msr_items[i]);
kaf24@8708 196 }
kaf24@8708 197
kaf24@8708 198 #define CASE_READ_MSR(address) \
kaf24@8708 199 case MSR_ ## address: \
kaf24@8708 200 msr_content = msr->msr_items[VMX_INDEX_MSR_ ## address]; \
kaf24@8708 201 break
kaf24@8708 202
kaf24@8708 203 #define CASE_WRITE_MSR(address) \
kaf24@8708 204 case MSR_ ## address: \
kaf24@8708 205 { \
kaf24@8708 206 msr->msr_items[VMX_INDEX_MSR_ ## address] = msr_content; \
kaf24@8708 207 if (!test_bit(VMX_INDEX_MSR_ ## address, &msr->flags)) { \
kaf24@8708 208 set_bit(VMX_INDEX_MSR_ ## address, &msr->flags); \
kaf24@8708 209 } \
kaf24@8708 210 wrmsrl(MSR_ ## address, msr_content); \
kaf24@8708 211 set_bit(VMX_INDEX_MSR_ ## address, &host_state->flags); \
kaf24@8708 212 } \
kaf24@8708 213 break
kaf24@8708 214
kaf24@8708 215 #define IS_CANO_ADDRESS(add) 1
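/*
 * NB: the stub above accepts every address. A real 48-bit canonical
 * check (an illustrative sketch, assuming 48 implemented virtual
 * address bits; not a helper defined elsewhere in this file) could be:
 *
 *   #define IS_CANO_ADDRESS(add) \
 *       ((((u64)(add)) >> 47) == 0 || (((u64)(add)) >> 47) == 0x1ffff)
 */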
kaf24@8708 216 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
kaf24@8708 217 {
kaf24@9329 218 u64 msr_content = 0;
kaf24@9329 219 struct vcpu *v = current;
kaf24@9329 220 struct vmx_msr_state *msr = &v->arch.hvm_vmx.msr_content;
kaf24@9329 221
kaf24@9329 222 switch ( regs->ecx ) {
kaf24@8708 223 case MSR_EFER:
kaf24@9329 224 msr_content = msr->msr_items[VMX_INDEX_MSR_EFER];
kaf24@8708 225 HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content 0x%"PRIx64, msr_content);
kaf24@8708 226
kaf24@9329 227 /* the following code may not be needed */
kaf24@9329 228 if ( test_bit(VMX_CPU_STATE_LME_ENABLED, &v->arch.hvm_vmx.cpu_state) )
kaf24@9329 229 msr_content |= EFER_LME;
kaf24@9329 230 else
kaf24@9329 231 msr_content &= ~EFER_LME;
kaf24@9329 232
kaf24@9329 233 if ( VMX_LONG_GUEST(v) )
kaf24@9329 234 msr_content |= EFER_LMA;
kaf24@9329 235 else
kaf24@9329 236 msr_content &= ~EFER_LMA;
kaf24@8708 237 break;
kaf24@9329 238
kaf24@8708 239 case MSR_FS_BASE:
kaf24@9329 240 if ( !(VMX_LONG_GUEST(v)) )
kaf24@8708 241 /* XXX should this be a #GP fault? */
kaf24@8708 242 domain_crash_synchronous();
kaf24@9329 243
kaf24@8708 244 __vmread(GUEST_FS_BASE, &msr_content);
kaf24@8708 245 break;
kaf24@9329 246
kaf24@8708 247 case MSR_GS_BASE:
kaf24@9329 248 if ( !(VMX_LONG_GUEST(v)) )
kaf24@8708 249 domain_crash_synchronous();
kaf24@9329 250
kaf24@8708 251 __vmread(GUEST_GS_BASE, &msr_content);
kaf24@8708 252 break;
kaf24@9329 253
kaf24@8708 254 case MSR_SHADOW_GS_BASE:
kaf24@8708 255 msr_content = msr->shadow_gs;
kaf24@8708 256 break;
kaf24@8708 257
kaf24@9329 258 CASE_READ_MSR(STAR);
kaf24@9329 259 CASE_READ_MSR(LSTAR);
kaf24@9329 260 CASE_READ_MSR(CSTAR);
kaf24@9329 261 CASE_READ_MSR(SYSCALL_MASK);
kaf24@9329 262
kaf24@8708 263 default:
kaf24@8708 264 return 0;
kaf24@8708 265 }
kaf24@9329 266
kaf24@9329 267 HVM_DBG_LOG(DBG_LEVEL_2, "msr_content: 0x%"PRIx64, msr_content);
kaf24@9329 268
kaf24@8708 269 regs->eax = msr_content & 0xffffffff;
kaf24@8708 270 regs->edx = msr_content >> 32;
kaf24@9329 271
kaf24@8708 272 return 1;
kaf24@8708 273 }
kaf24@8708 274
kaf24@8708 275 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
kaf24@8708 276 {
kaf24@9329 277 u64 msr_content = regs->eax | ((u64)regs->edx << 32);
kaf24@9329 278 struct vcpu *v = current;
kaf24@9329 279 struct vmx_msr_state *msr = &v->arch.hvm_vmx.msr_content;
kaf24@9329 280 struct vmx_msr_state *host_state = &percpu_msr[smp_processor_id()];
kaf24@8708 281
kaf24@9329 282 HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%lx msr_content 0x%"PRIx64"\n",
kaf24@9099 283 (unsigned long)regs->ecx, msr_content);
kaf24@8708 284
kaf24@9329 285 switch ( regs->ecx ) {
kaf24@8708 286 case MSR_EFER:
kaf24@9190 287 /* offending reserved bit will cause #GP */
kaf24@9329 288 if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
kaf24@9329 289 {
kaf24@9329 290 printk("trying to set reserved bit in EFER\n");
kfraser@10822 291 vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
kaf24@9329 292 return 0;
kaf24@9329 293 }
kaf24@9190 294
kaf24@9329 295 /* LME: 0 -> 1 */
kaf24@9329 296 if ( msr_content & EFER_LME &&
kaf24@9329 297 !test_bit(VMX_CPU_STATE_LME_ENABLED, &v->arch.hvm_vmx.cpu_state) )
kaf24@9329 298 {
kaf24@9329 299 if ( vmx_paging_enabled(v) ||
kaf24@9290 300 !test_bit(VMX_CPU_STATE_PAE_ENABLED,
kaf24@9329 301 &v->arch.hvm_vmx.cpu_state) )
kaf24@9329 302 {
kaf24@9329 303 printk("trying to set LME bit while "
kaf24@9329 304 "paging is enabled or PAE is not set\n");
kfraser@10822 305 vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
kaf24@9329 306 return 0;
kaf24@8708 307 }
kaf24@9329 308
kaf24@9329 309 set_bit(VMX_CPU_STATE_LME_ENABLED, &v->arch.hvm_vmx.cpu_state);
kaf24@8708 310 }
kaf24@9190 311
kaf24@9329 312 msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content;
kaf24@8708 313 break;
kaf24@8708 314
kaf24@8708 315 case MSR_FS_BASE:
kaf24@8708 316 case MSR_GS_BASE:
kaf24@9329 317 if ( !(VMX_LONG_GUEST(v)) )
kaf24@8708 318 domain_crash_synchronous();
kaf24@9329 319
kaf24@9329 320 if ( !IS_CANO_ADDRESS(msr_content) )
kaf24@9329 321 {
kaf24@8708 322 HVM_DBG_LOG(DBG_LEVEL_1, "Non-canonical address in MSR write\n");
kfraser@10822 323 vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
kaf24@9329 324 return 0;
kaf24@8708 325 }
kaf24@9329 326
kaf24@9329 327 if ( regs->ecx == MSR_FS_BASE )
kaf24@8708 328 __vmwrite(GUEST_FS_BASE, msr_content);
kaf24@8708 329 else
kaf24@8708 330 __vmwrite(GUEST_GS_BASE, msr_content);
kaf24@9329 331
kaf24@8708 332 break;
kaf24@8708 333
kaf24@8708 334 case MSR_SHADOW_GS_BASE:
kaf24@9329 335 if ( !(VMX_LONG_GUEST(v)) )
kaf24@8708 336 domain_crash_synchronous();
kaf24@9329 337
kaf24@9329 338 v->arch.hvm_vmx.msr_content.shadow_gs = msr_content;
kaf24@8708 339 wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
kaf24@8708 340 break;
kaf24@8708 341
kaf24@9329 342 CASE_WRITE_MSR(STAR);
kaf24@9329 343 CASE_WRITE_MSR(LSTAR);
kaf24@9329 344 CASE_WRITE_MSR(CSTAR);
kaf24@9329 345 CASE_WRITE_MSR(SYSCALL_MASK);
kaf24@9329 346
kaf24@8708 347 default:
kaf24@8708 348 return 0;
kaf24@8708 349 }
kaf24@9329 350
kaf24@8708 351 return 1;
kaf24@8708 352 }
kaf24@8708 353
kaf24@9333 354 static void vmx_restore_msrs(struct vcpu *v)
kaf24@8708 355 {
kaf24@8708 356 int i = 0;
kaf24@8708 357 struct vmx_msr_state *guest_state;
kaf24@8708 358 struct vmx_msr_state *host_state;
kaf24@8708 359 unsigned long guest_flags;
kaf24@8708 360
kaf24@8708 361 guest_state = &v->arch.hvm_vmx.msr_content;
kaf24@8708 362 host_state = &percpu_msr[smp_processor_id()];
kaf24@8708 363
kaf24@8708 364 wrmsrl(MSR_SHADOW_GS_BASE, guest_state->shadow_gs);
kaf24@8708 365 guest_flags = guest_state->flags;
kaf24@8708 366 if (!guest_flags)
kaf24@8708 367 return;
kaf24@8708 368
kaf24@8708 369 while (guest_flags){
kaf24@8708 370 i = find_first_set_bit(guest_flags);
kaf24@8708 371
kaf24@8708 372 HVM_DBG_LOG(DBG_LEVEL_2,
kaf24@8708 373 "restore guest's index %d msr %lx with %lx\n",
kaf24@9333 374 i, (unsigned long)msr_data_index[i],
kaf24@9333 375 (unsigned long)guest_state->msr_items[i]);
kaf24@8708 376 set_bit(i, &host_state->flags);
kaf24@8708 377 wrmsrl(msr_data_index[i], guest_state->msr_items[i]);
kaf24@8708 378 clear_bit(i, &guest_flags);
kaf24@8708 379 }
kaf24@8708 380 }
kaf24@10525 381
kaf24@8708 382 #else /* __i386__ */
kaf24@8708 383
kaf24@9333 384 #define vmx_save_segments(v) ((void)0)
kaf24@9333 385 #define vmx_load_msrs() ((void)0)
kaf24@9333 386 #define vmx_restore_msrs(v) ((void)0)
kaf24@9333 387 #define vmx_save_init_msrs() ((void)0)
kaf24@9333 388
kaf24@9333 389 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
kaf24@9333 390 {
kaf24@8708 391 return 0;
kaf24@8708 392 }
kaf24@9333 393
kaf24@9333 394 static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
kaf24@9333 395 {
kaf24@8708 396 return 0;
kaf24@8708 397 }
kaf24@9333 398
kaf24@9333 399 #endif /* __i386__ */
kaf24@9333 400
kaf24@10525 401 #define loaddebug(_v,_reg) \
kaf24@10525 402 __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
kaf24@10526 403 #define savedebug(_v,_reg) \
kaf24@10526 404 __asm__ __volatile__ ("mov %%db" #_reg ",%0" : "=r" ((_v)->debugreg[_reg]))
kaf24@10526 405
kaf24@10526 406 static inline void vmx_save_dr(struct vcpu *v)
kaf24@10526 407 {
kaf24@10526 408 if ( v->arch.hvm_vcpu.flag_dr_dirty )
kaf24@10526 409 {
kaf24@10526 410 savedebug(&v->arch.guest_context, 0);
kaf24@10526 411 savedebug(&v->arch.guest_context, 1);
kaf24@10526 412 savedebug(&v->arch.guest_context, 2);
kaf24@10526 413 savedebug(&v->arch.guest_context, 3);
kaf24@10526 414 savedebug(&v->arch.guest_context, 6);
kaf24@10526 415
kaf24@10526 416 v->arch.hvm_vcpu.flag_dr_dirty = 0;
kaf24@10526 417
kaf24@10526 418 v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
kaf24@10526 419 __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
kaf24@10526 420 v->arch.hvm_vcpu.u.vmx.exec_control);
kaf24@10526 421 }
kaf24@10526 422 }
kaf24@10525 423
kaf24@10525 424 static inline void __restore_debug_registers(struct vcpu *v)
kaf24@10525 425 {
kaf24@10525 426 loaddebug(&v->arch.guest_context, 0);
kaf24@10525 427 loaddebug(&v->arch.guest_context, 1);
kaf24@10525 428 loaddebug(&v->arch.guest_context, 2);
kaf24@10525 429 loaddebug(&v->arch.guest_context, 3);
kaf24@10525 430 /* No DR4 or DR5. */
kaf24@10525 431 loaddebug(&v->arch.guest_context, 6);
kaf24@10525 432 /* DR7 is loaded from the vmcs. */
kaf24@10525 433 }
kaf24@10525 434
kaf24@10525 435 /*
kaf24@10525 436 * DR7 is saved and restored on every vmexit. Other debug registers only
kaf24@10525 437 * need to be restored if their value is going to affect execution -- i.e.,
kaf24@10525 438 * if one of the breakpoints is enabled. So mask out all bits that don't
kaf24@10525 439 * enable some breakpoint functionality.
kaf24@10525 440 *
kaf24@10525 441 * This is in part necessary because bit 10 of DR7 is hardwired to 1, so a
kaf24@10525 442 * simple if( guest_dr7 ) will always return true. As long as we're masking,
kaf24@10525 443 * we might as well do it right.
kaf24@10525 444 */
kaf24@10525 445 #define DR7_ACTIVE_MASK 0xff
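/* Bits 0-7 of DR7 are the per-breakpoint L0/G0 .. L3/G3 enable bits. */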
kaf24@10525 446
kaf24@10525 447 static inline void vmx_restore_dr(struct vcpu *v)
kaf24@10525 448 {
kaf24@10525 449 unsigned long guest_dr7;
kaf24@10525 450
kaf24@10525 451 __vmread(GUEST_DR7, &guest_dr7);
kaf24@10525 452
kaf24@10525 453 /* Assumes guest does not have DR access at time of context switch. */
kaf24@10525 454 if ( unlikely(guest_dr7 & DR7_ACTIVE_MASK) )
kaf24@10525 455 __restore_debug_registers(v);
kaf24@10525 456 }
kaf24@10525 457
kaf24@9334 458 static void vmx_freeze_time(struct vcpu *v)
kaf24@9334 459 {
kaf24@10182 460 struct periodic_time *pt=&v->domain->arch.hvm_domain.pl_time.periodic_tm;
kaf24@9334 461
kaf24@10182 462 if ( pt->enabled && pt->first_injected && !v->arch.hvm_vcpu.guest_time ) {
kaf24@10182 463 v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
kaf24@10182 464 stop_timer(&(pt->timer));
kaf24@9432 465 }
kaf24@9334 466 }
kaf24@9334 467
kaf24@9333 468 static void vmx_ctxt_switch_from(struct vcpu *v)
kaf24@9333 469 {
kaf24@9334 470 vmx_freeze_time(v);
kaf24@9333 471 vmx_save_segments(v);
kaf24@9333 472 vmx_load_msrs();
kaf24@10526 473 vmx_save_dr(v);
kaf24@9333 474 }
kaf24@9333 475
kaf24@9333 476 static void vmx_ctxt_switch_to(struct vcpu *v)
kaf24@9333 477 {
kaf24@9333 478 vmx_restore_msrs(v);
kaf24@10525 479 vmx_restore_dr(v);
kaf24@9333 480 }
kaf24@8708 481
kfraser@10902 482 static void stop_vmx(void)
kaf24@8708 483 {
kaf24@8708 484 if (read_cr4() & X86_CR4_VMXE)
kaf24@8708 485 __vmxoff();
kaf24@8708 486 }
kaf24@8708 487
kaf24@8806 488 void vmx_migrate_timers(struct vcpu *v)
kaf24@8806 489 {
kaf24@10182 490 struct periodic_time *pt = &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
kaf24@8806 491
kaf24@10182 492 if ( pt->enabled ) {
kaf24@10182 493 migrate_timer(&pt->timer, v->processor);
kaf24@10182 494 migrate_timer(&v->arch.hvm_vmx.hlt_timer, v->processor);
kaf24@10182 495 }
kaf24@8806 496 if ( hvm_apic_support(v->domain) && VLAPIC(v))
kaf24@8806 497 migrate_timer(&(VLAPIC(v)->vlapic_timer), v->processor);
kaf24@8806 498 }
kaf24@8806 499
kaf24@9436 500 static void vmx_store_cpu_guest_regs(
kaf24@9436 501 struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
kaf24@9436 502 {
kaf24@10356 503 vmx_vmcs_enter(v);
kaf24@9436 504
kaf24@9436 505 if ( regs != NULL )
kaf24@9436 506 {
kaf24@9436 507 __vmread(GUEST_RFLAGS, &regs->eflags);
kaf24@9436 508 __vmread(GUEST_SS_SELECTOR, &regs->ss);
kaf24@9436 509 __vmread(GUEST_CS_SELECTOR, &regs->cs);
kaf24@9436 510 __vmread(GUEST_DS_SELECTOR, &regs->ds);
kaf24@9436 511 __vmread(GUEST_ES_SELECTOR, &regs->es);
kaf24@9436 512 __vmread(GUEST_GS_SELECTOR, &regs->gs);
kaf24@9436 513 __vmread(GUEST_FS_SELECTOR, &regs->fs);
kaf24@9436 514 __vmread(GUEST_RIP, &regs->eip);
kaf24@9436 515 __vmread(GUEST_RSP, &regs->esp);
kaf24@9436 516 }
kaf24@9436 517
kaf24@9436 518 if ( crs != NULL )
kaf24@9436 519 {
kaf24@9436 520 __vmread(CR0_READ_SHADOW, &crs[0]);
kaf24@9436 521 __vmread(GUEST_CR3, &crs[3]);
kaf24@9436 522 __vmread(CR4_READ_SHADOW, &crs[4]);
kaf24@9436 523 }
kaf24@9436 524
kaf24@10356 525 vmx_vmcs_exit(v);
kaf24@8708 526 }
kaf24@8708 527
kaf24@9922 528 /*
kaf24@9922 529 * The VMX spec (section 4.3.1.2, Checks on Guest Segment
kaf24@9922 530 * Registers) says that virtual-8086 mode guests' segment
kaf24@9922 531 * base-address fields in the VMCS must be equal to their
kaf24@9922 532 * corresponding segment selector field shifted right by
kaf24@9922 533 * four bits upon vmentry.
kaf24@9922 534 *
kaf24@9922 535 * This function (called only for VM86-mode guests) fixes
kaf24@9922 536 * the bases to be consistent with the selectors in regs
kaf24@9922 537 * if they're not already. Without this, we can fail the
kaf24@9922 538 * vmentry check mentioned above.
kaf24@9922 539 */
kaf24@9922 540 static void fixup_vm86_seg_bases(struct cpu_user_regs *regs)
kaf24@9922 541 {
kaf24@9922 542 int err = 0;
kaf24@9922 543 unsigned long base;
kaf24@9922 544
kaf24@9922 545 err |= __vmread(GUEST_ES_BASE, &base);
kaf24@9922 546 if (regs->es << 4 != base)
kaf24@9922 547 err |= __vmwrite(GUEST_ES_BASE, regs->es << 4);
kaf24@9922 548 err |= __vmread(GUEST_CS_BASE, &base);
kaf24@9922 549 if (regs->cs << 4 != base)
kaf24@9922 550 err |= __vmwrite(GUEST_CS_BASE, regs->cs << 4);
kaf24@9922 551 err |= __vmread(GUEST_SS_BASE, &base);
kaf24@9922 552 if (regs->ss << 4 != base)
kaf24@9922 553 err |= __vmwrite(GUEST_SS_BASE, regs->ss << 4);
kaf24@9922 554 err |= __vmread(GUEST_DS_BASE, &base);
kaf24@9922 555 if (regs->ds << 4 != base)
kaf24@9922 556 err |= __vmwrite(GUEST_DS_BASE, regs->ds << 4);
kaf24@9922 557 err |= __vmread(GUEST_FS_BASE, &base);
kaf24@9922 558 if (regs->fs << 4 != base)
kaf24@9922 559 err |= __vmwrite(GUEST_FS_BASE, regs->fs << 4);
kaf24@9922 560 err |= __vmread(GUEST_GS_BASE, &base);
kaf24@9922 561 if (regs->gs << 4 != base)
kaf24@9922 562 err |= __vmwrite(GUEST_GS_BASE, regs->gs << 4);
kaf24@9922 563
kaf24@9922 564 BUG_ON(err);
kaf24@9922 565 }
kaf24@9922 566
kfraser@10902 567 static void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
kaf24@8708 568 {
kaf24@10356 569 vmx_vmcs_enter(v);
kaf24@9534 570
kaf24@8708 571 __vmwrite(GUEST_SS_SELECTOR, regs->ss);
kaf24@9534 572 __vmwrite(GUEST_DS_SELECTOR, regs->ds);
kaf24@9534 573 __vmwrite(GUEST_ES_SELECTOR, regs->es);
kaf24@9534 574 __vmwrite(GUEST_GS_SELECTOR, regs->gs);
kaf24@9534 575 __vmwrite(GUEST_FS_SELECTOR, regs->fs);
kaf24@9534 576
kaf24@8708 577 __vmwrite(GUEST_RSP, regs->esp);
kaf24@8708 578
kaf24@8708 579 __vmwrite(GUEST_RFLAGS, regs->eflags);
kaf24@8708 580 if (regs->eflags & EF_TF)
kaf24@8708 581 __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
kaf24@8708 582 else
kaf24@8708 583 __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
kaf24@9927 584 if (regs->eflags & EF_VM)
kaf24@9922 585 fixup_vm86_seg_bases(regs);
kaf24@8708 586
kaf24@8708 587 __vmwrite(GUEST_CS_SELECTOR, regs->cs);
kaf24@8708 588 __vmwrite(GUEST_RIP, regs->eip);
kaf24@8708 589
kaf24@10356 590 vmx_vmcs_exit(v);
kaf24@8708 591 }
kaf24@8708 592
kfraser@10902 593 static int vmx_realmode(struct vcpu *v)
kaf24@8708 594 {
kaf24@8708 595 unsigned long rflags;
kaf24@8708 596
kaf24@8708 597 __vmread(GUEST_RFLAGS, &rflags);
kaf24@8708 598 return rflags & X86_EFLAGS_VM;
kaf24@8708 599 }
kaf24@8708 600
kfraser@10902 601 static int vmx_instruction_length(struct vcpu *v)
kaf24@8708 602 {
kaf24@8708 603 unsigned long inst_len;
kaf24@8708 604
kaf24@8708 605 if (__vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len))
kaf24@8708 606 return 0;
kaf24@8708 607 return inst_len;
kaf24@8708 608 }
kaf24@8708 609
kfraser@10902 610 static unsigned long vmx_get_ctrl_reg(struct vcpu *v, unsigned int num)
kaf24@8898 611 {
kaf24@8898 612 switch ( num )
kaf24@8898 613 {
kaf24@8898 614 case 0:
kaf24@8898 615 return v->arch.hvm_vmx.cpu_cr0;
kaf24@8898 616 case 2:
kaf24@8898 617 return v->arch.hvm_vmx.cpu_cr2;
kaf24@8898 618 case 3:
kaf24@8898 619 return v->arch.hvm_vmx.cpu_cr3;
kaf24@8898 620 default:
kaf24@8898 621 BUG();
kaf24@8898 622 }
kaf24@8898 623 return 0; /* dummy */
kaf24@8898 624 }
kaf24@8898 625
kaf24@9016 626 /* SMP VMX guest support */
kfraser@10902 627 static void vmx_init_ap_context(struct vcpu_guest_context *ctxt,
kaf24@9016 628 int vcpuid, int trampoline_vector)
kaf24@9016 629 {
kaf24@9016 630 int i;
kaf24@9016 631
kaf24@9016 632 memset(ctxt, 0, sizeof(*ctxt));
kaf24@9016 633
kaf24@9016 634 /*
kaf24@9016 635 * Initial register values:
kaf24@9016 636 */
kaf24@9016 637 ctxt->user_regs.eip = VMXASSIST_BASE;
kaf24@9016 638 ctxt->user_regs.edx = vcpuid;
kaf24@9016 639 ctxt->user_regs.ebx = trampoline_vector;
kaf24@9016 640
kaf24@9016 641 ctxt->flags = VGCF_HVM_GUEST;
kaf24@9016 642
kaf24@9016 643 /* Virtual IDT is empty at start-of-day. */
kaf24@9016 644 for ( i = 0; i < 256; i++ )
kaf24@9016 645 {
kaf24@9016 646 ctxt->trap_ctxt[i].vector = i;
kaf24@9016 647 ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS;
kaf24@9016 648 }
kaf24@9016 649
kaf24@9016 650 /* No callback handlers. */
kaf24@9016 651 #if defined(__i386__)
kaf24@9016 652 ctxt->event_callback_cs = FLAT_KERNEL_CS;
kaf24@9016 653 ctxt->failsafe_callback_cs = FLAT_KERNEL_CS;
kaf24@9016 654 #endif
kaf24@9016 655 }
kaf24@9016 656
kaf24@8708 657 void do_nmi(struct cpu_user_regs *);
kaf24@8708 658
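/*
 * Each VMX capability MSR reports the allowed settings of the
 * corresponding controls: the low 32 bits give the allowed 0-settings
 * (bits that must be 1) and the high 32 bits give the allowed
 * 1-settings (bits that may be 1). See the Intel SDM's VMX capability
 * reporting appendix.
 */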
kaf24@9224 659 static int check_vmx_controls(u32 ctrls, u32 msr)
kaf24@8708 660 {
kaf24@8708 661 u32 vmx_msr_low, vmx_msr_high;
kaf24@8708 662
kaf24@8708 663 rdmsr(msr, vmx_msr_low, vmx_msr_high);
kaf24@9224 664 if ( (ctrls < vmx_msr_low) || (ctrls > vmx_msr_high) )
kaf24@9224 665 {
kaf24@8708 666 printk("Insufficient VMX capability 0x%x, "
kaf24@8708 667 "msr=0x%x, low=0x%08x, high=0x%x\n",
kaf24@8708 668 ctrls, msr, vmx_msr_low, vmx_msr_high);
kaf24@8708 669 return 0;
kaf24@8708 670 }
kaf24@8708 671 return 1;
kaf24@8708 672 }
kaf24@8708 673
kfraser@10902 674 /* Setup HVM interfaces */
kfraser@10902 675 static void vmx_setup_hvm_funcs(void)
kfraser@10902 676 {
kfraser@10902 677 if ( hvm_enabled )
kfraser@10902 678 return;
kfraser@10902 679
kfraser@10902 680 hvm_funcs.disable = stop_vmx;
kfraser@10902 681
kfraser@10902 682 hvm_funcs.initialize_guest_resources = vmx_initialize_guest_resources;
kfraser@10902 683 hvm_funcs.relinquish_guest_resources = vmx_relinquish_guest_resources;
kfraser@10902 684
kfraser@10902 685 hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs;
kfraser@10902 686 hvm_funcs.load_cpu_guest_regs = vmx_load_cpu_guest_regs;
kfraser@10902 687
kfraser@10902 688 hvm_funcs.realmode = vmx_realmode;
kfraser@10902 689 hvm_funcs.paging_enabled = vmx_paging_enabled;
kfraser@10902 690 hvm_funcs.instruction_length = vmx_instruction_length;
kfraser@10902 691 hvm_funcs.get_guest_ctrl_reg = vmx_get_ctrl_reg;
kfraser@10902 692
kfraser@10902 693 hvm_funcs.init_ap_context = vmx_init_ap_context;
kfraser@10902 694 }
kfraser@10902 695
kfraser@10892 696 static void vmx_init_hypercall_page(struct domain *d, void *hypercall_page)
kfraser@10892 697 {
kfraser@10892 698 char *p;
kfraser@10892 699 int i;
kfraser@10892 700
kfraser@10892 701 memset(hypercall_page, 0, PAGE_SIZE);
kfraser@10892 702
kfraser@10892 703 for ( i = 0; i < (PAGE_SIZE / 32); i++ )
kfraser@10892 704 {
kfraser@10892 705 p = (char *)(hypercall_page + (i * 32));
kfraser@10892 706 *(u8 *)(p + 0) = 0xb8; /* mov imm32, %eax */
kfraser@10892 707 *(u32 *)(p + 1) = i;
kfraser@10892 708 *(u8 *)(p + 5) = 0x0f; /* vmcall */
kfraser@10892 709 *(u8 *)(p + 6) = 0x01;
kfraser@10892 710 *(u8 *)(p + 7) = 0xc1;
kfraser@10892 711 *(u8 *)(p + 8) = 0xc3; /* ret */
kfraser@10892 712 }
kfraser@10892 713
kfraser@10892 714 /* Don't support HYPERVISOR_iret at the moment */
kfraser@10892 715 *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
kfraser@10892 716 }
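/*
 * Each 32-byte stub built above is entered by a direct call into the
 * hypercall page, e.g. (hypothetical guest-side usage, not code from
 * this file):
 *
 *   call hypercall_page + __HYPERVISOR_xen_version * 32
 *
 * with the hypercall arguments already loaded into registers.
 */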
kfraser@10892 717
kaf24@8708 718 int start_vmx(void)
kaf24@8708 719 {
kfraser@10648 720 u32 eax, edx;
kaf24@8708 721 struct vmcs_struct *vmcs;
kaf24@8708 722
kaf24@8708 723 /*
kaf24@8708 724 * Xen does not fill x86_capability words except 0.
kaf24@8708 725 */
kfraser@10648 726 boot_cpu_data.x86_capability[4] = cpuid_ecx(1);
kaf24@8708 727
kaf24@8708 728 if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability)))
kaf24@8708 729 return 0;
kaf24@8708 730
kaf24@8708 731 rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
kaf24@8708 732
kfraser@10648 733 if ( eax & IA32_FEATURE_CONTROL_MSR_LOCK )
kfraser@10648 734 {
kfraser@10648 735 if ( (eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0 )
kfraser@10648 736 {
kaf24@8708 737 printk("VMX disabled by Feature Control MSR.\n");
kaf24@8708 738 return 0;
kaf24@8708 739 }
kaf24@8708 740 }
kfraser@10648 741 else
kfraser@10648 742 {
kaf24@8708 743 wrmsr(IA32_FEATURE_CONTROL_MSR,
kaf24@8708 744 IA32_FEATURE_CONTROL_MSR_LOCK |
kaf24@8708 745 IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON, 0);
kaf24@8708 746 }
kaf24@8708 747
kfraser@10648 748 if ( !check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS,
kfraser@10648 749 MSR_IA32_VMX_PINBASED_CTLS_MSR) )
kaf24@8708 750 return 0;
kfraser@10648 751 if ( !check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS,
kfraser@10648 752 MSR_IA32_VMX_PROCBASED_CTLS_MSR) )
kaf24@8708 753 return 0;
kfraser@10648 754 if ( !check_vmx_controls(MONITOR_VM_EXIT_CONTROLS,
kfraser@10648 755 MSR_IA32_VMX_EXIT_CTLS_MSR) )
kaf24@8708 756 return 0;
kfraser@10648 757 if ( !check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS,
kfraser@10648 758 MSR_IA32_VMX_ENTRY_CTLS_MSR) )
kaf24@8708 759 return 0;
kaf24@8708 760
kfraser@10648 761 set_in_cr4(X86_CR4_VMXE);
kaf24@8708 762
kfraser@10648 763 vmx_init_vmcs_config();
kfraser@10648 764
kfraser@10648 765 if ( (vmcs = vmx_alloc_host_vmcs()) == NULL )
kfraser@10648 766 {
kfraser@10648 767 printk("Failed to allocate host VMCS\n");
kaf24@8708 768 return 0;
kaf24@8708 769 }
kaf24@8708 770
kfraser@10648 771 if ( __vmxon(virt_to_maddr(vmcs)) )
kfraser@10648 772 {
kaf24@10356 773 printk("VMXON failed\n");
kfraser@10648 774 vmx_free_host_vmcs(vmcs);
kaf24@10356 775 return 0;
kaf24@8708 776 }
kaf24@8708 777
kaf24@10356 778 printk("VMXON is done\n");
kaf24@10356 779
kaf24@8708 780 vmx_save_init_msrs();
kaf24@8708 781
kfraser@10902 782 vmx_setup_hvm_funcs();
kaf24@9016 783
kfraser@10892 784 hvm_funcs.init_hypercall_page = vmx_init_hypercall_page;
kfraser@10892 785
kaf24@8708 786 hvm_enabled = 1;
kaf24@8708 787
kaf24@8708 788 return 1;
kaf24@8708 789 }
kaf24@8708 790
kaf24@8708 791 /*
kaf24@8708 792 * Not all cases receive a valid value in the VM-exit instruction-length field.
kaf24@8708 793 */
kaf24@8708 794 #define __get_instruction_length(len) \
kaf24@8708 795 __vmread(VM_EXIT_INSTRUCTION_LEN, &(len)); \
kaf24@8708 796 if ((len) < 1 || (len) > 15) \
kaf24@8708 797 __hvm_bug(&regs);
kaf24@8708 798
kaf24@8708 799 static inline void __update_guest_eip(unsigned long inst_len)
kaf24@8708 800 {
kaf24@8708 801 unsigned long current_eip;
kaf24@8708 802
kaf24@8708 803 __vmread(GUEST_RIP, &current_eip);
kaf24@8708 804 __vmwrite(GUEST_RIP, current_eip + inst_len);
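/* Having emulated the instruction, clear any STI/MOV-SS interrupt shadow. */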
kaf24@9219 805 __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
kaf24@8708 806 }
kaf24@8708 807
kaf24@8708 808
kaf24@8708 809 static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
kaf24@8708 810 {
kaf24@8708 811 unsigned long gpa; /* FIXME: PAE */
kaf24@8708 812 int result;
kaf24@8708 813
kaf24@8708 814 #if 0 /* keep for debugging */
kaf24@8708 815 {
kaf24@8708 816 unsigned long eip;
kaf24@8708 817
kaf24@8708 818 __vmread(GUEST_RIP, &eip);
kaf24@8708 819 HVM_DBG_LOG(DBG_LEVEL_VMMU,
kaf24@8708 820 "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
kaf24@8708 821 va, eip, (unsigned long)regs->error_code);
kaf24@8708 822 }
kaf24@8708 823 #endif
kaf24@8708 824
kaf24@8752 825 if ( !vmx_paging_enabled(current) )
kaf24@8752 826 {
kaf24@8752 827 /* construct 1-to-1 direct mapping */
kaf24@8752 828 if ( shadow_direct_map_fault(va, regs) )
kaf24@8752 829 return 1;
kaf24@8752 830
kaf24@8708 831 handle_mmio(va, va);
kaf24@8708 832 TRACE_VMEXIT (2,2);
kaf24@8708 833 return 1;
kaf24@8708 834 }
kaf24@8708 835 gpa = gva_to_gpa(va);
kaf24@8708 836
kaf24@8708 837 /* Use 1:1 page table to identify MMIO address space */
kaf24@8708 838 if ( mmio_space(gpa) ){
kaf24@8708 839 struct vcpu *v = current;
kaf24@8708 840 /* No support for APIC */
kaf24@8708 841 if (!hvm_apic_support(v->domain) && gpa >= 0xFEC00000) {
kaf24@8708 842 u32 inst_len;
kaf24@8708 843 __vmread(VM_EXIT_INSTRUCTION_LEN, &(inst_len));
kaf24@8708 844 __update_guest_eip(inst_len);
kaf24@8708 845 return 1;
kaf24@8708 846 }
kaf24@8708 847 TRACE_VMEXIT (2,2);
kaf24@10767 848 /* in the case of MMIO, we are more interested in gpa than in va */
kaf24@10767 849 TRACE_VMEXIT (4,gpa);
kaf24@8708 850 handle_mmio(va, gpa);
kaf24@8708 851 return 1;
kaf24@8708 852 }
kaf24@8708 853
kaf24@8708 854 result = shadow_fault(va, regs);
kaf24@8708 855 TRACE_VMEXIT (2,result);
kaf24@8708 856 #if 0
kaf24@8708 857 if ( !result )
kaf24@8708 858 {
kaf24@8708 859 __vmread(GUEST_RIP, &eip);
kaf24@8708 860 printk("vmx pgfault to guest va=%lx eip=%lx\n", va, eip);
kaf24@8708 861 }
kaf24@8708 862 #endif
kaf24@8708 863
kaf24@8708 864 return result;
kaf24@8708 865 }
kaf24@8708 866
kaf24@8708 867 static void vmx_do_no_device_fault(void)
kaf24@8708 868 {
kaf24@8708 869 unsigned long cr0;
kaf24@8708 870 struct vcpu *v = current;
kaf24@8708 871
kaf24@8708 872 setup_fpu(current);
kaf24@8852 873 __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
kaf24@8849 874
kaf24@8849 875 /* Disable TS in guest CR0 unless the guest wants the exception too. */
kaf24@8708 876 __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
kaf24@8849 877 if ( !(cr0 & X86_CR0_TS) )
kaf24@8849 878 {
kaf24@8708 879 __vmread_vcpu(v, GUEST_CR0, &cr0);
kaf24@8708 880 cr0 &= ~X86_CR0_TS;
kaf24@8708 881 __vmwrite(GUEST_CR0, cr0);
kaf24@8708 882 }
kaf24@8708 883 }
kaf24@8708 884
kaf24@10360 885 #define bitmaskof(idx) (1U << ((idx)&31))
kaf24@9311 886 static void vmx_vmexit_do_cpuid(struct cpu_user_regs *regs)
kaf24@8708 887 {
kaf24@9311 888 unsigned int input = (unsigned int)regs->eax;
kaf24@9311 889 unsigned int count = (unsigned int)regs->ecx;
kaf24@8708 890 unsigned int eax, ebx, ecx, edx;
kaf24@8708 891 unsigned long eip;
kaf24@8708 892 struct vcpu *v = current;
kaf24@8708 893
kaf24@8708 894 __vmread(GUEST_RIP, &eip);
kaf24@8708 895
kaf24@9311 896 HVM_DBG_LOG(DBG_LEVEL_3, "(eax) 0x%08lx, (ebx) 0x%08lx, "
kaf24@9311 897 "(ecx) 0x%08lx, (edx) 0x%08lx, (esi) 0x%08lx, (edi) 0x%08lx",
kaf24@8708 898 (unsigned long)regs->eax, (unsigned long)regs->ebx,
kaf24@8708 899 (unsigned long)regs->ecx, (unsigned long)regs->edx,
kaf24@8708 900 (unsigned long)regs->esi, (unsigned long)regs->edi);
kaf24@8708 901
kaf24@10360 902 if ( input == CPUID_LEAF_0x4 )
kaf24@10360 903 {
kaf24@9311 904 cpuid_count(input, count, &eax, &ebx, &ecx, &edx);
kaf24@10360 905 eax &= NUM_CORES_RESET_MASK;
kaf24@10360 906 }
kfraser@10661 907 else if ( !cpuid_hypervisor_leaves(input, &eax, &ebx, &ecx, &edx) )
kaf24@10360 908 {
kaf24@9311 909 cpuid(input, &eax, &ebx, &ecx, &edx);
kaf24@8708 910
kaf24@10360 911 if ( input == CPUID_LEAF_0x1 )
kaf24@9803 912 {
kaf24@10360 913 /* mask off reserved bits */
kaf24@10360 914 ecx &= ~VMX_VCPU_CPUID_L1_ECX_RESERVED;
kaf24@9803 915
kaf24@10360 916 if ( !hvm_apic_support(v->domain) ||
kaf24@10360 917 !vlapic_global_enabled((VLAPIC(v))) )
kaf24@10360 918 {
kaf24@10360 919 /* Since the apic is disabled, avoid any
kaf24@10360 920 confusion about SMP cpus being available */
kaf24@8708 921
kaf24@10360 922 clear_bit(X86_FEATURE_APIC, &edx);
kaf24@10360 923 }
kaf24@10360 924
kaf24@8898 925 #if CONFIG_PAGING_LEVELS < 3
kaf24@10360 926 edx &= ~(bitmaskof(X86_FEATURE_PAE) |
kaf24@10360 927 bitmaskof(X86_FEATURE_PSE) |
kaf24@10360 928 bitmaskof(X86_FEATURE_PSE36));
kaf24@8898 929 #else
kaf24@10360 930 if ( v->domain->arch.ops->guest_paging_levels == PAGING_L2 )
kaf24@10360 931 {
kfraser@10646 932 if ( v->domain->arch.hvm_domain.pae_enabled )
kfraser@10646 933 clear_bit(X86_FEATURE_PSE36, &edx);
kfraser@10646 934 else
kfraser@10646 935 {
kaf24@10360 936 clear_bit(X86_FEATURE_PAE, &edx);
kfraser@10646 937 clear_bit(X86_FEATURE_PSE, &edx);
kfraser@10646 938 clear_bit(X86_FEATURE_PSE36, &edx);
kfraser@10646 939 }
kaf24@10360 940 }
kaf24@10360 941 #endif
kaf24@10360 942
kaf24@10360 943 ebx &= NUM_THREADS_RESET_MASK;
kaf24@10360 944
kaf24@10360 945 /* Unsupportable for virtualised CPUs. */
kaf24@10360 946 ecx &= ~(bitmaskof(X86_FEATURE_VMXE) |
kaf24@10360 947 bitmaskof(X86_FEATURE_EST) |
kaf24@10360 948 bitmaskof(X86_FEATURE_TM2) |
kaf24@10360 949 bitmaskof(X86_FEATURE_CID) |
kaf24@10360 950 bitmaskof(X86_FEATURE_MWAIT) );
kaf24@10360 951
kaf24@10360 952 edx &= ~( bitmaskof(X86_FEATURE_HT) |
kaf24@10360 953 bitmaskof(X86_FEATURE_MCA) |
kaf24@10360 954 bitmaskof(X86_FEATURE_MCE) |
kaf24@10360 955 bitmaskof(X86_FEATURE_ACPI) |
kaf24@10360 956 bitmaskof(X86_FEATURE_ACC) );
kaf24@10360 957 }
kaf24@10360 958 else if ( ( input == CPUID_LEAF_0x6 )
kaf24@10360 959 || ( input == CPUID_LEAF_0x9 )
kaf24@10360 960 || ( input == CPUID_LEAF_0xA ))
kaf24@8708 961 {
kaf24@10360 962 eax = ebx = ecx = edx = 0x0;
kaf24@10360 963 }
kaf24@10360 964 #ifdef __i386__
kaf24@10360 965 else if ( input == CPUID_LEAF_0x80000001 )
kaf24@10360 966 {
kaf24@10360 967 clear_bit(X86_FEATURE_LAHF_LM & 31, &ecx);
kaf24@10360 968
kaf24@10360 969 clear_bit(X86_FEATURE_LM & 31, &edx);
kaf24@10360 970 clear_bit(X86_FEATURE_SYSCALL & 31, &edx);
kaf24@8708 971 }
kaf24@8898 972 #endif
kaf24@8708 973 }
kaf24@8708 974
kaf24@8708 975 regs->eax = (unsigned long) eax;
kaf24@8708 976 regs->ebx = (unsigned long) ebx;
kaf24@8708 977 regs->ecx = (unsigned long) ecx;
kaf24@8708 978 regs->edx = (unsigned long) edx;
kaf24@8708 979
kaf24@9311 980 HVM_DBG_LOG(DBG_LEVEL_3, "eip@%lx, input: 0x%lx, "
kaf24@9311 981 "output: eax = 0x%08lx, ebx = 0x%08lx, "
kaf24@9311 982 "ecx = 0x%08lx, edx = 0x%08lx",
kaf24@9311 983 (unsigned long)eip, (unsigned long)input,
kaf24@9311 984 (unsigned long)eax, (unsigned long)ebx,
kaf24@9311 985 (unsigned long)ecx, (unsigned long)edx);
kaf24@8708 986 }
kaf24@8708 987
kaf24@8708 988 #define CASE_GET_REG_P(REG, reg) \
kaf24@8708 989 case REG_ ## REG: reg_p = (unsigned long *)&(regs->reg); break
kaf24@8708 990
kaf24@9950 991 #ifdef __i386__
kaf24@9950 992 #define CASE_EXTEND_GET_REG_P
kaf24@9950 993 #else
kaf24@9950 994 #define CASE_EXTEND_GET_REG_P \
kaf24@9950 995 CASE_GET_REG_P(R8, r8); \
kaf24@9950 996 CASE_GET_REG_P(R9, r9); \
kaf24@9950 997 CASE_GET_REG_P(R10, r10); \
kaf24@9950 998 CASE_GET_REG_P(R11, r11); \
kaf24@9950 999 CASE_GET_REG_P(R12, r12); \
kaf24@9950 1000 CASE_GET_REG_P(R13, r13); \
kaf24@9950 1001 CASE_GET_REG_P(R14, r14); \
kaf24@9950 1002 CASE_GET_REG_P(R15, r15)
kaf24@9950 1003 #endif
kaf24@9950 1004
kaf24@10527 1005 static void vmx_dr_access(unsigned long exit_qualification,
kaf24@10527 1006 struct cpu_user_regs *regs)
kaf24@8708 1007 {
kaf24@8708 1008 struct vcpu *v = current;
kaf24@8708 1009
kaf24@10527 1010 v->arch.hvm_vcpu.flag_dr_dirty = 1;
kaf24@8708 1011
kaf24@10527 1012 /* We could probably be smarter about this */
kaf24@10527 1013 __restore_debug_registers(v);
kaf24@10527 1014
kaf24@10527 1015 /* Allow guest direct access to DR registers */
kaf24@10527 1016 v->arch.hvm_vcpu.u.vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING;
kaf24@10527 1017 __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
kaf24@10527 1018 v->arch.hvm_vcpu.u.vmx.exec_control);
kaf24@8708 1019 }
kaf24@8708 1020
kaf24@8708 1021 /*
kaf24@8708 1022 * Invalidate the TLB for va. Invalidate the shadow page corresponding
kaf24@8708 1023 * to the address va.
kaf24@8708 1024 */
kaf24@8708 1025 static void vmx_vmexit_do_invlpg(unsigned long va)
kaf24@8708 1026 {
kaf24@8708 1027 unsigned long eip;
kaf24@8708 1028 struct vcpu *v = current;
kaf24@8708 1029
kaf24@8708 1030 __vmread(GUEST_RIP, &eip);
kaf24@8708 1031
kaf24@8708 1032 HVM_DBG_LOG(DBG_LEVEL_VMMU, "vmx_vmexit_do_invlpg: eip=%lx, va=%lx",
kaf24@8708 1033 eip, va);
kaf24@8708 1034
kaf24@8708 1035 /*
kaf24@8708 1036 * We do the safest things first, then try to update the shadow,
kaf24@8708 1037 * copying from the guest.
kaf24@8708 1038 */
kaf24@8708 1039 shadow_invlpg(v, va);
kaf24@8708 1040 }
kaf24@8708 1041
kaf24@8708 1042 static int check_for_null_selector(unsigned long eip)
kaf24@8708 1043 {
kaf24@8708 1044 unsigned char inst[MAX_INST_LEN];
kaf24@8708 1045 unsigned long sel;
kaf24@8708 1046 int i, inst_len;
kaf24@8708 1047 int inst_copy_from_guest(unsigned char *, unsigned long, int);
kaf24@8708 1048
kaf24@8708 1049 __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
kaf24@8708 1050 memset(inst, 0, MAX_INST_LEN);
kaf24@8708 1051 if (inst_copy_from_guest(inst, eip, inst_len) != inst_len) {
kaf24@8708 1052 printf("check_for_null_selector: failed to fetch guest instruction\n");
kaf24@8708 1053 domain_crash_synchronous();
kaf24@8708 1054 }
kaf24@8708 1055
kaf24@8708 1056 for (i = 0; i < inst_len; i++) {
kaf24@8708 1057 switch (inst[i]) {
kaf24@8708 1058 case 0xf3: /* REPZ */
kaf24@8708 1059 case 0xf2: /* REPNZ */
kaf24@8708 1060 case 0xf0: /* LOCK */
kaf24@8708 1061 case 0x66: /* data32 */
kaf24@8708 1062 case 0x67: /* addr32 */
kaf24@8708 1063 continue;
kaf24@8708 1064 case 0x2e: /* CS */
kaf24@8708 1065 __vmread(GUEST_CS_SELECTOR, &sel);
kaf24@8708 1066 break;
kaf24@8708 1067 case 0x36: /* SS */
kaf24@8708 1068 __vmread(GUEST_SS_SELECTOR, &sel);
kaf24@8708 1069 break;
kaf24@8708 1070 case 0x26: /* ES */
kaf24@8708 1071 __vmread(GUEST_ES_SELECTOR, &sel);
kaf24@8708 1072 break;
kaf24@8708 1073 case 0x64: /* FS */
kaf24@8708 1074 __vmread(GUEST_FS_SELECTOR, &sel);
kaf24@8708 1075 break;
kaf24@8708 1076 case 0x65: /* GS */
kaf24@8708 1077 __vmread(GUEST_GS_SELECTOR, &sel);
kaf24@8708 1078 break;
kaf24@8708 1079 case 0x3e: /* DS */
kaf24@8708 1080 /* FALLTHROUGH */
kaf24@8708 1081 default:
kaf24@8708 1082 /* DS is the default */
kaf24@8708 1083 __vmread(GUEST_DS_SELECTOR, &sel);
kaf24@8708 1084 }
kaf24@8708 1085 return sel == 0 ? 1 : 0;
kaf24@8708 1086 }
kaf24@8708 1087
kaf24@8708 1088 return 0;
kaf24@8708 1089 }
kaf24@8708 1090
kaf24@8708 1091 extern void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
kaf24@8708 1092 unsigned long count, int size, long value,
kaf24@8708 1093 int dir, int pvalid);
kaf24@8708 1094
kaf24@10763 1095 static void vmx_io_instruction(unsigned long exit_qualification,
kaf24@10763 1096 unsigned long inst_len)
kaf24@8708 1097 {
kaf24@10763 1098 struct cpu_user_regs *regs;
kaf24@10763 1099 struct hvm_io_op *pio_opp;
kaf24@8708 1100 unsigned long eip, cs, eflags;
kaf24@8708 1101 unsigned long port, size, dir;
kaf24@8708 1102 int vm86;
kaf24@8708 1103
kaf24@10763 1104 pio_opp = &current->arch.hvm_vcpu.io_op;
kaf24@10763 1105 pio_opp->instr = INSTR_PIO;
kaf24@10763 1106 pio_opp->flags = 0;
kaf24@10763 1107
kaf24@10763 1108 regs = &pio_opp->io_context;
kaf24@10763 1109
kaf24@10763 1110 /* Copy current guest state into io instruction state structure. */
kaf24@10763 1111 memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
kfraser@10903 1112 hvm_store_cpu_guest_regs(current, regs, NULL);
kaf24@8708 1113
kaf24@8708 1114 __vmread(GUEST_RIP, &eip);
kaf24@8708 1115 __vmread(GUEST_CS_SELECTOR, &cs);
kaf24@8708 1116 __vmread(GUEST_RFLAGS, &eflags);
kaf24@8708 1117 vm86 = eflags & X86_EFLAGS_VM ? 1 : 0;
kaf24@8708 1118
kaf24@9110 1119 HVM_DBG_LOG(DBG_LEVEL_IO,
kaf24@8708 1120 "vmx_io_instruction: vm86 %d, eip=%lx:%lx, "
kaf24@8708 1121 "exit_qualification = %lx",
kaf24@8708 1122 vm86, cs, eip, exit_qualification);
kaf24@8708 1123
kaf24@8708 1124 if (test_bit(6, &exit_qualification))
kaf24@8708 1125 port = (exit_qualification >> 16) & 0xFFFF;
kaf24@8708 1126 else
kaf24@8708 1127 port = regs->edx & 0xffff;
kaf24@9447 1128 TRACE_VMEXIT(1, port);
kaf24@8708 1129 size = (exit_qualification & 7) + 1;
kaf24@8708 1130 dir = test_bit(3, &exit_qualification); /* direction */
kaf24@8708 1131
kaf24@8708 1132 if (test_bit(4, &exit_qualification)) { /* string instruction */
kaf24@8708 1133 unsigned long addr, count = 1;
kaf24@8708 1134 int sign = regs->eflags & EF_DF ? -1 : 1;
kaf24@8708 1135
kaf24@8708 1136 __vmread(GUEST_LINEAR_ADDRESS, &addr);
kaf24@8708 1137
kaf24@8708 1138 /*
kaf24@8708 1139 * In protected mode, guest linear address is invalid if the
kaf24@8708 1140 * selector is null.
kaf24@8708 1141 */
kaf24@8708 1142 if (!vm86 && check_for_null_selector(eip))
kaf24@8708 1143 addr = dir == IOREQ_WRITE ? regs->esi : regs->edi;
kaf24@8708 1144
kaf24@8708 1145 if (test_bit(5, &exit_qualification)) { /* "rep" prefix */
kaf24@10763 1146 pio_opp->flags |= REPZ;
kaf24@8708 1147 count = vm86 ? regs->ecx & 0xFFFF : regs->ecx;
kaf24@8708 1148 }
kaf24@8708 1149
kaf24@8708 1150 /*
kaf24@8708 1151 * Handle string pio instructions that cross pages or that
kaf24@8708 1152 * are unaligned. See the comments in hvm_domain.c/handle_mmio()
kaf24@8708 1153 */
kaf24@8708 1154 if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
kaf24@8708 1155 unsigned long value = 0;
kaf24@8708 1156
kaf24@10763 1157 pio_opp->flags |= OVERLAP;
kaf24@8708 1158 if (dir == IOREQ_WRITE)
kaf24@8708 1159 hvm_copy(&value, addr, size, HVM_COPY_IN);
kaf24@8708 1160 send_pio_req(regs, port, 1, size, value, dir, 0);
kaf24@8708 1161 } else {
kaf24@8708 1162 if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) {
kaf24@8708 1163 if (sign > 0)
kaf24@8708 1164 count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
kaf24@8708 1165 else
kaf24@8708 1166 count = (addr & ~PAGE_MASK) / size;
kaf24@8708 1167 } else
kaf24@8708 1168 __update_guest_eip(inst_len);
kaf24@8708 1169
kaf24@8708 1170 send_pio_req(regs, port, count, size, addr, dir, 1);
kaf24@8708 1171 }
kaf24@8708 1172 } else {
kaf24@8708 1173 if (port == 0xe9 && dir == IOREQ_WRITE && size == 1)
kaf24@8708 1174 hvm_print_line(current, regs->eax); /* guest debug output */
kaf24@8708 1175
kaf24@8708 1176 __update_guest_eip(inst_len);
kaf24@8708 1177 send_pio_req(regs, port, 1, size, regs->eax, dir, 0);
kaf24@8708 1178 }
kaf24@8708 1179 }
kaf24@8708 1180
kaf24@8708 1181 int
kaf24@8708 1182 vmx_world_save(struct vcpu *v, struct vmx_assist_context *c)
kaf24@8708 1183 {
kaf24@8708 1184 unsigned long inst_len;
kaf24@8708 1185 int error = 0;
kaf24@8708 1186
kaf24@8708 1187 error |= __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
kaf24@8708 1188 error |= __vmread(GUEST_RIP, &c->eip);
kaf24@8708 1189 c->eip += inst_len; /* skip transition instruction */
kaf24@8708 1190 error |= __vmread(GUEST_RSP, &c->esp);
kaf24@8708 1191 error |= __vmread(GUEST_RFLAGS, &c->eflags);
kaf24@8708 1192
kaf24@8708 1193 error |= __vmread(CR0_READ_SHADOW, &c->cr0);
kaf24@8708 1194 c->cr3 = v->arch.hvm_vmx.cpu_cr3;
kaf24@8708 1195 error |= __vmread(CR4_READ_SHADOW, &c->cr4);
kaf24@8708 1196
kaf24@8708 1197 error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit);
kaf24@8708 1198 error |= __vmread(GUEST_IDTR_BASE, &c->idtr_base);
kaf24@8708 1199
kaf24@8708 1200 error |= __vmread(GUEST_GDTR_LIMIT, &c->gdtr_limit);
kaf24@8708 1201 error |= __vmread(GUEST_GDTR_BASE, &c->gdtr_base);
kaf24@8708 1202
kaf24@8708 1203 error |= __vmread(GUEST_CS_SELECTOR, &c->cs_sel);
kaf24@8708 1204 error |= __vmread(GUEST_CS_LIMIT, &c->cs_limit);
kaf24@8708 1205 error |= __vmread(GUEST_CS_BASE, &c->cs_base);
kaf24@8708 1206 error |= __vmread(GUEST_CS_AR_BYTES, &c->cs_arbytes.bytes);
kaf24@8708 1207
kaf24@8708 1208 error |= __vmread(GUEST_DS_SELECTOR, &c->ds_sel);
kaf24@8708 1209 error |= __vmread(GUEST_DS_LIMIT, &c->ds_limit);
kaf24@8708 1210 error |= __vmread(GUEST_DS_BASE, &c->ds_base);
kaf24@8708 1211 error |= __vmread(GUEST_DS_AR_BYTES, &c->ds_arbytes.bytes);
kaf24@8708 1212
kaf24@8708 1213 error |= __vmread(GUEST_ES_SELECTOR, &c->es_sel);
kaf24@8708 1214 error |= __vmread(GUEST_ES_LIMIT, &c->es_limit);
kaf24@8708 1215 error |= __vmread(GUEST_ES_BASE, &c->es_base);
kaf24@8708 1216 error |= __vmread(GUEST_ES_AR_BYTES, &c->es_arbytes.bytes);
kaf24@8708 1217
kaf24@8708 1218 error |= __vmread(GUEST_SS_SELECTOR, &c->ss_sel);
kaf24@8708 1219 error |= __vmread(GUEST_SS_LIMIT, &c->ss_limit);
kaf24@8708 1220 error |= __vmread(GUEST_SS_BASE, &c->ss_base);
kaf24@8708 1221 error |= __vmread(GUEST_SS_AR_BYTES, &c->ss_arbytes.bytes);
kaf24@8708 1222
kaf24@8708 1223 error |= __vmread(GUEST_FS_SELECTOR, &c->fs_sel);
kaf24@8708 1224 error |= __vmread(GUEST_FS_LIMIT, &c->fs_limit);
kaf24@8708 1225 error |= __vmread(GUEST_FS_BASE, &c->fs_base);
kaf24@8708 1226 error |= __vmread(GUEST_FS_AR_BYTES, &c->fs_arbytes.bytes);
kaf24@8708 1227
kaf24@8708 1228 error |= __vmread(GUEST_GS_SELECTOR, &c->gs_sel);
kaf24@8708 1229 error |= __vmread(GUEST_GS_LIMIT, &c->gs_limit);
kaf24@8708 1230 error |= __vmread(GUEST_GS_BASE, &c->gs_base);
kaf24@8708 1231 error |= __vmread(GUEST_GS_AR_BYTES, &c->gs_arbytes.bytes);
kaf24@8708 1232
kaf24@8708 1233 error |= __vmread(GUEST_TR_SELECTOR, &c->tr_sel);
kaf24@8708 1234 error |= __vmread(GUEST_TR_LIMIT, &c->tr_limit);
kaf24@8708 1235 error |= __vmread(GUEST_TR_BASE, &c->tr_base);
kaf24@8708 1236 error |= __vmread(GUEST_TR_AR_BYTES, &c->tr_arbytes.bytes);
kaf24@8708 1237
kaf24@8708 1238 error |= __vmread(GUEST_LDTR_SELECTOR, &c->ldtr_sel);
kaf24@8708 1239 error |= __vmread(GUEST_LDTR_LIMIT, &c->ldtr_limit);
kaf24@8708 1240 error |= __vmread(GUEST_LDTR_BASE, &c->ldtr_base);
kaf24@8708 1241 error |= __vmread(GUEST_LDTR_AR_BYTES, &c->ldtr_arbytes.bytes);
kaf24@8708 1242
kaf24@8708 1243 return !error;
kaf24@8708 1244 }
kaf24@8708 1245
kaf24@8708 1246 int
kaf24@8708 1247 vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
kaf24@8708 1248 {
kaf24@8708 1249 unsigned long mfn, old_cr4, old_base_mfn;
kaf24@8708 1250 int error = 0;
kaf24@8708 1251
kaf24@8708 1252 error |= __vmwrite(GUEST_RIP, c->eip);
kaf24@8708 1253 error |= __vmwrite(GUEST_RSP, c->esp);
kaf24@8708 1254 error |= __vmwrite(GUEST_RFLAGS, c->eflags);
kaf24@8708 1255
kaf24@8708 1256 error |= __vmwrite(CR0_READ_SHADOW, c->cr0);
kaf24@8708 1257
kaf24@8708 1258 if (!vmx_paging_enabled(v)) {
kaf24@8708 1259 HVM_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
kaf24@8708 1260 __vmwrite(GUEST_CR3, pagetable_get_paddr(v->domain->arch.phys_table));
kaf24@8708 1261 goto skip_cr3;
kaf24@8708 1262 }
kaf24@8708 1263
kaf24@8708 1264 if (c->cr3 == v->arch.hvm_vmx.cpu_cr3) {
kaf24@8708 1265 /*
kaf24@8708 1266 * This is a simple TLB flush, implying the guest has
kaf24@8708 1267 * removed some translation or changed page attributes.
kaf24@8708 1268 * We simply invalidate the shadow.
kaf24@8708 1269 */
kaf24@8736 1270 mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
kaf24@8708 1271 if (mfn != pagetable_get_pfn(v->arch.guest_table)) {
kaf24@8708 1272 printk("Invalid CR3 value=%x", c->cr3);
kaf24@8708 1273 domain_crash_synchronous();
kaf24@8708 1274 return 0;
kaf24@8708 1275 }
kaf24@8708 1276 shadow_sync_all(v->domain);
kaf24@8708 1277 } else {
kaf24@8708 1278 /*
kaf24@8708 1279 * If different, make a shadow. Check if the PDBR is valid
kaf24@8708 1280 * first.
kaf24@8708 1281 */
kaf24@8708 1282 HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %x", c->cr3);
kaf24@8708 1283 if ((c->cr3 >> PAGE_SHIFT) > v->domain->max_pages) {
kaf24@8708 1284 printk("Invalid CR3 value=%x", c->cr3);
kaf24@8708 1285 domain_crash_synchronous();
kaf24@8708 1286 return 0;
kaf24@8708 1287 }
kaf24@8736 1288 mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
kaf24@8726 1289 if(!get_page(mfn_to_page(mfn), v->domain))
kaf24@8708 1290 return 0;
kaf24@8708 1291 old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
kaf24@10289 1292 v->arch.guest_table = pagetable_from_pfn(mfn);
kaf24@8708 1293 if (old_base_mfn)
kaf24@8726 1294 put_page(mfn_to_page(old_base_mfn));
kaf24@8708 1295 /*
kaf24@8708 1296 * arch.shadow_table should now hold the next CR3 for shadow
kaf24@8708 1297 */
kaf24@8708 1298 v->arch.hvm_vmx.cpu_cr3 = c->cr3;
kaf24@8898 1299 update_pagetables(v);
kaf24@8708 1300 HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
kaf24@8708 1301 __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.shadow_table));
kaf24@8708 1302 }
kaf24@8708 1303
kaf24@8708 1304 skip_cr3:
kaf24@8708 1305
kaf24@8708 1306 error |= __vmread(CR4_READ_SHADOW, &old_cr4);
kaf24@8708 1307 error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
kaf24@8708 1308 error |= __vmwrite(CR4_READ_SHADOW, c->cr4);
kaf24@8708 1309
kaf24@8708 1310 error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
kaf24@8708 1311 error |= __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
kaf24@8708 1312
kaf24@8708 1313 error |= __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
kaf24@8708 1314 error |= __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
kaf24@8708 1315
kaf24@8708 1316 error |= __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
kaf24@8708 1317 error |= __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
kaf24@8708 1318 error |= __vmwrite(GUEST_CS_BASE, c->cs_base);
kaf24@8708 1319 error |= __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes.bytes);
kaf24@8708 1320
kaf24@8708 1321 error |= __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
kaf24@8708 1322 error |= __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
kaf24@8708 1323 error |= __vmwrite(GUEST_DS_BASE, c->ds_base);
kaf24@8708 1324 error |= __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes.bytes);
kaf24@8708 1325
kaf24@8708 1326 error |= __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
kaf24@8708 1327 error |= __vmwrite(GUEST_ES_LIMIT, c->es_limit);
kaf24@8708 1328 error |= __vmwrite(GUEST_ES_BASE, c->es_base);
kaf24@8708 1329 error |= __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes.bytes);
kaf24@8708 1330
kaf24@8708 1331 error |= __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
kaf24@8708 1332 error |= __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
kaf24@8708 1333 error |= __vmwrite(GUEST_SS_BASE, c->ss_base);
kaf24@8708 1334 error |= __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes.bytes);
kaf24@8708 1335
kaf24@8708 1336 error |= __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
kaf24@8708 1337 error |= __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
kaf24@8708 1338 error |= __vmwrite(GUEST_FS_BASE, c->fs_base);
kaf24@8708 1339 error |= __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes.bytes);
kaf24@8708 1340
kaf24@8708 1341 error |= __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
kaf24@8708 1342 error |= __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
kaf24@8708 1343 error |= __vmwrite(GUEST_GS_BASE, c->gs_base);
kaf24@8708 1344 error |= __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes.bytes);
kaf24@8708 1345
kaf24@8708 1346 error |= __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
kaf24@8708 1347 error |= __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
kaf24@8708 1348 error |= __vmwrite(GUEST_TR_BASE, c->tr_base);
kaf24@8708 1349 error |= __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes.bytes);
kaf24@8708 1350
kaf24@8708 1351 error |= __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
kaf24@8708 1352 error |= __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
kaf24@8708 1353 error |= __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
kaf24@8708 1354 error |= __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
kaf24@8708 1355
kaf24@8708 1356 return !error;
kaf24@8708 1357 }
kaf24@8708 1358
kaf24@8708 1359 enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
kaf24@8708 1360
kaf24@8708 1361 int
kaf24@8708 1362 vmx_assist(struct vcpu *v, int mode)
kaf24@8708 1363 {
kaf24@8708 1364 struct vmx_assist_context c;
kaf24@8708 1365 u32 magic;
kaf24@8708 1366 u32 cp;
kaf24@8708 1367
kaf24@8708 1368 /* make sure vmxassist exists (this is not an error) */
kaf24@8708 1369 if (!hvm_copy(&magic, VMXASSIST_MAGIC_OFFSET, sizeof(magic), HVM_COPY_IN))
kaf24@8708 1370 return 0;
kaf24@8708 1371 if (magic != VMXASSIST_MAGIC)
kaf24@8708 1372 return 0;
kaf24@8708 1373
kaf24@8708 1374 switch (mode) {
kaf24@8708 1375 /*
kaf24@8708 1376 * Transfer control to vmxassist.
kaf24@8708 1377 * Store the current context in VMXASSIST_OLD_CONTEXT and load
kaf24@8708 1378 * the new VMXASSIST_NEW_CONTEXT context. This context was created
kaf24@8708 1379 * by vmxassist and will transfer control to it.
kaf24@8708 1380 */
kaf24@8708 1381 case VMX_ASSIST_INVOKE:
kaf24@8708 1382 /* save the old context */
kaf24@8708 1383 if (!hvm_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), HVM_COPY_IN))
kaf24@8708 1384 goto error;
kaf24@8708 1385 if (cp != 0) {
kaf24@8708 1386 if (!vmx_world_save(v, &c))
kaf24@8708 1387 goto error;
kaf24@8708 1388 if (!hvm_copy(&c, cp, sizeof(c), HVM_COPY_OUT))
kaf24@8708 1389 goto error;
kaf24@8708 1390 }
kaf24@8708 1391
kaf24@8708 1392         /* restore the new context; this activates vmxassist */
kaf24@8708 1393 if (!hvm_copy(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp), HVM_COPY_IN))
kaf24@8708 1394 goto error;
kaf24@8708 1395 if (cp != 0) {
kaf24@8708 1396 if (!hvm_copy(&c, cp, sizeof(c), HVM_COPY_IN))
kaf24@8708 1397 goto error;
kaf24@8708 1398 if (!vmx_world_restore(v, &c))
kaf24@8708 1399 goto error;
kaf24@8708 1400 return 1;
kaf24@8708 1401 }
kaf24@8708 1402 break;
kaf24@8708 1403
kaf24@8708 1404 /*
kaf24@8708 1405 * Restore the VMXASSIST_OLD_CONTEXT that was saved by VMX_ASSIST_INVOKE
kaf24@8708 1406 * above.
kaf24@8708 1407 */
kaf24@8708 1408 case VMX_ASSIST_RESTORE:
kaf24@8708 1409 /* save the old context */
kaf24@8708 1410 if (!hvm_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), HVM_COPY_IN))
kaf24@8708 1411 goto error;
kaf24@8708 1412 if (cp != 0) {
kaf24@8708 1413 if (!hvm_copy(&c, cp, sizeof(c), HVM_COPY_IN))
kaf24@8708 1414 goto error;
kaf24@8708 1415 if (!vmx_world_restore(v, &c))
kaf24@8708 1416 goto error;
kaf24@8708 1417 return 1;
kaf24@8708 1418 }
kaf24@8708 1419 break;
kaf24@8708 1420 }
kaf24@8708 1421
kaf24@8708 1422 error:
kaf24@8708 1423 printf("Failed to transfer to vmxassist\n");
kaf24@8708 1424 domain_crash_synchronous();
kaf24@8708 1425 return 0;
kaf24@8708 1426 }
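/*
 * Editorial note on the vmx_assist() handshake above (layout assumed to
 * be the one defined in public/hvm/vmx_assist.h):
 *   VMXASSIST_MAGIC_OFFSET: magic cookie proving vmxassist is loaded;
 *   VMXASSIST_OLD_CONTEXT:  guest-physical pointer to the slot where the
 *                           interrupted guest context is saved;
 *   VMXASSIST_NEW_CONTEXT:  guest-physical pointer to vmxassist's own
 *                           entry context.
 * VMX_ASSIST_INVOKE saves the current world through OLD_CONTEXT and loads
 * the context referenced by NEW_CONTEXT; VMX_ASSIST_RESTORE reloads the
 * context referenced by OLD_CONTEXT once vmxassist has finished.
 */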
kaf24@8708 1427
kaf24@8708 1428 static int vmx_set_cr0(unsigned long value)
kaf24@8708 1429 {
kaf24@8708 1430 struct vcpu *v = current;
kaf24@8708 1431 unsigned long mfn;
kaf24@8708 1432 unsigned long eip;
kaf24@8708 1433 int paging_enabled;
kaf24@8708 1434 unsigned long vm_entry_value;
kaf24@8708 1435 unsigned long old_cr0;
kaf24@8708 1436
kaf24@8708 1437 /*
kaf24@8708 1438 * CR0: We don't want to lose PE and PG.
kaf24@8708 1439 */
kaf24@8708 1440 __vmread_vcpu(v, CR0_READ_SHADOW, &old_cr0);
kaf24@8708 1441 paging_enabled = (old_cr0 & X86_CR0_PE) && (old_cr0 & X86_CR0_PG);
kaf24@8849 1442
kaf24@8852 1443 /* TS cleared? Then initialise FPU now. */
kaf24@8852 1444 if ( !(value & X86_CR0_TS) )
kaf24@8708 1445 {
kaf24@8849 1446 setup_fpu(v);
kaf24@8852 1447 __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
kaf24@8708 1448 }
kaf24@8708 1449
kaf24@8708 1450 __vmwrite(GUEST_CR0, value | X86_CR0_PE | X86_CR0_PG | X86_CR0_NE);
kaf24@8708 1451 __vmwrite(CR0_READ_SHADOW, value);
kaf24@8708 1452
kaf24@8708 1453 HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
kaf24@8708 1454
kaf24@8974 1455 if ( (value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled )
kaf24@8974 1456 {
kaf24@8708 1457 /*
kaf24@8974 1458 * Trying to enable guest paging.
kaf24@8708 1459          * The guest CR3 still holds a guest-physical address at this point.
kaf24@8708 1460 */
kaf24@8736 1461 if ( !VALID_MFN(mfn = get_mfn_from_gpfn(
kaf24@8708 1462 v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
kaf24@8726 1463 !get_page(mfn_to_page(mfn), v->domain) )
kaf24@8708 1464 {
kaf24@8708 1465 printk("Invalid CR3 value = %lx", v->arch.hvm_vmx.cpu_cr3);
kaf24@8708 1466 domain_crash_synchronous(); /* need to take a clean path */
kaf24@8708 1467 }
kaf24@8708 1468
kaf24@8708 1469 #if defined(__x86_64__)
kaf24@8974 1470 if ( test_bit(VMX_CPU_STATE_LME_ENABLED,
kaf24@8974 1471 &v->arch.hvm_vmx.cpu_state) &&
kaf24@8974 1472 !test_bit(VMX_CPU_STATE_PAE_ENABLED,
kaf24@8974 1473 &v->arch.hvm_vmx.cpu_state) )
kaf24@8974 1474 {
kaf24@8974 1475             HVM_DBG_LOG(DBG_LEVEL_1, "Guest enabled paging before enabling PAE\n");
kfraser@10822 1476 vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
kaf24@8708 1477 }
kaf24@8974 1478
kaf24@8974 1479 if ( test_bit(VMX_CPU_STATE_LME_ENABLED,
kaf24@8974 1480 &v->arch.hvm_vmx.cpu_state) )
kaf24@8974 1481 {
kaf24@8974 1482             /* PAE must already be enabled here. */
kaf24@8974 1483 HVM_DBG_LOG(DBG_LEVEL_1, "Enable long mode\n");
kaf24@8708 1484 set_bit(VMX_CPU_STATE_LMA_ENABLED,
kaf24@8708 1485 &v->arch.hvm_vmx.cpu_state);
kaf24@8974 1486
kaf24@8708 1487 __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
kaf24@8708 1488 vm_entry_value |= VM_ENTRY_CONTROLS_IA32E_MODE;
kaf24@8708 1489 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
kaf24@8708 1490
kaf24@9420 1491 if ( !shadow_set_guest_paging_levels(v->domain, PAGING_L4) )
kaf24@9420 1492 {
kaf24@8708 1493 printk("Unsupported guest paging levels\n");
kaf24@8708 1494 domain_crash_synchronous(); /* need to take a clean path */
kaf24@8708 1495 }
kaf24@8708 1496 }
kaf24@8708 1497 else
kaf24@8770 1498 #endif /* __x86_64__ */
kaf24@8708 1499 {
kaf24@8770 1500 #if CONFIG_PAGING_LEVELS >= 3
kaf24@9420 1501         /* The guest is a 32-bit guest, possibly with PAE. */
kaf24@9420 1502
kaf24@9420 1503 if ( test_bit(VMX_CPU_STATE_PAE_ENABLED,
kaf24@9420 1504 &v->arch.hvm_vmx.cpu_state) )
kaf24@9420 1505 {
kaf24@9420 1506             /* The guest enabled PAE before enabling PG, so it really
kaf24@9420 1507              * is a PAE guest. */
kaf24@9420 1508 if ( !shadow_set_guest_paging_levels(v->domain, PAGING_L3) )
kaf24@9420 1509 {
kaf24@9420 1510 printk("Unsupported guest paging levels\n");
kaf24@9420 1511 domain_crash_synchronous();
kaf24@9420 1512 }
kaf24@9420 1513 }
kaf24@9420 1514 else
kaf24@9420 1515 {
kaf24@9420 1516 if ( !shadow_set_guest_paging_levels(v->domain, PAGING_L2) )
kaf24@9420 1517 {
kaf24@9420 1518 printk("Unsupported guest paging levels\n");
kaf24@9420 1519 domain_crash_synchronous(); /* need to take a clean path */
kaf24@9420 1520 }
kaf24@8708 1521 }
kaf24@8708 1522 #endif
kaf24@8708 1523 }
kaf24@8708 1524
kaf24@8708 1525 /*
kaf24@8708 1526          * arch.guest_table now points to the machine-physical frame.
kaf24@8708 1527 */
kaf24@10289 1528 v->arch.guest_table = pagetable_from_pfn(mfn);
kaf24@8708 1529 update_pagetables(v);
kaf24@8708 1530
kaf24@8708 1531 HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
kaf24@8708 1532 (unsigned long) (mfn << PAGE_SHIFT));
kaf24@8708 1533
kaf24@8708 1534 __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.shadow_table));
kaf24@8708 1535 /*
kaf24@8708 1536          * arch.shadow_table holds the next CR3 used by the shadow pagetables.
kaf24@8708 1537 */
kaf24@8708 1538 HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
kaf24@8708 1539 v->arch.hvm_vmx.cpu_cr3, mfn);
kaf24@8708 1540 }
kaf24@8708 1541
kaf24@8974 1542 if ( !((value & X86_CR0_PE) && (value & X86_CR0_PG)) && paging_enabled )
kaf24@8974 1543 if ( v->arch.hvm_vmx.cpu_cr3 ) {
kaf24@8736 1544 put_page(mfn_to_page(get_mfn_from_gpfn(
kaf24@8708 1545 v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)));
kaf24@10289 1546 v->arch.guest_table = pagetable_null();
kaf24@8708 1547 }
kaf24@8708 1548
kaf24@8708 1549 /*
kaf24@8708 1550 * VMX does not implement real-mode virtualization. We emulate
kaf24@8708 1551 * real-mode by performing a world switch to VMXAssist whenever
kaf24@8708 1552      * the guest clears the CR0.PE bit.
kaf24@8708 1553 */
kaf24@8974 1554 if ( (value & X86_CR0_PE) == 0 )
kaf24@8974 1555 {
kaf24@8708 1556 if ( value & X86_CR0_PG ) {
kaf24@8708 1557             /* PG set while PE is clear is invalid: inject #GP */
kfraser@10822 1558 vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
kaf24@8708 1559 return 0;
kaf24@8708 1560 } else {
kaf24@8708 1561 /*
kaf24@8708 1562              * Disable paging here.
kaf24@8708 1563              * Behaves the same as the PE == 1 && PG == 0 case.
kaf24@8708 1564 */
kaf24@8974 1565 if ( test_bit(VMX_CPU_STATE_LMA_ENABLED,
kaf24@8974 1566 &v->arch.hvm_vmx.cpu_state) )
kaf24@8974 1567 {
kaf24@8708 1568 clear_bit(VMX_CPU_STATE_LMA_ENABLED,
kaf24@8708 1569 &v->arch.hvm_vmx.cpu_state);
kaf24@8708 1570 __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
kaf24@8708 1571 vm_entry_value &= ~VM_ENTRY_CONTROLS_IA32E_MODE;
kaf24@8708 1572 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
kaf24@8708 1573 }
kaf24@8708 1574 }
kaf24@8708 1575
kaf24@8708 1576 clear_all_shadow_status(v->domain);
kaf24@8974 1577 if ( vmx_assist(v, VMX_ASSIST_INVOKE) ) {
kaf24@8708 1578 set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &v->arch.hvm_vmx.cpu_state);
kaf24@8708 1579 __vmread(GUEST_RIP, &eip);
kaf24@8708 1580 HVM_DBG_LOG(DBG_LEVEL_1,
kaf24@8708 1581                         "Transferring control to vmxassist %%eip 0x%lx\n", eip);
kaf24@8708 1582 return 0; /* do not update eip! */
kaf24@8708 1583 }
kaf24@8974 1584 } else if ( test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
kaf24@8974 1585 &v->arch.hvm_vmx.cpu_state) )
kaf24@8974 1586 {
kaf24@8708 1587 __vmread(GUEST_RIP, &eip);
kaf24@8708 1588 HVM_DBG_LOG(DBG_LEVEL_1,
kaf24@8708 1589 "Enabling CR0.PE at %%eip 0x%lx\n", eip);
kaf24@8974 1590 if ( vmx_assist(v, VMX_ASSIST_RESTORE) )
kaf24@8974 1591 {
kaf24@8708 1592 clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
kaf24@8708 1593 &v->arch.hvm_vmx.cpu_state);
kaf24@8708 1594 __vmread(GUEST_RIP, &eip);
kaf24@8708 1595 HVM_DBG_LOG(DBG_LEVEL_1,
kaf24@8708 1596 "Restoring to %%eip 0x%lx\n", eip);
kaf24@8708 1597 return 0; /* do not update eip! */
kaf24@8708 1598 }
kaf24@8708 1599 }
kaf24@9420 1600 else if ( (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PE )
kaf24@9420 1601 {
kaf24@9420 1602         /* Protected mode without paging: run on the domain's identity phys_table. */
kaf24@9420 1603 clear_all_shadow_status(v->domain);
kaf24@9420 1604 __vmwrite(GUEST_CR3, pagetable_get_paddr(v->domain->arch.phys_table));
kaf24@9420 1605 }
kaf24@8708 1606
kaf24@8708 1607 return 1;
kaf24@8708 1608 }
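/*
 * Editorial summary of the CR0 transitions handled above:
 *   PE=1, PG=1 (paging turned on): validate the guest CR3, take a
 *       reference on its frame and switch to the shadow pagetables;
 *   PE=0: emulate real mode by world-switching into vmxassist
 *       (VMX_ASSIST_INVOKE);
 *   PE=1 while vmxassist is active: world-switch back
 *       (VMX_ASSIST_RESTORE);
 *   PE=1, PG=0: run on the domain's identity phys_table until the
 *       guest re-enables paging.
 */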
kaf24@8708 1609
kaf24@9950 1610 #define CASE_SET_REG(REG, reg) \
kaf24@9950 1611 case REG_ ## REG: regs->reg = value; break
kaf24@9950 1612 #define CASE_GET_REG(REG, reg) \
kaf24@8708 1613 case REG_ ## REG: value = regs->reg; break
kaf24@8708 1614
kaf24@9950 1615 #define CASE_EXTEND_SET_REG \
kaf24@9950 1616 CASE_EXTEND_REG(S)
kaf24@9950 1617 #define CASE_EXTEND_GET_REG \
kaf24@9950 1618 CASE_EXTEND_REG(G)
kaf24@8708 1619
kaf24@8708 1620 #ifdef __i386__
kaf24@8708 1621 #define CASE_EXTEND_REG(T)
kaf24@8708 1622 #else
kaf24@9950 1623 #define CASE_EXTEND_REG(T) \
kaf24@9950 1624 CASE_ ## T ## ET_REG(R8, r8); \
kaf24@9950 1625 CASE_ ## T ## ET_REG(R9, r9); \
kaf24@8708 1626 CASE_ ## T ## ET_REG(R10, r10); \
kaf24@8708 1627 CASE_ ## T ## ET_REG(R11, r11); \
kaf24@8708 1628 CASE_ ## T ## ET_REG(R12, r12); \
kaf24@8708 1629 CASE_ ## T ## ET_REG(R13, r13); \
kaf24@8708 1630 CASE_ ## T ## ET_REG(R14, r14); \
kaf24@9950 1631 CASE_ ## T ## ET_REG(R15, r15)
kaf24@8708 1632 #endif
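/*
 * Editorial example: with the macros above,
 *     CASE_GET_REG(EAX, eax);
 * expands to
 *     case REG_EAX: value = regs->eax; break;
 * and, on x86_64 only, CASE_EXTEND_GET_REG pastes T=G into
 * CASE_EXTEND_REG to add the matching cases for r8..r15.
 */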
kaf24@8708 1633
kaf24@8708 1634 /*
kaf24@8708 1635 * Write to control registers
kaf24@8708 1636 */
kaf24@8708 1637 static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
kaf24@8708 1638 {
kaf24@8708 1639 unsigned long value;
kaf24@8708 1640 unsigned long old_cr;
kaf24@8708 1641 struct vcpu *v = current;
kaf24@8708 1642
kaf24@9950 1643 switch ( gp ) {
kaf24@9950 1644 CASE_GET_REG(EAX, eax);
kaf24@9950 1645 CASE_GET_REG(ECX, ecx);
kaf24@9950 1646 CASE_GET_REG(EDX, edx);
kaf24@9950 1647 CASE_GET_REG(EBX, ebx);
kaf24@9950 1648 CASE_GET_REG(EBP, ebp);
kaf24@9950 1649 CASE_GET_REG(ESI, esi);
kaf24@9950 1650 CASE_GET_REG(EDI, edi);
kaf24@9950 1651 CASE_EXTEND_GET_REG;
kaf24@9950 1652 case REG_ESP:
kaf24@9950 1653 __vmread(GUEST_RSP, &value);
kaf24@8708 1654 break;
kaf24@8708 1655 default:
kaf24@8708 1656 printk("invalid gp: %d\n", gp);
kaf24@8708 1657 __hvm_bug(regs);
kaf24@8708 1658 }
kaf24@8708 1659
kaf24@9950 1660 HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
kaf24@8708 1661
kaf24@9950 1662 switch ( cr ) {
kaf24@8708 1663 case 0:
kaf24@8708 1664 return vmx_set_cr0(value);
kaf24@8708 1665 case 3:
kaf24@8708 1666 {
kaf24@8708 1667 unsigned long old_base_mfn, mfn;
kaf24@8708 1668
kaf24@8708 1669 /*
kaf24@8708 1670 * If paging is not enabled yet, simply copy the value to CR3.
kaf24@8708 1671 */
kaf24@8708 1672 if (!vmx_paging_enabled(v)) {
kaf24@8708 1673 v->arch.hvm_vmx.cpu_cr3 = value;
kaf24@8708 1674 break;
kaf24@8708 1675 }
kaf24@8708 1676
kaf24@8708 1677 /*
kaf24@8708 1678          * Make a new shadow if one does not already exist for this CR3.
kaf24@8708 1679 */
kaf24@8708 1680 if (value == v->arch.hvm_vmx.cpu_cr3) {
kaf24@8708 1681 /*
kaf24@8708 1682              * This is a simple TLB flush, implying the guest has
kaf24@8708 1683 * removed some translation or changed page attributes.
kaf24@8708 1684 * We simply invalidate the shadow.
kaf24@8708 1685 */
kaf24@8736 1686 mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
kaf24@8708 1687 if (mfn != pagetable_get_pfn(v->arch.guest_table))
kaf24@8708 1688 __hvm_bug(regs);
kaf24@8708 1689 shadow_sync_all(v->domain);
kaf24@8708 1690 } else {
kaf24@8708 1691 /*
kaf24@8708 1692 * If different, make a shadow. Check if the PDBR is valid
kaf24@8708 1693 * first.
kaf24@8708 1694 */
kaf24@8708 1695 HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
kaf24@8708 1696 if ( ((value >> PAGE_SHIFT) > v->domain->max_pages ) ||
kaf24@8736 1697 !VALID_MFN(mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT)) ||
kaf24@8726 1698 !get_page(mfn_to_page(mfn), v->domain) )
kaf24@8708 1699 {
kaf24@8708 1700 printk("Invalid CR3 value=%lx", value);
kaf24@8708 1701 domain_crash_synchronous(); /* need to take a clean path */
kaf24@8708 1702 }
kaf24@8708 1703 old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
kaf24@10289 1704 v->arch.guest_table = pagetable_from_pfn(mfn);
kaf24@8708 1705 if (old_base_mfn)
kaf24@8726 1706 put_page(mfn_to_page(old_base_mfn));
kaf24@8708 1707 /*
kaf24@8708 1708              * arch.shadow_table now holds the next CR3 used by the shadow pagetables.
kaf24@8708 1709 */
kaf24@8898 1710 #if CONFIG_PAGING_LEVELS >= 3
kaf24@8898 1711 if ( v->domain->arch.ops->guest_paging_levels == PAGING_L3 )
kaf24@8898 1712 shadow_sync_all(v->domain);
kaf24@8898 1713 #endif
kaf24@8898 1714
kaf24@8708 1715 v->arch.hvm_vmx.cpu_cr3 = value;
kaf24@8898 1716 update_pagetables(v);
kaf24@8708 1717 HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx",
kaf24@8708 1718 value);
kaf24@8708 1719 __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.shadow_table));
kaf24@8708 1720 }
kaf24@8708 1721 break;
kaf24@8708 1722 }
kaf24@8708 1723 case 4: /* CR4 */
kaf24@8708 1724 {
kaf24@8974 1725 __vmread(CR4_READ_SHADOW, &old_cr);
kaf24@8898 1726
kaf24@8974 1727 if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
kaf24@8898 1728 {
kaf24@8708 1729 set_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
kaf24@8898 1730
kaf24@8974 1731 if ( vmx_pgbit_test(v) )
kaf24@8898 1732 {
kaf24@9420 1733 /* The guest is a 32-bit PAE guest. */
kaf24@10483 1734 #if CONFIG_PAGING_LEVELS >= 3
kaf24@8898 1735 unsigned long mfn, old_base_mfn;
kaf24@8898 1736
kaf24@9420 1737 if( !shadow_set_guest_paging_levels(v->domain, PAGING_L3) )
kaf24@8898 1738 {
kaf24@8898 1739 printk("Unsupported guest paging levels\n");
kaf24@8898 1740 domain_crash_synchronous(); /* need to take a clean path */
kaf24@8898 1741 }
kaf24@8898 1742
kaf24@8898 1743 if ( !VALID_MFN(mfn = get_mfn_from_gpfn(
kaf24@8898 1744 v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
kaf24@8974 1745 !get_page(mfn_to_page(mfn), v->domain) )
kaf24@8898 1746 {
kaf24@8898 1747 printk("Invalid CR3 value = %lx", v->arch.hvm_vmx.cpu_cr3);
kaf24@8898 1748 domain_crash_synchronous(); /* need to take a clean path */
kaf24@8898 1749 }
kaf24@8898 1750
kaf24@8898 1751 old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
kaf24@8898 1752 if ( old_base_mfn )
kaf24@8898 1753 put_page(mfn_to_page(old_base_mfn));
kaf24@8898 1754
kaf24@8898 1755 /*
kaf24@8898 1756                  * arch.guest_table now points to the machine-physical frame.
kaf24@8898 1757 */
kaf24@8898 1758
kaf24@10289 1759 v->arch.guest_table = pagetable_from_pfn(mfn);
kaf24@8898 1760 update_pagetables(v);
kaf24@8898 1761
kaf24@8898 1762 HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
kaf24@8898 1763 (unsigned long) (mfn << PAGE_SHIFT));
kaf24@8898 1764
kaf24@8898 1765 __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.shadow_table));
kaf24@8898 1766
kaf24@8898 1767 /*
kaf24@8898 1768                  * arch.shadow_table holds the next CR3 used by the shadow pagetables.
kaf24@8898 1769 */
kaf24@8898 1770
kaf24@8898 1771 HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
kaf24@8898 1772 v->arch.hvm_vmx.cpu_cr3, mfn);
kaf24@8898 1773 #endif
kaf24@8974 1774 }
kaf24@8898 1775 else
kaf24@8898 1776 {
kaf24@9420 1777                 /* The guest is a 64-bit or 32-bit PAE guest. */
kaf24@10483 1778 #if CONFIG_PAGING_LEVELS >= 3
kaf24@9420 1779 if ( (v->domain->arch.ops != NULL) &&
kaf24@9420 1780 v->domain->arch.ops->guest_paging_levels == PAGING_L2)
kaf24@8898 1781 {
kaf24@9420 1782                     /* The guest enabled PAE without enabling PG; it must
kaf24@9420 1783                      * enable PG later, at which point it runs as a 32-bit
kaf24@9420 1784                      * PAE guest. */
kaf24@9420 1785
kaf24@9420 1786 if ( !shadow_set_guest_paging_levels(v->domain,
kaf24@9420 1787 PAGING_L3) )
kaf24@9420 1788 {
kaf24@9420 1789 printk("Unsupported guest paging levels\n");
kaf24@9420 1790 /* need to take a clean path */
kaf24@9420 1791 domain_crash_synchronous();
kaf24@9420 1792 }
kaf24@9420 1793 }
kaf24@8898 1794 #endif
kaf24@8898 1795 }
kaf24@8898 1796 }
kaf24@8898 1797 else if ( value & X86_CR4_PAE )
kaf24@8898 1798 set_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
kaf24@8898 1799 else
kaf24@8898 1800 {
kaf24@8898 1801 if ( test_bit(VMX_CPU_STATE_LMA_ENABLED, &v->arch.hvm_vmx.cpu_state) )
kfraser@10822 1802 vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
kaf24@8898 1803
kaf24@8708 1804 clear_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
kaf24@8708 1805 }
kaf24@8708 1806
kaf24@8708 1807         __vmwrite(GUEST_CR4, value | VMX_CR4_HOST_MASK);
kaf24@8708 1808 __vmwrite(CR4_READ_SHADOW, value);
kaf24@8708 1809
kaf24@8708 1810 /*
kaf24@8708 1811 * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
kaf24@8708 1812 * all TLB entries except global entries.
kaf24@8708 1813 */
kaf24@8898 1814 if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
kaf24@8708 1815 shadow_sync_all(v->domain);
kaf24@8898 1816
kaf24@8708 1817 break;
kaf24@8708 1818 }
kaf24@8708 1819 default:
kaf24@8708 1820         printk("invalid cr: %d\n", cr);
kaf24@8708 1821 __hvm_bug(regs);
kaf24@8708 1822 }
kaf24@8708 1823
kaf24@8708 1824 return 1;
kaf24@8708 1825 }
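/*
 * Editorial note on the CR3 path above: writing the current value back is
 * treated as a TLB flush (shadow_sync_all()), while writing a new value
 * swaps the pagetable references (get_page() on the new base frame,
 * pagetable_from_pfn() into arch.guest_table, put_page() on the old
 * base); update_pagetables() then rebuilds the shadow before GUEST_CR3
 * is pointed at arch.shadow_table.
 */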
kaf24@8708 1826
kaf24@8708 1827 /*
kaf24@8708 1828  * Read from control registers. CR0 and CR4 reads are satisfied from the read shadows, so only CR3 reaches this handler.
kaf24@8708 1829 */
kaf24@8708 1830 static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
kaf24@8708 1831 {
kaf24@8708 1832 unsigned long value;
kaf24@8708 1833 struct vcpu *v = current;
kaf24@8708 1834
kaf24@9950 1835 if ( cr != 3 )
kaf24@8708 1836 __hvm_bug(regs);
kaf24@8708 1837
kaf24@8708 1838 value = (unsigned long) v->arch.hvm_vmx.cpu_cr3;
kaf24@8708 1839
kaf24@9950 1840 switch ( gp ) {
kaf24@9950 1841 CASE_SET_REG(EAX, eax);
kaf24@9950 1842 CASE_SET_REG(ECX, ecx);
kaf24@9950 1843 CASE_SET_REG(EDX, edx);
kaf24@9950 1844 CASE_SET_REG(EBX, ebx);
kaf24@9950 1845 CASE_SET_REG(EBP, ebp);
kaf24@9950 1846 CASE_SET_REG(ESI, esi);
kaf24@9950 1847 CASE_SET_REG(EDI, edi);
kaf24@9950 1848 CASE_EXTEND_SET_REG;
kaf24@9950 1849 case REG_ESP:
kaf24@9950 1850 __vmwrite(GUEST_RSP, value);
kaf24@8708 1851 regs->esp = value;
kaf24@8708 1852 break;
kaf24@8708 1853 default:
kaf24@8708 1854 printk("invalid gp: %d\n", gp);
kaf24@8708 1855 __hvm_bug(regs);
kaf24@8708 1856 }
kaf24@8708 1857
kaf24@9950 1858 HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
kaf24@8708 1859 }
kaf24@8708 1860
kaf24@8708 1861 static int vmx_cr_access(unsigned long exit_qualification, struct cpu_user_regs *regs)
kaf24@8708 1862 {
kaf24@8708 1863 unsigned int gp, cr;
kaf24@8708 1864 unsigned long value;
kaf24@8708 1865 struct vcpu *v = current;
kaf24@8708 1866
kaf24@8708 1867 switch (exit_qualification & CONTROL_REG_ACCESS_TYPE) {
kaf24@8708 1868 case TYPE_MOV_TO_CR:
kaf24@8708 1869 gp = exit_qualification & CONTROL_REG_ACCESS_REG;
kaf24@8708 1870 cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
kaf24@8708 1871 TRACE_VMEXIT(1,TYPE_MOV_TO_CR);
kaf24@8708 1872 TRACE_VMEXIT(2,cr);
kaf24@8708 1873 TRACE_VMEXIT(3,gp);
kaf24@8708 1874 return mov_to_cr(gp, cr, regs);
kaf24@8708 1875 case TYPE_MOV_FROM_CR:
kaf24@8708 1876 gp = exit_qualification & CONTROL_REG_ACCESS_REG;
kaf24@8708 1877 cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
kaf24@8708 1878 TRACE_VMEXIT(1,TYPE_MOV_FROM_CR);
kaf24@8708 1879 TRACE_VMEXIT(2,cr);
kaf24@8708 1880 TRACE_VMEXIT(3,gp);
kaf24@8708 1881 mov_from_cr(cr, gp, regs);
kaf24@8708 1882 break;
kaf24@8708 1883 case TYPE_CLTS:
kaf24@8708 1884 TRACE_VMEXIT(1,TYPE_CLTS);
kaf24@8849 1885
kaf24@8849 1886 /* We initialise the FPU now, to avoid needing another vmexit. */
kaf24@8852 1887 setup_fpu(v);
kaf24@8852 1888 __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
kaf24@8708 1889
kaf24@8708 1890 __vmread_vcpu(v, GUEST_CR0, &value);
kaf24@8708 1891 value &= ~X86_CR0_TS; /* clear TS */
kaf24@8708 1892 __vmwrite(GUEST_CR0, value);
kaf24@8708 1893
kaf24@8708 1894 __vmread_vcpu(v, CR0_READ_SHADOW, &value);
kaf24@8708 1895 value &= ~X86_CR0_TS; /* clear TS */
kaf24@8708 1896 __vmwrite(CR0_READ_SHADOW, value);
kaf24@8708 1897 break;
kaf24@8708 1898 case TYPE_LMSW:
kaf24@8708 1899 TRACE_VMEXIT(1,TYPE_LMSW);
kaf24@8708 1900 __vmread_vcpu(v, CR0_READ_SHADOW, &value);
kaf24@8708 1901 value = (value & ~0xF) |
kaf24@8708 1902 (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
kaf24@8708 1903 return vmx_set_cr0(value);
kaf24@8708 1904 break;
kaf24@8708 1905 default:
kaf24@8708 1906 __hvm_bug(regs);
kaf24@8708 1907 break;
kaf24@8708 1908 }
kaf24@8708 1909 return 1;
kaf24@8708 1910 }
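/*
 * Editorial example (assuming the Intel SDM exit-qualification encoding
 * behind the masks above): for a guest "mov %ecx, %cr4" the access type
 * decodes to TYPE_MOV_TO_CR, the CONTROL_REG_ACCESS_NUM field to 4 and
 * the CONTROL_REG_ACCESS_REG field to REG_ECX, so the exit is routed to
 * mov_to_cr(REG_ECX, 4, regs).
 */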
kaf24@8708 1911
kaf24@8708 1912 static inline void vmx_do_msr_read(struct cpu_user_regs *regs)
kaf24@8708 1913 {
kaf24@8708 1914 u64 msr_content = 0;
kfraser@10892 1915 u32 eax, edx;
kaf24@8708 1916 struct vcpu *v = current;
kaf24@8708 1917
kaf24@8708 1918 HVM_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx",
kaf24@8708 1919 (unsigned long)regs->ecx, (unsigned long)regs->eax,
kaf24@8708 1920 (unsigned long)regs->edx);
kaf24@8708 1921 switch (regs->ecx) {
kaf24@8708 1922 case MSR_IA32_TIME_STAMP_COUNTER:
kaf24@10182 1923 msr_content = hvm_get_guest_time(v);
kaf24@8708 1924 break;
kaf24@8708 1925 case MSR_IA32_SYSENTER_CS:
kaf24@8708 1926 __vmread(GUEST_SYSENTER_CS, (u32 *)&msr_content);
kaf24@8708 1927 break;
kaf24@8708 1928 case MSR_IA32_SYSENTER_ESP:
kaf24@8708 1929 __vmread(GUEST_SYSENTER_ESP, &msr_content);
kaf24@8708 1930 break;
kaf24@8708 1931 case MSR_IA32_SYSENTER_EIP:
kaf24@8708 1932 __vmread(GUEST_SYSENTER_EIP, &msr_content);
kaf24@8708 1933 break;
kaf24@8708 1934 case MSR_IA32_APICBASE:
kaf24@8708 1935 msr_content = VLAPIC(v) ? VLAPIC(v)->apic_base_msr : 0;
kaf24@8708 1936 break;
kaf24@8708 1937 default:
kfraser@10892 1938 if (long_mode_do_msr_read(regs))
kaf24@8708 1939 return;
kfraser@10892 1940
kfraser@10892 1941 if ( rdmsr_hypervisor_regs(regs->ecx, &eax, &edx) )
kfraser@10892 1942 {
kfraser@10892 1943 regs->eax = eax;
kfraser@10892 1944 regs->edx = edx;
kfraser@10892 1945 return;
kfraser@10892 1946 }
kfraser@10892 1947
kaf24@8846 1948 rdmsr_safe(regs->ecx, regs->eax, regs->edx);
kaf24@8708 1949         return; /* rdmsr_safe() already filled eax/edx; don't clobber below */
kaf24@8708 1950 }
kaf24@8708 1951
kaf24@8708 1952 regs->eax = msr_content & 0xFFFFFFFF;
kaf24@8708 1953 regs->edx = msr_content >> 32;
kaf24@8708 1954
kaf24@8708 1955 HVM_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read returns: "
kaf24@8708 1956 "ecx=%lx, eax=%lx, edx=%lx",
kaf24@8708 1957 (unsigned long)regs->ecx, (unsigned long)regs->eax,
kaf24@8708 1958 (unsigned long)regs->edx);
kaf24@8708 1959 }
kaf24@8708 1960
kaf24@8708 1961 static inline void vmx_do_msr_write(struct cpu_user_regs *regs)
kaf24@8708 1962 {
kaf24@8708 1963 u64 msr_content;
kaf24@8708 1964 struct vcpu *v = current;
kaf24@8708 1965
kaf24@8708 1966 HVM_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_write: ecx=%lx, eax=%lx, edx=%lx",
kaf24@8708 1967 (unsigned long)regs->ecx, (unsigned long)regs->eax,
kaf24@8708 1968 (unsigned long)regs->edx);
kaf24@8708 1969
kaf24@8708 1970 msr_content = (regs->eax & 0xFFFFFFFF) | ((u64)regs->edx << 32);
kaf24@8708 1971
kaf24@8708 1972 switch (regs->ecx) {
kaf24@8708 1973 case MSR_IA32_TIME_STAMP_COUNTER:
kaf24@9334 1974 set_guest_time(v, msr_content);
kaf24@8708 1975 break;
kaf24@8708 1976 case MSR_IA32_SYSENTER_CS:
kaf24@8708 1977 __vmwrite(GUEST_SYSENTER_CS, msr_content);
kaf24@8708 1978 break;
kaf24@8708 1979 case MSR_IA32_SYSENTER_ESP:
kaf24@8708 1980 __vmwrite(GUEST_SYSENTER_ESP, msr_content);
kaf24@8708 1981 break;
kaf24@8708 1982 case MSR_IA32_SYSENTER_EIP:
kaf24@8708 1983 __vmwrite(GUEST_SYSENTER_EIP, msr_content);
kaf24@8708 1984 break;
kaf24@8708 1985 case MSR_IA32_APICBASE:
kaf24@8708 1986 vlapic_msr_set(VLAPIC(v), msr_content);
kaf24@8708 1987 break;
kaf24@8708 1988 default:
kfraser@10892 1989 if ( !long_mode_do_msr_write(regs) )
kfraser@10892 1990 wrmsr_hypervisor_regs(regs->ecx, regs->eax, regs->edx);
kaf24@8708 1991 break;
kaf24@8708 1992 }
kaf24@8708 1993
kaf24@8708 1994 HVM_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_write returns: "
kaf24@8708 1995 "ecx=%lx, eax=%lx, edx=%lx",
kaf24@8708 1996 (unsigned long)regs->ecx, (unsigned long)regs->eax,
kaf24@8708 1997 (unsigned long)regs->edx);
kaf24@8708 1998 }
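/*
 * Editorial sketch: both MSR handlers above follow the x86 convention
 * that a 64-bit MSR value travels split across edx:eax.  A hypothetical
 * guest-side helper (illustration only, not part of this file) would
 * recombine it as follows:
 */
#if 0 /* illustration only */
static inline u64 guest_rdmsr(u32 msr)
{
    u32 lo, hi;
    /* rdmsr: msr index in ecx, result returned in edx:eax */
    asm volatile ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );
    return ((u64)hi << 32) | lo;
}
#endif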
kaf24@8708 1999
kaf24@8708 2000 /*
kaf24@8708 2001  * Use this exit to block the vcpu until its next wakeup event.
kaf24@8708 2002 */
kaf24@8708 2003 void vmx_vmexit_do_hlt(void)
kaf24@8708 2004 {
kaf24@8708 2005     struct vcpu *v = current;
kaf24@10182 2006 struct periodic_time *pt = &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
kaf24@8708 2007     s_time_t next_pit = -1, next_wakeup;
kaf24@8708 2008
kaf24@8843 2009 if ( !v->vcpu_id )
kaf24@10182 2010 next_pit = get_scheduled(v, pt->irq, pt);
kaf24@8708 2011 next_wakeup = get_apictime_scheduled(v);
kaf24@8843 2012 if ( (next_pit != -1 && next_pit < next_wakeup) || next_wakeup == -1 )
kaf24@8708 2013 next_wakeup = next_pit;
kaf24@8708 2014     if ( next_wakeup != -1 )
kaf24@8708 2015 set_timer(&current->arch.hvm_vmx.hlt_timer, next_wakeup);
kaf24@8843 2016 hvm_safe_block();
kaf24@8708 2017 }
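/*
 * Editorial note: -1 serves as the "nothing scheduled" sentinel above;
 * the code picks the earlier of the PIT and vlapic deadlines, arms
 * hlt_timer only when some deadline exists, and then blocks the vcpu
 * until an interrupt or the timer wakes it.
 */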
kaf24@8708 2018
kaf24@8708 2019 static inline void vmx_vmexit_do_extint(struct cpu_user_regs *regs)
kaf24@8708 2020 {
kaf24@8708 2021 unsigned int vector;
kaf24@8708 2022 int error;
kaf24@8708 2023
kaf24@8708 2024 asmlinkage void do_IRQ(struct cpu_user_regs *);
kaf24@8846 2025 fastcall void smp_apic_timer_interrupt(struct cpu_user_regs *);
kaf24@8846 2026 fastcall void smp_event_check_interrupt(void);
kaf24@8846 2027 fastcall void smp_invalidate_interrupt(void);
kaf24@8846 2028 fastcall void smp_call_function_interrupt(void);
kaf24@8846 2029 fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs);
kaf24@8846 2030 fastcall void smp_error_interrupt(struct cpu_user_regs *regs);
kaf24@8957 2031 #ifdef CONFIG_X86_MCE_P4THERMAL
kaf24@8957 2032 fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs);
kaf24@8957 2033 #endif
kaf24@8708 2034
kaf24@8708 2035 if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
kaf24@8708 2036 && !(vector & INTR_INFO_VALID_MASK))
kaf24@8708 2037 __hvm_bug(regs);
kaf24@8708 2038
kaf24@9672 2039 vector &= INTR_INFO_VECTOR_MASK;
kaf24@9447 2040 TRACE_VMEXIT(1,vector);
kaf24@8708 2041
kaf24@8708 2042 switch(vector) {
kaf24@8708 2043 case LOCAL_TIMER_VECTOR:
kaf24@8708 2044 smp_apic_timer_interrupt(regs);
kaf24@8708 2045 break;
kaf24@8708 2046 case EVENT_CHECK_VECTOR:
kaf24@8708 2047 smp_event_check_interrupt();
kaf24@8708 2048 break;
kaf24@8708 2049 case INVALIDATE_TLB_VECTOR:
kaf24@8708 2050 smp_invalidate_interrupt();
kaf24@8708 2051 break;
kaf24@8708 2052 case CALL_FUNCTION_VECTOR:
kaf24@8708 2053 smp_call_function_interrupt();
kaf24@8708 2054 break;
kaf24@8708 2055 case SPURIOUS_APIC_VECTOR:
kaf24@8708 2056 smp_spurious_interrupt(regs);
kaf24@8708 2057 break;
kaf24@8708 2058 case ERROR_APIC_VECTOR:
kaf24@8708 2059 smp_error_interrupt(regs);
kaf24@8708 2060 break;
kaf24@8957 2061 #ifdef CONFIG_X86_MCE_P4THERMAL
kaf24@8957 2062 case THERMAL_APIC_VECTOR:
kaf24@8957 2063 smp_thermal_interrupt(regs);
kaf24@8957 2064 break;
kaf24@8957 2065 #endif
kaf24@8708 2066 default:
kaf24@8708 2067 regs->entry_vector = vector;
kaf24@8708 2068 do_IRQ(regs);
kaf24@8708 2069 break;
kaf24@8708 2070 }
kaf24@8708 2071 }
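/*
 * Editorial note: the vector is read back from VM_EXIT_INTR_INFO, which
 * assumes acknowledge-interrupt-on-exit is enabled; the interrupt will
 * therefore never arrive via the host IDT, so the switch above
 * dispatches Xen's own interrupt handlers by hand.
 */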
kaf24@8708 2072
kaf24@8708 2073 #if defined (__x86_64__)
kaf24@8708 2074 void store_cpu_user_regs(struct cpu_user_regs *regs)
kaf24@8708 2075 {
kaf24@8708 2076 __vmread(GUEST_SS_SELECTOR, &regs->ss);
kaf24@8708 2077 __vmread(GUEST_RSP, &regs->rsp);
kaf24@8708 2078 __vmread(GUEST_RFLAGS, &regs->rflags);
kaf24@8708 2079 __vmread(GUEST_CS_SELECTOR, &regs->cs);
kaf24@8708 2080 __vmread(GUEST_DS_SELECTOR, &regs->ds);
kaf24@8708 2081 __vmread(GUEST_ES_SELECTOR, &regs->es);
kaf24@8708 2082 __vmread(GUEST_RIP, &regs->rip);
kaf24@8708 2083 }
kaf24@8708 2084 #elif defined (__i386__)
kaf24@8708 2085 void store_cpu_user_regs(struct cpu_user_regs *regs)
kaf24@8708 2086 {
kaf24@8708 2087 __vmread(GUEST_SS_SELECTOR, &regs->ss);
kaf24@8708 2088 __vmread(GUEST_RSP, &regs->esp);
kaf24@8708 2089 __vmread(GUEST_RFLAGS, &regs->eflags);
kaf24@8708 2090 __vmread(GUEST_CS_SELECTOR, &regs->cs);
kaf24@8708 2091 __vmread(GUEST_DS_SELECTOR, &regs->ds);
kaf24@8708 2092 __vmread(GUEST_ES_SELECTOR, &regs->es);
kaf24@8708 2093 __vmread(GUEST_RIP, &regs->eip);
kaf24@8708 2094 }
kaf24@8708 2095 #endif
kaf24@8708 2096
kaf24@8708 2097 #ifdef XEN_DEBUGGER
kaf24@8708 2098 void save_cpu_user_regs(struct cpu_user_regs *regs)
kaf24@8708 2099 {
kaf24@8708 2100 __vmread(GUEST_SS_SELECTOR, &regs->xss);
kaf24@8708 2101 __vmread(GUEST_RSP, &regs->esp);
kaf24@8708 2102 __vmread(GUEST_RFLAGS, &regs->eflags);
kaf24@8708 2103 __vmread(GUEST_CS_SELECTOR, &regs->xcs);
kaf24@8708 2104 __vmread(GUEST_RIP, &regs->eip);
kaf24@8708 2105
kaf24@8708 2106 __vmread(GUEST_GS_SELECTOR, &regs->xgs);
kaf24@8708 2107 __vmread(GUEST_FS_SELECTOR, &regs->xfs);
kaf24@8708 2108 __vmread(GUEST_ES_SELECTOR, &regs->xes);
kaf24@8708 2109 __vmread(GUEST_DS_SELECTOR, &regs->xds);
kaf24@8708 2110 }
kaf24@8708 2111
kaf24@8708 2112 void restore_cpu_user_regs(struct cpu_user_regs *regs)
kaf24@8708 2113 {
kaf24@8708 2114 __vmwrite(GUEST_SS_SELECTOR, regs->xss);
kaf24@8708 2115 __vmwrite(GUEST_RSP, regs->esp);
kaf24@8708 2116 __vmwrite(GUEST_RFLAGS, regs->eflags);
kaf24@8708 2117 __vmwrite(GUEST_CS_SELECTOR, regs->xcs);
kaf24@8708 2118 __vmwrite(GUEST_RIP, regs->eip);
kaf24@8708 2119
kaf24@8708 2120 __vmwrite(GUEST_GS_SELECTOR, regs->xgs);
kaf24@8708 2121 __vmwrite(GUEST_FS_SELECTOR, regs->xfs);
kaf24@8708 2122 __vmwrite(GUEST_ES_SELECTOR, regs->xes);
kaf24@8708 2123 __vmwrite(GUEST_DS_SELECTOR, regs->xds);
kaf24@8708 2124 }
kaf24@8708 2125 #endif
kaf24@8708 2126
kaf24@8708 2127 asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
kaf24@8708 2128 {
kaf24@9804 2129 unsigned int exit_reason;
kaf24@8708 2130 unsigned long exit_qualification, eip, inst_len = 0;
kaf24@8708 2131 struct vcpu *v = current;
kaf24@8708 2132 int error;
kaf24@8708 2133
kaf24@10311 2134 error = __vmread(VM_EXIT_REASON, &exit_reason);
kaf24@10311 2135 BUG_ON(error);
kaf24@8708 2136
kaf24@8708 2137 perfc_incra(vmexits, exit_reason);
kaf24@8708 2138
kaf24@10311 2139 if ( (exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT) &&
kaf24@10311 2140 (exit_reason != EXIT_REASON_VMCALL) &&
kaf24@10311 2141 (exit_reason != EXIT_REASON_IO_INSTRUCTION) )
kaf24@8708 2142 HVM_DBG_LOG(DBG_LEVEL_0, "exit reason = %x", exit_reason);
kaf24@8708 2143
kaf24@10311 2144 if ( exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT )
kaf24@10311 2145 local_irq_enable();
kaf24@10311 2146
kaf24@10311 2147 if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
kaf24@10311 2148 {
kfraser@10658 2149 unsigned int failed_vmentry_reason = exit_reason & 0xFFFF;
kfraser@10658 2150
kfraser@10658 2151 __vmread(EXIT_QUALIFICATION, &exit_qualification);
kfraser@10658 2152 printk("Failed vm entry (exit reason 0x%x) ", exit_reason);
kfraser@10658 2153 switch ( failed_vmentry_reason ) {
kfraser@10658 2154 case EXIT_REASON_INVALID_GUEST_STATE:
kfraser@10658 2155 printk("caused by invalid guest state (%ld).\n", exit_qualification);
kfraser@10658 2156 break;
kfraser@10658 2157 case EXIT_REASON_MSR_LOADING:
kfraser@10658 2158             printk("caused by MSR loading (entry %ld).\n", exit_qualification);
kfraser@10658 2159 break;
kfraser@10658 2160 case EXIT_REASON_MACHINE_CHECK:
kfraser@10658 2161 printk("caused by machine check.\n");
kfraser@10658 2162 break;
kfraser@10658 2163 default:
kfraser@10658 2164             printk("reason not known yet!\n");
kfraser@10658 2165 break;
kfraser@10658 2166 }
kfraser@10658 2167
kfraser@10658 2168 printk("************* VMCS Area **************\n");
kaf24@10079 2169 vmcs_dump_vcpu();
kaf24@10079 2170 printk("**************************************\n");
kaf24@8708 2171 domain_crash_synchronous();
kaf24@8708 2172 }
kaf24@8708 2173
kaf24@9672 2174 __vmread(GUEST_RIP, &eip);
kaf24@9672 2175 TRACE_VMEXIT(0,exit_reason);
kaf24@8708 2176
kaf24@10311 2177 switch ( exit_reason )
kaf24@10311 2178 {
kaf24@8708 2179 case EXIT_REASON_EXCEPTION_NMI:
kaf24@8708 2180 {
kaf24@8708 2181 /*
kaf24@8708 2182          * We do not enable software-interrupt (INT n) exiting, so this
kaf24@8708 2183          * exit is caused by either (1) an exception (e.g. #PF) in the
kaf24@8708 2184          * guest, or (2) an NMI.
kaf24@8708 2185 */
kaf24@8708 2186 int error;
kaf24@8708 2187 unsigned int vector;
kaf24@8708 2188 unsigned long va;
kaf24@8708 2189
kaf24@8708 2190 if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
kaf24@8708 2191 || !(vector & INTR_INFO_VALID_MASK))
kaf24@8708 2192 __hvm_bug(&regs);
kaf24@9672 2193 vector &= INTR_INFO_VECTOR_MASK;
kaf24@8708 2194
kaf24@8708 2195 TRACE_VMEXIT(1,vector);
kaf24@8708 2196 perfc_incra(cause_vector, vector);
kaf24@8708 2197
kaf24@8708 2198 switch (vector) {
kaf24@8708 2199 #ifdef XEN_DEBUGGER
kaf24@8708 2200 case TRAP_debug:
kaf24@8708 2201 {
kaf24@8708 2202 save_cpu_user_regs(&regs);
kaf24@8708 2203 pdb_handle_exception(1, &regs, 1);
kaf24@8708 2204 restore_cpu_user_regs(&regs);
kaf24@8708 2205 break;
kaf24@8708 2206 }
kaf24@8708 2207 case TRAP_int3:
kaf24@8708 2208 {
kaf24@8708 2209 save_cpu_user_regs(&regs);
kaf24@8708 2210 pdb_handle_exception(3, &regs, 1);
kaf24@8708 2211 restore_cpu_user_regs(&regs);
kaf24@8708 2212 break;
kaf24@8708 2213 }
kaf24@8708 2214 #else
kaf24@8708 2215 case TRAP_debug:
kaf24@8708 2216 {
kaf24@8708 2217 void store_cpu_user_regs(struct cpu_user_regs *regs);
kaf24@8708 2218
kaf24@10523 2219 if ( test_bit(_DOMF_debugging, &v->domain->domain_flags) )
kaf24@10523 2220 {
kaf24@10523 2221 store_cpu_user_regs(&regs);
kaf24@10523 2222 domain_pause_for_debugger();
kaf24@10523 2223 __vm_clear_bit(GUEST_PENDING_DBG_EXCEPTIONS,
kaf24@10523 2224 PENDING_DEBUG_EXC_BS);
kaf24@10523 2225 }
kaf24@10523 2226 else
kaf24@10523 2227 {
kaf24@10523 2228 vmx_reflect_exception(v);
kaf24@10523 2229 __vm_clear_bit(GUEST_PENDING_DBG_EXCEPTIONS,
kaf24@10523 2230 PENDING_DEBUG_EXC_BS);
kaf24@10523 2231 }
kaf24@8708 2232
kaf24@8708 2233 break;
kaf24@8708 2234 }
kaf24@9549 2235 case TRAP_int3:
kaf24@9549 2236 {
kaf24@9549 2237 if ( test_bit(_DOMF_debugging, &v->domain->domain_flags) )
kaf24@9549 2238 domain_pause_for_debugger();
kaf24@9549 2239 else
kfraser@10822 2240 vmx_reflect_exception(v);
kaf24@9549 2241 break;
kaf24@9549 2242 }
kaf24@8708 2243 #endif
kaf24@8708 2244 case TRAP_no_device:
kaf24@8708 2245 {
kaf24@8708 2246 vmx_do_no_device_fault();
kaf24@8708 2247 break;
kaf24@8708 2248 }
kaf24@8708 2249 case TRAP_page_fault:
kaf24@8708 2250 {
kaf24@8708 2251 __vmread(EXIT_QUALIFICATION, &va);
kaf24@8708 2252 __vmread(VM_EXIT_INTR_ERROR_CODE, &regs.error_code);
kaf24@8708 2253
kaf24@8708 2254 TRACE_VMEXIT(3,regs.error_code);
kaf24@8708 2255 TRACE_VMEXIT(4,va);
kaf24@8708 2256
kaf24@8708 2257 HVM_DBG_LOG(DBG_LEVEL_VMMU,
kaf24@8708 2258 "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
kaf24@8708 2259 (unsigned long)regs.eax, (unsigned long)regs.ebx,
kaf24@8708 2260 (unsigned long)regs.ecx, (unsigned long)regs.edx,
kaf24@8708 2261 (unsigned long)regs.esi, (unsigned long)regs.edi);
kaf24@8708 2262
kaf24@8708 2263 if (!(error = vmx_do_page_fault(va, &regs))) {
kaf24@8708 2264 /*
kaf24@8708 2265                  * Inject #PF using the Interruption-Information fields
kaf24@8708 2266 */
kfraser@10822 2267 vmx_inject_hw_exception(v, TRAP_page_fault, regs.error_code);
kaf24@8708 2268 v->arch.hvm_vmx.cpu_cr2 = va;
kaf24@8708 2269 TRACE_3D(TRC_VMX_INT, v->domain->domain_id, TRAP_page_fault, va);
kaf24@8708 2270 }
kaf24@8708 2271 break;
kaf24@8708 2272 }
kaf24@8708 2273 case TRAP_nmi:
kaf24@8708 2274 do_nmi(&regs);
kaf24@8708 2275 break;
kaf24@8708 2276 default:
kaf24@8708 2277 vmx_reflect_exception(v);
kaf24@8708 2278 break;
kaf24@8708 2279 }
kaf24@8708 2280 break;
kaf24@8708 2281 }
kaf24@8708 2282 case EXIT_REASON_EXTERNAL_INTERRUPT:
kaf24@8708 2283 vmx_vmexit_do_extint(&regs);
kaf24@8708 2284 break;
kaf24@8708 2285 case EXIT_REASON_PENDING_INTERRUPT:
kaf24@10524 2286 /*
kaf24@10524 2287          * The only bit set and cleared at this point is
kaf24@10524 2288          * CPU_BASED_VIRTUAL_INTR_PENDING, toggled in
kaf24@10524 2289          * io.c:{enable,disable}_irq_window(), so restore the execution
kaf24@10524 2290          * control to its originally configured value.
kaf24@10524 2291 */
kaf24@10524 2292 v->arch.hvm_vcpu.u.vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
kaf24@10524 2293 v->arch.hvm_vcpu.u.vmx.exec_control |=
kaf24@10524 2294 (MONITOR_CPU_BASED_EXEC_CONTROLS & CPU_BASED_VIRTUAL_INTR_PENDING);
kaf24@8708 2295 __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
kaf24@10524 2296 v->arch.hvm_vcpu.u.vmx.exec_control);
kaf24@8708 2297 break;
kaf24@8708 2298 case EXIT_REASON_TASK_SWITCH:
kaf24@8708 2299 __hvm_bug(&regs);
kaf24@8708 2300 break;
kaf24@8708 2301 case EXIT_REASON_CPUID:
kaf24@9311 2302 vmx_vmexit_do_cpuid(&regs);
kaf24@8708 2303 __get_instruction_length(inst_len);
kaf24@8708 2304 __update_guest_eip(inst_len);
kaf24@8708 2305 break;
kaf24@8708 2306 case EXIT_REASON_HLT:
kaf24@8708 2307 __get_instruction_length(inst_len);
kaf24@8708 2308 __update_guest_eip(inst_len);
kaf24@8708 2309 vmx_vmexit_do_hlt();
kaf24@8708 2310 break;
kaf24@8708 2311 case EXIT_REASON_INVLPG:
kaf24@8708 2312 {
kaf24@8708 2313 unsigned long va;
kaf24@8708 2314
kaf24@8708 2315 __vmread(EXIT_QUALIFICATION, &va);
kaf24@8708 2316 vmx_vmexit_do_invlpg(va);
kaf24@8708 2317 __get_instruction_length(inst_len);
kaf24@8708 2318 __update_guest_eip(inst_len);
kaf24@8708 2319 break;
kaf24@8708 2320 }
kaf24@8708 2321 case EXIT_REASON_VMCALL:
kfraser@10892 2322 {
kaf24@8708 2323 __get_instruction_length(inst_len);
kaf24@8708 2324 __vmread(GUEST_RIP, &eip);
kaf24@8708 2325 __vmread(EXIT_QUALIFICATION, &exit_qualification);
kaf24@8708 2326
kfraser@10892 2327 hvm_do_hypercall(&regs);
kaf24@8708 2328 __update_guest_eip(inst_len);
kaf24@8708 2329 break;
kfraser@10892 2330 }
kaf24@8708 2331 case EXIT_REASON_CR_ACCESS:
kaf24@8708 2332 {
kaf24@8708 2333 __vmread(GUEST_RIP, &eip);
kaf24@8708 2334 __get_instruction_length(inst_len);
kaf24@8708 2335 __vmread(EXIT_QUALIFICATION, &exit_qualification);
kaf24@8708 2336
kaf24@8708 2337         HVM_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len = %lx, exit_qualification = %lx",
kaf24@8708 2338 eip, inst_len, exit_qualification);
kaf24@8708 2339 if (vmx_cr_access(exit_qualification, &regs))
kaf24@8708 2340 __update_guest_eip(inst_len);
kaf24@8708 2341 TRACE_VMEXIT(3,regs.error_code);
kaf24@8708 2342 TRACE_VMEXIT(4,exit_qualification);
kaf24@8708 2343 break;
kaf24@8708 2344 }
kaf24@8708 2345 case EXIT_REASON_DR_ACCESS:
kaf24@8708 2346 __vmread(EXIT_QUALIFICATION, &exit_qualification);
kaf24@8708 2347 vmx_dr_access(exit_qualification, &regs);
kaf24@8708 2348 __get_instruction_length(inst_len);
kaf24@8708 2349 __update_guest_eip(inst_len);
kaf24@8708 2350 break;
kaf24@8708 2351 case EXIT_REASON_IO_INSTRUCTION:
kaf24@8708 2352 __vmread(EXIT_QUALIFICATION, &exit_qualification);
kaf24@8708 2353 __get_instruction_length(inst_len);
kaf24@10763 2354 vmx_io_instruction(exit_qualification, inst_len);
kaf24@8708 2355 TRACE_VMEXIT(4,exit_qualification);
kaf24@8708 2356 break;
kaf24@8708 2357 case EXIT_REASON_MSR_READ:
kaf24@8708 2358 __get_instruction_length(inst_len);
kaf24@8708 2359 vmx_do_msr_read(&regs);
kaf24@8708 2360 __update_guest_eip(inst_len);
kaf24@8708 2361 break;
kaf24@8708 2362 case EXIT_REASON_MSR_WRITE:
kaf24@8708 2363 __vmread(GUEST_RIP, &eip);
kaf24@8708 2364 vmx_do_msr_write(&regs);
kaf24@8708 2365 __get_instruction_length(inst_len);
kaf24@8708 2366 __update_guest_eip(inst_len);
kaf24@8708 2367 break;
kaf24@8708 2368 case EXIT_REASON_MWAIT_INSTRUCTION:
kaf24@8708 2369 __hvm_bug(&regs);
kaf24@8708 2370 break;
kaf24@9226 2371 case EXIT_REASON_VMCLEAR:
kaf24@9226 2372 case EXIT_REASON_VMLAUNCH:
kaf24@9226 2373 case EXIT_REASON_VMPTRLD:
kaf24@9226 2374 case EXIT_REASON_VMPTRST:
kaf24@9226 2375 case EXIT_REASON_VMREAD:
kaf24@9226 2376 case EXIT_REASON_VMRESUME:
kaf24@9226 2377 case EXIT_REASON_VMWRITE:
kaf24@9226 2378 case EXIT_REASON_VMOFF:
kaf24@9226 2379 case EXIT_REASON_VMON:
kaf24@9226 2380         /* Report an invalid-opcode exception when a VMX guest tries to
kaf24@9226 2381            execute any of the VMX instructions. */
kfraser@10822 2382 vmx_inject_hw_exception(v, TRAP_invalid_op, VMX_DELIVER_NO_ERROR_CODE);
kaf24@9226 2383 break;
kaf24@9226 2384
kaf24@8708 2385 default:
kaf24@8708 2386 __hvm_bug(&regs); /* should not happen */
kaf24@8708 2387 }
kaf24@8708 2388 }
kaf24@8708 2389
kaf24@8708 2390 asmlinkage void vmx_load_cr2(void)
kaf24@8708 2391 {
kaf24@8708 2392 struct vcpu *v = current;
kaf24@8708 2393
kaf24@8708 2394 local_irq_disable();
kaf24@9948 2395 asm volatile("mov %0,%%cr2": :"r" (v->arch.hvm_vmx.cpu_cr2));
kaf24@8708 2396 }
kaf24@8708 2397
kaf24@8708 2398 asmlinkage void vmx_trace_vmentry (void)
kaf24@8708 2399 {
kaf24@9447 2400 TRACE_5D(TRC_VMX_VMENTRY,
kaf24@8708 2401 trace_values[smp_processor_id()][0],
kaf24@8708 2402 trace_values[smp_processor_id()][1],
kaf24@8708 2403 trace_values[smp_processor_id()][2],
kaf24@8708 2404 trace_values[smp_processor_id()][3],
kaf24@8708 2405 trace_values[smp_processor_id()][4]);
kaf24@8708 2406 TRACE_VMEXIT(0,9);
kaf24@8708 2407 TRACE_VMEXIT(1,9);
kaf24@8708 2408 TRACE_VMEXIT(2,9);
kaf24@8708 2409 TRACE_VMEXIT(3,9);
kaf24@8708 2410 TRACE_VMEXIT(4,9);
kaf24@8708 2411 return;
kaf24@8708 2412 }
kaf24@8708 2413
kaf24@8708 2414 asmlinkage void vmx_trace_vmexit (void)
kaf24@8708 2415 {
kaf24@9447 2416 TRACE_3D(TRC_VMX_VMEXIT,0,0,0);
kaf24@8708 2417 return;
kaf24@8708 2418 }
kaf24@8708 2419
kaf24@8708 2420 /*
kaf24@8708 2421 * Local variables:
kaf24@8708 2422 * mode: C
kaf24@8708 2423 * c-set-style: "BSD"
kaf24@8708 2424 * c-basic-offset: 4
kaf24@8708 2425 * tab-width: 4
kaf24@8708 2426 * indent-tabs-mode: nil
kaf24@8708 2427 * End:
kaf24@8708 2428 */