ia64/xen-unstable

annotate xen/include/asm-x86/hvm/support.h @ 18026:f454f2cac170

x86 hvm: New boot option 'softtsc' to cause RDTSC to be trapped-and-emulated.

Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

author    Keir Fraser <keir.fraser@citrix.com>
date      Thu Jul 10 15:45:18 2008 +0100 (2008-07-10)
parents   cd5dc735bdf3
children  (none)
/*
 * support.h: HVM support routines used by VT-x and SVM.
 *
 * Leendert van Doorn, leendert@watson.ibm.com
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#ifndef __ASM_X86_HVM_SUPPORT_H__
#define __ASM_X86_HVM_SUPPORT_H__

#include <xen/sched.h>
#include <xen/hvm/save.h>
#include <asm/types.h>
#include <asm/regs.h>
#include <asm/processor.h>

/*
 * Return the I/O request slot shared with the device model for vCPU @v.
 * Safe only for the current vCPU, or with the domain's ioreq lock held.
 */
static inline vcpu_iodata_t *get_ioreq(struct vcpu *v)
{
    struct domain *d = v->domain;
    shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
    ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
    ASSERT(d->arch.hvm_domain.ioreq.va != NULL);
    return &p->vcpu_iodata[v->vcpu_id];
}

#define HVM_DELIVER_NO_ERROR_CODE  -1

#ifndef NDEBUG
#define DBG_LEVEL_0                 (1 << 0)
#define DBG_LEVEL_1                 (1 << 1)
#define DBG_LEVEL_2                 (1 << 2)
#define DBG_LEVEL_3                 (1 << 3)
#define DBG_LEVEL_IO                (1 << 4)
#define DBG_LEVEL_VMMU              (1 << 5)
#define DBG_LEVEL_VLAPIC            (1 << 6)
#define DBG_LEVEL_VLAPIC_TIMER      (1 << 7)
#define DBG_LEVEL_VLAPIC_INTERRUPT  (1 << 8)
#define DBG_LEVEL_IOAPIC            (1 << 9)
#define DBG_LEVEL_HCALL             (1 << 10)
#define DBG_LEVEL_MSR               (1 << 11)

extern unsigned int opt_hvm_debug_level;
#define HVM_DBG_LOG(level, _f, _a...)                                         \
    do {                                                                      \
        if ( unlikely((level) & opt_hvm_debug_level) )                        \
            printk("[HVM:%d.%d] <%s> " _f "\n",                               \
                   current->domain->domain_id, current->vcpu_id, __func__,    \
                   ## _a);                                                    \
    } while (0)
#else
#define HVM_DBG_LOG(level, _f, _a...)
#endif
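
/*
 * Illustrative only: HVM_DBG_LOG compiles away in NDEBUG builds, and a
 * message is emitted only when its level bit is set in opt_hvm_debug_level
 * (typically set via the 'hvm_debug' boot parameter in this era). A
 * hypothetical call site, where 'port' and 'size' are assumed locals:
 *
 *     HVM_DBG_LOG(DBG_LEVEL_IO, "port 0x%x, size %d", port, size);
 */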

extern unsigned long hvm_io_bitmap[];

void hvm_enable(struct hvm_function_table *);

enum hvm_copy_result {
    HVMCOPY_okay = 0,
    HVMCOPY_bad_gva_to_gfn,
    HVMCOPY_bad_gfn_to_mfn
};

/*
 * Copy to/from a guest physical address.
 * Returns HVMCOPY_okay, else HVMCOPY_bad_gfn_to_mfn if the given physical
 * address range does not map entirely onto ordinary machine memory.
 */
enum hvm_copy_result hvm_copy_to_guest_phys(
    paddr_t paddr, void *buf, int size);
enum hvm_copy_result hvm_copy_from_guest_phys(
    void *buf, paddr_t paddr, int size);

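/*
 * A minimal usage sketch (illustrative, not from this file): read a few
 * bytes at a guest physical address into a hypervisor buffer and check the
 * result. 'gpa' is an assumed local holding a guest physical address, and
 * mapping the failure to X86EMUL_UNHANDLEABLE is a plausible caller choice,
 * not something this interface mandates.
 *
 *     uint8_t buf[16];
 *     if ( hvm_copy_from_guest_phys(buf, gpa, sizeof(buf)) != HVMCOPY_okay )
 *         return X86EMUL_UNHANDLEABLE;
 */
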
/*
 * Copy to/from a guest virtual address. @pfec should include PFEC_user_mode
 * if emulating a user-mode access (CPL=3). All other flags in @pfec are
 * managed by the called function: it is therefore optional for the caller
 * to set them.
 *
 * Returns:
 *  HVMCOPY_okay: Copy was entirely successful.
 *  HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
 *                          ordinary machine memory.
 *  HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
 *                          mapping to a guest physical address. In this case
 *                          a page fault exception is automatically queued
 *                          for injection into the current HVM VCPU.
 */
enum hvm_copy_result hvm_copy_to_guest_virt(
    unsigned long vaddr, void *buf, int size, uint32_t pfec);
enum hvm_copy_result hvm_copy_from_guest_virt(
    void *buf, unsigned long vaddr, int size, uint32_t pfec);
enum hvm_copy_result hvm_fetch_from_guest_virt(
    void *buf, unsigned long vaddr, int size, uint32_t pfec);

/*
 * As above (copy to/from a guest virtual address), but no fault is generated
 * when HVMCOPY_bad_gva_to_gfn is returned.
 */
enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
    unsigned long vaddr, void *buf, int size, uint32_t pfec);
enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
    void *buf, unsigned long vaddr, int size, uint32_t pfec);
enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
    void *buf, unsigned long vaddr, int size, uint32_t pfec);

#define HVM_HCALL_completed  0 /* hypercall completed - no further action */
#define HVM_HCALL_preempted  1 /* hypercall preempted - re-execute VMCALL */
#define HVM_HCALL_invalidate 2 /* invalidate ioemu-dm memory cache */
int hvm_do_hypercall(struct cpu_user_regs *pregs);
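
/*
 * How a VMEXIT handler might consume these codes (a hedged sketch; the real
 * VT-x/SVM callers live under vmx/ and svm/, not here, and
 * __update_guest_eip() is named only as a hypothetical helper):
 *
 *     switch ( hvm_do_hypercall(regs) )
 *     {
 *     case HVM_HCALL_preempted:
 *         break;               // leave RIP alone so VMCALL re-executes
 *     case HVM_HCALL_invalidate:
 *         // signal the device model to drop its mapcache, then fall through
 *     case HVM_HCALL_completed:
 *         __update_guest_eip(regs); // advance past the VMCALL instruction
 *         break;
 *     }
 */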

void hvm_hlt(unsigned long rflags);
void hvm_triple_fault(void);

extern int opt_softtsc;
void hvm_rdtsc_intercept(struct cpu_user_regs *regs);

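/*
 * When the 'softtsc' boot option (opt_softtsc) is set, guest RDTSC traps to
 * the hypervisor and hvm_rdtsc_intercept() supplies the result. A minimal
 * sketch of such a handler, assuming hvm_get_guest_time() as the time source
 * (the actual implementation lives in hvm.c and may differ):
 *
 *     void hvm_rdtsc_intercept(struct cpu_user_regs *regs)
 *     {
 *         uint64_t tsc = hvm_get_guest_time(current);
 *
 *         regs->eax = (uint32_t)tsc;         // RDTSC: low 32 bits in EAX,
 *         regs->edx = (uint32_t)(tsc >> 32); // high 32 bits in EDX
 *     }
 */
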
/* These functions all return X86EMUL return codes. */
int hvm_set_efer(uint64_t value);
int hvm_set_cr0(unsigned long value);
int hvm_set_cr3(unsigned long value);
int hvm_set_cr4(unsigned long value);
int hvm_msr_read_intercept(struct cpu_user_regs *regs);
int hvm_msr_write_intercept(struct cpu_user_regs *regs);
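
/*
 * The X86EMUL_* codes come from the x86 emulator headers; X86EMUL_OKAY
 * indicates success. An illustrative caller (hedged: these functions inject
 * any needed guest exception themselves, so a caller typically just tests
 * the return code and stops on failure):
 *
 *     if ( hvm_set_cr0(value) != X86EMUL_OKAY )
 *         return; // e.g. #GP already injected into the guest
 */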

#endif /* __ASM_X86_HVM_SUPPORT_H__ */