ia64/xen-unstable

view xen/include/asm-x86/hvm/support.h @ 16662:e818c24cec03

hvm: For functions which translate virtual addresses to machine
addresses, page faults should be raised only when the gva->gfn
translation fails; such failures should be distinguished from
gfn->mfn translation failures.

The main effect of this is to change the behaviour of functions
derived from __hvm_copy(), which now return a three-way enumeration
and can also automatically inject #PF when the gva->gfn translation
fails.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Dec 27 12:00:30 2007 +0000 (2007-12-27)
parents fd3f6d814f6d
children 3f1cf03826fe
/*
 * support.h: HVM support routines used by VT-x and SVM.
 *
 * Leendert van Doorn, leendert@watson.ibm.com
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#ifndef __ASM_X86_HVM_SUPPORT_H__
#define __ASM_X86_HVM_SUPPORT_H__

#include <xen/sched.h>
#include <xen/hvm/save.h>
#include <asm/types.h>
#include <asm/regs.h>
#include <asm/processor.h>

#ifndef NDEBUG
#define HVM_DEBUG 1
#else
#define HVM_DEBUG 1
#endif

static inline vcpu_iodata_t *get_ioreq(struct vcpu *v)
{
    struct domain *d = v->domain;
    shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
    ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
    ASSERT(d->arch.hvm_domain.ioreq.va != NULL);
    return &p->vcpu_iodata[v->vcpu_id];
}

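/*
 * For illustration only: the ASSERTs above encode the locking rule. A
 * vcpu may touch its own ioreq slot directly, but another vcpu's slot
 * may be touched only while holding the per-domain ioreq lock. A
 * hypothetical foreign reader (vp_ioreq per the public ioreq interface):
 */
static inline ioreq_t example_read_foreign_ioreq(struct vcpu *v)
{
    struct domain *d = v->domain;
    ioreq_t req;

    spin_lock(&d->arch.hvm_domain.ioreq.lock);
    req = get_ioreq(v)->vp_ioreq; /* safe: lock is held */
    spin_unlock(&d->arch.hvm_domain.ioreq.lock);

    return req;
}
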
/* XXX these are really VMX specific */
#define TYPE_MOV_TO_DR          (0 << 4)
#define TYPE_MOV_FROM_DR        (1 << 4)
#define TYPE_MOV_TO_CR          (0 << 4)
#define TYPE_MOV_FROM_CR        (1 << 4)
#define TYPE_CLTS               (2 << 4)
#define TYPE_LMSW               (3 << 4)

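/*
 * For illustration only: on VMX the CR-access exit qualification keeps
 * the access type in bits 5:4, which is what the shifted constants above
 * encode. A hypothetical decoder:
 */
static inline const char *example_cr_access_name(unsigned long exit_qualification)
{
    switch ( exit_qualification & 0x30 ) /* access type, bits 5:4 */
    {
    case TYPE_MOV_TO_CR:   return "mov to cr";
    case TYPE_MOV_FROM_CR: return "mov from cr";
    case TYPE_CLTS:        return "clts";
    default:               return "lmsw"; /* TYPE_LMSW */
    }
}
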
#define HVM_DELIVER_NO_ERROR_CODE  -1

#if HVM_DEBUG
#define DBG_LEVEL_0                 (1 << 0)
#define DBG_LEVEL_1                 (1 << 1)
#define DBG_LEVEL_2                 (1 << 2)
#define DBG_LEVEL_3                 (1 << 3)
#define DBG_LEVEL_IO                (1 << 4)
#define DBG_LEVEL_VMMU              (1 << 5)
#define DBG_LEVEL_VLAPIC            (1 << 6)
#define DBG_LEVEL_VLAPIC_TIMER      (1 << 7)
#define DBG_LEVEL_VLAPIC_INTERRUPT  (1 << 8)
#define DBG_LEVEL_IOAPIC            (1 << 9)
#define DBG_LEVEL_HCALL             (1 << 10)
#define DBG_LEVEL_MSR               (1 << 11)

extern unsigned int opt_hvm_debug_level;
#define HVM_DBG_LOG(level, _f, _a...)                                         \
    do {                                                                      \
        if ( unlikely((level) & opt_hvm_debug_level) )                        \
            printk("[HVM:%d.%d] <%s> " _f "\n",                               \
                   current->domain->domain_id, current->vcpu_id, __func__,    \
                   ## _a);                                                    \
    } while (0)
#else
#define HVM_DBG_LOG(level, _f, _a...)
#endif

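/*
 * For illustration only: callers pass one of the DBG_LEVEL_* bits; the
 * message is printed only when that bit is set in opt_hvm_debug_level,
 * and the "[HVM:domain.vcpu] <function>" prefix is added automatically.
 * A hypothetical port-I/O trace:
 */
static inline void example_trace_io(unsigned int port, uint32_t data)
{
    HVM_DBG_LOG(DBG_LEVEL_IO, "port 0x%04x, data 0x%08x", port, data);
}
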
extern char hvm_io_bitmap[];

void hvm_enable(struct hvm_function_table *);

enum hvm_copy_result {
    HVMCOPY_okay = 0,
    HVMCOPY_bad_gva_to_gfn,
    HVMCOPY_bad_gfn_to_mfn
};

/*
 * Copy to/from a guest physical address.
 * Returns HVMCOPY_okay, else HVMCOPY_bad_gfn_to_mfn if the given physical
 * address range does not map entirely onto ordinary machine memory.
 */
enum hvm_copy_result hvm_copy_to_guest_phys(
    paddr_t paddr, void *buf, int size);
enum hvm_copy_result hvm_copy_from_guest_phys(
    void *buf, paddr_t paddr, int size);

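/*
 * For illustration only: a typical caller treats anything other than
 * HVMCOPY_okay as "this physical range is not ordinary RAM" (e.g. an
 * MMIO hole). A hypothetical wrapper:
 */
static inline int example_write_guest_phys(paddr_t gpa, void *buf, int size)
{
    return ( hvm_copy_to_guest_phys(gpa, buf, size) == HVMCOPY_okay )
           ? 0 : -1;
}
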
/*
 * Copy to/from a guest virtual address.
 * Returns:
 *  HVMCOPY_okay: Copy was entirely successful.
 *  HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
 *                          ordinary machine memory.
 *  HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
 *                          mapping to a guest physical address. In this case
 *                          a page fault exception is automatically queued
 *                          for injection into the current HVM VCPU.
 */
enum hvm_copy_result hvm_copy_to_guest_virt(
    unsigned long vaddr, void *buf, int size);
enum hvm_copy_result hvm_copy_from_guest_virt(
    void *buf, unsigned long vaddr, int size);
enum hvm_copy_result hvm_fetch_from_guest_virt(
    void *buf, unsigned long vaddr, int size);

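/*
 * For illustration only: the three-way result lets a caller separate
 * "a #PF has been queued; let the guest handle it" from "the physical
 * address is not ordinary RAM". A hypothetical instruction fetch:
 */
static inline int example_fetch_insn(void *buf, unsigned long eip, int size)
{
    switch ( hvm_fetch_from_guest_virt(buf, eip, size) )
    {
    case HVMCOPY_okay:
        return 0;  /* buffer filled */
    case HVMCOPY_bad_gva_to_gfn:
        return -1; /* #PF already queued for injection; just bail */
    case HVMCOPY_bad_gfn_to_mfn:
    default:
        return -2; /* needs MMIO or other special handling */
    }
}
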
/*
 * As above (copy to/from a guest virtual address), but no fault is generated
 * when HVMCOPY_bad_gva_to_gfn is returned.
 */
enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
    unsigned long vaddr, void *buf, int size);
enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
    void *buf, unsigned long vaddr, int size);
enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
    void *buf, unsigned long vaddr, int size);

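/*
 * For illustration only: the _nofault variants suit paths where
 * injecting #PF into the guest would be wrong, such as speculatively
 * peeking at guest memory for debugging. A hypothetical peek:
 */
static inline int example_peek_guest_virt(void *buf, unsigned long gva, int size)
{
    /* On failure nothing is queued for injection into the guest. */
    return ( hvm_copy_from_guest_virt_nofault(buf, gva, size) == HVMCOPY_okay )
           ? 0 : -1;
}
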
void hvm_print_line(struct vcpu *v, const char c);
void hlt_timer_fn(void *data);

#define HVM_HCALL_completed  0 /* hypercall completed - no further action */
#define HVM_HCALL_preempted  1 /* hypercall preempted - re-execute VMCALL */
#define HVM_HCALL_invalidate 2 /* invalidate ioemu-dm memory cache        */
int hvm_do_hypercall(struct cpu_user_regs *pregs);

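/*
 * For illustration only: an exit handler is expected to act on the
 * return value roughly as follows. Both helpers below are hypothetical
 * stand-ins for the vendor-specific (VT-x/SVM) routines.
 */
void example_advance_guest_ip(struct cpu_user_regs *regs);
void example_send_invalidate_req(void);

static inline void example_handle_vmcall(struct cpu_user_regs *regs)
{
    int rc = hvm_do_hypercall(regs);

    if ( rc != HVM_HCALL_preempted )
    {
        example_advance_guest_ip(regs); /* step past VMCALL/VMMCALL */
        if ( rc == HVM_HCALL_invalidate )
            example_send_invalidate_req(); /* flush ioemu-dm mapcache */
    }
}
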
void hvm_hlt(unsigned long rflags);
void hvm_triple_fault(void);

int hvm_set_efer(uint64_t value);
int hvm_set_cr0(unsigned long value);
int hvm_set_cr3(unsigned long value);
int hvm_set_cr4(unsigned long value);

#endif /* __ASM_X86_HVM_SUPPORT_H__ */