ia64/xen-unstable

view xen/include/asm-x86/hvm/support.h @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use vcpu::max_vcpus instead.
The changeset of 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents f454f2cac170
children
line source
1 /*
2 * support.h: HVM support routines used by VT-x and SVM.
3 *
4 * Leendert van Doorn, leendert@watson.ibm.com
5 * Copyright (c) 2005, International Business Machines Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 */
21 #ifndef __ASM_X86_HVM_SUPPORT_H__
22 #define __ASM_X86_HVM_SUPPORT_H__
24 #include <xen/sched.h>
25 #include <xen/hvm/save.h>
26 #include <asm/types.h>
27 #include <asm/regs.h>
28 #include <asm/processor.h>
/*
 * get_ioreq(): return the per-vcpu I/O request slot for @v inside the
 * domain's shared I/O request page (d->arch.hvm_domain.ioreq.va).
 *
 * Locking contract (enforced by the first ASSERT): the caller must either
 * be @v itself (v == current) or hold d->arch.hvm_domain.ioreq.lock.
 * The page must already be mapped (second ASSERT: va != NULL).
 */
30 static inline vcpu_iodata_t *get_ioreq(struct vcpu *v)
31 {
32 struct domain *d = v->domain;
33 shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
34 ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
35 ASSERT(d->arch.hvm_domain.ioreq.va != NULL);
36 return &p->vcpu_iodata[v->vcpu_id];
37 }
/*
 * NOTE(review): -1 presumably marks an injected event that carries no
 * hardware error code -- confirm against the hvm_inject_* callers.
 */
39 #define HVM_DELIVER_NO_ERROR_CODE -1
41 #ifndef NDEBUG
/*
 * Debug-log categories: each DBG_LEVEL_* is one bit of the mask that
 * HVM_DBG_LOG() (below) tests against opt_hvm_debug_level.
 */
42 #define DBG_LEVEL_0 (1 << 0)
43 #define DBG_LEVEL_1 (1 << 1)
44 #define DBG_LEVEL_2 (1 << 2)
45 #define DBG_LEVEL_3 (1 << 3)
46 #define DBG_LEVEL_IO (1 << 4)
47 #define DBG_LEVEL_VMMU (1 << 5)
48 #define DBG_LEVEL_VLAPIC (1 << 6)
49 #define DBG_LEVEL_VLAPIC_TIMER (1 << 7)
50 #define DBG_LEVEL_VLAPIC_INTERRUPT (1 << 8)
51 #define DBG_LEVEL_IOAPIC (1 << 9)
52 #define DBG_LEVEL_HCALL (1 << 10)
53 #define DBG_LEVEL_MSR (1 << 11)
/* Runtime mask of enabled DBG_LEVEL_* bits (defined elsewhere). */
55 extern unsigned int opt_hvm_debug_level;
/*
 * HVM_DBG_LOG(level, fmt, args...): printk tagged with the current
 * domain id, vcpu id and calling function -- emitted only when a bit in
 * @level is also set in opt_hvm_debug_level.  Compiles to nothing when
 * NDEBUG is defined (see #else branch).
 */
56 #define HVM_DBG_LOG(level, _f, _a...) \
57 do { \
58 if ( unlikely((level) & opt_hvm_debug_level) ) \
59 printk("[HVM:%d.%d] <%s> " _f "\n", \
60 current->domain->domain_id, current->vcpu_id, __func__, \
61 ## _a); \
62 } while (0)
63 #else
64 #define HVM_DBG_LOG(level, _f, _a...)
65 #endif
/* NOTE(review): presumably the VMX/SVM I/O-port intercept bitmap -- confirm. */
67 extern unsigned long hvm_io_bitmap[];
/* Register the active vendor (VT-x or SVM) function table. */
69 void hvm_enable(struct hvm_function_table *);
/*
 * Result codes for the hvm_copy_*/hvm_fetch_* guest-memory accessors
 * declared below; see the comments on those prototypes for the meaning
 * of each failure case.
 */
71 enum hvm_copy_result {
72 HVMCOPY_okay = 0,
73 HVMCOPY_bad_gva_to_gfn,
74 HVMCOPY_bad_gfn_to_mfn
75 };
77 /*
78 * Copy to/from a guest physical address.
79 * Returns HVMCOPY_okay, else HVMCOPY_bad_gfn_to_mfn if the given physical
80 * address range does not map entirely onto ordinary machine memory.
81 */
82 enum hvm_copy_result hvm_copy_to_guest_phys(
83 paddr_t paddr, void *buf, int size);
84 enum hvm_copy_result hvm_copy_from_guest_phys(
85 void *buf, paddr_t paddr, int size);
87 /*
88 * Copy to/from a guest virtual address. @pfec should include PFEC_user_mode
89 * if emulating a user-mode access (CPL=3). All other flags in @pfec are
90 * managed by the called function: it is therefore optional for the caller
91 * to set them.
92 *
93 * Returns:
94 * HVMCOPY_okay: Copy was entirely successful.
95 * HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
96 * ordinary machine memory.
97 * HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
98 * mapping to a guest physical address. In this case
99 * a page fault exception is automatically queued
100 * for injection into the current HVM VCPU.
101 */
102 enum hvm_copy_result hvm_copy_to_guest_virt(
103 unsigned long vaddr, void *buf, int size, uint32_t pfec);
104 enum hvm_copy_result hvm_copy_from_guest_virt(
105 void *buf, unsigned long vaddr, int size, uint32_t pfec);
/* NOTE(review): "fetch" presumably performs an instruction-fetch style
 * access (insn_fetch pfec) rather than a data read -- confirm in hvm.c. */
106 enum hvm_copy_result hvm_fetch_from_guest_virt(
107 void *buf, unsigned long vaddr, int size, uint32_t pfec);
109 /*
110 * As above (copy to/from a guest virtual address), but no fault is generated
111 * when HVMCOPY_bad_gva_to_gfn is returned.
112 */
113 enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
114 unsigned long vaddr, void *buf, int size, uint32_t pfec);
115 enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
116 void *buf, unsigned long vaddr, int size, uint32_t pfec);
117 enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
118 void *buf, unsigned long vaddr, int size, uint32_t pfec);
/* Return codes for hvm_do_hypercall() below. */
120 #define HVM_HCALL_completed 0 /* hypercall completed - no further action */
121 #define HVM_HCALL_preempted 1 /* hypercall preempted - re-execute VMCALL */
122 #define HVM_HCALL_invalidate 2 /* invalidate ioemu-dm memory cache */
/* Dispatch a guest hypercall; returns one of the HVM_HCALL_* values above. */
123 int hvm_do_hypercall(struct cpu_user_regs *pregs);
125 void hvm_hlt(unsigned long rflags);
126 void hvm_triple_fault(void);
/* NOTE(review): opt_softtsc presumably enables software TSC emulation via
 * hvm_rdtsc_intercept() -- confirm where it is defined/consumed. */
128 extern int opt_softtsc;
129 void hvm_rdtsc_intercept(struct cpu_user_regs *regs);
131 /* These functions all return X86EMUL return codes. */
132 int hvm_set_efer(uint64_t value);
133 int hvm_set_cr0(unsigned long value);
134 int hvm_set_cr3(unsigned long value);
135 int hvm_set_cr4(unsigned long value);
136 int hvm_msr_read_intercept(struct cpu_user_regs *regs);
137 int hvm_msr_write_intercept(struct cpu_user_regs *regs);
139 #endif /* __ASM_X86_HVM_SUPPORT_H__ */