view xen/include/asm-x86/hypercall.h @ 15812:86a154e1ef5d

[HVM] Shadow: don't shadow the p2m table.
For HVM vcpus with paging disabled, we used to shadow the p2m table,
and skip the p2m lookup to go from gfn to mfn. Instead, we now
provide a simple pagetable that gives a one-to-one mapping of 4GB, and
shadow that, making the translations from gfn to mfn via the p2m.
This removes the paging-disabled special-case code from the shadow
fault handler, and allows us to expand the p2m interface, since all HVM
translations now go through the same p2m lookups.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author      Tim Deegan <Tim.Deegan@xensource.com>
date        Fri Aug 31 11:06:22 2007 +0100
parents     f4390e34ad12
children    ec10c9a2d976
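
To make the change described in the changeset message above concrete, here is a minimal, hypothetical C sketch (an editor's illustration, not Xen code): with paging disabled the guest now runs on a shadowed one-to-one map of the low 4GB, so the guest-linear frame number equals the gfn, and the mfn is still obtained through the per-domain p2m rather than assumed equal to it. All names and types below are invented for the example.

/* Hypothetical sketch only -- names and types are invented for illustration. */

#define PAGE_SHIFT_SKETCH   12
#define INVALID_MFN_SKETCH  (~0UL)

struct p2m_sketch {
    unsigned long *mfn_of_gfn;   /* per-domain gfn -> mfn table */
    unsigned long  max_gfn;
};

/* With paging disabled, the shadowed 1:1 pagetable makes the guest-linear
 * frame number equal to the gfn; the mfn still comes from the p2m lookup. */
static unsigned long translate_sketch(const struct p2m_sketch *p2m,
                                      unsigned long guest_linear)
{
    unsigned long gfn = guest_linear >> PAGE_SHIFT_SKETCH;

    if ( gfn > p2m->max_gfn )
        return INVALID_MFN_SKETCH;
    return p2m->mfn_of_gfn[gfn];
}

Because this lookup is now the same path used for every other HVM translation, the shadow fault handler no longer needs a paging-disabled special case, as the message notes.
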
line source
/******************************************************************************
 * asm-x86/hypercall.h
 */

#ifndef __ASM_X86_HYPERCALL_H__
#define __ASM_X86_HYPERCALL_H__

#include <public/physdev.h>
#include <xen/types.h>

/*
 * For both do_mmuext_op() and do_mmu_update(), we steal the m.s.b. of the
 * @count parameter to indicate whether this invocation is resuming a
 * previously preempted call.
 */
#define MMU_UPDATE_PREEMPTED (~(~0U>>1))
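
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * how the m.s.b. convention above might be applied to @count.  The helper
 * names are hypothetical; the handlers themselves test and mask the bit.
 */
static inline unsigned int mmu_update_mark_preempted(unsigned int count)
{
    return count | MMU_UPDATE_PREEMPTED;   /* flag a resumed invocation */
}

static inline unsigned int mmu_update_real_count(unsigned int count)
{
    return count & ~MMU_UPDATE_PREEMPTED;  /* strip the flag to recover count */
}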

/*
 * This gets set to a non-zero value whenever hypercall_create_continuation()
 * is used (outside of multicall context; in multicall context the second call
 * from do_multicall() itself will have this effect). Internal callers of
 * hypercall handlers interested in this condition must clear the flag prior
 * to invoking the respective handler(s).
 */
DECLARE_PER_CPU(char, hc_preempted);
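
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the clear-before/test-after protocol described above for an internal
 * caller of a hypercall handler.  The wrapper name is hypothetical;
 * this_cpu() is assumed to be the usual Xen per-CPU accessor.
 */
static inline long invoke_handler_sketch(long (*handler)(void), int *preempted)
{
    long rc;

    this_cpu(hc_preempted) = 0;                 /* clear before the call  */
    rc = handler();
    *preempted = (this_cpu(hc_preempted) != 0); /* continuation created?  */
    return rc;
}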

extern long
do_event_channel_op_compat(
    XEN_GUEST_HANDLE(evtchn_op_t) uop);

extern long
do_physdev_op_compat(
    XEN_GUEST_HANDLE(physdev_op_t) uop);

extern long
do_set_trap_table(
    XEN_GUEST_HANDLE(trap_info_t) traps);

extern int
do_mmu_update(
    XEN_GUEST_HANDLE(mmu_update_t) ureqs,
    unsigned int count,
    XEN_GUEST_HANDLE(uint) pdone,
    unsigned int foreigndom);

extern long
do_set_gdt(
    XEN_GUEST_HANDLE(ulong) frame_list,
    unsigned int entries);

extern long
do_stack_switch(
    unsigned long ss,
    unsigned long esp);

extern long
do_fpu_taskswitch(
    int set);

extern long
do_set_debugreg(
    int reg,
    unsigned long value);

extern unsigned long
do_get_debugreg(
    int reg);

extern long
do_update_descriptor(
    u64 pa,
    u64 desc);

extern int
do_update_va_mapping(
    unsigned long va,
    u64 val64,
    unsigned long flags);

extern long
do_physdev_op(
    int cmd, XEN_GUEST_HANDLE(void) arg);

extern int
do_update_va_mapping_otherdomain(
    unsigned long va,
    u64 val64,
    unsigned long flags,
    domid_t domid);

extern int
do_mmuext_op(
    XEN_GUEST_HANDLE(mmuext_op_t) uops,
    unsigned int count,
    XEN_GUEST_HANDLE(uint) pdone,
    unsigned int foreigndom);

extern unsigned long
do_iret(
    void);

struct vcpu;
extern long
arch_do_vcpu_op(
    int cmd, struct vcpu *v, XEN_GUEST_HANDLE(void) arg);

extern int
do_kexec(
    unsigned long op, unsigned arg1, XEN_GUEST_HANDLE(void) uarg);

#ifdef __x86_64__

extern long
do_set_callbacks(
    unsigned long event_address,
    unsigned long failsafe_address,
    unsigned long syscall_address);

extern long
do_set_segment_base(
    unsigned int which,
    unsigned long base);

#else

extern long
do_set_callbacks(
    unsigned long event_selector,
    unsigned long event_address,
    unsigned long failsafe_selector,
    unsigned long failsafe_address);

#endif

#ifdef CONFIG_COMPAT

extern int
compat_physdev_op(
    int cmd,
    XEN_GUEST_HANDLE(void) arg);

extern int
arch_compat_vcpu_op(
    int cmd, struct vcpu *v, XEN_GUEST_HANDLE(void) arg);

#endif

#endif /* __ASM_X86_HYPERCALL_H__ */