
view xen/arch/x86/x86_64/asm-offsets.c @ 14445:522a1cd17b6d

[XEN] Implement faster int 0x80 handling for compat mode guests.

Using the GPF handler to spot the software interrupt and pass it back
to the guest increases the base syscall time by a factor of 2.7
compared with 32on32 using a direct trap to ring 1 (0.3270->0.8680
microseconds, measured with lmbench lat_syscall).

Since the 64-bit IDT can only contain 64-bit segment selectors, we
cannot trap directly to compat mode ring 1. However, implementing a
dedicated 64-bit ring 0 trap handler lets us avoid much of the GPF
handler overhead, reducing the slowdown to a factor of 1.7
(0.3270->0.5497 microseconds).

Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
author Ian Campbell <ian.campbell@xensource.com>
date Tue Mar 20 14:33:15 2007 +0000 (2007-03-20)
parents c2a82e026497
children 96f167771979
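
For context on the commit message above: a 64-bit IDT gate can only name a
64-bit code segment selector, so the guest's 32-bit ring-1 handler cannot be
the gate target and a 64-bit ring 0 stub must take the trap first. The sketch
below shows the long mode gate layout and how a DPL-3 gate for vector 0x80
might be installed; the selector value and the stub wiring are illustrative
assumptions, not taken from this patch.

#include <stdint.h>

/* Layout of a long mode (64-bit) IDT gate descriptor. */
struct idt_gate64 {
    uint16_t offset_lo;   /* handler address bits 0..15 */
    uint16_t selector;    /* must name a 64-bit code segment */
    uint8_t  ist;         /* interrupt stack table index (0 = none) */
    uint8_t  type_attr;   /* present bit, DPL, gate type */
    uint16_t offset_mid;  /* handler address bits 16..31 */
    uint32_t offset_hi;   /* handler address bits 32..63 */
    uint32_t reserved;
};

/* Hypothetical helper: install a DPL-3 interrupt gate for vector 0x80 so
 * a guest's "int $0x80" traps straight to a 64-bit ring 0 stub instead of
 * raising #GP and going through the general protection fault handler. */
static void set_int80_gate(struct idt_gate64 *idt, void (*stub)(void))
{
    uintptr_t addr = (uintptr_t)stub;
    struct idt_gate64 *g = &idt[0x80];

    g->offset_lo  = (uint16_t)(addr & 0xffff);
    g->selector   = 0xe008;          /* placeholder: a 64-bit ring 0 CS */
    g->ist        = 0;
    g->type_attr  = 0x8e | (3 << 5); /* P=1, DPL=3, 64-bit interrupt gate */
    g->offset_mid = (uint16_t)((addr >> 16) & 0xffff);
    g->offset_hi  = (uint32_t)(addr >> 32);
    g->reserved   = 0;
}
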
/*
 * Generate definitions needed by assembly language modules.
 * This code generates raw asm output which is post-processed
 * to extract and format the required data.
 */
#include <xen/config.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#ifdef CONFIG_COMPAT
#include <compat/xen.h>
#endif
#include <asm/fixmap.h>
#include <asm/hardirq.h>
#define DEFINE(_sym, _val) \
    __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
#define BLANK() \
    __asm__ __volatile__ ( "\n->" : : )
#define OFFSET(_sym, _str, _mem) \
    DEFINE(_sym, offsetof(_str, _mem))
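
/*
 * How the "->" markers are consumed (the general asm-offsets technique;
 * the exact extraction script is build-specific and not shown here):
 * compiling this file with -S leaves lines such as
 *     ->UREGS_rip $128 offsetof(struct cpu_user_regs, rip)
 * in the assembly output, which the build rewrites into
 *     #define UREGS_rip 128
 * in a generated header included by the assembly entry code. The value
 * 128 is illustrative.
 */
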
/* base-2 logarithm */
#define __L2(_x)  (((_x) & 0x00000002) ?  1 : 0)
#define __L4(_x)  (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x))
#define __L8(_x)  (((_x) & 0x000000f0) ? ( 4 + __L4( (_x)>> 4)) : __L4( _x))
#define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x))
#define LOG_2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x))
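
/*
 * The chain evaluates to floor(log2(_x)) for a 32-bit constant, e.g.
 * LOG_2(1) == 0, LOG_2(256) == 8, LOG_2(4096) == 12. It is exact only
 * for powers of two, which IRQSTAT_shift at the bottom of this file
 * relies on when used as a shift count.
 */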

void __dummy__(void)
{
    OFFSET(UREGS_r15, struct cpu_user_regs, r15);
    OFFSET(UREGS_r14, struct cpu_user_regs, r14);
    OFFSET(UREGS_r13, struct cpu_user_regs, r13);
    OFFSET(UREGS_r12, struct cpu_user_regs, r12);
    OFFSET(UREGS_rbp, struct cpu_user_regs, rbp);
    OFFSET(UREGS_rbx, struct cpu_user_regs, rbx);
    OFFSET(UREGS_r11, struct cpu_user_regs, r11);
    OFFSET(UREGS_r10, struct cpu_user_regs, r10);
    OFFSET(UREGS_r9, struct cpu_user_regs, r9);
    OFFSET(UREGS_r8, struct cpu_user_regs, r8);
    OFFSET(UREGS_rax, struct cpu_user_regs, rax);
    OFFSET(UREGS_rcx, struct cpu_user_regs, rcx);
    OFFSET(UREGS_rdx, struct cpu_user_regs, rdx);
    OFFSET(UREGS_rsi, struct cpu_user_regs, rsi);
    OFFSET(UREGS_rdi, struct cpu_user_regs, rdi);
    OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
    OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
    OFFSET(UREGS_rip, struct cpu_user_regs, rip);
    OFFSET(UREGS_cs, struct cpu_user_regs, cs);
    OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
    OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
    OFFSET(UREGS_ss, struct cpu_user_regs, ss);
    /* es is the first field beyond the frame saved on kernel entry, so
     * its offset doubles as the size of that frame. */
    OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
    DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
    BLANK();

    OFFSET(VCPU_processor, struct vcpu, processor);
    OFFSET(VCPU_domain, struct vcpu, domain);
    OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
    OFFSET(VCPU_trap_bounce, struct vcpu, arch.trap_bounce);
    /* int80_bounce carries the guest's registered int 0x80 handler for
     * the direct trap path added by this changeset. */
    OFFSET(VCPU_int80_bounce, struct vcpu, arch.int80_bounce);
    OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
    OFFSET(VCPU_event_addr, struct vcpu,
           arch.guest_context.event_callback_eip);
    OFFSET(VCPU_event_sel, struct vcpu,
           arch.guest_context.event_callback_cs);
    OFFSET(VCPU_failsafe_addr, struct vcpu,
           arch.guest_context.failsafe_callback_eip);
    OFFSET(VCPU_failsafe_sel, struct vcpu,
           arch.guest_context.failsafe_callback_cs);
    OFFSET(VCPU_syscall_addr, struct vcpu,
           arch.guest_context.syscall_callback_eip);
    OFFSET(VCPU_kernel_sp, struct vcpu, arch.guest_context.kernel_sp);
    OFFSET(VCPU_kernel_ss, struct vcpu, arch.guest_context.kernel_ss);
    OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
    OFFSET(VCPU_arch_guest_fpu_ctxt, struct vcpu, arch.guest_context.fpu_ctxt);
    OFFSET(VCPU_flags, struct vcpu, vcpu_flags);
    OFFSET(VCPU_nmi_addr, struct vcpu, nmi_addr);
    DEFINE(_VCPUF_nmi_pending, _VCPUF_nmi_pending);
    DEFINE(_VCPUF_nmi_masked, _VCPUF_nmi_masked);
    DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
    DEFINE(_VGCF_syscall_disables_events, _VGCF_syscall_disables_events);
    BLANK();

    OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
    OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
    OFFSET(VCPU_svm_vmexit_tsc, struct vcpu, arch.hvm_svm.vmexit_tsc);
    BLANK();

    OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
    OFFSET(VCPU_vmx_cr2, struct vcpu, arch.hvm_vmx.cpu_cr2);
    BLANK();

    OFFSET(DOMAIN_domain_flags, struct domain, domain_flags);
    DEFINE(_DOMF_compat, _DOMF_compat);
    BLANK();

    OFFSET(VMCB_rax, struct vmcb_struct, rax);
    OFFSET(VMCB_tsc_offset, struct vmcb_struct, tsc_offset);
    BLANK();

    OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
    OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
    BLANK();

#ifdef CONFIG_COMPAT
    OFFSET(COMPAT_VCPUINFO_upcall_pending, struct compat_vcpu_info, evtchn_upcall_pending);
    OFFSET(COMPAT_VCPUINFO_upcall_mask, struct compat_vcpu_info, evtchn_upcall_mask);
    BLANK();
#endif

    OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
    DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
    BLANK();
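
    /* The TRAPBOUNCE_* offsets below are used by the assembly code that
     * builds the bounce frame on the guest kernel stack when a trap,
     * including the new int 0x80 path, is reflected back to the guest. */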
    OFFSET(TRAPBOUNCE_error_code, struct trap_bounce, error_code);
    OFFSET(TRAPBOUNCE_flags, struct trap_bounce, flags);
    OFFSET(TRAPBOUNCE_cs, struct trap_bounce, cs);
    OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
    BLANK();

#ifdef PERF_COUNTERS
    OFFSET(PERFC_hypercalls, struct perfcounter, hypercalls);
    OFFSET(PERFC_exceptions, struct perfcounter, exceptions);
    BLANK();
#endif

    OFFSET(MULTICALL_op, struct multicall_entry, op);
    OFFSET(MULTICALL_arg0, struct multicall_entry, args[0]);
    OFFSET(MULTICALL_arg1, struct multicall_entry, args[1]);
    OFFSET(MULTICALL_arg2, struct multicall_entry, args[2]);
    OFFSET(MULTICALL_arg3, struct multicall_entry, args[3]);
    OFFSET(MULTICALL_arg4, struct multicall_entry, args[4]);
    OFFSET(MULTICALL_arg5, struct multicall_entry, args[5]);
    OFFSET(MULTICALL_result, struct multicall_entry, result);
    BLANK();

#ifdef CONFIG_COMPAT
    OFFSET(COMPAT_MULTICALL_op, struct compat_multicall_entry, op);
    OFFSET(COMPAT_MULTICALL_arg0, struct compat_multicall_entry, args[0]);
    OFFSET(COMPAT_MULTICALL_arg1, struct compat_multicall_entry, args[1]);
    OFFSET(COMPAT_MULTICALL_arg2, struct compat_multicall_entry, args[2]);
    OFFSET(COMPAT_MULTICALL_arg3, struct compat_multicall_entry, args[3]);
    OFFSET(COMPAT_MULTICALL_arg4, struct compat_multicall_entry, args[4]);
    OFFSET(COMPAT_MULTICALL_arg5, struct compat_multicall_entry, args[5]);
    OFFSET(COMPAT_MULTICALL_result, struct compat_multicall_entry, result);
    BLANK();
#endif

    DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
}
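
To see the extraction mechanism in isolation, here is a self-contained sketch
(mine, not part of the file) that can be compiled on x86-64 to observe the
marker lines the build would extract:

/* demo.c -- compile with: gcc -S demo.c -o - | grep '^->' */
#include <stddef.h>

struct point { long x; long y; };

#define DEFINE(_sym, _val) \
    __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
#define OFFSET(_sym, _str, _mem) \
    DEFINE(_sym, offsetof(_str, _mem))

void __dummy__(void)
{
    /* emits: ->POINT_y $8 offsetof(struct point, y) */
    OFFSET(POINT_y, struct point, y);
    /* emits: ->POINT_sizeof $16 sizeof(struct point) */
    DEFINE(POINT_sizeof, sizeof(struct point));
}

A small script (sed or awk in typical builds) then rewrites each
"->SYM $N ..." line as "#define SYM N", which is what the UREGS_*, VCPU_*
and other constants above become for the assembly entry code.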