ia64/xen-unstable

xen/arch/x86/x86_64/asm-offsets.c @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use domain::max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900
parents 2941b1a97c60
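
The conversion the message describes follows the usual shape; a minimal
sketch of the before/after pattern, assuming a walk over a domain's vcpus
(the loop and do_something() are illustrative, not taken from the actual
ia64 diff):

    struct domain *d;       /* some domain */
    unsigned int i;

    /* Before: bounded by the compile-time cap. */
    for ( i = 0; i < MAX_VCPUS; i++ )
        if ( d->vcpu[i] != NULL )
            do_something(d->vcpu[i]);

    /* After: bounded by what was actually allocated for this domain
     * (d->vcpu[] is d->max_vcpus entries long since 2f9e1348aa98). */
    for ( i = 0; i < d->max_vcpus; i++ )
        if ( d->vcpu[i] != NULL )
            do_something(d->vcpu[i]);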
line source
/*
 * Generate definitions needed by assembly language modules.
 * This code generates raw asm output which is post-processed
 * to extract and format the required data.
 */

#include <xen/config.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#ifdef CONFIG_COMPAT
#include <compat/xen.h>
#endif
#include <asm/fixmap.h>
#include <asm/hardirq.h>

#define DEFINE(_sym, _val) \
    __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
#define BLANK() \
    __asm__ __volatile__ ( "\n->" : : )
#define OFFSET(_sym, _str, _mem) \
    DEFINE(_sym, offsetof(_str, _mem));
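
/*
 * How the trick works: each DEFINE() forces the compiler to emit a line
 * such as
 *
 *     ->UREGS_r15 $0 offsetof(struct cpu_user_regs, r15)
 *
 * into the generated assembly ("->" is only a marker and is never
 * assembled; %0 is rendered as the immediate value of the expression --
 * $0 here is illustrative).  The build then post-processes asm-offsets.s
 * into a header of plain #define lines that assembly sources can
 * include.  The exact extraction rule lives in the Makefile, not here.
 */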
/* base-2 logarithm */
#define __L2(_x)  (((_x) & 0x00000002) ?  1 : 0)
#define __L4(_x)  (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x))
#define __L8(_x)  (((_x) & 0x000000f0) ? ( 4 + __L4( (_x)>> 4)) : __L4( _x))
#define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x))
#define LOG_2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x))
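
/*
 * LOG_2() evaluates to a compile-time constant, as DEFINE() requires.
 * Worked example, LOG_2(64): 64 & 0xffff0000 == 0, so fall through to
 * __L16(64); 64 & 0x0000ff00 == 0 -> __L8(64); 64 & 0x000000f0 != 0
 * -> 4 + __L4(4); 4 & 0xc != 0 -> 2 + __L2(1); 1 & 2 == 0 -> 0.
 * Total: 4 + 2 + 0 = 6.
 */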

void __dummy__(void)
{
    OFFSET(UREGS_r15, struct cpu_user_regs, r15);
    OFFSET(UREGS_r14, struct cpu_user_regs, r14);
    OFFSET(UREGS_r13, struct cpu_user_regs, r13);
    OFFSET(UREGS_r12, struct cpu_user_regs, r12);
    OFFSET(UREGS_rbp, struct cpu_user_regs, rbp);
    OFFSET(UREGS_rbx, struct cpu_user_regs, rbx);
    OFFSET(UREGS_r11, struct cpu_user_regs, r11);
    OFFSET(UREGS_r10, struct cpu_user_regs, r10);
    OFFSET(UREGS_r9, struct cpu_user_regs, r9);
    OFFSET(UREGS_r8, struct cpu_user_regs, r8);
    OFFSET(UREGS_rax, struct cpu_user_regs, rax);
    OFFSET(UREGS_rcx, struct cpu_user_regs, rcx);
    OFFSET(UREGS_rdx, struct cpu_user_regs, rdx);
    OFFSET(UREGS_rsi, struct cpu_user_regs, rsi);
    OFFSET(UREGS_rdi, struct cpu_user_regs, rdi);
    OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
    OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
    OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
    OFFSET(UREGS_rip, struct cpu_user_regs, rip);
    OFFSET(UREGS_cs, struct cpu_user_regs, cs);
    OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
    OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
    OFFSET(UREGS_ss, struct cpu_user_regs, ss);
    OFFSET(UREGS_ds, struct cpu_user_regs, ds);
    OFFSET(UREGS_es, struct cpu_user_regs, es);
    OFFSET(UREGS_fs, struct cpu_user_regs, fs);
    OFFSET(UREGS_gs, struct cpu_user_regs, gs);
    OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
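    /*
     * Deliberate: UREGS_kernel_sizeof is the offset of 'es', not
     * sizeof(struct cpu_user_regs).  In the x86_64 layout the selector
     * fields from 'es' onwards are not saved on kernel entry, so the
     * kernel stack frame ends where 'es' begins.
     */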
    DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
    BLANK();

    OFFSET(irq_caps_offset, struct domain, irq_caps);
    OFFSET(next_in_list_offset, struct domain, next_in_list);
    OFFSET(VCPU_processor, struct vcpu, processor);
    OFFSET(VCPU_domain, struct vcpu, domain);
    OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
    OFFSET(VCPU_trap_bounce, struct vcpu, arch.trap_bounce);
    OFFSET(VCPU_int80_bounce, struct vcpu, arch.int80_bounce);
    OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
    OFFSET(VCPU_event_addr, struct vcpu,
           arch.guest_context.event_callback_eip);
    OFFSET(VCPU_event_sel, struct vcpu,
           arch.guest_context.event_callback_cs);
    OFFSET(VCPU_failsafe_addr, struct vcpu,
           arch.guest_context.failsafe_callback_eip);
    OFFSET(VCPU_failsafe_sel, struct vcpu,
           arch.guest_context.failsafe_callback_cs);
    OFFSET(VCPU_syscall_addr, struct vcpu,
           arch.guest_context.syscall_callback_eip);
    OFFSET(VCPU_syscall32_addr, struct vcpu, arch.syscall32_callback_eip);
    OFFSET(VCPU_syscall32_sel, struct vcpu, arch.syscall32_callback_cs);
    OFFSET(VCPU_syscall32_disables_events, struct vcpu,
           arch.syscall32_disables_events);
    OFFSET(VCPU_sysenter_addr, struct vcpu, arch.sysenter_callback_eip);
    OFFSET(VCPU_sysenter_sel, struct vcpu, arch.sysenter_callback_cs);
    OFFSET(VCPU_sysenter_disables_events, struct vcpu,
           arch.sysenter_disables_events);
    OFFSET(VCPU_gp_fault_addr, struct vcpu,
           arch.guest_context.trap_ctxt[TRAP_gp_fault].address);
    OFFSET(VCPU_gp_fault_sel, struct vcpu,
           arch.guest_context.trap_ctxt[TRAP_gp_fault].cs);
    OFFSET(VCPU_kernel_sp, struct vcpu, arch.guest_context.kernel_sp);
    OFFSET(VCPU_kernel_ss, struct vcpu, arch.guest_context.kernel_ss);
    OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
    OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
    OFFSET(VCPU_mce_pending, struct vcpu, mce_pending);
    OFFSET(VCPU_old_trap_priority, struct vcpu, old_trap_priority);
    OFFSET(VCPU_trap_priority, struct vcpu, trap_priority);
    DEFINE(VCPU_TRAP_NMI, VCPU_TRAP_NMI);
    DEFINE(VCPU_TRAP_MCE, VCPU_TRAP_MCE);
    DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
    DEFINE(_VGCF_syscall_disables_events, _VGCF_syscall_disables_events);
    BLANK();

    OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
    OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
    OFFSET(VCPU_svm_vmcb_in_sync, struct vcpu, arch.hvm_svm.vmcb_in_sync);
    BLANK();

    OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
    OFFSET(VCPU_vmx_realmode, struct vcpu, arch.hvm_vmx.vmx_realmode);
    OFFSET(VCPU_vmx_emulate, struct vcpu, arch.hvm_vmx.vmx_emulate);
    OFFSET(VCPU_vm86_seg_mask, struct vcpu, arch.hvm_vmx.vm86_segment_mask);
    OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
    BLANK();

    OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
    BLANK();

    OFFSET(VMCB_rax, struct vmcb_struct, rax);
    OFFSET(VMCB_rip, struct vmcb_struct, rip);
    OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
    OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
    BLANK();

    OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
    OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
    BLANK();

#ifdef CONFIG_COMPAT
    OFFSET(COMPAT_VCPUINFO_upcall_pending, struct compat_vcpu_info,
           evtchn_upcall_pending);
    OFFSET(COMPAT_VCPUINFO_upcall_mask, struct compat_vcpu_info,
           evtchn_upcall_mask);
    BLANK();
#endif

    OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
    DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
    BLANK();

    OFFSET(TRAPBOUNCE_error_code, struct trap_bounce, error_code);
    OFFSET(TRAPBOUNCE_flags, struct trap_bounce, flags);
    OFFSET(TRAPBOUNCE_cs, struct trap_bounce, cs);
    OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
    BLANK();

#if PERF_COUNTERS
    DEFINE(PERFC_hypercalls, PERFC_hypercalls);
    DEFINE(PERFC_exceptions, PERFC_exceptions);
    BLANK();
#endif

    DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
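    /*
     * IRQSTAT_shift lets assembly address irq_stat[cpu] with a shift
     * rather than a multiply: base + (cpu << IRQSTAT_shift).  This
     * relies on sizeof(irq_cpustat_t) being padded to a power of two;
     * LOG_2() itself only computes the floor of the base-2 logarithm.
     */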
    BLANK();

    OFFSET(CPUINFO_ext_features, struct cpuinfo_x86, x86_capability[1]);
}
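
For reference, the post-processed header derived from this file is a flat
list of #define lines; a hypothetical excerpt (the numeric values below
are illustrative and depend on the struct layouts of the actual build):

    #define UREGS_r15 0             /* offsetof(struct cpu_user_regs, r15) */
    #define UREGS_rip 128           /* offsetof(struct cpu_user_regs, rip) */
    #define UREGS_kernel_sizeof 168 /* offsetof(struct cpu_user_regs, es)  */
    #define UREGS_user_sizeof 200   /* sizeof(struct cpu_user_regs)        */

Assembly sources include this header and use the constants as plain
displacements, so they never have to hard-code the C structure layout.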