ia64/xen-unstable

xen/arch/x86/x86_32/asm-offsets.c @ 11756:eee52c100996

[XEN] Remove unused MULTICALL_arg6, and unnecessary use of STR() macro.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
author kfraser@localhost.localdomain
date Thu Oct 05 16:36:39 2006 +0100 (2006-10-05)
parents d20e1835c24b
children 1c506820c468
/*
 * Generate definitions needed by assembly language modules.
 * This code generates raw asm output which is post-processed
 * to extract and format the required data.
 */

#include <xen/config.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <asm/fixmap.h>
#include <asm/hardirq.h>

#define DEFINE(_sym, _val) \
    __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
#define BLANK() \
    __asm__ __volatile__ ( "\n->" : : )
#define OFFSET(_sym, _str, _mem) \
    DEFINE(_sym, offsetof(_str, _mem));
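
/*
 * Added note (illustrative, not part of the original file): each DEFINE()
 * expands to an asm() whose output contains a marker line such as
 *
 *     ->UREGS_eax $24 offsetof(struct cpu_user_regs, eax)
 *
 * (the value 24 is an example only).  The "i" constraint forces _val to be
 * emitted as an immediate, so the value is computed by the compiler without
 * ever running __dummy__().  The resulting .s file is never assembled; the
 * build system text-processes the "->" lines (typically with a sed rule)
 * into a generated asm-offsets.h of the form
 *
 *     #define UREGS_eax 24
 *
 * for use by assembly sources.
 */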

/* base-2 logarithm */
#define __L2(_x)  (((_x) & 0x00000002) ?  1 : 0)
#define __L4(_x)  (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x))
#define __L8(_x)  (((_x) & 0x000000f0) ? ( 4 + __L4( (_x)>> 4)) : __L4( _x))
#define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x))
#define LOG_2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x))
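
/*
 * Added note: LOG_2() folds to a constant at compile time; e.g.
 * LOG_2(0x40) = __L16(0x40) = __L8(0x40) = 4 + __L4(0x4)
 *             = 4 + 2 + __L2(0x1) = 4 + 2 + 0 = 6.
 * It is used below for IRQSTAT_shift, letting assembly code turn a CPU
 * number into a byte offset into irq_stat[] with a shift (exact only if
 * sizeof(irq_cpustat_t) is a power of two).
 */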

void __dummy__(void)
{
    OFFSET(UREGS_eax, struct cpu_user_regs, eax);
    OFFSET(UREGS_ebx, struct cpu_user_regs, ebx);
    OFFSET(UREGS_ecx, struct cpu_user_regs, ecx);
    OFFSET(UREGS_edx, struct cpu_user_regs, edx);
    OFFSET(UREGS_esi, struct cpu_user_regs, esi);
    OFFSET(UREGS_edi, struct cpu_user_regs, edi);
    OFFSET(UREGS_esp, struct cpu_user_regs, esp);
    OFFSET(UREGS_ebp, struct cpu_user_regs, ebp);
    OFFSET(UREGS_eip, struct cpu_user_regs, eip);
    OFFSET(UREGS_cs, struct cpu_user_regs, cs);
    OFFSET(UREGS_ds, struct cpu_user_regs, ds);
    OFFSET(UREGS_es, struct cpu_user_regs, es);
    OFFSET(UREGS_fs, struct cpu_user_regs, fs);
    OFFSET(UREGS_gs, struct cpu_user_regs, gs);
    OFFSET(UREGS_ss, struct cpu_user_regs, ss);
    OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
    OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
    OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
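    /*
     * Added note: UREGS_kernel_sizeof deliberately stops at the esp field.
     * For an exception or interrupt taken while already in Xen (no ring
     * transition) the CPU does not push ss/esp, so frames saved in
     * hypervisor context end here; UREGS_user_sizeof below is the size of
     * the full structure saved on entry from a guest.
     */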
    OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, esp);
    DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
    BLANK();

    OFFSET(VCPU_processor, struct vcpu, processor);
    OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
    OFFSET(VCPU_trap_bounce, struct vcpu, arch.trap_bounce);
    OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
    OFFSET(VCPU_event_sel, struct vcpu,
           arch.guest_context.event_callback_cs);
    OFFSET(VCPU_event_addr, struct vcpu,
           arch.guest_context.event_callback_eip);
    OFFSET(VCPU_failsafe_sel, struct vcpu,
           arch.guest_context.failsafe_callback_cs);
    OFFSET(VCPU_failsafe_addr, struct vcpu,
           arch.guest_context.failsafe_callback_eip);
    OFFSET(VCPU_kernel_ss, struct vcpu,
           arch.guest_context.kernel_ss);
    OFFSET(VCPU_kernel_sp, struct vcpu,
           arch.guest_context.kernel_sp);
    OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
    OFFSET(VCPU_arch_guest_fpu_ctxt, struct vcpu, arch.guest_context.fpu_ctxt);
    OFFSET(VCPU_flags, struct vcpu, vcpu_flags);
    OFFSET(VCPU_nmi_addr, struct vcpu, nmi_addr);
    DEFINE(_VCPUF_nmi_pending, _VCPUF_nmi_pending);
    DEFINE(_VCPUF_nmi_masked, _VCPUF_nmi_masked);
    DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
    BLANK();

    OFFSET(TSS_ss0, struct tss_struct, ss0);
    OFFSET(TSS_esp0, struct tss_struct, esp0);
    OFFSET(TSS_ss1, struct tss_struct, ss1);
    OFFSET(TSS_esp1, struct tss_struct, esp1);
    DEFINE(TSS_sizeof, sizeof(struct tss_struct));
    BLANK();

    OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
    OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
    OFFSET(VCPU_svm_vmexit_tsc, struct vcpu, arch.hvm_svm.vmexit_tsc);
    BLANK();

    OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
    BLANK();

    OFFSET(VMCB_rax, struct vmcb_struct, rax);
    OFFSET(VMCB_tsc_offset, struct vmcb_struct, tsc_offset);
    BLANK();

    OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
    OFFSET(VCPUINFO_upcall_mask, vcpu_info_t, evtchn_upcall_mask);
    BLANK();

    DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
    BLANK();

    OFFSET(TRAPBOUNCE_error_code, struct trap_bounce, error_code);
    OFFSET(TRAPBOUNCE_flags, struct trap_bounce, flags);
    OFFSET(TRAPBOUNCE_cs, struct trap_bounce, cs);
    OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
    BLANK();

#if PERF_COUNTERS
    OFFSET(PERFC_hypercalls, struct perfcounter, hypercalls);
    OFFSET(PERFC_exceptions, struct perfcounter, exceptions);
    BLANK();
#endif

    OFFSET(MULTICALL_op, struct multicall_entry, op);
    OFFSET(MULTICALL_arg0, struct multicall_entry, args[0]);
    OFFSET(MULTICALL_arg1, struct multicall_entry, args[1]);
    OFFSET(MULTICALL_arg2, struct multicall_entry, args[2]);
    OFFSET(MULTICALL_arg3, struct multicall_entry, args[3]);
    OFFSET(MULTICALL_arg4, struct multicall_entry, args[4]);
    OFFSET(MULTICALL_arg5, struct multicall_entry, args[5]);
    OFFSET(MULTICALL_result, struct multicall_entry, result);
    BLANK();

    DEFINE(FIXMAP_apic_base, fix_to_virt(FIX_APIC_BASE));
    BLANK();

    DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
}
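
/*
 * Usage sketch (added note, example only): the generated asm-offsets.h is
 * consumed by assembly sources such as x86_32/entry.S, e.g.
 *
 *     movl UREGS_eax(%esp),%eax
 *
 * to reach a field of the saved struct cpu_user_regs without hard-coding
 * its offset.
 */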