ia64/xen-unstable

annotate xen/arch/x86/x86_64/asm-offsets.c @ 16468:9f61a0add5b6

x86_emulate: Emulate CPUID and HLT.
vmx realmode: Fix decode & emulate loop, add hooks for CPUID, HLT and
WBINVD. Also do not hook realmode entry off of vmentry failure any
more.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Nov 26 15:32:54 2007 +0000 (2007-11-26)
parents a7f8ff1ca311
children 2324110ef2c6
rev   line source
/*
 * Generate definitions needed by assembly language modules.
 * This code generates raw asm output which is post-processed
 * to extract and format the required data.
 */
kaf24@3276 6
kaf24@3958 7 #include <xen/config.h>
kaf24@4196 8 #include <xen/perfc.h>
kaf24@3276 9 #include <xen/sched.h>
ack@13292 10 #ifdef CONFIG_COMPAT
ack@13292 11 #include <compat/xen.h>
ack@13292 12 #endif
cl349@5287 13 #include <asm/fixmap.h>
cl349@5285 14 #include <asm/hardirq.h>
kaf24@3276 15
/*
 * DEFINE() emits a "->SYM value" marker line into the raw assembly
 * output; the build post-processes those markers into asm-offsets
 * definitions (see the file header comment). The "i" constraint
 * requires _val to be a compile-time integer constant expression,
 * and _val is evaluated only once.
 */
#define DEFINE(_sym, _val) \
    __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )

/* BLANK() emits a bare "->" marker: a blank line in the generated output. */
#define BLANK() \
    __asm__ __volatile__ ( "\n->" : : )

/* OFFSET() defines _sym as the byte offset of member _mem within type _str. */
#define OFFSET(_sym, _str, _mem) \
    DEFINE(_sym, offsetof(_str, _mem));
kaf24@3276 22
/*
 * LOG_2(_x): floor(base-2 logarithm) of a 32-bit value, as a
 * compile-time constant expression (nested conditionals only), so the
 * result is usable in DEFINE()'s "i" asm constraint.
 * LOG_2(0) evaluates to 0, the same as LOG_2(1).
 * Helpers renamed from __L2/__L4/__L8/__L16: identifiers beginning with
 * a double underscore are reserved for the implementation (C11 7.1.3).
 */
#define LOG2_2(_x)  (((_x) & 0x00000002) ? 1 : 0)
#define LOG2_4(_x)  (((_x) & 0x0000000c) ? ( 2 + LOG2_2((_x)>> 2)) : LOG2_2(_x))
#define LOG2_8(_x)  (((_x) & 0x000000f0) ? ( 4 + LOG2_4((_x)>> 4)) : LOG2_4(_x))
#define LOG2_16(_x) (((_x) & 0x0000ff00) ? ( 8 + LOG2_8((_x)>> 8)) : LOG2_8(_x))
#define LOG_2(_x)   (((_x) & 0xffff0000) ? (16 + LOG2_16((_x)>>16)) : LOG2_16(_x))
kaf24@4593 29
kaf24@3276 30 void __dummy__(void)
kaf24@3276 31 {
kaf24@4683 32 OFFSET(UREGS_r15, struct cpu_user_regs, r15);
kaf24@4683 33 OFFSET(UREGS_r14, struct cpu_user_regs, r14);
kaf24@4683 34 OFFSET(UREGS_r13, struct cpu_user_regs, r13);
kaf24@4683 35 OFFSET(UREGS_r12, struct cpu_user_regs, r12);
kaf24@4683 36 OFFSET(UREGS_rbp, struct cpu_user_regs, rbp);
kaf24@4683 37 OFFSET(UREGS_rbx, struct cpu_user_regs, rbx);
kaf24@4683 38 OFFSET(UREGS_r11, struct cpu_user_regs, r11);
kaf24@4683 39 OFFSET(UREGS_r10, struct cpu_user_regs, r10);
kaf24@4683 40 OFFSET(UREGS_r9, struct cpu_user_regs, r9);
kaf24@4683 41 OFFSET(UREGS_r8, struct cpu_user_regs, r8);
kaf24@4683 42 OFFSET(UREGS_rax, struct cpu_user_regs, rax);
kaf24@4683 43 OFFSET(UREGS_rcx, struct cpu_user_regs, rcx);
kaf24@4683 44 OFFSET(UREGS_rdx, struct cpu_user_regs, rdx);
kaf24@4683 45 OFFSET(UREGS_rsi, struct cpu_user_regs, rsi);
kaf24@4683 46 OFFSET(UREGS_rdi, struct cpu_user_regs, rdi);
kaf24@4683 47 OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
kaf24@4683 48 OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
kaf24@4683 49 OFFSET(UREGS_rip, struct cpu_user_regs, rip);
kaf24@4683 50 OFFSET(UREGS_cs, struct cpu_user_regs, cs);
kaf24@4683 51 OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
kaf24@4683 52 OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
kaf24@4683 53 OFFSET(UREGS_ss, struct cpu_user_regs, ss);
kaf24@4683 54 OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
kaf24@4683 55 DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
kaf24@3276 56 BLANK();
kaf24@3276 57
kaf24@5289 58 OFFSET(VCPU_processor, struct vcpu, processor);
ack@13289 59 OFFSET(VCPU_domain, struct vcpu, domain);
kaf24@5289 60 OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
kaf24@5289 61 OFFSET(VCPU_trap_bounce, struct vcpu, arch.trap_bounce);
ian@14476 62 OFFSET(VCPU_int80_bounce, struct vcpu, arch.int80_bounce);
kaf24@5289 63 OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
kaf24@5289 64 OFFSET(VCPU_event_addr, struct vcpu,
kaf24@4689 65 arch.guest_context.event_callback_eip);
ack@13288 66 OFFSET(VCPU_event_sel, struct vcpu,
ack@13288 67 arch.guest_context.event_callback_cs);
kaf24@5289 68 OFFSET(VCPU_failsafe_addr, struct vcpu,
kaf24@4689 69 arch.guest_context.failsafe_callback_eip);
ack@13288 70 OFFSET(VCPU_failsafe_sel, struct vcpu,
ack@13288 71 arch.guest_context.failsafe_callback_cs);
kaf24@5289 72 OFFSET(VCPU_syscall_addr, struct vcpu,
kaf24@4689 73 arch.guest_context.syscall_callback_eip);
keir@16207 74 OFFSET(VCPU_syscall32_addr, struct vcpu, arch.syscall32_callback_eip);
keir@16207 75 OFFSET(VCPU_syscall32_sel, struct vcpu, arch.syscall32_callback_cs);
keir@16207 76 OFFSET(VCPU_syscall32_disables_events, struct vcpu,
keir@16207 77 arch.syscall32_disables_events);
keir@16207 78 OFFSET(VCPU_sysenter_addr, struct vcpu, arch.sysenter_callback_eip);
keir@16207 79 OFFSET(VCPU_sysenter_sel, struct vcpu, arch.sysenter_callback_cs);
keir@16207 80 OFFSET(VCPU_sysenter_disables_events, struct vcpu,
keir@16207 81 arch.sysenter_disables_events);
keir@16207 82 OFFSET(VCPU_gp_fault_addr, struct vcpu,
keir@16207 83 arch.guest_context.trap_ctxt[TRAP_gp_fault].address);
keir@16207 84 OFFSET(VCPU_gp_fault_sel, struct vcpu,
keir@16207 85 arch.guest_context.trap_ctxt[TRAP_gp_fault].cs);
ack@13288 86 OFFSET(VCPU_kernel_sp, struct vcpu, arch.guest_context.kernel_sp);
ack@13288 87 OFFSET(VCPU_kernel_ss, struct vcpu, arch.guest_context.kernel_ss);
kaf24@10305 88 OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
kfraser@14661 89 OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
kfraser@14661 90 OFFSET(VCPU_nmi_masked, struct vcpu, nmi_masked);
kaf24@10305 91 DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
kaf24@10305 92 DEFINE(_VGCF_syscall_disables_events, _VGCF_syscall_disables_events);
kaf24@3276 93 BLANK();
kaf24@3276 94
kaf24@8708 95 OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
kaf24@8708 96 OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
keir@15090 97 OFFSET(VCPU_svm_vmcb_in_sync, struct vcpu, arch.hvm_svm.vmcb_in_sync);
kaf24@8708 98 BLANK();
kaf24@8708 99
kaf24@10356 100 OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
keir@16468 101 OFFSET(VCPU_hvm_guest_cr0, struct vcpu, arch.hvm_vcpu.guest_cr[0]);
kfraser@15727 102 OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
kaf24@10356 103 BLANK();
kaf24@10356 104
kfraser@14974 105 OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
ack@13289 106 BLANK();
ack@13289 107
kaf24@8708 108 OFFSET(VMCB_rax, struct vmcb_struct, rax);
kfraser@15934 109 OFFSET(VMCB_rip, struct vmcb_struct, rip);
kfraser@15934 110 OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
kfraser@15934 111 OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
kaf24@8708 112 BLANK();
kaf24@8708 113
ack@13292 114 OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
ack@13292 115 OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
kaf24@3276 116 BLANK();
kaf24@3276 117
ack@13292 118 #ifdef CONFIG_COMPAT
ack@13292 119 OFFSET(COMPAT_VCPUINFO_upcall_pending, struct compat_vcpu_info, evtchn_upcall_pending);
ack@13292 120 OFFSET(COMPAT_VCPUINFO_upcall_mask, struct compat_vcpu_info, evtchn_upcall_mask);
ack@13292 121 BLANK();
ack@13292 122 #endif
ack@13292 123
ack@13289 124 OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
kaf24@6470 125 DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
kaf24@6470 126 BLANK();
kaf24@6470 127
kaf24@3276 128 OFFSET(TRAPBOUNCE_error_code, struct trap_bounce, error_code);
kaf24@3276 129 OFFSET(TRAPBOUNCE_flags, struct trap_bounce, flags);
kaf24@3276 130 OFFSET(TRAPBOUNCE_cs, struct trap_bounce, cs);
kaf24@3276 131 OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
kaf24@3276 132 BLANK();
kaf24@3276 133
kaf24@3958 134 #if PERF_COUNTERS
kfraser@14594 135 DEFINE(PERFC_hypercalls, PERFC_hypercalls);
kfraser@14594 136 DEFINE(PERFC_exceptions, PERFC_exceptions);
ack@13301 137 BLANK();
ack@13301 138 #endif
ack@13301 139
kaf24@4593 140 DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
keir@16378 141 BLANK();
keir@16378 142
keir@16378 143 OFFSET(CPUINFO_ext_features, struct cpuinfo_x86, x86_capability[1]);
kaf24@3276 144 }