extern char hypercall_page[];
static struct rangeset *mem;
+DEFINE_PER_CPU(unsigned int, vcpu_id);
+
static void __init find_xen_leaves(void)
{
uint32_t eax, ebx, ecx, edx, base;
write_atomic(&XEN_shared_info->evtchn_mask[i], ~0ul);
}
+/*
+ * Record the current CPU's Xen vCPU id in the per-CPU vcpu_id variable.
+ *
+ * If the hypervisor advertises XEN_HVM_CPUID_VCPU_ID_PRESENT in the
+ * CPUID leaf at xen_cpuid_base + 4, the id reported in EBX is used;
+ * otherwise fall back to smp_processor_id().
+ * NOTE(review): the fallback assumes Xen vCPU ids match the local CPU
+ * numbering — confirm against the hypervisor ABI.
+ */
+static void set_vcpu_id(void)
+{
+    uint32_t eax, ebx, ecx, edx;
+
+    /* Must only run after the Xen CPUID base leaf has been discovered. */
+    ASSERT(xen_cpuid_base);
+
+    /* Fetch vcpu id from cpuid. */
+    cpuid(xen_cpuid_base + 4, &eax, &ebx, &ecx, &edx);
+    if ( eax & XEN_HVM_CPUID_VCPU_ID_PRESENT )
+        this_cpu(vcpu_id) = ebx;
+    else
+        this_cpu(vcpu_id) = smp_processor_id();
+}
+
static void __init init_memmap(void)
{
unsigned int i;
init_memmap();
map_shared_info();
+
+ set_vcpu_id();
+}
+
+/*
+ * Per-AP guest initialisation: currently only caches the Xen vCPU id
+ * for the booting secondary CPU. Called from start_secondary() when
+ * running as a Xen guest.
+ */
+void hypervisor_ap_setup(void)
+{
+    set_vcpu_id();
}
int hypervisor_alloc_unused_page(mfn_t *mfn)
#include <asm/desc.h>
#include <asm/div64.h>
#include <asm/flushtlb.h>
+#include <asm/guest.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
#include <asm/time.h>
cpumask_set_cpu(cpu, &cpu_online_map);
unlock_vector_lock();
+ if ( xen_guest )
+ hypervisor_ap_setup();
+
/* We can take interrupts now: we're officially "up". */
local_irq_enable();
mtrr_ap_init();
void probe_hypervisor(void);
void hypervisor_setup(void);
+void hypervisor_ap_setup(void);
int hypervisor_alloc_unused_page(mfn_t *mfn);
int hypervisor_free_unused_page(mfn_t mfn);
+DECLARE_PER_CPU(unsigned int, vcpu_id);
+
#else
#define xen_guest 0
{
ASSERT_UNREACHABLE();
}
+/*
+ * !CONFIG_XEN_GUEST stub: callers are gated on xen_guest (which is
+ * #defined to 0 here), so this must never actually be reached.
+ */
+static inline void hypervisor_ap_setup(void)
+{
+    ASSERT_UNREACHABLE();
+}
#endif /* CONFIG_XEN_GUEST */
#endif /* __X86_GUEST_XEN_H__ */