#error BITS_PER_LONG definition absent
#endif
-#ifdef CONFIG_COMPAT
# include <compat/arch-x86/xen-mca.h>
# define xen_mcinfo_msr mcinfo_msr
# undef xen_cpu_offline_action
# undef xen_page_offline_action
# undef xen_mcinfo_recovery
-#else
-# define compat_mc_fetch xen_mc_fetch
-# define compat_mc_physcpuinfo xen_mc_physcpuinfo
-# define compat_handle_is_null guest_handle_is_null
-# define copy_to_compat copy_to_guest
-#endif
/* Machine Check Architecture Hypercall */
long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u_xen_mc)
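Note: before this change, a !CONFIG_COMPAT build aliased the compat spellings (compat_mc_fetch, copy_to_compat, and so on) back to their native counterparts, so the body of do_mca() could be written once against the compat names. With compat support now unconditional on x86-64, only the compat/arch-x86/xen-mca.h inclusion and its CHECK_* scaffolding survive. A minimal, compilable sketch of that aliasing idiom, using made-up names (nat_fetch/cmp_fetch are not the Xen symbols):

    #include <stdio.h>

    struct nat_fetch { unsigned long flags; };

    #ifndef HAVE_COMPAT                 /* stand-in for !CONFIG_COMPAT */
    # define cmp_fetch nat_fetch        /* compat spelling aliases the native type */
    #endif

    /* Written once against the compat spelling; with the alias in place
     * it operates directly on the native structure. */
    static void handle(struct cmp_fetch *f)
    {
        printf("flags=%lu\n", f->flags);
    }

    int main(void)
    {
        struct cmp_fetch f = { .flags = 1 };
        handle(&f);
        return 0;
    }

With HAVE_COMPAT undefined, the preprocessor rewrites struct cmp_fetch to struct nat_fetch, so a single handler body served both build configurations.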
#include <asm/amd.h>
#include <xen/numa.h>
#include <xen/iommu.h>
-#ifdef CONFIG_COMPAT
#include <compat/vcpu.h>
-#endif
DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
DEFINE_PER_CPU(unsigned long, cr4);
return (hv_cr4 & hv_cr4_mask) | (guest_cr4 & ~hv_cr4_mask);
}
-#ifdef CONFIG_COMPAT
#define xen_vcpu_guest_context vcpu_guest_context
#define fpu_ctxt fpu_ctxt.x
CHECK_FIELD_(struct, vcpu_guest_context, fpu_ctxt);
#undef fpu_ctxt
#undef xen_vcpu_guest_context
-#endif
/* Called by XEN_DOMCTL_setvcpucontext and VCPUOP_initialise. */
int arch_set_info_guest(
* we expect the tools to DTRT even in compat-mode callers. */
compat = is_pv_32on64_domain(d);
-#ifdef CONFIG_COMPAT
#define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
-#else
-#define c(fld) (c.nat->fld)
-#endif
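Note: the c() accessor reads a field through whichever union member matches the guest's bitness; c is a vcpu_guest_context_u holding either a native or a compat context pointer, and the deleted fallback simply collapsed it to the native field. A self-contained sketch of the pattern, with simplified stand-in types (ctx_u, nat_ctx, cmp_ctx are illustrative, not the Xen definitions):

    #include <stdint.h>
    #include <stdio.h>

    struct nat_ctx { uint64_t flags; };
    struct cmp_ctx { uint32_t flags; };   /* 32-bit guest layout */

    typedef union {
        struct nat_ctx *nat;
        struct cmp_ctx *cmp;
    } ctx_u;

    /* Same shape as the Xen macro: dispatch on a local `compat` flag. */
    #define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))

    int main(void)
    {
        struct nat_ctx n = { .flags = 42 };
        ctx_u c = { .nat = &n };
        int compat = 0;                   /* native guest */

        printf("%llu\n", (unsigned long long)c(flags));
        return 0;
    }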
flags = c(flags);
if ( !is_hvm_vcpu(v) )
LDT_ENTRY_SIZE) )
return -EINVAL;
}
-#ifdef CONFIG_COMPAT
else
{
fixup_guest_stack_selector(d, c.cmp->user_regs.ss);
LDT_ENTRY_SIZE) )
return -EINVAL;
}
-#endif
}
v->fpu_initialised = !!(flags & VGCF_I387_VALID);
memcpy(v->arch.pv_vcpu.trap_ctxt, c.nat->trap_ctxt,
sizeof(c.nat->trap_ctxt));
}
-#ifdef CONFIG_COMPAT
else
{
XLAT_cpu_user_regs(&v->arch.user_regs, &c.cmp->user_regs);
XLAT_trap_info(v->arch.pv_vcpu.trap_ctxt + i,
c.cmp->trap_ctxt + i);
}
-#endif
for ( i = 0; i < ARRAY_SIZE(v->arch.debugreg); ++i )
v->arch.debugreg[i] = c(debugreg[i]);
if ( !compat )
rc = (int)set_gdt(v, c.nat->gdt_frames, c.nat->gdt_ents);
-#ifdef CONFIG_COMPAT
else
{
unsigned long gdt_frames[ARRAY_SIZE(v->arch.pv_vcpu.gdt_frames)];
gdt_frames[i] = c.cmp->gdt_frames[i];
rc = (int)set_gdt(v, gdt_frames, c.cmp->gdt_ents);
}
-#endif
if ( rc != 0 )
return rc;
if ( guest_handle_is_null(runstate_guest(v)) )
return;
-#ifdef CONFIG_COMPAT
if ( has_32bit_shinfo(v->domain) )
{
struct compat_vcpu_runstate_info info;
__copy_to_guest(v->runstate_guest.compat, &info, 1);
return;
}
-#endif
__copy_to_guest(runstate_guest(v), &v->runstate, 1);
}
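Note: for a domain with a 32-bit shared info, the runstate cannot be copied out verbatim; it is first translated into the 32-bit layout (Xen generates XLAT_* helpers for this) and then copied. A simplified sketch of that translate-then-copy step, with stand-in types (runstate64/runstate32 and xlat_runstate are illustrative, not the generated helpers):

    #include <stdint.h>
    #include <string.h>

    struct runstate64 { int state; uint64_t entry_time; };
    struct runstate32 { int state; uint64_t entry_time; } __attribute__((packed));

    static void xlat_runstate(struct runstate32 *dst, const struct runstate64 *src)
    {
        dst->state      = src->state;
        dst->entry_time = src->entry_time;
    }

    static void copy_out(void *guest_dst, const void *src, size_t len)
    {
        memcpy(guest_dst, src, len);      /* stands in for __copy_to_guest() */
    }

    void update(void *guest_area, const struct runstate64 *rs, int has_32bit_shinfo)
    {
        if ( has_32bit_shinfo )
        {
            struct runstate32 info;       /* translate into the 32-bit layout */
            xlat_runstate(&info, rs);
            copy_out(guest_area, &info, sizeof(info));
            return;
        }
        copy_out(guest_area, rs, sizeof(*rs));
    }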
{
__context_switch();
-#ifdef CONFIG_COMPAT
if ( !is_hvm_vcpu(next) &&
(is_idle_vcpu(prev) ||
is_hvm_vcpu(prev) ||
if ( !(efer & EFER_SCE) )
write_efer(efer | EFER_SCE);
}
-#endif
/* Re-enable interrupts before restoring state which may fault. */
local_irq_enable();
return op;
}
-#ifdef CONFIG_COMPAT
int hypercall_xlat_continuation(unsigned int *id, unsigned int mask, ...)
{
int rc = 0;
return rc;
}
-#endif
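Note: hypercall_xlat_continuation() adjusts the saved register arguments of a preempted hypercall from a 32-bit guest before the call is replayed. A much-simplified sketch of the idea only, not the real signature or semantics: plain arguments are clipped back to 32 bits, while arguments flagged in the mask (those carrying translated pointers) are preserved here, standing in for the real routine's pointer fixup:

    #include <stdint.h>

    static uint64_t xlat_arg(unsigned int i, uint64_t v, unsigned int mask)
    {
        if ( mask & (1u << i) )
            return v;                 /* flagged arg: left for pointer fixup */
        return (uint32_t)v;           /* plain arg: clip to the 32-bit ABI */
    }

    void xlat_continuation(uint64_t args[6], unsigned int mask)
    {
        for ( unsigned int i = 0; i < 6; ++i )
            args[i] = xlat_arg(i, args[i], mask);
    }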
static int relinquish_memory(
struct domain *d, struct page_list_head *list, unsigned long type)
switch ( domctl->u.address_size.size )
{
-#ifdef CONFIG_COMPAT
case 32:
ret = switch_compat(d);
break;
case 64:
ret = switch_native(d);
break;
-#endif
default:
ret = (domctl->u.address_size.size == BITS_PER_LONG) ? 0 : -EINVAL;
break;
return ret;
}
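Note: via the address_size domctl above, the toolstack picks a PV guest's pointer width before loading its kernel; 32 flips the domain into compat mode via switch_compat(), 64 (back) to native via switch_native(). A caller-side sketch, assuming a hypothetical transport helper (set_address_size() is not the libxc API):

    #include <errno.h>

    struct domain_handle;                      /* opaque, illustrative */

    /* Hypothetical transport; stands in for issuing the address_size domctl. */
    static int set_address_size(struct domain_handle *d, unsigned int bits)
    {
        (void)d; (void)bits;
        return 0;
    }

    int choose_guest_bitness(struct domain_handle *d, unsigned int bits)
    {
        switch ( bits )
        {
        case 32:   /* hypervisor side ends up in switch_compat(d) */
        case 64:   /* hypervisor side ends up in switch_native(d) */
            return set_address_size(d, bits);
        default:
            return -EINVAL;   /* mirrors the hypervisor-side default */
        }
    }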
-#ifdef CONFIG_COMPAT
#define xen_vcpu_guest_context vcpu_guest_context
#define fpu_ctxt fpu_ctxt.x
CHECK_FIELD_(struct, vcpu_guest_context, fpu_ctxt);
#undef fpu_ctxt
#undef xen_vcpu_guest_context
-#endif
void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
unsigned int i;
bool_t compat = is_pv_32on64_domain(v->domain);
-#ifdef CONFIG_COMPAT
#define c(fld) (!compat ? (c.nat->fld) : (c.cmp->fld))
-#else
-#define c(fld) (c.nat->fld)
-#endif
if ( is_hvm_vcpu(v) )
memset(c.nat, 0, sizeof(*c.nat));
memcpy(c.nat->trap_ctxt, v->arch.pv_vcpu.trap_ctxt,
sizeof(c.nat->trap_ctxt));
}
-#ifdef CONFIG_COMPAT
else
{
XLAT_cpu_user_regs(&c.cmp->user_regs, &v->arch.user_regs);
XLAT_trap_info(c.cmp->trap_ctxt + i,
v->arch.pv_vcpu.trap_ctxt + i);
}
-#endif
for ( i = 0; i < ARRAY_SIZE(v->arch.debugreg); ++i )
c(debugreg[i] = v->arch.debugreg[i]);
c(ldt_ents = v->arch.pv_vcpu.ldt_ents);
for ( i = 0; i < ARRAY_SIZE(v->arch.pv_vcpu.gdt_frames); ++i )
c(gdt_frames[i] = v->arch.pv_vcpu.gdt_frames[i]);
-#ifdef CONFIG_COMPAT
BUILD_BUG_ON(ARRAY_SIZE(c.nat->gdt_frames) !=
ARRAY_SIZE(c.cmp->gdt_frames));
-#endif
for ( ; i < ARRAY_SIZE(c.nat->gdt_frames); ++i )
c(gdt_frames[i] = 0);
c(gdt_ents = v->arch.pv_vcpu.gdt_ents);
c.nat->debugreg[7] |= c.nat->debugreg[5];
c.nat->debugreg[5] = 0;
}
-#ifdef CONFIG_COMPAT
else
{
l4_pgentry_t *l4e = __va(pagetable_get_paddr(v->arch.guest_table));
c.cmp->debugreg[7] |= c.cmp->debugreg[5];
c.cmp->debugreg[5] = 0;
}
-#endif
if ( guest_kernel_mode(v, &v->arch.user_regs) )
c(flags |= VGCF_in_kernel);
v->arch.hvm_vcpu.inject_trap.vector = -1;
-#ifdef CONFIG_COMPAT
rc = setup_compat_arg_xlat(v);
if ( rc != 0 )
goto fail4;
-#endif
rc = hvm_vcpu_cacheattr_init(v);
if ( rc != 0 )
return 0;
fail5:
-#ifdef CONFIG_COMPAT
free_compat_arg_xlat(v);
-#endif
fail4:
nestedhvm_vcpu_destroy(v);
fail3:
{
nestedhvm_vcpu_destroy(v);
-#ifdef CONFIG_COMPAT
free_compat_arg_xlat(v);
-#endif
tasklet_kill(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
hvm_vcpu_cacheattr_destroy(v);
*/
asm volatile ( "lgdt %0" : : "m" (gdt_desc) );
-#ifdef CONFIG_COMPAT
if ( is_pv_32on64_domain(dom0) )
{
compat_machine_kexec(image->page_list[1],
image->start_address);
}
else
-#endif
{
relocate_new_kernel_t rnk;
#include <xen/sched.h>
#include <xen/xenoprof.h>
#include <public/xenoprof.h>
-#ifdef CONFIG_COMPAT
#include <compat/xenoprof.h>
-#endif
#include <asm/hvm/support.h>
#include "op_counter.h"
return 0;
}
-#ifdef CONFIG_COMPAT
int compat_oprof_arch_counter(XEN_GUEST_HANDLE(void) arg)
{
struct compat_oprof_counter counter;
return 0;
}
-#endif
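Note: compat_oprof_arch_counter() is the 32-bit twin of the native handler; the guest handle is the same opaque pointer, but the buffer behind it is read with the compat struct layout. An illustrative dispatch with stand-in types (counter64/counter32 are not Xen's structures) and a memcpy standing in for copy_from_guest():

    #include <stdint.h>
    #include <string.h>

    struct counter64 { uint64_t count; uint32_t kernel, user; };
    struct counter32 { uint64_t count; uint32_t kernel, user; } __attribute__((packed));

    static int from_guest(void *dst, const void *guest, size_t n)
    {
        memcpy(dst, guest, n);            /* stands in for copy_from_guest() */
        return 0;
    }

    int native_counter(const void *arg)
    {
        struct counter64 c;
        return from_guest(&c, arg, sizeof(c));
    }

    int compat_counter(const void *arg)   /* same handle, 32-bit layout */
    {
        struct counter32 c;
        return from_guest(&c, arg, sizeof(c));
    }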
int xenoprofile_get_mode(const struct vcpu *v,
const struct cpu_user_regs *regs)
#include <xen/pfn.h>
#include <xen/nodemask.h>
#include <public/version.h>
-#ifdef CONFIG_COMPAT
#include <compat/platform.h>
#include <compat/xen.h>
-#endif
#include <asm/bitops.h>
#include <asm/smp.h>
#include <asm/processor.h>
BUILD_BUG_ON(sizeof(shared_info_t) > PAGE_SIZE);
BUILD_BUG_ON(sizeof(struct vcpu_info) != 64);
-#ifdef CONFIG_COMPAT
BUILD_BUG_ON(sizeof(((struct compat_platform_op *)0)->u) !=
sizeof(((struct compat_platform_op *)0)->u.pad));
BUILD_BUG_ON(sizeof(start_info_compat_t) > PAGE_SIZE);
BUILD_BUG_ON(sizeof(struct compat_vcpu_info) != 64);
-#endif
/* Check definitions in public headers match internal defs. */
BUILD_BUG_ON(__HYPERVISOR_VIRT_START != HYPERVISOR_VIRT_START);
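Note: these BUILD_BUG_ON()s pin the guest ABI at compile time: the compat structures must keep exactly the sizes a 32-bit guest expects (a 64-byte vcpu_info, page-sized shared and start info, a fully padded platform_op union). A sketch using one common definition of such a compile-time assert (Xen's own macro may differ):

    /* A true condition yields a negative-sized array, which fails to compile. */
    #define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    struct vcpu_info32 { char pad[64]; };   /* illustrative stand-in */

    void abi_checks(void)
    {
        MY_BUILD_BUG_ON(sizeof(struct vcpu_info32) != 64);   /* compiles */
        /* MY_BUILD_BUG_ON(sizeof(struct vcpu_info32) != 65); would not */
    }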
DEFINE_PER_CPU_READ_MOSTLY(u32, ler_msr);
DEFINE_PER_CPU_READ_MOSTLY(struct desc_struct *, gdt_table);
-#ifdef CONFIG_COMPAT
DEFINE_PER_CPU_READ_MOSTLY(struct desc_struct *, compat_gdt_table);
-#endif
/* Master table, used by CPU0. */
idt_entry_t idt_table[IDT_ENTRIES];
*reg = xen_pfn_to_cr3(mfn_to_gmfn(
v->domain, mfn));
}
-#ifdef CONFIG_COMPAT
else
{
mfn = l4e_get_pfn(*(l4_pgentry_t *)
                  __va(pagetable_get_paddr(v->arch.guest_table)));
*reg = compat_pfn_to_cr3(mfn_to_gmfn(
v->domain, mfn));
}
-#endif
/* PTs should not be shared */
BUG_ON(page_get_owner(mfn_to_page(mfn)) == dom_cow);
}
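Note: reading and writing CR3 on behalf of a guest must pick the encoding matching its bitness, as the hunks above and below do: the native ABI exchanges pfn << 12, while the 32-bit ABI rotates the pfn into 32 bits because a PAE pfn can use more than 20 bits. A sketch mirroring the public-header macros (the names here are local stand-ins):

    #include <stdint.h>

    static inline uint64_t nat_pfn_to_cr3(uint64_t pfn)
    {
        return pfn << 12;                 /* plain shift, 64-bit ABI */
    }

    static inline uint32_t cmp_pfn_to_cr3(uint32_t pfn)
    {
        return (pfn << 12) | (pfn >> 20); /* rotate into 32 bits */
    }

    static inline uint32_t cmp_cr3_to_pfn(uint32_t cr3)
    {
        return (cr3 >> 12) | (cr3 << 20); /* inverse rotation */
    }

The compat form round-trips any 32-bit pfn: cmp_cr3_to_pfn(cmp_pfn_to_cr3(p)) == p.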
unsigned long gfn;
struct page_info *page;
domain_lock(v->domain);
- if ( !is_pv_32on64_vcpu(v) )
- {
- gfn = xen_cr3_to_pfn(*reg);
-#ifdef CONFIG_COMPAT
- } else {
- gfn = compat_cr3_to_pfn(*reg);
-#endif
- }
+ gfn = !is_pv_32on64_vcpu(v)
+ ? xen_cr3_to_pfn(*reg) : compat_cr3_to_pfn(*reg);
page = get_page_from_gfn(v->domain, gfn, NULL, P2M_ALLOC);
rc = page ? new_guest_cr3(page_to_mfn(page)) : 0;
if ( page )
(unsigned long)tss,
offsetof(struct tss_struct, __cacheline_filler) - 1,
9);
-#ifdef CONFIG_COMPAT
_set_tssldt_desc(
this_cpu(compat_gdt_table) + TSS_ENTRY - FIRST_RESERVED_GDT_ENTRY,
(unsigned long)tss,
offsetof(struct tss_struct, __cacheline_filler) - 1,
11);
-#endif
/* Switch to non-compat GDT (which has B bit clear) to execute LTR. */
asm volatile (
idt_tables[0] = idt_table;
this_cpu(gdt_table) = boot_cpu_gdt_table;
-#ifdef CONFIG_COMPAT
this_cpu(compat_gdt_table) = boot_cpu_compat_gdt_table;
-#endif
percpu_traps_init();
extern struct desc_struct boot_cpu_gdt_table[];
DECLARE_PER_CPU(struct desc_struct *, gdt_table);
-#ifdef CONFIG_COMPAT
extern struct desc_struct boot_cpu_compat_gdt_table[];
DECLARE_PER_CPU(struct desc_struct *, compat_gdt_table);
-#else
-# define boot_cpu_compat_gdt_table boot_cpu_gdt_table
-# define per_cpu__compat_gdt_table per_cpu__gdt_table
-#endif
extern void set_intr_gate(unsigned int irq, void * addr);
extern void load_TR(void);
/* A secondary copy of the vcpu time info. */
XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;
-#ifdef CONFIG_COMPAT
void *compat_arg_xlat;
-#endif
} __cacheline_aligned;
#define SHARED_M2P_ENTRY (~0UL - 1UL)
#define SHARED_M2P(_e) ((_e) == SHARED_M2P_ENTRY)
-#ifdef CONFIG_COMPAT
#define compat_machine_to_phys_mapping ((unsigned int *)RDWR_COMPAT_MPT_VIRT_START)
#define _set_gpfn_from_mfn(mfn, pfn) ({ \
struct domain *d = page_get_owner(__mfn_to_page(mfn)); \
(compat_machine_to_phys_mapping[(mfn)] = (unsigned int)(entry))), \
machine_to_phys_mapping[(mfn)] = (entry)); \
})
-#else
-#define _set_gpfn_from_mfn(mfn, pfn) ({ \
- struct domain *d = page_get_owner(__mfn_to_page(mfn)); \
- if(d && (d == dom_cow)) \
- machine_to_phys_mapping[(mfn)] = SHARED_M2P_ENTRY; \
- else \
- machine_to_phys_mapping[(mfn)] = (pfn); \
- })
-#endif
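Note: only the compat variant of _set_gpfn_from_mfn() survives: alongside the full-width machine-to-phys array, Xen maintains a parallel table of 32-bit entries for compat guests, so every update writes both where the mfn is low enough to fit. A simplified sketch, with the shared-page special case and the exact bounds check elided (names illustrative):

    #include <stdint.h>

    #define COMPAT_M2P_ENTRIES (1UL << 20)      /* illustrative limit */

    extern unsigned long machine_to_phys[];     /* native, full-width */
    extern uint32_t compat_machine_to_phys[];   /* 32-bit mirror */

    static inline void set_gpfn_from_mfn(unsigned long mfn, unsigned long pfn)
    {
        if ( mfn < COMPAT_M2P_ENTRIES )         /* only low mfns fit */
            compat_machine_to_phys[mfn] = (uint32_t)pfn;
        machine_to_phys[mfn] = pfn;
    }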
/*
* Disable some users of set_gpfn_from_mfn() (e.g., free_heap_pages()) until
extern int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm);
-#ifdef CONFIG_COMPAT
void domain_set_alloc_bitsize(struct domain *d);
unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits);
-#else
-# define domain_set_alloc_bitsize(d) ((void)0)
-# define domain_clamp_alloc_bitsize(d, b) (b)
-#endif
unsigned long domain_get_maximum_gpfn(struct domain *d);
void show_page_walk(unsigned long addr);
void fatal_trap(int trapnr, struct cpu_user_regs *regs);
-#ifdef CONFIG_COMPAT
void compat_show_guest_stack(struct vcpu *, struct cpu_user_regs *, int lines);
-#else
-#define compat_show_guest_stack(vcpu, regs, lines) ((void)0)
-#endif
extern void mtrr_ap_init(void);
extern void mtrr_bp_init(void);
#ifndef __XEN_X86_SHARED_H__
#define __XEN_X86_SHARED_H__
-#ifdef CONFIG_COMPAT
-
#define nmi_reason(d) (!has_32bit_shinfo(d) ? \
(u32 *)&(d)->shared_info->native.arch.nmi_reason : \
(u32 *)&(d)->shared_info->compat.arch.nmi_reason)
v->vcpu_info->compat.arch.field = val; \
}
-#else
-
-#define nmi_reason(d) ((u32 *)&(d)->shared_info->arch.nmi_reason)
-
-#define GET_SET_SHARED(type, field) \
-static inline type arch_get_##field(const struct domain *d) \
-{ \
- return d->shared_info->arch.field; \
-} \
-static inline void arch_set_##field(struct domain *d, \
- type val) \
-{ \
- d->shared_info->arch.field = val; \
-}
-
-#define GET_SET_VCPU(type, field) \
-static inline type arch_get_##field(const struct vcpu *v) \
-{ \
- return v->vcpu_info->arch.field; \
-} \
-static inline void arch_set_##field(struct vcpu *v, \
- type val) \
-{ \
- v->vcpu_info->arch.field = val; \
-}
-#endif
-
GET_SET_SHARED(unsigned long, max_pfn)
GET_SET_SHARED(xen_pfn_t, pfn_to_mfn_frame_list_list)
GET_SET_SHARED(unsigned long, nmi_reason)
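Note: each GET_SET_SHARED(type, field) invocation stamps out an arch_get_<field>()/arch_set_<field>() pair; in the surviving compat-aware version they dispatch on has_32bit_shinfo() the same way nmi_reason() does above. Roughly what the max_pfn pair expands to, over simplified stand-in types (the real structures and has_32bit_shinfo() differ):

    #include <stdint.h>

    struct arch_shared   { unsigned long max_pfn; };
    struct arch_shared32 { uint32_t max_pfn; };

    struct shinfo {
        union {
            struct { struct arch_shared   arch; } native;
            struct { struct arch_shared32 arch; } compat;
        };
    };

    struct domain { struct shinfo *shared_info; int has_32bit_shinfo; };

    static inline unsigned long arch_get_max_pfn(const struct domain *d)
    {
        return !d->has_32bit_shinfo ? d->shared_info->native.arch.max_pfn
                                    : d->shared_info->compat.arch.max_pfn;
    }

    static inline void arch_set_max_pfn(struct domain *d, unsigned long val)
    {
        if ( !d->has_32bit_shinfo )
            d->shared_info->native.arch.max_pfn = val;
        else
            d->shared_info->compat.arch.max_pfn = val;
    }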