return 0;
}
+int arch_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+ return default_initialise_vcpu(v, arg);
+}
+
int arch_vcpu_reset(struct vcpu *v)
{
vcpu_end_shutdown_deferral(v);
#include <xen/wait.h>
#include <xen/guest_access.h>
#include <public/sysctl.h>
+#include <public/hvm/hvm_vcpu.h>
#include <asm/regs.h>
#include <asm/mc146818rtc.h>
#include <asm/system.h>
#undef c
}
+static inline int check_segment(struct segment_register *reg,
+ enum x86_segment seg)
+{
+ if ( reg->attr.fields.pad != 0 )
+ {
+ gprintk(XENLOG_ERR, "Segment attribute bits 12-15 are not zero\n");
+ return -EINVAL;
+ }
+
+ if ( reg->attr.bytes == 0 )
+ {
+ if ( seg != x86_seg_ds && seg != x86_seg_es )
+ {
+ gprintk(XENLOG_ERR, "Null selector provided for CS, SS or TR\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ if ( seg != x86_seg_tr && !reg->attr.fields.s )
+ {
+ gprintk(XENLOG_ERR,
+ "System segment provided for a code or data segment\n");
+ return -EINVAL;
+ }
+
+ if ( seg == x86_seg_tr && reg->attr.fields.s )
+ {
+ gprintk(XENLOG_ERR, "Code or data segment provided for TR\n");
+ return -EINVAL;
+ }
+
+ if ( !reg->attr.fields.p )
+ {
+ gprintk(XENLOG_ERR, "Non-present segment provided\n");
+ return -EINVAL;
+ }
+
+ if ( seg == x86_seg_cs && !(reg->attr.fields.type & 0x8) )
+ {
+ gprintk(XENLOG_ERR, "Non-code segment provided for CS\n");
+ return -EINVAL;
+ }
+
+ if ( seg == x86_seg_ss &&
+ ((reg->attr.fields.type & 0x8) || !(reg->attr.fields.type & 0x2)) )
+ {
+ gprintk(XENLOG_ERR, "Non-writeable segment provided for SS\n");
+ return -EINVAL;
+ }
+
+ if ( reg->attr.fields.s && seg != x86_seg_ss && seg != x86_seg_cs &&
+ (reg->attr.fields.type & 0x8) && !(reg->attr.fields.type & 0x2) )
+ {
+ gprintk(XENLOG_ERR, "Non-readable segment provided for DS or ES\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Called by VCPUOP_initialise for HVM guests. */
+int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
+{
+ struct cpu_user_regs *uregs = &v->arch.user_regs;
+ struct segment_register cs, ds, ss, es, tr;
+ const char *errstr;
+ int rc;
+
+ if ( ctx->pad != 0 )
+ return -EINVAL;
+
+ switch ( ctx->mode )
+ {
+ default:
+ return -EINVAL;
+
+ case VCPU_HVM_MODE_32B:
+ {
+ const struct vcpu_hvm_x86_32 *regs = &ctx->cpu_regs.x86_32;
+ uint32_t limit;
+
+ if ( ctx->cpu_regs.x86_32.pad1 != 0 ||
+ ctx->cpu_regs.x86_32.pad2[0] != 0 ||
+ ctx->cpu_regs.x86_32.pad2[1] != 0 ||
+ ctx->cpu_regs.x86_32.pad2[2] != 0 )
+ return -EINVAL;
+
+#define SEG(s, r) ({ \
+ s = (struct segment_register){ .base = (r)->s ## _base, \
+ .limit = (r)->s ## _limit, \
+ .attr.bytes = (r)->s ## _ar }; \
+ check_segment(&s, x86_seg_ ## s); })
+
+ rc = SEG(cs, regs);
+ rc |= SEG(ds, regs);
+ rc |= SEG(ss, regs);
+ rc |= SEG(es, regs);
+ rc |= SEG(tr, regs);
+#undef SEG
+
+ if ( rc != 0 )
+ return rc;
+
+ /* Basic sanity checks. */
+ limit = cs.limit;
+ if ( cs.attr.fields.g )
+ limit = (limit << 12) | 0xfff;
+ if ( regs->eip > limit )
+ {
+ gprintk(XENLOG_ERR, "EIP (%#08x) outside CS limit (%#08x)\n",
+ regs->eip, limit);
+ return -EINVAL;
+ }
+
+ if ( ss.attr.fields.dpl != cs.attr.fields.dpl )
+ {
+ gprintk(XENLOG_ERR, "SS.DPL (%u) is different than CS.DPL (%u)\n",
+ ss.attr.fields.dpl, cs.attr.fields.dpl);
+ return -EINVAL;
+ }
+
+ if ( ds.attr.fields.p && ds.attr.fields.dpl > cs.attr.fields.dpl )
+ {
+ gprintk(XENLOG_ERR, "DS.DPL (%u) is greater than CS.DPL (%u)\n",
+ ds.attr.fields.dpl, cs.attr.fields.dpl);
+ return -EINVAL;
+ }
+
+ if ( es.attr.fields.p && es.attr.fields.dpl > cs.attr.fields.dpl )
+ {
+ gprintk(XENLOG_ERR, "ES.DPL (%u) is greater than CS.DPL (%u)\n",
+ es.attr.fields.dpl, cs.attr.fields.dpl);
+ return -EINVAL;
+ }
+
+ if ( (regs->efer & EFER_LMA) && !(regs->efer & EFER_LME) )
+ {
+ gprintk(XENLOG_ERR, "EFER.LMA set without EFER.LME (%#016lx)\n",
+ regs->efer);
+ return -EINVAL;
+ }
+
+ uregs->rax = regs->eax;
+ uregs->rcx = regs->ecx;
+ uregs->rdx = regs->edx;
+ uregs->rbx = regs->ebx;
+ uregs->rsp = regs->esp;
+ uregs->rbp = regs->ebp;
+ uregs->rsi = regs->esi;
+ uregs->rdi = regs->edi;
+ uregs->rip = regs->eip;
+ uregs->rflags = regs->eflags;
+
+ v->arch.hvm_vcpu.guest_cr[0] = regs->cr0;
+ v->arch.hvm_vcpu.guest_cr[3] = regs->cr3;
+ v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
+ v->arch.hvm_vcpu.guest_efer = regs->efer;
+ }
+ break;
+
+ case VCPU_HVM_MODE_64B:
+ {
+ const struct vcpu_hvm_x86_64 *regs = &ctx->cpu_regs.x86_64;
+
+ /* Basic sanity checks. */
+ if ( !is_canonical_address(regs->rip) )
+ {
+ gprintk(XENLOG_ERR, "RIP contains a non-canonical address (%#lx)\n",
+ regs->rip);
+ return -EINVAL;
+ }
+
+ if ( !(regs->cr0 & X86_CR0_PG) )
+ {
+ gprintk(XENLOG_ERR, "CR0 doesn't have paging enabled (%#016lx)\n",
+ regs->cr0);
+ return -EINVAL;
+ }
+
+ if ( !(regs->cr4 & X86_CR4_PAE) )
+ {
+ gprintk(XENLOG_ERR, "CR4 doesn't have PAE enabled (%#016lx)\n",
+ regs->cr4);
+ return -EINVAL;
+ }
+
+ if ( !(regs->efer & EFER_LME) )
+ {
+ gprintk(XENLOG_ERR, "EFER doesn't have LME enabled (%#016lx)\n",
+ regs->efer);
+ return -EINVAL;
+ }
+
+ uregs->rax = regs->rax;
+ uregs->rcx = regs->rcx;
+ uregs->rdx = regs->rdx;
+ uregs->rbx = regs->rbx;
+ uregs->rsp = regs->rsp;
+ uregs->rbp = regs->rbp;
+ uregs->rsi = regs->rsi;
+ uregs->rdi = regs->rdi;
+ uregs->rip = regs->rip;
+ uregs->rflags = regs->rflags;
+
+ v->arch.hvm_vcpu.guest_cr[0] = regs->cr0;
+ v->arch.hvm_vcpu.guest_cr[3] = regs->cr3;
+ v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
+ v->arch.hvm_vcpu.guest_efer = regs->efer;
+
+#define SEG(l, a) (struct segment_register){ .limit = (l), .attr.bytes = (a) }
+ cs = SEG(~0u, 0xa9b); /* 64bit code segment. */
+ ds = ss = es = SEG(~0u, 0xc93);
+ tr = SEG(0x67, 0x8b); /* 64bit TSS (busy). */
+#undef SEG
+ }
+ break;
+ }
+
+ if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
+ v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
+
+ if ( v->arch.hvm_vcpu.guest_cr[4] & hvm_cr4_guest_reserved_bits(v, 0) )
+ {
+ gprintk(XENLOG_ERR, "Bad CR4 value: %#016lx\n",
+ v->arch.hvm_vcpu.guest_cr[4]);
+ return -EINVAL;
+ }
+
+ errstr = hvm_efer_valid(v, v->arch.hvm_vcpu.guest_efer, -1);
+ if ( errstr )
+ {
+ gprintk(XENLOG_ERR, "Bad EFER value (%#016lx): %s\n",
+ v->arch.hvm_vcpu.guest_efer, errstr);
+ return -EINVAL;
+ }
+
+ hvm_update_guest_cr(v, 0);
+ hvm_update_guest_cr(v, 3);
+ hvm_update_guest_cr(v, 4);
+ hvm_update_guest_efer(v);
+
+ if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
+ {
+ /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
+ struct page_info *page = get_page_from_gfn(v->domain,
+ v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT,
+ NULL, P2M_ALLOC);
+ if ( !page )
+ {
+ gprintk(XENLOG_ERR, "Invalid CR3: %#lx\n",
+ v->arch.hvm_vcpu.guest_cr[3]);
+ return -EINVAL;
+ }
+
+ v->arch.guest_table = pagetable_from_page(page);
+ }
+
+ hvm_set_segment_register(v, x86_seg_cs, &cs);
+ hvm_set_segment_register(v, x86_seg_ds, &ds);
+ hvm_set_segment_register(v, x86_seg_ss, &ss);
+ hvm_set_segment_register(v, x86_seg_es, &es);
+ hvm_set_segment_register(v, x86_seg_tr, &tr);
+
+ /* Sync AP's TSC with BSP's. */
+ v->arch.hvm_vcpu.cache_tsc_offset =
+ v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
+ hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
+ v->domain->arch.hvm_domain.sync_tsc);
+
+ paging_update_paging_modes(v);
+
+ v->is_initialised = 1;
+ set_bit(_VPF_down, &v->pause_flags);
+
+ return 0;
+}
+
+int arch_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+ int rc;
+
+ if ( is_hvm_vcpu(v) )
+ {
+ struct domain *d = v->domain;
+ struct vcpu_hvm_context ctxt;
+
+ if ( copy_from_guest(&ctxt, arg, 1) )
+ return -EFAULT;
+
+ domain_lock(d);
+ rc = v->is_initialised ? -EEXIST : arch_set_info_hvm_guest(v, &ctxt);
+ domain_unlock(d);
+ }
+ else
+ rc = default_initialise_vcpu(v, arg);
+
+ return rc;
+}
+
int arch_vcpu_reset(struct vcpu *v)
{
if ( is_pv_vcpu(v) )
}
/* Return a string indicating the error, or NULL for valid. */
-static const char * hvm_efer_valid(const struct vcpu *v, uint64_t value,
- signed int cr0_pg)
+const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
+ signed int cr0_pg)
{
unsigned int ext1_ecx = 0, ext1_edx = 0;
X86_CR0_CD | X86_CR0_PG)))
/* These bits in CR4 cannot be set by the guest. */
-static unsigned long hvm_cr4_guest_reserved_bits(const struct vcpu *v,
- bool_t restore)
+unsigned long hvm_cr4_guest_reserved_bits(const struct vcpu *v,
+                                          bool_t restore)
{
unsigned int leaf1_ecx = 0, leaf1_edx = 0;
unsigned int leaf7_0_ebx = 0, leaf7_0_ecx = 0;
}
}
-static long hvm_vcpu_op(
- int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
-{
- long rc;
-
- switch ( cmd )
- {
- case VCPUOP_register_runstate_memory_area:
- case VCPUOP_get_runstate_info:
- case VCPUOP_set_periodic_timer:
- case VCPUOP_stop_periodic_timer:
- case VCPUOP_set_singleshot_timer:
- case VCPUOP_stop_singleshot_timer:
- case VCPUOP_register_vcpu_info:
- case VCPUOP_register_vcpu_time_memory_area:
- rc = do_vcpu_op(cmd, vcpuid, arg);
- break;
- default:
- rc = -ENOSYS;
- break;
- }
-
- return rc;
-}
-
typedef unsigned long hvm_hypercall_t(
unsigned long, unsigned long, unsigned long, unsigned long, unsigned long,
unsigned long);
return compat_memory_op(cmd, arg);
}
-static long hvm_vcpu_op_compat32(
- int cmd, unsigned vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
-{
- long rc;
-
- switch ( cmd )
- {
- case VCPUOP_register_runstate_memory_area:
- case VCPUOP_get_runstate_info:
- case VCPUOP_set_periodic_timer:
- case VCPUOP_stop_periodic_timer:
- case VCPUOP_set_singleshot_timer:
- case VCPUOP_stop_singleshot_timer:
- case VCPUOP_register_vcpu_info:
- case VCPUOP_register_vcpu_time_memory_area:
- rc = compat_vcpu_op(cmd, vcpuid, arg);
- break;
- default:
- rc = -ENOSYS;
- break;
- }
-
- return rc;
-}
-
static long hvm_physdev_op_compat32(
int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
static hvm_hypercall_t *const hvm_hypercall64_table[NR_hypercalls] = {
[ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op,
[ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
- [ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op,
+ HYPERCALL(vcpu_op),
[ __HYPERVISOR_physdev_op ] = (hvm_hypercall_t *)hvm_physdev_op,
HYPERCALL(xen_version),
HYPERCALL(console_io),
static hvm_hypercall_t *const hvm_hypercall32_table[NR_hypercalls] = {
[ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op_compat32,
[ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op_compat32,
- [ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op_compat32,
+ COMPAT_CALL(vcpu_op),
[ __HYPERVISOR_physdev_op ] = (hvm_hypercall_t *)hvm_physdev_op_compat32,
COMPAT_CALL(xen_version),
HYPERCALL(console_io),
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <compat/vcpu.h>
+#include <compat/hvm/hvm_vcpu.h>
#define xen_vcpu_set_periodic_timer vcpu_set_periodic_timer
CHECK_vcpu_set_periodic_timer;
CHECK_vcpu_register_vcpu_info;
#undef xen_vcpu_register_vcpu_info
+#define xen_vcpu_hvm_context vcpu_hvm_context
+#define xen_vcpu_hvm_x86_32 vcpu_hvm_x86_32
+#define xen_vcpu_hvm_x86_64 vcpu_hvm_x86_64
+CHECK_vcpu_hvm_context;
+#undef xen_vcpu_hvm_x86_64
+#undef xen_vcpu_hvm_x86_32
+#undef xen_vcpu_hvm_context
+
int compat_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
{
struct domain *d = current->domain;
{
case VCPUOP_initialise:
{
- struct compat_vcpu_guest_context *cmp_ctxt;
-
if ( v->vcpu_info == &dummy_vcpu_info )
return -EINVAL;
- if ( (cmp_ctxt = xmalloc(struct compat_vcpu_guest_context)) == NULL )
+ if ( is_hvm_vcpu(v) )
{
- rc = -ENOMEM;
- break;
- }
+ struct vcpu_hvm_context ctxt;
+
- if ( copy_from_guest(cmp_ctxt, arg, 1) )
- {
- xfree(cmp_ctxt);
- rc = -EFAULT;
- break;
+ if ( copy_from_guest(&ctxt, arg, 1) )
+ return -EFAULT;
+
+ domain_lock(d);
+ rc = v->is_initialised ? -EEXIST : arch_set_info_hvm_guest(v, &ctxt);
+ domain_unlock(d);
}
+ else
+ {
+ struct compat_vcpu_guest_context *ctxt;
+
+ if ( (ctxt = xmalloc(struct compat_vcpu_guest_context)) == NULL )
+ return -ENOMEM;
+
+ if ( copy_from_guest(ctxt, arg, 1) )
+ {
+ xfree(ctxt);
+ return -EFAULT;
+ }
- domain_lock(d);
- rc = v->is_initialised ? -EEXIST : arch_set_info_guest(v, cmp_ctxt);
- domain_unlock(d);
+ domain_lock(d);
+ rc = v->is_initialised ? -EEXIST : arch_set_info_guest(v, ctxt);
+ domain_unlock(d);
+
+ xfree(ctxt);
+ }
if ( rc == -ERESTART )
rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iuh",
cmd, vcpuid, arg);
- xfree(cmp_ctxt);
break;
}
put_page_and_type(mfn_to_page(mfn));
}
+int default_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+ struct vcpu_guest_context *ctxt;
+ struct domain *d = v->domain;
+ int rc;
+
+ if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
+ return -ENOMEM;
+
+ if ( copy_from_guest(ctxt, arg, 1) )
+ {
+ free_vcpu_guest_context(ctxt);
+ return -EFAULT;
+ }
+
+ domain_lock(d);
+ rc = v->is_initialised ? -EEXIST : arch_set_info_guest(v, ctxt);
+ domain_unlock(d);
+
+ free_vcpu_guest_context(ctxt);
+
+ return rc;
+}
+
long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
{
struct domain *d = current->domain;
struct vcpu *v;
- struct vcpu_guest_context *ctxt;
long rc = 0;
if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
if ( v->vcpu_info == &dummy_vcpu_info )
return -EINVAL;
- if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
- return -ENOMEM;
-
- if ( copy_from_guest(ctxt, arg, 1) )
- {
- free_vcpu_guest_context(ctxt);
- return -EFAULT;
- }
-
- domain_lock(d);
- rc = v->is_initialised ? -EEXIST : arch_set_info_guest(v, ctxt);
- domain_unlock(d);
-
- free_vcpu_guest_context(ctxt);
-
+ rc = arch_initialise_vcpu(v, arg);
if ( rc == -ERESTART )
rc = hypercall_create_continuation(__HYPERVISOR_vcpu_op, "iuh",
cmd, vcpuid, arg);
headers-$(CONFIG_X86) += compat/arch-x86/xen-mca.h
headers-$(CONFIG_X86) += compat/arch-x86/xen.h
headers-$(CONFIG_X86) += compat/arch-x86/xen-$(compat-arch-y).h
+headers-$(CONFIG_X86) += compat/hvm/hvm_vcpu.h
headers-y += compat/arch-$(compat-arch-y).h compat/pmu.h compat/xlat.h
headers-$(FLASK_ENABLE) += compat/xsm/flask_op.h
vfree(vgc);
}
+struct vcpu_hvm_context;
+int arch_set_info_hvm_guest(struct vcpu *v, const struct vcpu_hvm_context *ctx);
+
#endif /* __ASM_DOMAIN_H__ */
/*
/* emulates #VE */
bool_t altp2m_vcpu_emulate_ve(struct vcpu *v);
+/* Check CR4/EFER values */
+const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
+ signed int cr0_pg);
+unsigned long hvm_cr4_guest_reserved_bits(const struct vcpu *v, bool_t restore);
+
#endif /* __ASM_X86_HVM_HVM_H__ */
/*
--- /dev/null
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2015, Roger Pau Monne <roger.pau@citrix.com>
+ */
+
+#ifndef __XEN_PUBLIC_HVM_HVM_VCPU_H__
+#define __XEN_PUBLIC_HVM_HVM_VCPU_H__
+
+#include "../xen.h"
+
+struct vcpu_hvm_x86_32 {
+ uint32_t eax;
+ uint32_t ecx;
+ uint32_t edx;
+ uint32_t ebx;
+ uint32_t esp;
+ uint32_t ebp;
+ uint32_t esi;
+ uint32_t edi;
+ uint32_t eip;
+ uint32_t eflags;
+
+ uint32_t cr0;
+ uint32_t cr3;
+ uint32_t cr4;
+
+ uint32_t pad1;
+
+ /*
+ * EFER should only be used to set the NXE bit (if required)
+ * when starting a vCPU in 32bit mode with paging enabled or
+ * to set the LME/LMA bits in order to start the vCPU in
+ * compatibility mode.
+ */
+ uint64_t efer;
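+ /*
+ * Illustrative example only (the exact values are not mandated by this
+ * interface): setting EFER.LME (bit 8) and EFER.LMA (bit 10) together
+ * with CR0.PG and CR4.PAE starts the vCPU in compatibility mode, while
+ * leaving efer as 0 starts it in plain 32bit mode.
+ */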
+
+ uint32_t cs_base;
+ uint32_t ds_base;
+ uint32_t ss_base;
+ uint32_t es_base;
+ uint32_t tr_base;
+ uint32_t cs_limit;
+ uint32_t ds_limit;
+ uint32_t ss_limit;
+ uint32_t es_limit;
+ uint32_t tr_limit;
+ uint16_t cs_ar;
+ uint16_t ds_ar;
+ uint16_t ss_ar;
+ uint16_t es_ar;
+ uint16_t tr_ar;
+
+ uint16_t pad2[3];
+};
+
+/*
+ * The layout of the _ar fields of the segment registers is the
+ * following:
+ *
+ * Bits [0,3]: type (bits 40-43).
+ * Bit 4: s (descriptor type, bit 44).
+ * Bits [5,6]: dpl (descriptor privilege level, bits 45-46).
+ * Bit 7: p (segment-present, bit 47).
+ * Bit 8: avl (available for system software, bit 52).
+ * Bit 9: l (64-bit code segment, bit 53).
+ * Bit 10: db (meaning depends on the segment, bit 54).
+ * Bit 11: g (granularity, bit 55).
+ * Bits [12,15]: unused, must be zero.
+ *
+ * A more complete description of the meaning of these fields can be
+ * obtained from the Intel SDM, Volume 3, section 3.4.5.
+ */
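+
+/*
+ * Worked example (illustrative only): _ar = 0xc9b encodes a flat code
+ * segment (type=0xb, s=1, dpl=0, p=1, avl=0, l=0, db=1, g=1), 0xc93 a
+ * flat data/stack segment (type=0x3, s=1, p=1, db=1, g=1), and 0x8b a
+ * busy TSS (type=0xb, s=0, p=1). These match the values the hypervisor
+ * itself loads for the VCPU_HVM_MODE_64B case, where the code segment
+ * instead uses 0xa9b (l=1, db=0).
+ */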
+
+struct vcpu_hvm_x86_64 {
+ uint64_t rax;
+ uint64_t rcx;
+ uint64_t rdx;
+ uint64_t rbx;
+ uint64_t rsp;
+ uint64_t rbp;
+ uint64_t rsi;
+ uint64_t rdi;
+ uint64_t rip;
+ uint64_t rflags;
+
+ uint64_t cr0;
+ uint64_t cr3;
+ uint64_t cr4;
+ uint64_t efer;
+
+ /*
+ * Using VCPU_HVM_MODE_64B implies that the vCPU is launched
+ * directly in long mode, so the cached parts of the segment
+ * registers get set to match that environment.
+ *
+ * If the user wants to launch the vCPU in compatibility mode
+ * the 32-bit structure should be used instead.
+ */
+};
+
+struct vcpu_hvm_context {
+#define VCPU_HVM_MODE_32B 0 /* 32bit fields of the structure will be used. */
+#define VCPU_HVM_MODE_64B 1 /* 64bit fields of the structure will be used. */
+ uint32_t mode;
+
+ uint32_t pad;
+
+ /* CPU registers. */
+ union {
+ struct vcpu_hvm_x86_32 x86_32;
+ struct vcpu_hvm_x86_64 x86_64;
+ } cpu_regs;
+};
+typedef struct vcpu_hvm_context vcpu_hvm_context_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_hvm_context_t);
+
+#endif /* __XEN_PUBLIC_HVM_HVM_VCPU_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
* Initialise a VCPU. Each VCPU can be initialised only once. A
* newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
*
- * @extra_arg == pointer to vcpu_guest_context structure containing initial
- * state for the VCPU.
+ * @extra_arg == For PV or ARM guests this is a pointer to a vcpu_guest_context
+ * structure containing the initial state for the VCPU. For x86
+ * HVM-based guests this is a pointer to a vcpu_hvm_context
+ * structure.
*/
#define VCPUOP_initialise 0
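+
+/*
+ * Illustrative example only (the hypercall wrapper and entry point
+ * names below are assumptions, not part of this interface): an x86 HVM
+ * guest could start a secondary vCPU in flat 32bit protected mode with
+ *
+ *     struct vcpu_hvm_context ctx = {
+ *         .mode = VCPU_HVM_MODE_32B,
+ *         .cpu_regs.x86_32 = {
+ *             .eip      = (uint32_t)ap_entry_point,
+ *             .cr0      = 0x00000001,
+ *             .cs_limit = ~0u,  .cs_ar = 0xc9b,
+ *             .ds_limit = ~0u,  .ds_ar = 0xc93,
+ *             .ss_limit = ~0u,  .ss_ar = 0xc93,
+ *             .es_limit = ~0u,  .es_ar = 0xc93,
+ *             .tr_limit = 0x67, .tr_ar = 0x8b,
+ *         },
+ *     };
+ *     rc = HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu_id, &ctx);
+ *
+ * followed by VCPUOP_up to actually run the vCPU. CR0 only has PE set
+ * here; the segment attribute encodings are described in
+ * hvm/hvm_vcpu.h.
+ */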
int arch_set_info_guest(struct vcpu *, vcpu_guest_context_u);
void arch_get_info_guest(struct vcpu *, vcpu_guest_context_u);
+int arch_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg);
+int default_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg);
+
int domain_relinquish_resources(struct domain *d);
void dump_pageframe_info(struct domain *d);
? grant_entry_header grant_table.h
? grant_entry_v2 grant_table.h
? gnttab_swap_grant_ref grant_table.h
+? vcpu_hvm_context hvm/hvm_vcpu.h
+? vcpu_hvm_x86_32 hvm/hvm_vcpu.h
+? vcpu_hvm_x86_64 hvm/hvm_vcpu.h
? kexec_exec kexec.h
! kexec_image kexec.h
! kexec_range kexec.h