This removal applies to both the hypervisor and the toolstack sides of PVHv1.
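
Note for out-of-tree consumers (illustrative, not part of the change itself):
dropping XEN_DOMCTL_CDF_pvh_guest and XEN_DOMINF_pvh_guest renumbers the
remaining flag bits, which is why a hunk below bumps
XEN_DOMCTL_INTERFACE_VERSION from 0x0000000c to 0x0000000d. A toolstack
carrying private patches can turn a header mismatch into a build error with a
guard along these lines (a sketch only; it assumes the installed public
headers under xen/ and is not added by this patch):

    /*
     * Sketch only: refuse to build against public headers that predate
     * the PVHv1 removal, where XEN_DOMCTL_CDF_xs_domain is still bit 5
     * rather than bit 4 and the pvh_guest flags still exist.
     */
    #define __XEN_TOOLS__ 1          /* domctl.h is a tools-only header */
    #include <xen/domctl.h>

    #if XEN_DOMCTL_INTERFACE_VERSION < 0x0000000d
    # error "public headers predate the PVHv1 removal"
    #endif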
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
it is safe to allow this to be enabled but you may wish to disable it
anyway.
-=item B<pvh=BOOLEAN>
-
-Selects whether to run this PV guest in an HVM container. Default is 0.
-
=back
=head2 Fully-virtualised (HVM) Guest Specific Options
+++ /dev/null
-
-PVH : an x86 PV guest running in an HVM container.
-
-See: http://blog.xen.org/index.php/2012/10/23/the-paravirtualization-spectrum-part-1-the-ends-of-the-spectrum/
-
-At the moment HAP is required for PVH.
-
-At present the only PVH guest is an x86 64bit PV linux. Patches are at:
- git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen.git
-
-A PVH guest kernel must support following features, as defined for linux
-in arch/x86/xen/xen-head.S:
-
- #define FEATURES_PVH "|writable_descriptor_tables" \
- "|auto_translated_physmap" \
- "|supervisor_mode_kernel" \
- "|hvm_callback_vector"
-
-In a nutshell:
-* the guest uses auto translate:
- - p2m is managed by xen
- - pagetables are owned by the guest
- - mmu_update hypercall not available
-* it uses event callback and not vlapic emulation,
-* IDT is native, so set_trap_table hcall is also N/A for a PVH guest.
-
-For a full list of hcalls supported for PVH, see pvh_hypercall64_table
-in arch/x86/hvm/hvm.c in xen. From the ABI prespective, it's mostly a
-PV guest with auto translate, although it does use hvm_op for setting
-callback vector, and has a special version of arch_set_guest_info for bringing
-up secondary cpus.
-
-The initial phase targets the booting of a 64bit UP/SMP linux guest in PVH
-mode. This is done by adding: pvh=1 in the config file. xl, and not xm, is
-supported. Phase I patches are broken into three parts:
- - xen changes for booting of 64bit PVH guest
- - tools changes for creating a PVH guest
- - boot of 64bit dom0 in PVH mode.
-
-To boot 64bit dom0 in PVH mode, add dom0pvh to grub xen command line.
-
-Following fixme's exist in the code:
- - arch/x86/time.c: support more tsc modes.
-
-Following remain to be done for PVH:
- - Get rid of PVH mode, make it just HVM with some flags set
- - implement arch_get_info_guest() for pvh.
- - Investigate what else needs to be done for VMI support.
- - AMD port.
- - 32bit PVH guest support in both linux and xen. Xen changes are tagged
- "32bitfixme".
- - Add support for monitoring guest behavior. See hvm_memory_event* functions
- in hvm.c
- - vcpu hotplug support
- - Live migration of PVH guests.
- - Avail PVH dom0 of posted interrupts. (This will be a big win).
-
-
-Note, any emails to me must be cc'd to xen devel mailing list. OTOH, please
-cc me on PVH emails to the xen devel mailing list.
-
-Mukesh Rathor
-mukesh.rathor [at] oracle [dot] com
Flag that makes a dom0 use shadow paging.
-### dom0pvh
-> `= <boolean>`
-
-> Default: `false`
-
-Flag that makes a 64bit dom0 boot in PVH mode. No 32bit support at present.
-
### dtuart (ARM)
> `= path [:options]`
struct xen_domctl domctl; /* just use a global domctl */
static int _hvm_guest; /* hvm guest? 32bit HVMs have 64bit context */
-static int _pvh_guest; /* PV guest in HVM container */
static domid_t _dom_id; /* guest domid */
static int _max_vcpu_id; /* thus max_vcpu_id+1 VCPUs */
static int _dom0_fd; /* fd of /dev/privcmd */
_max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
_hvm_guest = (domctl.u.getdomaininfo.flags & XEN_DOMINF_hvm_guest);
- _pvh_guest = (domctl.u.getdomaininfo.flags & XEN_DOMINF_pvh_guest);
return _max_vcpu_id;
}
int sz = sizeof(anyc);
/* first try the MTF for hvm guest. otherwise do manually */
- if (_hvm_guest || _pvh_guest) {
+ if (_hvm_guest) {
domctl.u.debug_op.vcpu = which_vcpu;
domctl.u.debug_op.op = setit ? XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON :
XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF;
domid_t console_domid;
domid_t xenstore_domid;
xen_pfn_t shared_info_mfn;
- int pvh_enabled;
xc_interface *xch;
domid_t guest_domid;
uint32_t ssidref;
unsigned int dying:1, crashed:1, shutdown:1,
paused:1, blocked:1, running:1,
- hvm:1, debugged:1, pvh:1, xenstore:1, hap:1;
+ hvm:1, debugged:1, xenstore:1, hap:1;
unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
unsigned long nr_pages; /* current number, not maximum */
unsigned long nr_outstanding_pages;
} vendor;
bool hvm;
- bool pvh;
uint64_t xfeature_mask;
uint32_t *featureset;
return -ESRCH;
info->hvm = di.hvm;
- info->pvh = di.pvh;
info->featureset = calloc(host_nr_features, sizeof(*info->featureset));
if ( !info->featureset )
clear_bit(X86_FEATURE_SYSCALL, info->featureset);
}
- if ( !info->pvh )
- {
- clear_bit(X86_FEATURE_PSE, info->featureset);
- clear_bit(X86_FEATURE_PSE36, info->featureset);
- clear_bit(X86_FEATURE_PGE, info->featureset);
- clear_bit(X86_FEATURE_PAGE1GB, info->featureset);
- }
+ clear_bit(X86_FEATURE_PSE, info->featureset);
+ clear_bit(X86_FEATURE_PSE36, info->featureset);
+ clear_bit(X86_FEATURE_PGE, info->featureset);
+ clear_bit(X86_FEATURE_PAGE1GB, info->featureset);
}
if ( info->xfeature_mask == 0 )
goto err;
}
- if ( dom->pvh_enabled )
- {
- const char *pvh_features = "writable_descriptor_tables|"
- "auto_translated_physmap|"
- "supervisor_mode_kernel|"
- "hvm_callback_vector";
- elf_xen_parse_features(pvh_features, dom->f_requested, NULL);
- }
-
/* check features */
for ( i = 0; i < XENFEAT_NR_SUBMAPS; i++ )
{
unsigned m;
prot = domx86->params->lvl_prot[l];
- if ( l > 0 || dom->pvh_enabled )
+ if ( l > 0 )
return prot;
for ( m = 0; m < domx86->n_mappings; m++ )
DOMPRINTF("%s: cr3: pfn 0x%" PRIpfn " mfn 0x%" PRIpfn "",
__FUNCTION__, dom->pgtables_seg.pfn, cr3_pfn);
- if ( !dom->pvh_enabled )
- {
- ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_32;
- ctxt->user_regs.es = FLAT_KERNEL_DS_X86_32;
- ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_32;
- ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_32;
- ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_32;
- ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_32;
-
- ctxt->kernel_ss = ctxt->user_regs.ss;
- ctxt->kernel_sp = ctxt->user_regs.esp;
- }
+ ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_32;
+ ctxt->user_regs.es = FLAT_KERNEL_DS_X86_32;
+ ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_32;
+ ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_32;
+ ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_32;
+ ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_32;
+
+ ctxt->kernel_ss = ctxt->user_regs.ss;
+ ctxt->kernel_sp = ctxt->user_regs.esp;
rc = xc_vcpu_setcontext(dom->xch, dom->guest_domid, 0, &any_ctx);
if ( rc != 0 )
DOMPRINTF("%s: cr3: pfn 0x%" PRIpfn " mfn 0x%" PRIpfn "",
__FUNCTION__, dom->pgtables_seg.pfn, cr3_pfn);
- if ( !dom->pvh_enabled )
- {
- ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_64;
- ctxt->user_regs.es = FLAT_KERNEL_DS_X86_64;
- ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_64;
- ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_64;
- ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_64;
- ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_64;
-
- ctxt->kernel_ss = ctxt->user_regs.ss;
- ctxt->kernel_sp = ctxt->user_regs.esp;
- }
+ ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_64;
+ ctxt->user_regs.es = FLAT_KERNEL_DS_X86_64;
+ ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_64;
+ ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_64;
+ ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_64;
+ ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_64;
+
+ ctxt->kernel_ss = ctxt->user_regs.ss;
+ ctxt->kernel_sp = ctxt->user_regs.esp;
rc = xc_vcpu_setcontext(dom->xch, dom->guest_domid, 0, &any_ctx);
if ( rc != 0 )
rc = x86_compat(dom->xch, dom->guest_domid, dom->guest_type);
if ( rc )
return rc;
- if ( xc_dom_feature_translated(dom) && !dom->pvh_enabled )
+ if ( xc_dom_feature_translated(dom) )
{
dom->shadow_enabled = 1;
rc = x86_shadow(dom->xch, dom->guest_domid);
{
int i, rc;
- if ( dom->pvh_enabled )
- return 0;
-
for ( i = 0; ; i++ )
{
rc = xc_domain_add_to_physmap(dom->xch, dom->guest_domid,
info->running = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_running);
info->hvm = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_hvm_guest);
info->debugged = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_debugged);
- info->pvh = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_pvh_guest);
info->xenstore = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_xs_domain);
info->hap = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_hap);
*/
#define LIBXL_HAVE_DOMAIN_CREATE_RESTORE_SEND_BACK_FD 1
-/*
- * LIBXL_HAVE_CREATEINFO_PVH
- * If this is defined, then libxl supports creation of a PVH guest.
- */
-#define LIBXL_HAVE_CREATEINFO_PVH 1
-
/*
* LIBXL_HAVE_DRIVER_DOMAIN_CREATION 1
*
if (c_info->type == LIBXL_DOMAIN_TYPE_HVM) {
libxl_defbool_setdefault(&c_info->hap, true);
libxl_defbool_setdefault(&c_info->oos, true);
- } else {
- libxl_defbool_setdefault(&c_info->pvh, false);
- libxl_defbool_setdefault(&c_info->hap, libxl_defbool_val(c_info->pvh));
}
libxl_defbool_setdefault(&c_info->run_hotplug_scripts, true);
break;
case LIBXL_DOMAIN_TYPE_PV:
- state->pvh_enabled = libxl_defbool_val(d_config->c_info.pvh);
-
ret = libxl__build_pv(gc, domid, info, state);
if (ret)
goto out;
flags |= XEN_DOMCTL_CDF_hvm_guest;
flags |= libxl_defbool_val(info->hap) ? XEN_DOMCTL_CDF_hap : 0;
flags |= libxl_defbool_val(info->oos) ? 0 : XEN_DOMCTL_CDF_oos_off;
- } else if (libxl_defbool_val(info->pvh)) {
- flags |= XEN_DOMCTL_CDF_pvh_guest;
- if (!libxl_defbool_val(info->hap)) {
- LOGD(ERROR, *domid, "HAP must be on for PVH");
- rc = ERROR_INVAL;
- goto out;
- }
- flags |= XEN_DOMCTL_CDF_hap;
}
/* Ultimately, handle is an array of 16 uint8_t, same as uuid */
return ERROR_FAIL;
}
- dom->pvh_enabled = state->pvh_enabled;
dom->container_type = XC_DOM_PV_CONTAINER;
LOG(DEBUG, "pv kernel mapped %d path %s", state->pv_kernel.mapped, state->pv_kernel.path);
libxl__file_reference pv_kernel;
libxl__file_reference pv_ramdisk;
const char * pv_cmdline;
- bool pvh_enabled;
xen_vmemrange_t *vmemranges;
uint32_t num_vmemranges;
("poolid", uint32),
("pool_name", string),
("run_hotplug_scripts",libxl_defbool),
- ("pvh", libxl_defbool),
("driver_domain",libxl_defbool),
], dir=DIR_IN)
if (rtc_timeoffset)
xc_domain_set_time_offset(ctx->xch, domid, rtc_timeoffset);
- if (d_config->b_info.type == LIBXL_DOMAIN_TYPE_HVM ||
- libxl_defbool_val(d_config->c_info.pvh)) {
-
- unsigned long shadow;
- shadow = (d_config->b_info.shadow_memkb + 1023) / 1024;
- xc_shadow_control(ctx->xch, domid, XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION, NULL, 0, &shadow, 0, NULL);
+ if (d_config->b_info.type == LIBXL_DOMAIN_TYPE_HVM) {
+ unsigned long shadow = DIV_ROUNDUP(d_config->b_info.shadow_memkb,
+ 1024);
+ xc_shadow_control(ctx->xch, domid, XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION,
+ NULL, 0, &shadow, 0, NULL);
}
if (d_config->c_info.type == LIBXL_DOMAIN_TYPE_PV &&
!strncmp(buf, "hvm", strlen(buf)))
c_info->type = LIBXL_DOMAIN_TYPE_HVM;
- xlu_cfg_get_defbool(config, "pvh", &c_info->pvh, 0);
xlu_cfg_get_defbool(config, "hap", &c_info->hap, 0);
if (xlu_cfg_replace_string (config, "name", &c_info->name, 0)) {
if ( !vpmu->xenpmu_data )
return;
- if ( is_pvh_vcpu(sampling) &&
- !(vpmu_mode & XENPMU_MODE_ALL) &&
+ if ( !(vpmu_mode & XENPMU_MODE_ALL) &&
!vpmu->arch_vpmu_ops->do_interrupt(regs) )
return;
if ( is_hvm_domain(d) || d->tot_pages != 0 )
return -EACCES;
- if ( is_pv_32bit_domain(d) || is_pvh_32bit_domain(d) )
+ if ( is_pv_32bit_domain(d) )
return 0;
d->arch.has_32bit_shinfo = 1;
{
rc = setup_compat_arg_xlat(v);
if ( !rc )
- {
- if ( !is_pvh_domain(d) )
- rc = setup_compat_l4(v);
- else
- rc = hvm_set_mode(v, 4);
- }
+ rc = setup_compat_l4(v);
if ( rc )
goto undo_and_fail;
{
free_compat_arg_xlat(v);
- if ( !is_pvh_domain(d) && !pagetable_is_null(v->arch.guest_table) )
+ if ( !pagetable_is_null(v->arch.guest_table) )
release_compat_l4(v);
}
/* The context is a compat-mode one if the target domain is compat-mode;
* we expect the tools to DTRT even in compat-mode callers. */
- compat = is_pv_32bit_domain(d) || is_pvh_32bit_domain(d);
+ compat = is_pv_32bit_domain(d);
#define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
flags = c(flags);
(c(ldt_ents) > 8192) )
return -EINVAL;
}
- else if ( is_pvh_domain(d) )
- {
- if ( c(ctrlreg[0]) || c(ctrlreg[1]) || c(ctrlreg[2]) ||
- c(ctrlreg[4]) || c(ctrlreg[5]) || c(ctrlreg[6]) ||
- c(ctrlreg[7]) || c(ldt_base) || c(ldt_ents) ||
- c(user_regs.cs) || c(user_regs.ss) || c(user_regs.es) ||
- c(user_regs.ds) || c(user_regs.fs) || c(user_regs.gs) ||
- c(kernel_ss) || c(kernel_sp) || c(gdt_ents) ||
- (!compat && (c.nat->gs_base_kernel ||
- c.nat->fs_base || c.nat->gs_base_user)) )
- return -EINVAL;
- }
v->fpu_initialised = !!(flags & VGCF_I387_VALID);
v->arch.debugreg[i] = c(debugreg[i]);
hvm_set_info_guest(v);
-
- if ( is_hvm_domain(d) || v->is_initialised )
- goto out;
-
- /* NB: No need to use PV cr3 un-pickling macros */
- cr3_gfn = c(ctrlreg[3]) >> PAGE_SHIFT;
- cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC);
-
- v->arch.cr3 = page_to_maddr(cr3_page);
- v->arch.hvm_vcpu.guest_cr[3] = c(ctrlreg[3]);
- v->arch.guest_table = pagetable_from_page(cr3_page);
-
- ASSERT(paging_mode_enabled(d));
-
- goto pvh_skip_pv_stuff;
+ goto out;
}
init_int80_direct_trap(v);
clear_bit(_VPF_in_reset, &v->pause_flags);
- pvh_skip_pv_stuff:
if ( v->vcpu_id == 0 )
update_domain_wallclock_time(d);
}
}
-static __init void pvh_add_mem_mapping(struct domain *d, unsigned long gfn,
- unsigned long mfn, unsigned long nr_mfns)
-{
- unsigned long i;
- p2m_access_t a;
- mfn_t omfn;
- p2m_type_t t;
- int rc;
-
- for ( i = 0; i < nr_mfns; i++ )
- {
- if ( !iomem_access_permitted(d, mfn + i, mfn + i) )
- {
- omfn = get_gfn_query_unlocked(d, gfn + i, &t);
- guest_physmap_remove_page(d, _gfn(gfn + i), omfn, PAGE_ORDER_4K);
- continue;
- }
-
- if ( rangeset_contains_singleton(mmio_ro_ranges, mfn + i) )
- a = p2m_access_r;
- else
- a = p2m_access_rw;
-
- if ( (rc = set_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i),
- PAGE_ORDER_4K, a)) )
- panic("pvh_add_mem_mapping: gfn:%lx mfn:%lx i:%ld rc:%d\n",
- gfn, mfn, i, rc);
- if ( !(i & 0xfffff) )
- process_pending_softirqs();
- }
-}
-
-/*
- * Set the 1:1 map for all non-RAM regions for dom 0. Thus, dom0 will have
- * the entire io region mapped in the EPT/NPT.
- *
- * pvh fixme: The following doesn't map MMIO ranges when they sit above the
- * highest E820 covered address.
- */
-static __init void pvh_map_all_iomem(struct domain *d, unsigned long nr_pages)
-{
- unsigned long start_pfn, end_pfn, end = 0, start = 0;
- const struct e820entry *entry;
- unsigned long nump, nmap, navail, mfn, nr_holes = 0;
- unsigned int i;
- struct page_info *page;
- int rc;
-
- for ( i = 0, entry = e820.map; i < e820.nr_map; i++, entry++ )
- {
- end = entry->addr + entry->size;
-
- if ( entry->type == E820_RAM || entry->type == E820_UNUSABLE ||
- i == e820.nr_map - 1 )
- {
- start_pfn = PFN_DOWN(start);
-
- /* Unused RAM areas are marked UNUSABLE, so skip them too */
- if ( entry->type == E820_RAM || entry->type == E820_UNUSABLE )
- end_pfn = PFN_UP(entry->addr);
- else
- end_pfn = PFN_UP(end);
-
- if ( start_pfn < end_pfn )
- {
- nump = end_pfn - start_pfn;
- /* Add pages to the mapping */
- pvh_add_mem_mapping(d, start_pfn, start_pfn, nump);
- if ( start_pfn < nr_pages )
- nr_holes += (end_pfn < nr_pages) ?
- nump : (nr_pages - start_pfn);
- }
- start = end;
- }
- }
-
- /*
- * Some BIOSes may not report io space above ram that is less than 4GB. So
- * we map any non-ram upto 4GB.
- */
- if ( end < GB(4) )
- {
- start_pfn = PFN_UP(end);
- end_pfn = (GB(4)) >> PAGE_SHIFT;
- nump = end_pfn - start_pfn;
- pvh_add_mem_mapping(d, start_pfn, start_pfn, nump);
- }
-
- /*
- * Add the memory removed by the holes at the end of the
- * memory map.
- */
- page = page_list_first(&d->page_list);
- for ( i = 0, entry = e820.map; i < e820.nr_map && nr_holes > 0;
- i++, entry++ )
- {
- if ( entry->type != E820_RAM )
- continue;
-
- end_pfn = PFN_UP(entry->addr + entry->size);
- if ( end_pfn <= nr_pages )
- continue;
-
- navail = end_pfn - nr_pages;
- nmap = min(navail, nr_holes);
- nr_holes -= nmap;
- start_pfn = max_t(unsigned long, nr_pages, PFN_DOWN(entry->addr));
- /*
- * Populate this memory region using the pages
- * previously removed by the MMIO holes.
- */
- do
- {
- mfn = page_to_mfn(page);
- if ( get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY )
- continue;
-
- rc = guest_physmap_add_page(d, _gfn(start_pfn), _mfn(mfn), 0);
- if ( rc != 0 )
- panic("Unable to add gpfn %#lx mfn %#lx to Dom0 physmap: %d",
- start_pfn, mfn, rc);
- start_pfn++;
- nmap--;
- if ( !(nmap & 0xfffff) )
- process_pending_softirqs();
- } while ( ((page = page_list_next(page, &d->page_list)) != NULL)
- && nmap );
- ASSERT(nmap == 0);
- if ( page == NULL )
- break;
- }
-
- ASSERT(nr_holes == 0);
-}
-
static __init void pvh_setup_e820(struct domain *d, unsigned long nr_pages)
{
struct e820entry *entry, *entry_guest;
static __init void dom0_update_physmap(struct domain *d, unsigned long pfn,
unsigned long mfn, unsigned long vphysmap_s)
{
- if ( is_pvh_domain(d) )
- {
- int rc = guest_physmap_add_page(d, _gfn(pfn), _mfn(mfn), 0);
- BUG_ON(rc);
- return;
- }
if ( !is_pv_32bit_domain(d) )
((unsigned long *)vphysmap_s)[pfn] = mfn;
else
set_gpfn_from_mfn(mfn, pfn);
}
-/* Replace mfns with pfns in dom0 page tables */
-static __init void pvh_fixup_page_tables_for_hap(struct vcpu *v,
- unsigned long v_start,
- unsigned long v_end)
-{
- int i, j, k;
- l4_pgentry_t *pl4e, *l4start;
- l3_pgentry_t *pl3e;
- l2_pgentry_t *pl2e;
- l1_pgentry_t *pl1e;
- unsigned long cr3_pfn;
-
- ASSERT(paging_mode_enabled(v->domain));
-
- l4start = map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
-
- /* Clear entries prior to guest L4 start */
- pl4e = l4start + l4_table_offset(v_start);
- memset(l4start, 0, (unsigned long)pl4e - (unsigned long)l4start);
-
- for ( ; pl4e <= l4start + l4_table_offset(v_end - 1); pl4e++ )
- {
- pl3e = map_l3t_from_l4e(*pl4e);
- for ( i = 0; i < PAGE_SIZE / sizeof(*pl3e); i++, pl3e++ )
- {
- if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
- continue;
-
- pl2e = map_l2t_from_l3e(*pl3e);
- for ( j = 0; j < PAGE_SIZE / sizeof(*pl2e); j++, pl2e++ )
- {
- if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
- continue;
-
- pl1e = map_l1t_from_l2e(*pl2e);
- for ( k = 0; k < PAGE_SIZE / sizeof(*pl1e); k++, pl1e++ )
- {
- if ( !(l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
- continue;
-
- *pl1e = l1e_from_pfn(get_gpfn_from_mfn(l1e_get_pfn(*pl1e)),
- l1e_get_flags(*pl1e));
- }
- unmap_domain_page(pl1e);
- *pl2e = l2e_from_pfn(get_gpfn_from_mfn(l2e_get_pfn(*pl2e)),
- l2e_get_flags(*pl2e));
- }
- unmap_domain_page(pl2e);
- *pl3e = l3e_from_pfn(get_gpfn_from_mfn(l3e_get_pfn(*pl3e)),
- l3e_get_flags(*pl3e));
- }
- unmap_domain_page(pl3e);
- *pl4e = l4e_from_pfn(get_gpfn_from_mfn(l4e_get_pfn(*pl4e)),
- l4e_get_flags(*pl4e));
- }
-
- /* Clear entries post guest L4. */
- if ( (unsigned long)pl4e & (PAGE_SIZE - 1) )
- memset(pl4e, 0, PAGE_SIZE - ((unsigned long)pl4e & (PAGE_SIZE - 1)));
-
- unmap_domain_page(l4start);
-
- cr3_pfn = get_gpfn_from_mfn(paddr_to_pfn(v->arch.cr3));
- v->arch.hvm_vcpu.guest_cr[3] = pfn_to_paddr(cr3_pfn);
-
- /*
- * Finally, we update the paging modes (hap_update_paging_modes). This will
- * create monitor_table for us, update v->arch.cr3, and update vmcs.cr3.
- */
- paging_update_paging_modes(v);
-}
-
static __init void mark_pv_pt_pages_rdonly(struct domain *d,
l4_pgentry_t *l4start,
unsigned long vpt_start,
l3_pgentry_t *l3tab = NULL, *l3start = NULL;
l2_pgentry_t *l2tab = NULL, *l2start = NULL;
l1_pgentry_t *l1tab = NULL, *l1start = NULL;
- paddr_t shared_info_paddr = 0;
- u32 save_pvh_pg_mode = 0;
/*
* This fully describes the memory layout of the initial domain. All
rc = -EINVAL;
goto out;
}
- if ( is_pvh_domain(d) &&
- !test_bit(XENFEAT_hvm_callback_vector, parms.f_supported) )
- {
- printk("Kernel does not support PVH mode\n");
- rc = -EINVAL;
- goto out;
- }
}
if ( compat32 )
sizeof(struct start_info) +
sizeof(struct dom0_vga_console_info));
- if ( is_pvh_domain(d) )
- {
- shared_info_paddr = round_pgup(vstartinfo_end) - v_start;
- vstartinfo_end += PAGE_SIZE;
- }
-
vpt_start = round_pgup(vstartinfo_end);
for ( nr_pt_pages = 2; ; nr_pt_pages++ )
{
setup_dom0_vcpu(d, i, cpu);
}
- /*
- * pvh: we temporarily disable d->arch.paging.mode so that we can build cr3
- * needed to run on dom0's page tables.
- */
- save_pvh_pg_mode = d->arch.paging.mode;
d->arch.paging.mode = 0;
/* Set up CR3 value for write_ptbase */
nr_pages);
}
- /*
- * We enable paging mode again so guest_physmap_add_page and
- * paging_set_allocation will do the right thing for us.
- */
- d->arch.paging.mode = save_pvh_pg_mode;
-
- if ( is_pvh_domain(d) )
- {
- bool preempted;
-
- do {
- preempted = false;
- paging_set_allocation(d, dom0_paging_pages(d, nr_pages),
- &preempted);
- process_pending_softirqs();
- } while ( preempted );
- }
-
-
/* Write the phys->machine and machine->phys table entries. */
for ( pfn = 0; pfn < count; pfn++ )
{
si->console.dom0.info_size = sizeof(struct dom0_vga_console_info);
}
- /*
- * PVH: We need to update si->shared_info while we are on dom0 page tables,
- * but need to defer the p2m update until after we have fixed up the
- * page tables for PVH so that the m2p for the si pte entry returns
- * correct pfn.
- */
- if ( is_pvh_domain(d) )
- si->shared_info = shared_info_paddr;
-
if ( is_pv_32bit_domain(d) )
xlat_start_info(si, XLAT_start_info_console_dom0);
regs->eflags = X86_EFLAGS_IF;
#ifdef CONFIG_SHADOW_PAGING
- if ( opt_dom0_shadow )
- {
- if ( is_pvh_domain(d) )
- {
- printk("Unsupported option dom0_shadow for PVH\n");
- return -EINVAL;
- }
- if ( paging_enable(d, PG_SH_enable) == 0 )
- paging_update_paging_modes(v);
- }
+ if ( opt_dom0_shadow && paging_enable(d, PG_SH_enable) == 0 )
+ paging_update_paging_modes(v);
#endif
/*
printk(" Xen warning: dom0 kernel broken ELF: %s\n",
elf_check_broken(&elf));
- if ( is_pvh_domain(d) )
- {
- /* finally, fixup the page table, replacing mfns with pfns */
- pvh_fixup_page_tables_for_hap(v, v_start, v_end);
-
- /* the pt has correct pfn for si, now update the mfn in the p2m */
- mfn = virt_to_mfn(d->shared_info);
- pfn = shared_info_paddr >> PAGE_SHIFT;
- dom0_update_physmap(d, pfn, mfn, 0);
-
- pvh_map_all_iomem(d, nr_pages);
- pvh_setup_e820(d, nr_pages);
- }
-
if ( d->domain_id == hardware_domid )
iommu_hwdom_init(d);
break;
case XEN_DOMCTL_get_address_size:
- domctl->u.address_size.size =
- (is_pv_32bit_domain(d) || is_pvh_32bit_domain(d)) ?
- 32 : BITS_PER_LONG;
+ domctl->u.address_size.size = is_pv_32bit_domain(d) ? 32 :
+ BITS_PER_LONG;
copyback = 1;
break;
{
unsigned int i;
const struct domain *d = v->domain;
- bool_t compat = is_pv_32bit_domain(d) || is_pvh_32bit_domain(d);
+ bool_t compat = is_pv_32bit_domain(d);
#define c(fld) (!compat ? (c.nat->fld) : (c.cmp->fld))
if ( !is_pv_domain(d) )
printk("\n");
}
- if ( !fns->pvh_supported )
- printk(XENLOG_INFO "HVM: PVH mode not supported on this platform\n");
-
if ( !opt_altp2m_enabled )
hvm_funcs.altp2m_supported = 0;
void hvm_migrate_timers(struct vcpu *v)
{
- /* PVH doesn't use rtc and emulated timers, it uses pvclock mechanism. */
- if ( is_pvh_vcpu(v) )
- return;
-
rtc_migrate_timers(v);
pt_migrate(v);
}
return X86EMUL_OKAY;
}
-static int handle_pvh_io(
- int dir, unsigned int port, unsigned int bytes, uint32_t *val)
-{
- struct domain *currd = current->domain;
-
- if ( dir == IOREQ_WRITE )
- guest_io_write(port, bytes, *val, currd);
- else
- *val = guest_io_read(port, bytes, currd);
-
- return X86EMUL_OKAY;
-}
-
int hvm_domain_initialise(struct domain *d)
{
int rc;
return -EINVAL;
}
- if ( is_pvh_domain(d) )
- {
- if ( !hvm_funcs.pvh_supported )
- {
- printk(XENLOG_G_WARNING "Attempt to create a PVH guest "
- "on a system without necessary hardware support\n");
- return -EINVAL;
- }
- if ( !hap_enabled(d) )
- {
- printk(XENLOG_G_INFO "PVH guest must have HAP on\n");
- return -EINVAL;
- }
-
- }
-
spin_lock_init(&d->arch.hvm_domain.irq_lock);
spin_lock_init(&d->arch.hvm_domain.uc_lock);
spin_lock_init(&d->arch.hvm_domain.write_map.lock);
hvm_ioreq_init(d);
- if ( is_pvh_domain(d) )
- {
- register_portio_handler(d, 0, 0x10003, handle_pvh_io);
- return 0;
- }
-
hvm_init_guest_time(d);
d->arch.hvm_domain.params[HVM_PARAM_TRIPLE_FAULT_REASON] = SHUTDOWN_reboot;
void hvm_domain_relinquish_resources(struct domain *d)
{
- if ( is_pvh_domain(d) )
- return;
-
if ( hvm_funcs.nhvm_domain_relinquish_resources )
hvm_funcs.nhvm_domain_relinquish_resources(d);
hvm_destroy_cacheattr_region_list(d);
- if ( is_pvh_domain(d) )
- return;
-
hvm_funcs.domain_destroy(d);
rtc_deinit(d);
stdvga_deinit(d);
v->arch.hvm_vcpu.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
- if ( is_pvh_domain(d) )
- {
- /* This is for hvm_long_mode_enabled(v). */
- v->arch.hvm_vcpu.guest_efer = EFER_LMA | EFER_LME;
- return 0;
- }
-
rc = setup_compat_arg_xlat(v); /* teardown: free_compat_arg_xlat() */
if ( rc != 0 )
goto fail4;
__put_gfn(hostp2m, gfn);
rc = 0;
- if ( unlikely(is_pvh_domain(currd)) )
- goto out;
-
if ( !handle_mmio_with_translation(gla, gpa >> PAGE_SHIFT, npfec) )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
rc = 1;
(value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PG )
return X86EMUL_EXCEPTION;
- /* A pvh is not expected to change to real mode. */
- if ( is_pvh_domain(d) &&
- (value & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PG | X86_CR0_PE) )
- {
- printk(XENLOG_G_WARNING
- "PVH attempting to turn off PE/PG. CR0:%lx\n", value);
- return X86EMUL_EXCEPTION;
- }
-
if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
monitor_ctrlreg_bitmask(VM_EVENT_X86_CR0)) )
{
"EFER.LMA is set");
return X86EMUL_EXCEPTION;
}
- if ( is_pvh_vcpu(v) )
- {
- HVM_DBG_LOG(DBG_LEVEL_1, "32-bit PVH guest cleared CR4.PAE");
- return X86EMUL_EXCEPTION;
- }
}
old_cr = v->arch.hvm_vcpu.guest_cr[4];
break;
case MSR_IA32_APICBASE:
- if ( unlikely(is_pvh_vcpu(v)) ||
- !vlapic_msr_set(vcpu_vlapic(v), msr_content) )
+ if ( !vlapic_msr_set(vcpu_vlapic(v), msr_content) )
goto gp_fault;
break;
return -ESRCH;
rc = -EINVAL;
- if ( !has_hvm_container_domain(d) ||
- (is_pvh_domain(d) && (a.index != HVM_PARAM_CALLBACK_IRQ)) )
+ if ( !has_hvm_container_domain(d) )
goto out;
rc = hvm_allow_set_param(d, &a);
return -ESRCH;
rc = -EINVAL;
- if ( !has_hvm_container_domain(d) ||
- (is_pvh_domain(d) && (a.index != HVM_PARAM_CALLBACK_IRQ)) )
+ if ( !has_hvm_container_domain(d) )
goto out;
rc = hvm_allow_get_param(d, &a);
switch ( cmd )
{
default:
- if ( !is_pvh_vcpu(curr) || !is_hardware_domain(curr->domain) )
+ if ( !is_hardware_domain(curr->domain) )
return -ENOSYS;
/* fall through */
case PHYSDEVOP_map_pirq:
case PHYSDEVOP_eoi:
case PHYSDEVOP_irq_status_query:
case PHYSDEVOP_get_free_pirq:
- if ( !has_pirq(curr->domain) && !is_pvh_vcpu(curr) )
+ if ( !has_pirq(curr->domain) )
return -ENOSYS;
break;
}
struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
int rc;
- ASSERT(!is_pvh_vcpu(curr));
-
hvm_emulate_init_once(&ctxt, validate, guest_cpu_user_regs());
rc = hvm_emulate_one(&ctxt);
spin_lock_init(&d->arch.hvm_domain.ioreq_server.lock);
INIT_LIST_HEAD(&d->arch.hvm_domain.ioreq_server.list);
- if ( !is_pvh_domain(d) )
- register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
+ register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
}
/*
&& vcpu_info(v, evtchn_upcall_pending) )
return hvm_intack_vector(plat->irq.callback_via.vector);
- if ( is_pvh_vcpu(v) )
- return hvm_intack_none;
-
if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
return hvm_intack_pic(0);
vmx_pin_based_exec_control & ~PIN_BASED_POSTED_INTERRUPT);
}
- if ( is_pvh_domain(d) )
- {
- /* Unrestricted guest (real mode for EPT) */
- v->arch.hvm_vmx.secondary_exec_control &=
- ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
-
- /* Start in 64-bit mode. PVH 32bitfixme. */
- vmentry_ctl |= VM_ENTRY_IA32E_MODE; /* GUEST_EFER.LME/LMA ignored */
-
- ASSERT(v->arch.hvm_vmx.exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
- ASSERT(v->arch.hvm_vmx.exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP);
- ASSERT(!(v->arch.hvm_vmx.exec_control & CPU_BASED_RDTSC_EXITING));
- }
-
vmx_update_cpu_exec_control(v);
__vmwrite(VM_EXIT_CONTROLS, vmexit_ctl);
__vmwrite(GUEST_DS_AR_BYTES, 0xc093);
__vmwrite(GUEST_FS_AR_BYTES, 0xc093);
__vmwrite(GUEST_GS_AR_BYTES, 0xc093);
- if ( is_pvh_domain(d) )
- /* CS.L == 1, exec, read/write, accessed. */
- __vmwrite(GUEST_CS_AR_BYTES, 0xa09b);
- else
- __vmwrite(GUEST_CS_AR_BYTES, 0xc09b); /* exec/read, accessed */
+ __vmwrite(GUEST_CS_AR_BYTES, 0xc09b); /* exec/read, accessed */
/* Guest IDT. */
__vmwrite(GUEST_IDTR_BASE, 0);
| (1U << TRAP_no_device);
vmx_update_exception_bitmap(v);
- /*
- * In HVM domains, this happens on the realmode->paging
- * transition. Since PVH never goes through this transition, we
- * need to do it at start-of-day.
- */
- if ( is_pvh_domain(d) )
- vmx_update_debug_state(v);
-
v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
-
- /* PVH domains always start in paging mode */
- if ( is_pvh_domain(d) )
- v->arch.hvm_vcpu.guest_cr[0] |= X86_CR0_PG;
-
hvm_update_guest_cr(v, 0);
- v->arch.hvm_vcpu.guest_cr[4] = is_pvh_domain(d) ? X86_CR4_PAE : 0;
+ v->arch.hvm_vcpu.guest_cr[4] = 0;
hvm_update_guest_cr(v, 4);
if ( cpu_has_vmx_tpr_shadow )
{
unsigned long attr;
- if ( !is_pvh_vcpu(v) )
- return 0;
-
ASSERT((mode == 4) || (mode == 8));
attr = (mode == 4) ? 0xc09b : 0xa09b;
vmx_function_table.sync_pir_to_irr = NULL;
}
- if ( cpu_has_vmx_ept
- && cpu_has_vmx_pat
- && cpu_has_vmx_msr_bitmap
- && cpu_has_vmx_secondary_exec_control )
- vmx_function_table.pvh_supported = 1;
-
if ( cpu_has_vmx_tsc_scaling )
vmx_function_table.tsc_scaling.ratio_frac_bits = 48;
if ( exit_qualification & 0x10 )
{
/* INS, OUTS */
- if ( unlikely(is_pvh_vcpu(v)) /* PVH fixme */ ||
- !hvm_emulate_one_insn(x86_insn_is_portio) )
+ if ( !hvm_emulate_one_insn(x86_insn_is_portio) )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
}
else
goto out;
}
- if ( !is_pvh_domain(curr) && unlikely(paging_mode_translate(curr)) )
+ if ( unlikely(paging_mode_translate(curr)) )
{
MEM_LOG("Cannot mix foreign mappings with translated domains");
goto out;
if ( unlikely(p2m_is_foreign(p2mt)) )
{
- /* pvh fixme: foreign types are only supported on ept at present */
+ /* hvm fixme: foreign types are only supported on ept at present */
gdprintk(XENLOG_WARNING, "Unimplemented foreign p2m type.\n");
return -EINVAL;
}
}
/*
- * pvh fixme: when adding support for pvh non-hardware domains, this path must
+ * hvm fixme: when adding support for pvh non-hardware domains, this path must
* cleanup any foreign p2m types (release refcnts on them).
*/
void p2m_teardown(struct p2m_domain *p2m)
struct domain *fdom;
ASSERT(tdom);
- if ( foreigndom == DOMID_SELF || !is_pvh_domain(tdom) )
+ if ( foreigndom == DOMID_SELF )
return -EINVAL;
/*
- * pvh fixme: until support is added to p2m teardown code to cleanup any
+ * hvm fixme: until support is added to p2m teardown code to cleanup any
* foreign entries, limit this to hardware domain only.
*/
if ( !is_hardware_domain(tdom) )
struct vcpu *curr = current;
struct physdev_set_iopl set_iopl;
- ret = -ENOSYS;
- if ( is_pvh_vcpu(curr) )
- break;
-
ret = -EFAULT;
if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
break;
struct vcpu *curr = current;
struct physdev_set_iobitmap set_iobitmap;
- ret = -ENOSYS;
- if ( is_pvh_vcpu(curr) )
- break;
-
ret = -EFAULT;
if ( copy_from_guest(&set_iobitmap, arg, 1) != 0 )
break;
unsigned long __read_mostly cr4_pv32_mask;
-/* Boot dom0 in pvh mode */
-static bool_t __initdata opt_dom0pvh;
-boolean_param("dom0pvh", opt_dom0pvh);
-
/* **** Linux config option: propagated to domain0. */
/* "acpi=off": Sisables both ACPI table parsing and interpreter. */
/* "acpi=force": Override the disable blacklist. */
init_guest_cpuid();
- if ( opt_dom0pvh )
- domcr_flags |= DOMCRF_pvh | DOMCRF_hap;
-
if ( dom0_pvh )
{
domcr_flags |= DOMCRF_hvm |
d->arch.vtsc = 0;
return;
}
- if ( is_pvh_domain(d) )
- {
- /*
- * PVH fixme: support more tsc modes.
- *
- * NB: The reason this is disabled here appears to be with
- * additional support required to do the PV RDTSC emulation.
- * Since we're no longer taking the PV emulation path for
- * anything, we may be able to remove this restriction.
- *
- * pvhfixme: Experiments show that "default" works for PVH,
- * but "always_emulate" does not for some reason. Figure out
- * why.
- */
- switch ( tsc_mode )
- {
- case TSC_MODE_NEVER_EMULATE:
- break;
- default:
- printk(XENLOG_WARNING
- "PVH currently does not support tsc emulation. Setting timer_mode = never_emulate\n");
- /* FALLTHRU */
- case TSC_MODE_DEFAULT:
- tsc_mode = TSC_MODE_NEVER_EMULATE;
- break;
- }
- }
switch ( d->arch.tsc_mode = tsc_mode )
{
if ( domcr_flags & DOMCRF_hvm )
d->guest_type = guest_type_hvm;
- else if ( domcr_flags & DOMCRF_pvh )
- d->guest_type = guest_type_pvh;
else
d->guest_type = guest_type_pv;
case guest_type_hvm:
info->flags |= XEN_DOMINF_hvm_guest;
break;
- case guest_type_pvh:
- info->flags |= XEN_DOMINF_pvh_guest;
- break;
default:
break;
}
ret = -EINVAL;
if ( (op->u.createdomain.flags &
~(XEN_DOMCTL_CDF_hvm_guest
- | XEN_DOMCTL_CDF_pvh_guest
| XEN_DOMCTL_CDF_hap
| XEN_DOMCTL_CDF_s3_integrity
| XEN_DOMCTL_CDF_oos_off
rover = dom;
}
- if ( (op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest)
- && (op->u.createdomain.flags & XEN_DOMCTL_CDF_pvh_guest) )
- return -EINVAL;
-
domcr_flags = 0;
if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
domcr_flags |= DOMCRF_hvm;
- if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_pvh_guest )
- domcr_flags |= DOMCRF_pvh;
if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hap )
domcr_flags |= DOMCRF_hap;
if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_s3_integrity )
(1U << XENFEAT_highmem_assist) |
(1U << XENFEAT_gnttab_map_avail_bits);
break;
- case guest_type_pvh:
- fi.submap |= (1U << XENFEAT_hvm_safe_pvclock) |
- (1U << XENFEAT_supervisor_mode_kernel) |
- (1U << XENFEAT_hvm_callback_vector);
- break;
case guest_type_hvm:
fi.submap |= (1U << XENFEAT_hvm_safe_pvclock) |
(1U << XENFEAT_hvm_callback_vector) |
struct p2m_domain *p2m = p2m_get_hostp2m(d);
rc = -EOPNOTSUPP;
- /* pvh fixme: p2m_is_foreign types need addressing */
- if ( is_pvh_vcpu(current) || is_pvh_domain(hardware_domain) )
+ /* hvm fixme: p2m_is_foreign types need addressing */
+ if ( is_hvm_domain(hardware_domain) )
break;
rc = -ENODEV;
{
case XEN_VM_EVENT_ENABLE:
rc = -EOPNOTSUPP;
- /* pvh fixme: p2m_is_foreign types need addressing */
- if ( is_pvh_vcpu(current) || is_pvh_domain(hardware_domain) )
+ /* hvm fixme: p2m_is_foreign types need addressing */
+ if ( is_hvm_domain(hardware_domain) )
break;
rc = -ENODEV;
#define has_32bit_shinfo(d) ((d)->arch.has_32bit_shinfo)
#define is_pv_32bit_domain(d) ((d)->arch.is_32bit_pv)
#define is_pv_32bit_vcpu(v) (is_pv_32bit_domain((v)->domain))
-#define is_pvh_32bit_domain(d) (is_pvh_domain(d) && has_32bit_shinfo(d))
#define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \
d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
/* Support Hardware-Assisted Paging? */
bool_t hap_supported;
- /* Necessary hardware support for PVH mode? */
- bool_t pvh_supported;
-
/* Necessary hardware support for alternate p2m's? */
bool altp2m_supported;
#include "hvm/save.h"
#include "memory.h"
-#define XEN_DOMCTL_INTERFACE_VERSION 0x0000000c
+#define XEN_DOMCTL_INTERFACE_VERSION 0x0000000d
/*
* NB. xen_domctl.domain is an IN/OUT parameter for this operation.
/* Disable out-of-sync shadow page tables? */
#define _XEN_DOMCTL_CDF_oos_off 3
#define XEN_DOMCTL_CDF_oos_off (1U<<_XEN_DOMCTL_CDF_oos_off)
- /* Is this a PVH guest (as opposed to an HVM or PV guest)? */
-#define _XEN_DOMCTL_CDF_pvh_guest 4
-#define XEN_DOMCTL_CDF_pvh_guest (1U<<_XEN_DOMCTL_CDF_pvh_guest)
/* Is this a xenstore domain? */
-#define _XEN_DOMCTL_CDF_xs_domain 5
+#define _XEN_DOMCTL_CDF_xs_domain 4
#define XEN_DOMCTL_CDF_xs_domain (1U<<_XEN_DOMCTL_CDF_xs_domain)
uint32_t flags;
struct xen_arch_domainconfig config;
/* Being debugged. */
#define _XEN_DOMINF_debugged 6
#define XEN_DOMINF_debugged (1U<<_XEN_DOMINF_debugged)
-/* domain is PVH */
-#define _XEN_DOMINF_pvh_guest 7
-#define XEN_DOMINF_pvh_guest (1U<<_XEN_DOMINF_pvh_guest)
/* domain is a xenstore domain */
-#define _XEN_DOMINF_xs_domain 8
+#define _XEN_DOMINF_xs_domain 7
#define XEN_DOMINF_xs_domain (1U<<_XEN_DOMINF_xs_domain)
/* domain has hardware assisted paging */
-#define _XEN_DOMINF_hap 9
+#define _XEN_DOMINF_hap 8
#define XEN_DOMINF_hap (1U<<_XEN_DOMINF_hap)
/* XEN_DOMINF_shutdown guest-supplied code. */
#define XEN_DOMINF_shutdownmask 255
* will be false, but has_hvm_container_* checks will be true.
*/
enum guest_type {
- guest_type_pv, guest_type_pvh, guest_type_hvm
+ guest_type_pv, guest_type_hvm
};
struct domain
/* DOMCRF_oos_off: dont use out-of-sync optimization for shadow page tables */
#define _DOMCRF_oos_off 4
#define DOMCRF_oos_off (1U<<_DOMCRF_oos_off)
- /* DOMCRF_pvh: Create PV domain in HVM container. */
-#define _DOMCRF_pvh 5
-#define DOMCRF_pvh (1U<<_DOMCRF_pvh)
/* DOMCRF_xs_domain: xenstore domain */
-#define _DOMCRF_xs_domain 6
+#define _DOMCRF_xs_domain 5
#define DOMCRF_xs_domain (1U<<_DOMCRF_xs_domain)
/*
#define is_pv_domain(d) ((d)->guest_type == guest_type_pv)
#define is_pv_vcpu(v) (is_pv_domain((v)->domain))
-#define is_pvh_domain(d) ((d)->guest_type == guest_type_pvh)
-#define is_pvh_vcpu(v) (is_pvh_domain((v)->domain))
#define is_hvm_domain(d) ((d)->guest_type == guest_type_hvm)
#define is_hvm_vcpu(v) (is_hvm_domain(v->domain))
#define has_hvm_container_domain(d) ((d)->guest_type != guest_type_pv)