ia64/xen-unstable
changeset 12210:7b5115221dfc
[HVM] HVM is now a flag parameter to domain-creation hypercall.
This cleans up HVM start-of-day in Xen and means that the
HVM status of a domain is maintained from cradle to grave.
Signed-off-by: Keir Fraser <keir@xensource.com>
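The effect of the change, from a toolstack caller's point of view: HVM status is requested once, at creation time, through the new flags argument. A minimal sketch (illustrative only; create_guest() is a hypothetical wrapper, while xc_domain_create() and XEN_DOMCTL_CDF_hvm_guest are the names introduced by this changeset):

    #include <stdint.h>
    #include "xenctrl.h"   /* new xc_domain_create() signature; pulls in
                            * xen/domctl.h for XEN_DOMCTL_CDF_hvm_guest */

    static int create_guest(int xc_handle, uint32_t ssidref,
                            xen_domain_handle_t handle, int hvm,
                            uint32_t *domid)
    {
        /* HVM status is fixed here, rather than set later via the
         * (now removed) VGCF_HVM_GUEST bit in the vcpu context. */
        uint32_t flags = hvm ? XEN_DOMCTL_CDF_hvm_guest : 0;

        *domid = 0;  /* 0 asks Xen to allocate a free domain ID */
        return xc_domain_create(xc_handle, ssidref, handle, flags, domid);
    }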
--- a/tools/libxc/xc_domain.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/tools/libxc/xc_domain.c    Wed Nov 01 16:08:19 2006 +0000
@@ -12,6 +12,7 @@
 int xc_domain_create(int xc_handle,
                      uint32_t ssidref,
                      xen_domain_handle_t handle,
+                     uint32_t flags,
                      uint32_t *pdomid)
 {
     int err;
@@ -20,6 +21,7 @@ int xc_domain_create(int xc_handle,
     domctl.cmd = XEN_DOMCTL_createdomain;
     domctl.domain = (domid_t)*pdomid;
     domctl.u.createdomain.ssidref = ssidref;
+    domctl.u.createdomain.flags = flags;
     memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
     if ( (err = do_domctl(xc_handle, &domctl)) != 0 )
         return err;
--- a/tools/libxc/xc_hvm_build.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/tools/libxc/xc_hvm_build.c    Wed Nov 01 16:08:19 2006 +0000
@@ -261,6 +261,19 @@ static int setup_guest(int xc_handle,
         goto error_out;
     }
 
+    /* HVM domains must be put into shadow mode at the start of day. */
+    /* XXX *After* xc_get_pfn_list()!! */
+    if ( xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_ENABLE,
+                           NULL, 0, NULL,
+                           XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT |
+                           XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE |
+                           XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL,
+                           NULL) )
+    {
+        PERROR("Could not enable shadow paging for domain.\n");
+        goto error_out;
+    }
+
     loadelfimage(image, xc_handle, dom, page_array, &dsi);
 
     if ( (mmu = xc_init_mmu_updates(xc_handle, dom)) == NULL )
@@ -417,6 +430,7 @@ static int xc_hvm_build_internal(int xc_
         goto error_out;
     }
 
+#if 0
     /* HVM domains must be put into shadow mode at the start of day */
     if ( xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_ENABLE,
                            NULL, 0, NULL,
@@ -428,6 +442,7 @@ static int xc_hvm_build_internal(int xc_
         PERROR("Could not enable shadow paging for domain.\n");
         goto error_out;
     }
+#endif
 
     memset(ctxt, 0, sizeof(*ctxt));
 
--- a/tools/libxc/xenctrl.h    Wed Nov 01 15:56:55 2006 +0000
+++ b/tools/libxc/xenctrl.h    Wed Nov 01 16:08:19 2006 +0000
@@ -177,6 +177,7 @@ typedef xen_domctl_getdomaininfo_t xc_do
 int xc_domain_create(int xc_handle,
                      uint32_t ssidref,
                      xen_domain_handle_t handle,
+                     uint32_t flags,
                      uint32_t *pdomid);
 
 
--- a/tools/python/xen/lowlevel/xc/xc.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c    Wed Nov 01 16:08:19 2006 +0000
@@ -65,18 +65,17 @@ static PyObject *pyxc_domain_create(XcOb
                                     PyObject *args,
                                     PyObject *kwds)
 {
-    uint32_t dom = 0;
-    int ret, i;
-    uint32_t ssidref = 0;
+    uint32_t dom = 0, ssidref = 0, flags = 0;
+    int ret, i, hvm = 0;
     PyObject *pyhandle = NULL;
     xen_domain_handle_t handle = {
         0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
         0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef };
 
-    static char *kwd_list[] = { "domid", "ssidref", "handle", NULL };
+    static char *kwd_list[] = { "domid", "ssidref", "handle", "hvm", NULL };
 
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiO", kwd_list,
-                                      &dom, &ssidref, &pyhandle))
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOi", kwd_list,
+                                      &dom, &ssidref, &pyhandle, &hvm))
         return NULL;
 
     if ( pyhandle != NULL )
@@ -94,7 +93,11 @@ static PyObject *pyxc_domain_create(XcOb
         }
     }
 
-    if ( (ret = xc_domain_create(self->xc_handle, ssidref, handle, &dom)) < 0 )
+    if ( hvm )
+        flags |= XEN_DOMCTL_CDF_hvm_guest;
+
+    if ( (ret = xc_domain_create(self->xc_handle, ssidref,
+                                 handle, flags, &dom)) < 0 )
         return PyErr_SetFromErrno(xc_error);
 
     return PyInt_FromLong(dom);
--- a/tools/python/xen/xend/XendDomainInfo.py    Wed Nov 01 15:56:55 2006 +0000
+++ b/tools/python/xen/xend/XendDomainInfo.py    Wed Nov 01 16:08:19 2006 +0000
@@ -1198,10 +1198,20 @@ class XendDomainInfo:
 
         log.debug('XendDomainInfo.constructDomain')
 
+        hvm = (self._infoIsSet('image') and
+               sxp.name(self.info['image']) == "hvm")
+        if hvm:
+            info = xc.xeninfo()
+            if not 'hvm' in info['xen_caps']:
+                raise VmError("HVM guest support is unavailable: is VT/AMD-V "
+                              "supported by your CPU and enabled in your "
+                              "BIOS?")
+
         self.domid = xc.domain_create(
             domid = 0,
             ssidref = security.get_security_info(self.info, 'ssidref'),
-            handle = uuid.fromString(self.info['uuid']))
+            handle = uuid.fromString(self.info['uuid']),
+            hvm = int(hvm))
 
         if self.domid < 0:
             raise VmError('Creating domain failed: name=%s' %
--- a/xen/arch/ia64/xen/xensetup.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/ia64/xen/xensetup.c    Wed Nov 01 16:08:19 2006 +0000
@@ -422,7 +422,7 @@ void start_kernel(void)
 
     scheduler_init();
     idle_vcpu[0] = (struct vcpu*) ia64_r13;
-    idle_domain = domain_create(IDLE_DOMAIN_ID);
+    idle_domain = domain_create(IDLE_DOMAIN_ID, 0);
     if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
         BUG();
 
@@ -502,11 +502,11 @@ printk("num_online_cpus=%d, max_cpus=%d\
     expose_p2m_init();
 
     /* Create initial domain 0. */
-    dom0 = domain_create(0);
+    dom0 = domain_create(0, 0);
     if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
         panic("Error creating domain 0\n");
 
-    set_bit(_DOMF_privileged, &dom0->domain_flags);
+    dom0->is_privileged = 1;
 
     /*
      * We're going to setup domain0 using the module(s) that we stashed safely
--- a/xen/arch/powerpc/mm.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/powerpc/mm.c    Wed Nov 01 16:08:19 2006 +0000
@@ -316,8 +316,7 @@ ulong pfn2mfn(struct domain *d, ulong pf
     int t = PFN_TYPE_NONE;
 
     /* quick tests first */
-    if (test_bit(_DOMF_privileged, &d->domain_flags) &&
-        cpu_io_mfn(pfn)) {
+    if (d->is_privileged && cpu_io_mfn(pfn)) {
         t = PFN_TYPE_IO;
         mfn = pfn;
     } else {
@@ -341,8 +340,7 @@ ulong pfn2mfn(struct domain *d, ulong pf
         if (t == PFN_TYPE_NONE) {
             /* This hack allows dom0 to map all memory, necessary to
              * initialize domU state. */
-            if (test_bit(_DOMF_privileged, &d->domain_flags) &&
-                mfn_valid(pfn)) {
+            if (d->is_privileged && mfn_valid(pfn)) {
                 struct page_info *pg;
 
                 /* page better be allocated to some domain but not the caller */
--- a/xen/arch/powerpc/papr/xlate.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/powerpc/papr/xlate.c    Wed Nov 01 16:08:19 2006 +0000
@@ -174,7 +174,7 @@ static void h_enter(struct cpu_user_regs
 
     if (mtype == PFN_TYPE_IO) {
         /* only a privilaged dom can access outside IO space */
-        if ( !test_bit(_DOMF_privileged, &d->domain_flags) ) {
+        if ( !d->is_privileged ) {
             regs->gprs[3] = H_Privilege;
             printk("%s: unprivileged access to physical page: 0x%lx\n",
                    __func__, pfn);
--- a/xen/arch/powerpc/setup.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/powerpc/setup.c    Wed Nov 01 16:08:19 2006 +0000
@@ -157,7 +157,7 @@ static void __init start_of_day(void)
     scheduler_init();
 
     /* create idle domain */
-    idle_domain = domain_create(IDLE_DOMAIN_ID);
+    idle_domain = domain_create(IDLE_DOMAIN_ID, 0);
     if ((idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL))
         BUG();
     set_current(idle_domain->vcpu[0]);
@@ -342,7 +342,7 @@ static void __init __start_xen(multiboot
     start_of_day();
 
     /* Create initial domain 0. */
-    dom0 = domain_create(0);
+    dom0 = domain_create(0, 0);
     if (dom0 == NULL)
         panic("Error creating domain 0\n");
     dom0->max_pages = ~0U;
@@ -355,8 +355,9 @@ static void __init __start_xen(multiboot
      * need to make sure Dom0's vVCPU 0 is pinned to the CPU */
     dom0->vcpu[0]->cpu_affinity = cpumask_of_cpu(0);
 
-    set_bit(_DOMF_privileged, &dom0->domain_flags);
-    /* post-create hooks sets security label */
+    dom0->is_privileged = 1;
+
+    /* Post-create hook sets security label. */
     acm_post_domain0_create(dom0->domain_id);
 
     cmdline = (char *)(mod[0].string ? __va((ulong)mod[0].string) : NULL);
--- a/xen/arch/x86/domain.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/domain.c    Wed Nov 01 16:08:19 2006 +0000
@@ -157,6 +157,13 @@ int arch_domain_create(struct domain *d)
     int vcpuid, pdpt_order;
     int i;
 
+    if ( is_hvm_domain(d) && !hvm_enabled )
+    {
+        gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest "
+                 "on a non-VT/AMDV platform.\n");
+        return -EINVAL;
+    }
+
     pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
     d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order);
     if ( d->arch.mm_perdomain_pt == NULL )
@@ -258,7 +265,11 @@ int arch_set_info_guest(
     unsigned long cr3_pfn = INVALID_MFN;
     int i, rc;
 
-    if ( !(c->flags & VGCF_HVM_GUEST) )
+    if ( !!(c->flags & VGCF_hvm_guest) != is_hvm_vcpu(v) )
+        return -EINVAL;
+    c->flags &= ~VGCF_hvm_guest;
+
+    if ( !is_hvm_vcpu(v) )
     {
         fixup_guest_stack_selector(c->user_regs.ss);
         fixup_guest_stack_selector(c->kernel_ss);
@@ -272,15 +283,13 @@ int arch_set_info_guest(
         for ( i = 0; i < 256; i++ )
             fixup_guest_code_selector(c->trap_ctxt[i].cs);
     }
-    else if ( !hvm_enabled )
-        return -EINVAL;
 
     clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
-    if ( c->flags & VGCF_I387_VALID )
+    if ( c->flags & VGCF_i387_valid )
         set_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
 
     v->arch.flags &= ~TF_kernel_mode;
-    if ( (c->flags & VGCF_IN_KERNEL) || (c->flags & VGCF_HVM_GUEST) )
+    if ( (c->flags & VGCF_in_kernel) || is_hvm_vcpu(v)/*???*/ )
         v->arch.flags |= TF_kernel_mode;
 
     memcpy(&v->arch.guest_context, c, sizeof(*c));
@@ -291,7 +300,7 @@ int arch_set_info_guest(
 
     init_int80_direct_trap(v);
 
-    if ( !(c->flags & VGCF_HVM_GUEST) )
+    if ( !is_hvm_vcpu(v) )
     {
         /* IOPL privileges are virtualised. */
         v->arch.iopl = (v->arch.guest_context.user_regs.eflags >> 12) & 3;
@@ -316,7 +325,7 @@ int arch_set_info_guest(
     if ( v->vcpu_id == 0 )
         d->vm_assist = c->vm_assist;
 
-    if ( !(c->flags & VGCF_HVM_GUEST) )
+    if ( !is_hvm_vcpu(v) )
     {
         cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c->ctrlreg[3]));
         v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
@@ -325,7 +334,7 @@ int arch_set_info_guest(
     if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
         return rc;
 
-    if ( c->flags & VGCF_HVM_GUEST )
+    if ( is_hvm_vcpu(v) )
     {
         v->arch.guest_table = pagetable_null();
 
@@ -745,7 +754,7 @@ void context_switch(struct vcpu *prev, s
     /* Re-enable interrupts before restoring state which may fault. */
     local_irq_enable();
 
-    if ( !hvm_guest(next) )
+    if ( !is_hvm_vcpu(next) )
     {
         load_LDT(next);
         load_segments(next);
@@ -835,7 +844,7 @@ unsigned long hypercall_create_continuat
 #if defined(__i386__)
     regs->eax = op;
 
-    if ( supervisor_mode_kernel || hvm_guest(current) )
+    if ( supervisor_mode_kernel || is_hvm_vcpu(current) )
         regs->eip &= ~31; /* re-execute entire hypercall entry stub */
     else
         regs->eip -= 2;   /* re-execute 'int 0x82' */
@@ -972,7 +981,7 @@ void domain_relinquish_resources(struct
 #endif
     }
 
-    if ( d->vcpu[0] && hvm_guest(d->vcpu[0]) )
+    if ( is_hvm_domain(d) )
         hvm_relinquish_guest_resources(d);
 
     /* Tear down shadow mode stuff. */
--- a/xen/arch/x86/domctl.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/domctl.c    Wed Nov 01 16:08:19 2006 +0000
@@ -224,7 +224,7 @@ long arch_do_domctl(
 
         spin_lock(&d->page_alloc_lock);
 
-        if ( hvm_guest(d->vcpu[0]) && shadow_mode_translate(d) )
+        if ( is_hvm_domain(d) && shadow_mode_translate(d) )
         {
             /* HVM domain: scan P2M to get guaranteed physmap order. */
             for ( i = 0, gmfn = 0;
@@ -321,7 +321,7 @@ void arch_getdomaininfo_ctxt(
 {
     memcpy(c, &v->arch.guest_context, sizeof(*c));
 
-    if ( hvm_guest(v) )
+    if ( is_hvm_vcpu(v) )
     {
         hvm_store_cpu_guest_regs(v, &c->user_regs, c->ctrlreg);
     }
@@ -334,11 +334,11 @@ void arch_getdomaininfo_ctxt(
 
     c->flags = 0;
     if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
-        c->flags |= VGCF_I387_VALID;
+        c->flags |= VGCF_i387_valid;
     if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
-        c->flags |= VGCF_IN_KERNEL;
-    if ( hvm_guest(v) )
-        c->flags |= VGCF_HVM_GUEST;
+        c->flags |= VGCF_in_kernel;
+    if ( is_hvm_vcpu(v) )
+        c->flags |= VGCF_hvm_guest;
 
     c->ctrlreg[3] = xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table));
 
--- a/xen/arch/x86/hvm/hvm.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Wed Nov 01 16:08:19 2006 +0000
@@ -260,12 +260,12 @@ void hvm_release_assist_channel(struct v
 }
 
 
-void hvm_setup_platform(struct domain* d)
+void hvm_setup_platform(struct domain *d)
 {
     struct hvm_domain *platform;
-    struct vcpu *v=current;
+    struct vcpu *v = current;
 
-    if ( !hvm_guest(v) || (v->vcpu_id != 0) )
+    if ( !is_hvm_domain(d) || (v->vcpu_id != 0) )
         return;
 
     hvm_zap_iommu_pages(d);
@@ -635,7 +635,7 @@ int hvm_bringup_ap(int vcpuid, int tramp
     struct vcpu_guest_context *ctxt;
     int rc = 0;
 
-    BUG_ON(!hvm_guest(bsp));
+    BUG_ON(!is_hvm_domain(d));
 
     if ( bsp->vcpu_id != 0 )
     {
--- a/xen/arch/x86/hvm/svm/svm.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c    Wed Nov 01 16:08:19 2006 +0000
@@ -562,7 +562,6 @@ static void svm_init_ap_context(struct v
      */
     ctxt->user_regs.eip = 0x0;
     ctxt->user_regs.cs = (trampoline_vector << 8);
-    ctxt->flags = VGCF_HVM_GUEST;
 }
 
 static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
--- a/xen/arch/x86/hvm/svm/vmcb.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/hvm/svm/vmcb.c    Wed Nov 01 16:08:19 2006 +0000
@@ -482,20 +482,14 @@ static void vmcb_dump(unsigned char ch)
     struct vcpu *v;
 
     printk("*********** VMCB Areas **************\n");
-    for_each_domain(d) {
+    for_each_domain ( d )
+    {
+        if ( !is_hvm_domain(d) )
+            continue;
         printk("\n>>> Domain %d <<<\n", d->domain_id);
-        for_each_vcpu(d, v) {
-
-            /*
-             * Presumably, if a domain is not an HVM guest,
-             * the very first CPU will not pass this test
-             */
-            if (!hvm_guest(v)) {
-                printk("\t\tNot HVM guest\n");
-                break;
-            }
+        for_each_vcpu ( d, v )
+        {
             printk("\tVCPU %d\n", v->vcpu_id);
-
             svm_dump_vmcb("key_handler", v->arch.hvm_svm.vmcb);
         }
     }
--- a/xen/arch/x86/hvm/vmx/vmcs.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c    Wed Nov 01 16:08:19 2006 +0000
@@ -218,7 +218,7 @@ void vmx_vmcs_exit(struct vcpu *v)
 
     /* Don't confuse arch_vmx_do_resume (for @v or @current!) */
     vmx_clear_vmcs(v);
-    if ( hvm_guest(current) )
+    if ( is_hvm_vcpu(current) )
         vmx_load_vmcs(current);
 
     spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
@@ -709,20 +709,14 @@ static void vmcs_dump(unsigned char ch)
     struct vcpu *v;
 
     printk("*********** VMCS Areas **************\n");
-    for_each_domain(d) {
+    for_each_domain ( d )
+    {
+        if ( !is_hvm_domain(d) )
+            continue;
         printk("\n>>> Domain %d <<<\n", d->domain_id);
-        for_each_vcpu(d, v) {
-
-            /*
-             * Presumably, if a domain is not an HVM guest,
-             * the very first CPU will not pass this test
-             */
-            if (!hvm_guest(v)) {
-                printk("\t\tNot HVM guest\n");
-                break;
-            }
+        for_each_vcpu ( d, v )
+        {
             printk("\tVCPU %d\n", v->vcpu_id);
-
             vmx_vmcs_enter(v);
             vmcs_dump_vcpu();
             vmx_vmcs_exit(v);
--- a/xen/arch/x86/hvm/vmx/vmx.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c    Wed Nov 01 16:08:19 2006 +0000
@@ -672,8 +672,6 @@ static void vmx_init_ap_context(struct v
     ctxt->user_regs.edx = vcpuid;
     ctxt->user_regs.ebx = trampoline_vector;
 
-    ctxt->flags = VGCF_HVM_GUEST;
-
     /* Virtual IDT is empty at start-of-day. */
     for ( i = 0; i < 256; i++ )
     {
--- a/xen/arch/x86/mm.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/mm.c    Wed Nov 01 16:08:19 2006 +0000
@@ -1715,7 +1715,7 @@ int new_guest_cr3(unsigned long mfn)
     int okay;
     unsigned long old_base_mfn;
 
-    if ( hvm_guest(v) && !hvm_paging_enabled(v) )
+    if ( is_hvm_domain(d) && !hvm_paging_enabled(v) )
         domain_crash_synchronous();
 
     if ( shadow_mode_refcounts(d) )
--- a/xen/arch/x86/mm/shadow/common.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/mm/shadow/common.c    Wed Nov 01 16:08:19 2006 +0000
@@ -2286,7 +2286,7 @@ void sh_update_paging_modes(struct vcpu
     //
     shadow_detach_old_tables(v);
 
-    if ( !hvm_guest(v) )
+    if ( !is_hvm_domain(d) )
     {
         ///
         /// PV guest
@@ -2394,7 +2394,7 @@ void sh_update_paging_modes(struct vcpu
         SHADOW_PRINTK("new paging mode: d=%u v=%u pe=%d g=%u s=%u "
                       "(was g=%u s=%u)\n",
                       d->domain_id, v->vcpu_id,
-                      hvm_guest(v) ? !!hvm_paging_enabled(v) : 1,
+                      is_hvm_domain(d) ? !!hvm_paging_enabled(v) : 1,
                       v->arch.shadow.mode->guest_levels,
                       v->arch.shadow.mode->shadow_levels,
                       old_mode ? old_mode->guest_levels : 0,
--- a/xen/arch/x86/mm/shadow/multi.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Wed Nov 01 16:08:19 2006 +0000
@@ -202,14 +202,14 @@ guest_supports_superpages(struct vcpu *v
 {
     /* The _PAGE_PSE bit must be honoured in HVM guests, whenever
      * CR4.PSE is set or the guest is in PAE or long mode */
-    return (hvm_guest(v) && (GUEST_PAGING_LEVELS != 2
+    return (is_hvm_vcpu(v) && (GUEST_PAGING_LEVELS != 2
             || (hvm_get_guest_ctrl_reg(v, 4) & X86_CR4_PSE)));
 }
 
 static inline int
 guest_supports_nx(struct vcpu *v)
 {
-    if ( !hvm_guest(v) )
+    if ( !is_hvm_vcpu(v) )
         return cpu_has_nx;
 
     // XXX - fix this!
@@ -769,7 +769,7 @@ static always_inline void
     // PV guests in 64-bit mode use two different page tables for user vs
     // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
     // It is always shadowed as present...
-    if ( (GUEST_PAGING_LEVELS == 4) && !hvm_guest(v) )
+    if ( (GUEST_PAGING_LEVELS == 4) && !is_hvm_domain(d) )
    {
         sflags |= _PAGE_USER;
     }
@@ -2293,7 +2293,7 @@ static int validate_gl1e(struct vcpu *v,
     gfn = guest_l1e_get_gfn(*new_gl1e);
     gmfn = vcpu_gfn_to_mfn(v, gfn);
 
-    mmio = (hvm_guest(v) && shadow_vcpu_mode_translate(v) && !valid_mfn(gmfn));
+    mmio = (is_hvm_vcpu(v) && shadow_vcpu_mode_translate(v) && !valid_mfn(gmfn));
     l1e_propagate_from_guest(v, new_gl1e, _mfn(INVALID_MFN), gmfn, &new_sl1e,
                              ft_prefetch, mmio);
 
@@ -2523,7 +2523,7 @@ static void sh_prefetch(struct vcpu *v,
         /* Look at the gfn that the l1e is pointing at */
         gfn = guest_l1e_get_gfn(gl1e);
         gmfn = vcpu_gfn_to_mfn(v, gfn);
-        mmio = ( hvm_guest(v)
+        mmio = ( is_hvm_vcpu(v)
                  && shadow_vcpu_mode_translate(v)
                  && mmio_space(gfn_to_paddr(gfn)) );
 
@@ -2585,7 +2585,8 @@ static int sh_page_fault(struct vcpu *v,
     {
         if ( sh_l1e_is_gnp(sl1e) )
         {
-            if ( likely(!hvm_guest(v) || shadow_vcpu_mode_translate(v)) )
+            if ( likely(!is_hvm_domain(d) ||
+                        shadow_vcpu_mode_translate(v)) )
             {
                 /* Not-present in a guest PT: pass to the guest as
                  * a not-present fault (by flipping two bits). */
@@ -2647,7 +2648,7 @@ static int sh_page_fault(struct vcpu *v,
     //
     if ( unlikely(!(guest_l1e_get_flags(gw.eff_l1e) & _PAGE_PRESENT)) )
     {
-        if ( hvm_guest(v) && !shadow_vcpu_mode_translate(v) )
+        if ( is_hvm_domain(d) && !shadow_vcpu_mode_translate(v) )
         {
             /* Not present in p2m map, means this is mmio */
             gpa = va;
@@ -2704,9 +2705,9 @@ static int sh_page_fault(struct vcpu *v,
     /* What mfn is the guest trying to access? */
     gfn = guest_l1e_get_gfn(gw.eff_l1e);
     gmfn = vcpu_gfn_to_mfn(v, gfn);
-    mmio = ( hvm_guest(v)
-             && shadow_vcpu_mode_translate(v)
-             && mmio_space(gfn_to_paddr(gfn)) );
+    mmio = (is_hvm_domain(d)
+            && shadow_vcpu_mode_translate(v)
+            && mmio_space(gfn_to_paddr(gfn)));
 
     if ( !mmio && !valid_mfn(gmfn) )
     {
@@ -2775,14 +2776,15 @@ static int sh_page_fault(struct vcpu *v,
  emulate:
     /* Take the register set we were called with */
     emul_regs = *regs;
-    if ( hvm_guest(v) )
+    if ( is_hvm_domain(d) )
     {
         /* Add the guest's segment selectors, rip, rsp. rflags */
         hvm_store_cpu_guest_regs(v, &emul_regs, NULL);
     }
     emul_ctxt.regs = &emul_regs;
     emul_ctxt.cr2 = va;
-    emul_ctxt.mode = hvm_guest(v) ? hvm_guest_x86_mode(v) : X86EMUL_MODE_HOST;
+    emul_ctxt.mode = (is_hvm_domain(d) ?
+                      hvm_guest_x86_mode(v) : X86EMUL_MODE_HOST);
 
     SHADOW_PRINTK("emulate: eip=%#lx\n", emul_regs.eip);
 
@@ -2813,7 +2815,7 @@ static int sh_page_fault(struct vcpu *v,
         goto not_a_shadow_fault;
 
     /* Emulator has changed the user registers: write back */
-    if ( hvm_guest(v) )
+    if ( is_hvm_domain(d) )
     {
         /* Write back the guest's segment selectors, rip, rsp. rflags */
         hvm_load_cpu_guest_regs(v, &emul_regs);
@@ -3317,7 +3319,7 @@ sh_update_cr3(struct vcpu *v)
 
 #ifndef NDEBUG
     /* Double-check that the HVM code has sent us a sane guest_table */
-    if ( hvm_guest(v) )
+    if ( is_hvm_domain(d) )
     {
         gfn_t gfn;
 
@@ -3492,7 +3494,7 @@ sh_update_cr3(struct vcpu *v)
     ///
     if ( shadow_mode_external(d) )
     {
-        ASSERT(hvm_guest(v));
+        ASSERT(is_hvm_domain(d));
#if SHADOW_PAGING_LEVELS == 3
         /* 2-on-3 or 3-on-3: Use the PAE shadow l3 table we just fabricated */
         v->arch.hvm_vcpu.hw_cr3 = virt_to_maddr(&v->arch.shadow.l3table);
@@ -3890,7 +3892,7 @@ static char * sh_audit_flags(struct vcpu
 {
     if ( (sflags & _PAGE_PRESENT) && !(gflags & _PAGE_PRESENT) )
         return "shadow is present but guest is not present";
-    if ( (sflags & _PAGE_GLOBAL) && !hvm_guest(v) )
+    if ( (sflags & _PAGE_GLOBAL) && !is_hvm_vcpu(v) )
         return "global bit set in PV shadow";
     if ( (level == 1 || (level == 2 && (gflags & _PAGE_PSE)))
          && ((sflags & _PAGE_DIRTY) && !(gflags & _PAGE_DIRTY)) )
--- a/xen/arch/x86/mm/shadow/types.h    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/mm/shadow/types.h    Wed Nov 01 16:08:19 2006 +0000
@@ -205,13 +205,13 @@ static inline shadow_l4e_t shadow_l4e_fr
     __sh_linear_l1_table; \
 })
 
-// XXX -- these should not be conditional on hvm_guest(v), but rather on
+// XXX -- these should not be conditional on is_hvm_vcpu(v), but rather on
 // shadow_mode_external(d)...
 //
 #define sh_linear_l2_table(v) ({ \
     ASSERT(current == (v)); \
     ((shadow_l2e_t *) \
-     (hvm_guest(v) ? __linear_l1_table : __sh_linear_l1_table) + \
+     (is_hvm_vcpu(v) ? __linear_l1_table : __sh_linear_l1_table) + \
      shadow_l1_linear_offset(SH_LINEAR_PT_VIRT_START)); \
 })
 
@@ -219,7 +219,7 @@ static inline shadow_l4e_t shadow_l4e_fr
 #define sh_linear_l3_table(v) ({ \
     ASSERT(current == (v)); \
     ((shadow_l3e_t *) \
-     (hvm_guest(v) ? __linear_l2_table : __sh_linear_l2_table) + \
+     (is_hvm_vcpu(v) ? __linear_l2_table : __sh_linear_l2_table) + \
      shadow_l2_linear_offset(SH_LINEAR_PT_VIRT_START)); \
 })
 
@@ -228,7 +228,7 @@ static inline shadow_l4e_t shadow_l4e_fr
 #define sh_linear_l4_table(v) ({ \
     ASSERT(current == (v)); \
     ((l4_pgentry_t *) \
-     (hvm_guest(v) ? __linear_l3_table : __sh_linear_l3_table) + \
+     (is_hvm_vcpu(v) ? __linear_l3_table : __sh_linear_l3_table) + \
      shadow_l3_linear_offset(SH_LINEAR_PT_VIRT_START)); \
 })
 #endif
@@ -585,7 +585,7 @@ accumulate_guest_flags(struct vcpu *v, w
     // In 64-bit PV guests, the _PAGE_USER bit is implied in all guest
     // entries (since even the guest kernel runs in ring 3).
     //
-    if ( (GUEST_PAGING_LEVELS == 4) && !hvm_guest(v) )
+    if ( (GUEST_PAGING_LEVELS == 4) && !is_hvm_vcpu(v) )
         accumulated_flags |= _PAGE_USER;
 
     return accumulated_flags;
--- a/xen/arch/x86/oprofile/xenoprof.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/oprofile/xenoprof.c    Wed Nov 01 16:08:19 2006 +0000
@@ -701,7 +701,7 @@ int xenoprofile_get_mode(struct vcpu *v,
     if ( !guest_mode(regs) )
         return 2;
 
-    if ( hvm_guest(v) )
+    if ( is_hvm_vcpu(v) )
         return ((regs->cs & 3) != 3);
 
     return guest_kernel_mode(v, regs);
--- a/xen/arch/x86/setup.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/setup.c    Wed Nov 01 16:08:19 2006 +0000
@@ -249,7 +249,7 @@ static void __init init_idle_domain(void
     /* Domain creation requires that scheduler structures are initialised. */
     scheduler_init();
 
-    idle_domain = domain_create(IDLE_DOMAIN_ID);
+    idle_domain = domain_create(IDLE_DOMAIN_ID, 0);
     if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
         BUG();
 
@@ -640,12 +640,13 @@ void __init __start_xen(multiboot_info_t
     acm_init(_policy_start, _policy_len);
 
     /* Create initial domain 0. */
-    dom0 = domain_create(0);
+    dom0 = domain_create(0, 0);
     if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
         panic("Error creating domain 0\n");
 
-    set_bit(_DOMF_privileged, &dom0->domain_flags);
-    /* post-create hooks sets security label */
+    dom0->is_privileged = 1;
+
+    /* Post-create hook sets security label. */
     acm_post_domain0_create(dom0->domain_id);
 
     /* Grab the DOM0 command line. */
--- a/xen/arch/x86/traps.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/traps.c    Wed Nov 01 16:08:19 2006 +0000
@@ -134,7 +134,7 @@ static void show_guest_stack(struct cpu_
     int i;
     unsigned long *stack, addr;
 
-    if ( hvm_guest(current) )
+    if ( is_hvm_vcpu(current) )
         return;
 
     if ( vm86_mode(regs) )
--- a/xen/arch/x86/x86_32/domain_page.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/x86_32/domain_page.c    Wed Nov 01 16:08:19 2006 +0000
@@ -29,7 +29,7 @@ static inline struct vcpu *mapcache_curr
      * then it means we are running on the idle domain's page table and must
      * therefore use its mapcache.
      */
-    if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !hvm_guest(v) )
+    if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !is_hvm_vcpu(v) )
     {
         /* If we really are idling, perform lazy context switch now. */
         if ( (v = idle_vcpu[smp_processor_id()]) == current )
--- a/xen/arch/x86/x86_32/traps.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/x86_32/traps.c    Wed Nov 01 16:08:19 2006 +0000
@@ -45,7 +45,7 @@ void show_registers(struct cpu_user_regs
     unsigned long fault_crs[8];
     const char *context;
 
-    if ( hvm_guest(current) && guest_mode(regs) )
+    if ( is_hvm_vcpu(current) && guest_mode(regs) )
     {
         context = "hvm";
         hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs);
@@ -515,7 +515,7 @@ static void hypercall_page_initialise_ri
 
 void hypercall_page_initialise(struct domain *d, void *hypercall_page)
 {
-    if ( hvm_guest(d->vcpu[0]) )
+    if ( is_hvm_domain(d) )
         hvm_hypercall_page_initialise(d, hypercall_page);
     else if ( supervisor_mode_kernel )
         hypercall_page_initialise_ring0_kernel(hypercall_page);
--- a/xen/arch/x86/x86_64/traps.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/x86_64/traps.c    Wed Nov 01 16:08:19 2006 +0000
@@ -42,7 +42,7 @@ void show_registers(struct cpu_user_regs
     unsigned long fault_crs[8];
     const char *context;
 
-    if ( hvm_guest(current) && guest_mode(regs) )
+    if ( is_hvm_vcpu(current) && guest_mode(regs) )
     {
         context = "hvm";
         hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs);
@@ -229,7 +229,7 @@ unsigned long do_iret(void)
     regs->rsp = iret_saved.rsp;
     regs->ss = iret_saved.ss | 3; /* force guest privilege */
 
-    if ( !(iret_saved.flags & VGCF_IN_SYSCALL) )
+    if ( !(iret_saved.flags & VGCF_in_syscall) )
     {
         regs->entry_vector = 0;
         regs->r11 = iret_saved.r11;
@@ -500,7 +500,7 @@ static void hypercall_page_initialise_ri
 
 void hypercall_page_initialise(struct domain *d, void *hypercall_page)
 {
-    if ( hvm_guest(d->vcpu[0]) )
+    if ( is_hvm_domain(d) )
         hvm_hypercall_page_initialise(d, hypercall_page);
     else
         hypercall_page_initialise_ring3_kernel(hypercall_page);
--- a/xen/common/domain.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/common/domain.c    Wed Nov 01 16:08:19 2006 +0000
@@ -114,7 +114,7 @@ struct vcpu *alloc_idle_vcpu(unsigned in
     unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;
 
     d = (vcpu_id == 0) ?
-        domain_create(IDLE_DOMAIN_ID) :
+        domain_create(IDLE_DOMAIN_ID, 0) :
         idle_vcpu[cpu_id - vcpu_id]->domain;
     BUG_ON(d == NULL);
 
@@ -124,13 +124,16 @@ struct vcpu *alloc_idle_vcpu(unsigned in
     return v;
 }
 
-struct domain *domain_create(domid_t domid)
+struct domain *domain_create(domid_t domid, unsigned int domcr_flags)
 {
     struct domain *d, **pd;
 
     if ( (d = alloc_domain(domid)) == NULL )
         return NULL;
 
+    if ( domcr_flags & DOMCRF_hvm )
+        d->is_hvm = 1;
+
     rangeset_domain_initialise(d);
 
     if ( !is_idle_domain(d) )
--- a/xen/common/domctl.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/common/domctl.c    Wed Nov 01 16:08:19 2006 +0000
@@ -241,12 +241,10 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
         struct domain *d;
         domid_t        dom;
         static domid_t rover = 0;
+        unsigned int domcr_flags;
 
-        /*
-         * Running the domain 0 kernel in ring 0 is not compatible
-         * with multiple guests.
-         */
-        if ( supervisor_mode_kernel )
+        if ( supervisor_mode_kernel ||
+             (op->u.createdomain.flags & ~XEN_DOMCTL_CDF_hvm_guest) )
             return -EINVAL;
 
         dom = op->domain;
@@ -273,8 +271,12 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
             rover = dom;
         }
 
+        domcr_flags = 0;
+        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
+            domcr_flags |= DOMCRF_hvm;
+
         ret = -ENOMEM;
-        if ( (d = domain_create(dom)) == NULL )
+        if ( (d = domain_create(dom, domcr_flags)) == NULL )
             break;
 
         memcpy(d->handle, op->u.createdomain.handle,
--- a/xen/include/asm-x86/hvm/support.h    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/include/asm-x86/hvm/support.h    Wed Nov 01 16:08:19 2006 +0000
@@ -32,7 +32,8 @@
 #define HVM_DEBUG 1
 #endif
 
-#define hvm_guest(v) ((v)->arch.guest_context.flags & VGCF_HVM_GUEST)
+#define is_hvm_domain(d) ((d)->is_hvm)
+#define is_hvm_vcpu(v)   (is_hvm_domain(v->domain))
 
 static inline shared_iopage_t *get_sp(struct domain *d)
 {
--- a/xen/include/asm-x86/processor.h    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/include/asm-x86/processor.h    Wed Nov 01 16:08:19 2006 +0000
@@ -107,7 +107,7 @@
 #define TRAP_deferred_nmi     31
 
 /* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */
-/* NB. Same as VGCF_IN_SYSCALL. No bits in common with any other TRAP_ defn. */
+/* NB. Same as VGCF_in_syscall. No bits in common with any other TRAP_ defn. */
 #define TRAP_syscall         256
 
 /*
--- a/xen/include/asm-x86/regs.h    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/include/asm-x86/regs.h    Wed Nov 01 16:08:19 2006 +0000
@@ -39,7 +39,7 @@ enum EFLAGS {
     /* If a guest frame, it must be have guest privs (unless HVM guest).   */ \
     /* We permit CS==0 which can come from an uninitialised trap entry. */ \
     ASSERT((diff != 0) || vm86_mode(r) || ((r->cs&3) >= GUEST_KERNEL_RPL) || \
-           (r->cs == 0) || hvm_guest(current));                            \
+           (r->cs == 0) || is_hvm_vcpu(current));                          \
     /* If not a guest frame, it must be a hypervisor frame. */ \
     ASSERT((diff == 0) || (!vm86_mode(r) && (r->cs == __HYPERVISOR_CS))); \
     /* Return TRUE if it's a guest frame. */ \
--- a/xen/include/asm-x86/shadow.h    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/include/asm-x86/shadow.h    Wed Nov 01 16:08:19 2006 +0000
@@ -64,7 +64,7 @@
 #define shadow_mode_external(_d) ((_d)->arch.shadow.mode & SHM2_external)
 
 /* Xen traps & emulates all reads of all page table pages:
- *not yet supported
+ * not yet supported
  */
 #define shadow_mode_trap_reads(_d) ({ (void)(_d); 0; })
 
@@ -77,7 +77,7 @@
 #ifdef __x86_64__
 #define pv_32bit_guest(_v) 0 // not yet supported
 #else
-#define pv_32bit_guest(_v) !hvm_guest(v)
+#define pv_32bit_guest(_v) !is_hvm_vcpu(v)
 #endif
 
 /* The shadow lock.
--- a/xen/include/public/arch-x86_64.h    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/include/public/arch-x86_64.h    Wed Nov 01 16:08:19 2006 +0000
@@ -152,7 +152,7 @@ typedef unsigned long xen_ulong_t;
  * directly with
  *      orb   $3,1*8(%rsp)
  *      iretq
- * If flags contains VGCF_IN_SYSCALL:
+ * If flags contains VGCF_in_syscall:
  *      Restore RAX, RIP, RFLAGS, RSP.
  *      Discard R11, RCX, CS, SS.
  * Otherwise:
@@ -160,7 +160,9 @@ typedef unsigned long xen_ulong_t;
  * All other registers are saved on hypercall entry and restored to user.
 */
 /* Guest exited in SYSCALL context? Return to guest with SYSRET? */
-#define VGCF_IN_SYSCALL (1<<8)
+#define _VGCF_in_syscall 8
+#define VGCF_in_syscall  (1<<_VGCF_in_syscall)
+#define VGCF_IN_SYSCALL  VGCF_in_syscall
 struct iret_context {
     /* Top of stack (%rsp at point of hypercall). */
     uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
--- a/xen/include/public/domctl.h    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/include/public/domctl.h    Wed Nov 01 16:08:19 2006 +0000
@@ -16,7 +16,7 @@
 
 #include "xen.h"
 
-#define XEN_DOMCTL_INTERFACE_VERSION 0x00000003
+#define XEN_DOMCTL_INTERFACE_VERSION 0x00000004
 
 struct xenctl_cpumap {
     XEN_GUEST_HANDLE(uint8_t) bitmap;
@@ -32,6 +32,9 @@ struct xen_domctl_createdomain {
     /* IN parameters */
     uint32_t ssidref;
     xen_domain_handle_t handle;
+#define _XEN_DOMCTL_CDF_hvm_guest 0
+#define XEN_DOMCTL_CDF_hvm_guest  (1U<<_XEN_DOMCTL_CDF_hvm_guest)
+    uint32_t flags;
 };
 typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
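At the hypercall level, the sketch below shows how the updated createdomain argument might be filled by a raw domctl caller (illustrative only; fill_createdomain() is hypothetical, while the struct layout and flag come from this header):

    #include <string.h>
    #include <xen/domctl.h>

    static void fill_createdomain(struct xen_domctl *domctl, domid_t dom,
                                  uint32_t ssidref,
                                  const xen_domain_handle_t handle, int hvm)
    {
        memset(domctl, 0, sizeof(*domctl));
        domctl->cmd = XEN_DOMCTL_createdomain;
        domctl->interface_version = XEN_DOMCTL_INTERFACE_VERSION; /* now 0x04 */
        domctl->domain = dom;
        domctl->u.createdomain.ssidref = ssidref;
        /* Only XEN_DOMCTL_CDF_hvm_guest is defined; do_domctl() returns
         * -EINVAL for any other flag bit. */
        domctl->u.createdomain.flags = hvm ? XEN_DOMCTL_CDF_hvm_guest : 0;
        memcpy(domctl->u.createdomain.handle, handle,
               sizeof(xen_domain_handle_t));
    }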
--- a/xen/include/xen/sched.h    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/include/xen/sched.h    Wed Nov 01 16:08:19 2006 +0000
@@ -144,6 +144,12 @@ struct domain
 
     unsigned long    domain_flags;
 
+    /* Boolean: Is this an HVM guest? */
+    char             is_hvm;
+
+    /* Boolean: Is this guest fully privileged (aka dom0)? */
+    char             is_privileged;
+
     spinlock_t       pause_lock;
     unsigned int     pause_count;
 
@@ -237,26 +243,30 @@ static inline void get_knownalive_domain
     ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTROYED));
 }
 
-extern struct domain *domain_create(domid_t domid);
-extern int construct_dom0(
+struct domain *domain_create(domid_t domid, unsigned int domcr_flags);
+ /* DOMCRF_hvm: Create an HVM domain, as opposed to a PV domain. */
+#define _DOMCRF_hvm 0
+#define DOMCRF_hvm  (1U<<_DOMCRF_hvm)
+
+int construct_dom0(
     struct domain *d,
     unsigned long image_start, unsigned long image_len,
     unsigned long initrd_start, unsigned long initrd_len,
     char *cmdline);
-extern int set_info_guest(struct domain *d, xen_domctl_vcpucontext_t *);
+int set_info_guest(struct domain *d, xen_domctl_vcpucontext_t *);
 
 struct domain *find_domain_by_id(domid_t dom);
-extern void domain_destroy(struct domain *d);
-extern void domain_kill(struct domain *d);
-extern void domain_shutdown(struct domain *d, u8 reason);
-extern void domain_pause_for_debugger(void);
+void domain_destroy(struct domain *d);
+void domain_kill(struct domain *d);
+void domain_shutdown(struct domain *d, u8 reason);
+void domain_pause_for_debugger(void);
 
 /*
  * Mark specified domain as crashed. This function always returns, even if the
  * caller is the specified domain. The domain is not synchronously descheduled
  * from any processor.
  */
-extern void __domain_crash(struct domain *d);
+void __domain_crash(struct domain *d);
 #define domain_crash(d) do {                                              \
     printk("domain_crash called from %s:%d\n", __FILE__, __LINE__);       \
     __domain_crash(d);                                                    \
@@ -266,7 +276,7 @@ extern void __domain_crash(struct domain
  * Mark current domain as crashed and synchronously deschedule from the local
  * processor. This function never returns.
 */
-extern void __domain_crash_synchronous(void) __attribute__((noreturn));
+void __domain_crash_synchronous(void) __attribute__((noreturn));
 #define domain_crash_synchronous() do {                                   \
     printk("domain_crash_sync called from %s:%d\n", __FILE__, __LINE__);  \
     __domain_crash_synchronous();                                         \
@@ -293,7 +303,7 @@ void vcpu_sleep_sync(struct vcpu *d);
  * this call will ensure that all its state is committed to memory and that
  * no CPU is using critical state (e.g., page tables) belonging to the VCPU.
 */
-extern void sync_vcpu_execstate(struct vcpu *v);
+void sync_vcpu_execstate(struct vcpu *v);
 
 /*
  * Called by the scheduler to switch to another VCPU. This function must
@@ -302,7 +312,7 @@ extern void sync_vcpu_execstate(struct v
  * implementing lazy context switching, it suffices to ensure that invoking
  * sync_vcpu_execstate() will switch and commit @prev's state.
 */
-extern void context_switch(
+void context_switch(
     struct vcpu *prev,
     struct vcpu *next);
 
@@ -312,10 +322,10 @@ extern void context_switch(
  * saved to memory. Alternatively, if implementing lazy context switching,
 * ensure that invoking sync_vcpu_execstate() will switch and commit @prev.
 */
-extern void context_saved(struct vcpu *prev);
+void context_saved(struct vcpu *prev);
 
 /* Called by the scheduler to continue running the current VCPU. */
-extern void continue_running(
+void continue_running(
     struct vcpu *same);
 
 void startup_cpu_idle_loop(void);
@@ -396,26 +406,23 @@ extern struct domain *domain_list;
 /*
  * Per-domain flags (domain_flags).
  */
- /* Is this domain privileged? */
-#define _DOMF_privileged       0
-#define  DOMF_privileged       (1UL<<_DOMF_privileged)
 /* Guest shut itself down for some reason. */
-#define _DOMF_shutdown         1
+#define _DOMF_shutdown         0
 #define  DOMF_shutdown         (1UL<<_DOMF_shutdown)
 /* Death rattle. */
-#define _DOMF_dying            2
+#define _DOMF_dying            1
 #define  DOMF_dying            (1UL<<_DOMF_dying)
 /* Domain is paused by controller software. */
-#define _DOMF_ctrl_pause       3
+#define _DOMF_ctrl_pause       2
 #define  DOMF_ctrl_pause       (1UL<<_DOMF_ctrl_pause)
 /* Domain is being debugged by controller software. */
-#define _DOMF_debugging        4
+#define _DOMF_debugging        3
 #define  DOMF_debugging        (1UL<<_DOMF_debugging)
 /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
-#define _DOMF_polling          5
+#define _DOMF_polling          4
 #define  DOMF_polling          (1UL<<_DOMF_polling)
 /* Domain is paused by the hypervisor? */
-#define _DOMF_paused           6
+#define _DOMF_paused           5
 #define  DOMF_paused           (1UL<<_DOMF_paused)
 
 static inline int vcpu_runnable(struct vcpu *v)
@@ -450,8 +457,7 @@ static inline void vcpu_unblock(struct v
         vcpu_wake(v);
 }
 
-#define IS_PRIV(_d) \
-    (test_bit(_DOMF_privileged, &(_d)->domain_flags))
+#define IS_PRIV(_d) ((_d)->is_privileged)
 
 #define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
 