ia64/xen-unstable

changeset 8971:cb14f4db7a1e

Per-vcpu IO evtchn support for VT-i guests (ia64).

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Xin Li <xin.b.li@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Feb 23 11:24:37 2006 +0100 (2006-02-23)
parents b5bb9920bf48
children 323d40eefbce
files xen/arch/ia64/vmx/mmio.c xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/vmx/vmx_support.c xen/arch/ia64/xen/process.c xen/include/asm-ia64/vmx.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/mmio.c	Thu Feb 23 11:22:25 2006 +0100
     1.2 +++ b/xen/arch/ia64/vmx/mmio.c	Thu Feb 23 11:24:37 2006 +0100
     1.3 @@ -154,7 +154,7 @@ static void low_mmio_access(VCPU *vcpu, 
     1.4  
     1.5      set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
     1.6      p->state = STATE_IOREQ_READY;
     1.7 -    evtchn_send(iopacket_port(v->domain));
     1.8 +    evtchn_send(iopacket_port(v));
     1.9      vmx_wait_io();
    1.10      if(dir==IOREQ_READ){ //read
    1.11          *val=p->u.data;
    1.12 @@ -187,7 +187,7 @@ static void legacy_io_access(VCPU *vcpu,
    1.13  
    1.14      set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
    1.15      p->state = STATE_IOREQ_READY;
    1.16 -    evtchn_send(iopacket_port(v->domain));
    1.17 +    evtchn_send(iopacket_port(v));
    1.18  
    1.19      vmx_wait_io();
    1.20      if(dir==IOREQ_READ){ //read
     2.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Thu Feb 23 11:22:25 2006 +0100
     2.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Thu Feb 23 11:24:37 2006 +0100
     2.3 @@ -49,6 +49,7 @@
     2.4  #include <xen/mm.h>
     2.5  #include <public/arch-ia64.h>
     2.6  #include <asm/hvm/vioapic.h>
     2.7 +#include <public/event_channel.h>
     2.8  
     2.9  /* Global flag to identify whether Intel vmx feature is on */
    2.10  u32 vmx_enabled = 0;
    2.11 @@ -254,9 +255,6 @@ vmx_final_setup_guest(struct vcpu *v)
    2.12  {
    2.13  	vpd_t *vpd;
    2.14  
    2.15 -	/* Allocate resources for vcpu 0 */
    2.16 -	//memset(&v->arch.arch_vmx, 0, sizeof(struct arch_vmx_struct));
    2.17 -
    2.18  	vpd = alloc_vpd();
    2.19  	ASSERT(vpd);
    2.20  
    2.21 @@ -375,20 +373,15 @@ int vmx_alloc_contig_pages(struct domain
    2.22  
    2.23  void vmx_setup_platform(struct domain *d, struct vcpu_guest_context *c)
    2.24  {
    2.25 -	shared_iopage_t *sp;
    2.26 -
    2.27  	ASSERT(d != dom0); /* only for non-privileged vti domain */
    2.28  	d->arch.vmx_platform.shared_page_va =
    2.29  		__va(__gpa_to_mpa(d, IO_PAGE_START));
    2.30 -	sp = get_sp(d);
    2.31 -	//memset((char *)sp,0,PAGE_SIZE);
    2.32  	/* TEMP */
    2.33  	d->arch.vmx_platform.pib_base = 0xfee00000UL;
    2.34  
    2.35  	/* Only open one port for I/O and interrupt emulation */
    2.36  	memset(&d->shared_info->evtchn_mask[0], 0xff,
    2.37  	    sizeof(d->shared_info->evtchn_mask));
    2.38 -	clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
    2.39  
    2.40  	/* Initialize the virtual interrupt lines */
    2.41  	vmx_virq_line_init(d);
    2.42 @@ -397,4 +390,16 @@ void vmx_setup_platform(struct domain *d
    2.43  	hvm_vioapic_init(d);
    2.44  }
    2.45  
    2.46 +void vmx_do_launch(struct vcpu *v)
    2.47 +{
    2.48 +	if (evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0) {
    2.49 +	    printk("VMX domain bind port %d to vcpu %d failed!\n",
    2.50 +		iopacket_port(v), v->vcpu_id);
    2.51 +	    domain_crash_synchronous();
    2.52 +	}
    2.53  
    2.54 +	clear_bit(iopacket_port(v),
    2.55 +		&v->domain->shared_info->evtchn_mask[0]);
    2.56 +
    2.57 +	vmx_load_all_rr(v);
    2.58 +}
     3.1 --- a/xen/arch/ia64/vmx/vmx_support.c	Thu Feb 23 11:22:25 2006 +0100
     3.2 +++ b/xen/arch/ia64/vmx/vmx_support.c	Thu Feb 23 11:24:37 2006 +0100
     3.3 @@ -38,7 +38,7 @@ void vmx_wait_io(void)
     3.4  {
     3.5      struct vcpu *v = current;
     3.6      struct domain *d = v->domain;
     3.7 -    int port = iopacket_port(d);
     3.8 +    int port = iopacket_port(v);
     3.9  
    3.10      do {
    3.11  	if (!test_bit(port,
    3.12 @@ -129,7 +129,7 @@ void vmx_intr_assist(struct vcpu *v)
    3.13      struct domain *d = v->domain;
    3.14      extern void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu,
    3.15  					unsigned long *pend_irr);
    3.16 -    int port = iopacket_port(d);
    3.17 +    int port = iopacket_port(v);
    3.18  
    3.19      /* I/O emulation is atomic, so it's impossible to see execution flow
    3.20       * out of vmx_wait_io, when guest is still waiting for response.
     4.1 --- a/xen/arch/ia64/xen/process.c	Thu Feb 23 11:22:25 2006 +0100
     4.2 +++ b/xen/arch/ia64/xen/process.c	Thu Feb 23 11:24:37 2006 +0100
     4.3 @@ -71,7 +71,7 @@ void schedule_tail(struct vcpu *prev)
     4.4  	context_saved(prev);
     4.5  
     4.6  	if (VMX_DOMAIN(current)) {
     4.7 -		vmx_load_all_rr(current);
     4.8 +		vmx_do_launch(current);
     4.9  	} else {
    4.10  		load_region_regs(current);
    4.11  		vcpu_load_kernel_regs(current);
     5.1 --- a/xen/include/asm-ia64/vmx.h	Thu Feb 23 11:22:25 2006 +0100
     5.2 +++ b/xen/include/asm-ia64/vmx.h	Thu Feb 23 11:24:37 2006 +0100
     5.3 @@ -40,9 +40,9 @@ static inline vcpu_iodata_t *get_vio(str
     5.4      return &((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];
     5.5  }
     5.6  
     5.7 -static inline int iopacket_port(struct domain *d)
     5.8 +static inline int iopacket_port(struct vcpu *v)
     5.9  {
    5.10 -    return ((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->sp_global.eport;
    5.11 +    return get_vio(v->domain, v->vcpu_id)->vp_eport;
    5.12  }
    5.13  
    5.14  static inline shared_iopage_t *get_sp(struct domain *d)