direct-io.hg

changeset 11328:66c27919578f

[IA64] catch up `new Xen event channels'

DM over to using the new Xen event channels for IPF

Signed-off-by: Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>
Signed-off-by: Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
author awilliam@xenbuild.aw
date Wed Aug 23 13:13:51 2006 -0600 (2006-08-23)
parents 36b76e551456
children 3fdc31e91384
files xen/arch/ia64/vmx/mmio.c xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/vmx/vmx_support.c xen/arch/ia64/xen/domain.c xen/include/asm-ia64/vmx.h xen/include/asm-ia64/vmx_vpd.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/mmio.c	Wed Aug 23 12:56:10 2006 -0600
     1.2 +++ b/xen/arch/ia64/vmx/mmio.c	Wed Aug 23 13:13:51 2006 -0600
     1.3 @@ -155,10 +155,9 @@ static void low_mmio_access(VCPU *vcpu, 
     1.4      p->type = 1;
     1.5      p->df = 0;
     1.6  
     1.7 -    set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
     1.8 -    p->state = STATE_IOREQ_READY;
     1.9 -    evtchn_send(iopacket_port(v));
    1.10 -    vmx_wait_io();
    1.11 +    p->io_count++;
    1.12 +
    1.13 +    vmx_send_assist_req(v);
    1.14      if(dir==IOREQ_READ){ //read
    1.15          *val=p->u.data;
    1.16      }
    1.17 @@ -187,11 +186,9 @@ static void legacy_io_access(VCPU *vcpu,
    1.18      p->type = 0;
    1.19      p->df = 0;
    1.20  
    1.21 -    set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
    1.22 -    p->state = STATE_IOREQ_READY;
    1.23 -    evtchn_send(iopacket_port(v));
    1.24 +    p->io_count++;
    1.25  
    1.26 -    vmx_wait_io();
    1.27 +    vmx_send_assist_req(v);
    1.28      if(dir==IOREQ_READ){ //read
    1.29          *val=p->u.data;
    1.30      }
     2.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Wed Aug 23 12:56:10 2006 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Wed Aug 23 13:13:51 2006 -0600
     2.3 @@ -206,7 +206,7 @@ vmx_create_vp(struct vcpu *v)
     2.4  	u64 ret;
     2.5  	vpd_t *vpd = (vpd_t *)v->arch.privregs;
     2.6  	u64 ivt_base;
     2.7 -    extern char vmx_ia64_ivt;
     2.8 +	extern char vmx_ia64_ivt;
     2.9  	/* ia64_ivt is function pointer, so need this tranlation */
    2.10  	ivt_base = (u64) &vmx_ia64_ivt;
    2.11  	printk("ivt_base: 0x%lx\n", ivt_base);
    2.12 @@ -265,6 +265,29 @@ vmx_load_state(struct vcpu *v)
    2.13  	 * anchored in vcpu */
    2.14  }
    2.15  
    2.16 +static void vmx_create_event_channels(struct vcpu *v)
    2.17 +{
    2.18 +	vcpu_iodata_t *p;
    2.19 +	struct vcpu *o;
    2.20 +
    2.21 +	if (v->vcpu_id == 0) {
    2.22 +		/* Ugly: create event channels for every vcpu when vcpu 0
    2.23 +		   starts, so that they're available for ioemu to bind to. */
    2.24 +		for_each_vcpu(v->domain, o) {
    2.25 +			p = get_vio(v->domain, o->vcpu_id);
    2.26 +			o->arch.arch_vmx.xen_port = p->vp_eport =
    2.27 +			                alloc_unbound_xen_event_channel(o, 0);
    2.28 +			DPRINTK("Allocated port %d for hvm.\n",
    2.29 +			        o->arch.arch_vmx.xen_port);
    2.30 +		}
    2.31 +	}
    2.32 +}
    2.33 +
    2.34 +static void vmx_release_assist_channel(struct vcpu *v)
    2.35 +{
    2.36 +	free_xen_event_channel(v, v->arch.arch_vmx.xen_port);
    2.37 +}
    2.38 +
    2.39  /*
    2.40   * Initialize VMX envirenment for guest. Only the 1st vp/vcpu
    2.41   * is registered here.
    2.42 @@ -286,6 +309,8 @@ vmx_final_setup_guest(struct vcpu *v)
    2.43  #ifndef HASH_VHPT     
    2.44          init_domain_tlb(v);
    2.45  #endif
    2.46 +	vmx_create_event_channels(v);
    2.47 +
    2.48  	/* v->arch.schedule_tail = arch_vmx_do_launch; */
    2.49  	vmx_create_vp(v);
    2.50  
    2.51 @@ -304,6 +329,15 @@ vmx_final_setup_guest(struct vcpu *v)
    2.52  }
    2.53  
    2.54  void
    2.55 +vmx_relinquish_guest_resources(struct domain *d)
    2.56 +{
    2.57 +	struct vcpu *v;
    2.58 +
    2.59 +	for_each_vcpu(d, v)
    2.60 +		vmx_release_assist_channel(v);
    2.61 +}
    2.62 +
    2.63 +void
    2.64  vmx_relinquish_vcpu_resources(struct vcpu *v)
    2.65  {
    2.66  	vtime_t *vtm = &(v->arch.arch_vmx.vtm);
    2.67 @@ -420,13 +454,5 @@ void vmx_setup_platform(struct domain *d
    2.68  
    2.69  void vmx_do_launch(struct vcpu *v)
    2.70  {
    2.71 -	if (evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0) {
    2.72 -	    printk("VMX domain bind port %d to vcpu %d failed!\n",
    2.73 -		iopacket_port(v), v->vcpu_id);
    2.74 -	    domain_crash_synchronous();
    2.75 -	}
    2.76 -
    2.77 -	clear_bit(iopacket_port(v), &v->domain->shared_info->evtchn_mask[0]);
    2.78 -
    2.79  	vmx_load_all_rr(v);
    2.80  }
     3.1 --- a/xen/arch/ia64/vmx/vmx_support.c	Wed Aug 23 12:56:10 2006 -0600
     3.2 +++ b/xen/arch/ia64/vmx/vmx_support.c	Wed Aug 23 13:13:51 2006 -0600
     3.3 @@ -1,4 +1,3 @@
     3.4 -
     3.5  /* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     3.6  /*
     3.7   * vmx_support.c: vmx specific support interface.
     3.8 @@ -22,47 +21,13 @@
     3.9  #include <xen/config.h>
    3.10  #include <xen/sched.h>
    3.11  #include <xen/hypercall.h>
    3.12 +#include <xen/event.h>
    3.13  #include <public/sched.h>
    3.14  #include <public/hvm/ioreq.h>
    3.15  #include <asm/vmx.h>
    3.16  #include <asm/vmx_vcpu.h>
    3.17  
    3.18  /*
    3.19 - * I/O emulation should be atomic from domain point of view. However,
    3.20 - * when emulation code is waiting for I/O completion by blocking,
    3.21 - * other events like DM interrupt, VBD, etc. may come and unblock
    3.22 - * current exection flow. So we have to prepare for re-block if unblocked
    3.23 - * by non I/O completion event. After io emulation is done, re-enable
    3.24 - * pending indicaion if other ports are pending
    3.25 - */
    3.26 -void vmx_wait_io(void)
    3.27 -{
    3.28 -    struct vcpu *v = current;
    3.29 -    struct domain *d = v->domain;
    3.30 -    int port = iopacket_port(v);
    3.31 -
    3.32 -    for (;;) {
    3.33 -        if (test_and_clear_bit(0, &v->vcpu_info->evtchn_upcall_pending) &&
    3.34 -            test_and_clear_bit(port / BITS_PER_LONG,
    3.35 -                                     &v->vcpu_info->evtchn_pending_sel) &&
    3.36 -            test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]))
    3.37 -            vmx_io_assist(v);
    3.38 -
    3.39 -        if (!test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
    3.40 -            break;
    3.41 -
    3.42 -        do_sched_op_compat(SCHEDOP_block, 0);
    3.43 -    }
    3.44 -
    3.45 -    /* re-enable indication if other pending events */
    3.46 -    if (d->shared_info->evtchn_pending[port / BITS_PER_LONG])
    3.47 -        set_bit(port / BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
    3.48 -
    3.49 -    if (v->vcpu_info->evtchn_pending_sel)
    3.50 -        set_bit(0, &v->vcpu_info->evtchn_upcall_pending);
    3.51 -}
    3.52 -
    3.53 -/*
    3.54   * Only place to call vmx_io_assist is mmio/legacy_io emulation.
    3.55   * Since I/O emulation is synchronous, it shouldn't be called in
    3.56   * other places. This is not like x86, since IA-64 implements a
    3.57 @@ -83,17 +48,15 @@ void vmx_io_assist(struct vcpu *v)
    3.58  
    3.59      p = &vio->vp_ioreq;
    3.60  
    3.61 -    if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
    3.62 -	if (p->state != STATE_IORESP_READY) {
    3.63 -	    /* Can't block here, for the same reason as other places to
    3.64 -	     * use vmx_wait_io. Simple return is safe since vmx_wait_io will
    3.65 -	     * try to block again
    3.66 -	     */
    3.67 -	    return; 
    3.68 -	} else
    3.69 -	    p->state = STATE_INVALID;
    3.70 -
    3.71 -	clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
    3.72 +    if (p->state == STATE_IORESP_READY) {
    3.73 +        p->state = STATE_INVALID;
    3.74 +    }
    3.75 +    else {
    3.76 +        /* Can't block here, for the same reason as other places to
    3.77 +         * use vmx_wait_io. Simple return is safe since vmx_wait_io will
    3.78 +         * try to block again
    3.79 +         */
    3.80 +        return;
    3.81      }
    3.82  }
    3.83  
    3.84 @@ -108,31 +71,6 @@ void vmx_io_assist(struct vcpu *v)
    3.85   */
    3.86  void vmx_intr_assist(struct vcpu *v)
    3.87  {
    3.88 -    vcpu_iodata_t *vio;
    3.89 -    struct domain *d = v->domain;
    3.90 -    extern void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu,
    3.91 -					unsigned long *pend_irr);
    3.92 -    int port = iopacket_port(v);
    3.93 -
    3.94 -    if (test_bit(port, &d->shared_info->evtchn_pending[0]) ||
    3.95 -	test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
    3.96 -	vmx_wait_io();
    3.97 -
    3.98 -    /* I/O emulation is atomic, so it's impossible to see execution flow
    3.99 -     * out of vmx_wait_io, when guest is still waiting for response.
   3.100 -     */
   3.101 -    if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
   3.102 -	panic_domain(vcpu_regs(v),"!!!Bad resume to guest before I/O emulation is done.\n");
   3.103 -
   3.104 -    /* Even without event pending, we still need to sync pending bits
   3.105 -     * between DM and vlsapic. The reason is that interrupt delivery
   3.106 -     * shares same event channel as I/O emulation, with corresponding
   3.107 -     * indicator possibly cleared when vmx_wait_io().
   3.108 -     */
   3.109 -    vio = get_vio(v->domain, v->vcpu_id);
   3.110 -    if (!vio)
   3.111 -	panic_domain(vcpu_regs(v),"Corruption: bad shared page: %lx\n", (unsigned long)vio);
   3.112 -
   3.113  #ifdef V_IOSAPIC_READY
   3.114      /* Confirm virtual interrupt line signals, and set pending bits in vpd */
   3.115      if(v->vcpu_id==0)
   3.116 @@ -140,3 +78,53 @@ void vmx_intr_assist(struct vcpu *v)
   3.117  #endif
   3.118      return;
   3.119  }
   3.120 +
   3.121 +void vmx_send_assist_req(struct vcpu *v)
   3.122 +{
   3.123 +    ioreq_t *p;
   3.124 +
   3.125 +    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
   3.126 +    if (unlikely(p->state != STATE_INVALID)) {
   3.127 +        /* This indicates a bug in the device model.  Crash the
   3.128 +           domain. */
   3.129 +        printk("Device model set bad IO state %d.\n", p->state);
   3.130 +        domain_crash(v->domain);
   3.131 +        return;
   3.132 +    }
   3.133 +    wmb();
   3.134 +    p->state = STATE_IOREQ_READY;
   3.135 +    notify_via_xen_event_channel(v->arch.arch_vmx.xen_port);
   3.136 +
   3.137 +    /*
   3.138 +     * Waiting for MMIO completion
   3.139 +     *   like the wait_on_xen_event_channel() macro like...
   3.140 +     *   but, we can't call do_softirq() at this point..
   3.141 +     */
   3.142 +    for (;;) {
   3.143 +        if (p->state != STATE_IOREQ_READY &&
   3.144 +            p->state != STATE_IOREQ_INPROCESS)
   3.145 +            break;
   3.146 +
   3.147 +        set_bit(_VCPUF_blocked_in_xen, &current->vcpu_flags);
   3.148 +        mb(); /* set blocked status /then/ re-evaluate condition */
   3.149 +        if (p->state != STATE_IOREQ_READY &&
   3.150 +            p->state != STATE_IOREQ_INPROCESS)
   3.151 +        {
   3.152 +            clear_bit(_VCPUF_blocked_in_xen, &current->vcpu_flags);
   3.153 +            break;
   3.154 +        }
   3.155 +
   3.156 +        /* I want to call __enter_scheduler() only */
   3.157 +        do_sched_op_compat(SCHEDOP_yield, 0);
   3.158 +        mb();
   3.159 +    }
   3.160 +
   3.161 +    /* the code under this line is completer phase... */
   3.162 +    vmx_io_assist(v);
   3.163 +}
   3.164 +
    3.165 +/* Wake up a vcpu which is waiting for interrupts to come in */
   3.166 +void vmx_prod_vcpu(struct vcpu *v)
   3.167 +{
   3.168 +    vcpu_unblock(v);
   3.169 +}
     4.1 --- a/xen/arch/ia64/xen/domain.c	Wed Aug 23 12:56:10 2006 -0600
     4.2 +++ b/xen/arch/ia64/xen/domain.c	Wed Aug 23 13:13:51 2006 -0600
     4.3 @@ -543,6 +543,9 @@ void domain_relinquish_resources(struct 
     4.4      // relase page traversing d->arch.mm.
     4.5      relinquish_mm(d);
     4.6  
     4.7 +    if (d->vcpu[0] && VMX_DOMAIN(d->vcpu[0]))
     4.8 +	    vmx_relinquish_guest_resources(d);
     4.9 +
    4.10      relinquish_memory(d, &d->xenpage_list);
    4.11      relinquish_memory(d, &d->page_list);
    4.12  
     5.1 --- a/xen/include/asm-ia64/vmx.h	Wed Aug 23 12:56:10 2006 -0600
     5.2 +++ b/xen/include/asm-ia64/vmx.h	Wed Aug 23 13:13:51 2006 -0600
     5.3 @@ -35,7 +35,6 @@ extern void vmx_final_setup_guest(struct
     5.4  extern void vmx_save_state(struct vcpu *v);
     5.5  extern void vmx_load_state(struct vcpu *v);
     5.6  extern void vmx_setup_platform(struct domain *d);
     5.7 -extern void vmx_wait_io(void);
     5.8  extern void vmx_io_assist(struct vcpu *v);
     5.9  extern int ia64_hypercall (struct pt_regs *regs);
    5.10  extern void vmx_save_state(struct vcpu *v);
    5.11 @@ -53,6 +52,7 @@ extern void inject_guest_interruption(st
    5.12  extern void vmx_intr_assist(struct vcpu *v);
    5.13  extern void set_illegal_op_isr (struct vcpu *vcpu);
    5.14  extern void illegal_op (struct vcpu *vcpu);
    5.15 +extern void vmx_relinquish_guest_resources(struct domain *d);
    5.16  extern void vmx_relinquish_vcpu_resources(struct vcpu *v);
    5.17  extern void vmx_die_if_kernel(char *str, struct pt_regs *regs, long err);
    5.18  
    5.19 @@ -61,11 +61,6 @@ static inline vcpu_iodata_t *get_vio(str
    5.20      return &((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];
    5.21  }
    5.22  
    5.23 -static inline int iopacket_port(struct vcpu *v)
    5.24 -{
    5.25 -    return get_vio(v->domain, v->vcpu_id)->vp_eport;
    5.26 -}
    5.27 -
    5.28  static inline shared_iopage_t *get_sp(struct domain *d)
    5.29  {
    5.30      return (shared_iopage_t *)d->arch.vmx_platform.shared_page_va;
     6.1 --- a/xen/include/asm-ia64/vmx_vpd.h	Wed Aug 23 12:56:10 2006 -0600
     6.2 +++ b/xen/include/asm-ia64/vmx_vpd.h	Wed Aug 23 13:13:51 2006 -0600
     6.3 @@ -96,7 +96,8 @@ struct arch_vmx_struct {
     6.4  //    unsigned long   rfi_ipsr;
     6.5  //    unsigned long   rfi_ifs;
     6.6  //	unsigned long	in_service[4];	// vLsapic inservice IRQ bits
     6.7 -	unsigned long   flags;
     6.8 +    unsigned long   flags;
     6.9 +    unsigned long   xen_port;
    6.10  #ifdef VTI_DEBUG
    6.11      unsigned long  ivt_current;
    6.12      struct ivt_debug ivt_debug[IVT_DEBUG_MAX];