ia64/xen-unstable

changeset 14848:ba8d4bc2435a

hvm: Xen must take care to hold a reference to ioreq pages, to ensure
that a domain runs only when its ioreq pages are validly mapped, and to
safely drop ioreq page references when a domain dies.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Fri Apr 13 12:29:06 2007 +0100 (2007-04-13)
parents 0b14423e75f8
children 5f6b31335cda
files xen/arch/x86/domain.c xen/arch/x86/hvm/hvm.c xen/arch/x86/x86_32/domain_page.c xen/common/domain.c xen/include/asm-x86/hvm/hvm.h xen/include/xen/domain_page.h
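
At the heart of the change is a small publish/teardown protocol on the two ioreq
mapping fields (shared_page_va and buffered_io_va): hvm_set_ioreq_pfn() takes a
writable reference to the guest page, maps it globally, publishes the VA with
cmpxchg(), and only then re-checks d->is_dying; hvm_clear_ioreq_pfn() takes sole
ownership of the VA with xchg() before unmapping and dropping the page reference.
The user-space model below is a minimal sketch of that protocol (not Xen code;
the names mapping, dying, set_mapping and clear_mapping are illustrative), using
C11 atomics in place of Xen's cmpxchg()/xchg().

    /* User-space model of the ioreq-page publish/teardown protocol. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    static _Atomic uintptr_t mapping;   /* models d->arch.hvm_domain.*_va */
    static atomic_bool dying;           /* models d->is_dying             */

    /* Models hvm_clear_ioreq_pfn(): take sole ownership, then release. */
    static void clear_mapping(void)
    {
        uintptr_t va = atomic_exchange(&mapping, 0);
        if (va != 0)
            free((void *)va);           /* models unmap + put_page_and_type() */
    }

    /* Models hvm_set_ioreq_pfn(): publish the mapping, then re-check 'dying'. */
    static int set_mapping(void)
    {
        uintptr_t va = (uintptr_t)malloc(4096); /* models map_domain_page_global() */
        uintptr_t expected = 0;

        if (va == 0)
            return -1;

        /* Publish only if nothing is installed yet; the CAS is a full barrier. */
        if (!atomic_compare_exchange_strong(&mapping, &expected, va))
        {
            free((void *)va);
            return -1;
        }

        /*
         * Check 'dying' /after/ publishing.  Teardown that ran before the
         * publish cannot have seen our value, so we must undo it ourselves.
         */
        if (atomic_load(&dying))
            clear_mapping();

        return 0;
    }

    int main(void)
    {
        atomic_store(&dying, true);  /* domain dies before the mapping is set...   */
        set_mapping();               /* ...so the late setter cleans up after itself */
        clear_mapping();             /* idempotent: nothing left to release        */
        return 0;
    }

Whichever order the two paths run in, the page ends up released exactly once,
which is the invariant the changeset needs for safe domain teardown.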
line diff
     1.1 --- a/xen/arch/x86/domain.c	Fri Apr 13 12:01:37 2007 +0100
     1.2 +++ b/xen/arch/x86/domain.c	Fri Apr 13 12:29:06 2007 +0100
     1.3 @@ -1540,8 +1540,10 @@ void domain_relinquish_resources(struct 
     1.4      relinquish_memory(d, &d->xenpage_list, PGT_l2_page_table);
     1.5      relinquish_memory(d, &d->page_list, PGT_l2_page_table);
     1.6  
     1.7 -    /* Free page used by xen oprofile buffer */
     1.8 +    /* Free page used by xen oprofile buffer. */
     1.9      free_xenoprof_pages(d);
    1.10 +
    1.11 +    hvm_domain_relinquish_resources(d);
    1.12  }
    1.13  
    1.14  void arch_dump_domain_info(struct domain *d)
     2.1 --- a/xen/arch/x86/hvm/hvm.c	Fri Apr 13 12:01:37 2007 +0100
     2.2 +++ b/xen/arch/x86/hvm/hvm.c	Fri Apr 13 12:29:06 2007 +0100
     2.3 @@ -146,6 +146,59 @@ void hvm_do_resume(struct vcpu *v)
     2.4      }
     2.5  }
     2.6  
     2.7 +static void hvm_clear_ioreq_pfn(
     2.8 +    struct domain *d, unsigned long *pva)
     2.9 +{
    2.10 +    unsigned long va, mfn;
    2.11 +
    2.12 +    BUG_ON(!d->is_dying);
    2.13 +
    2.14 +    if ( (va = xchg(pva, 0UL)) == 0UL )
    2.15 +        return;
    2.16 +
    2.17 +    mfn = mfn_from_mapped_domain_page((void *)va);
    2.18 +    unmap_domain_page_global((void *)va);
    2.19 +    put_page_and_type(mfn_to_page(mfn));
    2.20 +}
    2.21 +
    2.22 +static int hvm_set_ioreq_pfn(
    2.23 +    struct domain *d, unsigned long *pva, unsigned long gmfn)
    2.24 +{
    2.25 +    unsigned long mfn;
    2.26 +    void *va;
    2.27 +
    2.28 +    mfn = gmfn_to_mfn(d, gmfn);
    2.29 +    if ( !mfn_valid(mfn) ||
    2.30 +         !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
    2.31 +        return -EINVAL;
    2.32 +
    2.33 +    va = map_domain_page_global(mfn);
    2.34 +    if ( va == NULL )
    2.35 +    {
    2.36 +        put_page_and_type(mfn_to_page(mfn));
    2.37 +        return -ENOMEM;
    2.38 +    }
    2.39 +
    2.40 +    if ( cmpxchg(pva, 0UL, (unsigned long)va) != 0UL )
    2.41 +    {
    2.42 +        unmap_domain_page_global(va);
    2.43 +        put_page_and_type(mfn_to_page(mfn));
    2.44 +        return -EINVAL;
    2.45 +    }
    2.46 +
    2.47 +    /*
    2.48 +     * Check dying status /after/ setting *pva. cmpxchg() is a barrier.
    2.49 +     * We race against hvm_domain_relinquish_resources(). 
    2.50 +     */
    2.51 +    if ( d->is_dying )
    2.52 +        hvm_clear_ioreq_pfn(d, pva);
    2.53 +
    2.54 +    /* Balance the domain_pause() in hvm_domain_initialise(). */
    2.55 +    domain_unpause(d);
    2.56 +
    2.57 +    return 0;
    2.58 +}
    2.59 +
    2.60  int hvm_domain_initialise(struct domain *d)
    2.61  {
    2.62      int rc;
    2.63 @@ -161,7 +214,6 @@ int hvm_domain_initialise(struct domain 
    2.64      spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
    2.65      spin_lock_init(&d->arch.hvm_domain.irq_lock);
    2.66  
    2.67 -    /* paging support will be determined inside paging.c */
    2.68      rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
    2.69      if ( rc != 0 )
    2.70          return rc;
    2.71 @@ -169,22 +221,25 @@ int hvm_domain_initialise(struct domain 
    2.72      vpic_init(d);
    2.73      vioapic_init(d);
    2.74  
    2.75 +    /* Do not allow domain to run until it has ioreq shared pages. */
    2.76 +    domain_pause(d); /* HVM_PARAM_IOREQ_PFN */
    2.77 +    domain_pause(d); /* HVM_PARAM_BUFIOREQ_PFN */
    2.78 +
    2.79      return 0;
    2.80  }
    2.81  
    2.82 +void hvm_domain_relinquish_resources(struct domain *d)
    2.83 +{
    2.84 +    hvm_clear_ioreq_pfn(d, &d->arch.hvm_domain.shared_page_va);
    2.85 +    hvm_clear_ioreq_pfn(d, &d->arch.hvm_domain.buffered_io_va);
    2.86 +}
    2.87 +
    2.88  void hvm_domain_destroy(struct domain *d)
    2.89  {
    2.90      pit_deinit(d);
    2.91      rtc_deinit(d);
    2.92      pmtimer_deinit(d);
    2.93      hpet_deinit(d);
    2.94 -
    2.95 -    if ( d->arch.hvm_domain.shared_page_va )
    2.96 -        unmap_domain_page_global(
    2.97 -            (void *)d->arch.hvm_domain.shared_page_va);
    2.98 -
    2.99 -    if ( d->arch.hvm_domain.buffered_io_va )
   2.100 -        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
   2.101  }
   2.102  
   2.103  static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
   2.104 @@ -928,8 +983,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
   2.105          struct xen_hvm_param a;
   2.106          struct domain *d;
   2.107          struct vcpu *v;
   2.108 -        unsigned long mfn;
   2.109 -        void *p;
   2.110  
   2.111          if ( copy_from_guest(&a, arg, 1) )
   2.112              return -EFAULT;
   2.113 @@ -956,30 +1009,19 @@ long do_hvm_op(unsigned long op, XEN_GUE
   2.114              switch ( a.index )
   2.115              {
   2.116              case HVM_PARAM_IOREQ_PFN:
   2.117 -                if ( d->arch.hvm_domain.shared_page_va )
   2.118 -                    goto param_fail;
   2.119 -                mfn = gmfn_to_mfn(d, a.value);
   2.120 -                if ( mfn == INVALID_MFN )
   2.121 -                    goto param_fail;
   2.122 -                p = map_domain_page_global(mfn);
   2.123 -                if ( p == NULL )
   2.124 -                    goto param_fail;
   2.125 -                d->arch.hvm_domain.shared_page_va = (unsigned long)p;
   2.126 -                /* Initialise evtchn port info if VCPUs already created. */
   2.127 -                for_each_vcpu ( d, v )
   2.128 -                    get_vio(d, v->vcpu_id)->vp_eport =
   2.129 -                    v->arch.hvm_vcpu.xen_port;
   2.130 +                rc = hvm_set_ioreq_pfn(
   2.131 +                    d, &d->arch.hvm_domain.shared_page_va, a.value);
   2.132 +                if ( rc == 0 )
   2.133 +                {
   2.134 +                    /* Initialise evtchn port info if VCPUs already created. */
   2.135 +                    for_each_vcpu ( d, v )
   2.136 +                        get_vio(d, v->vcpu_id)->vp_eport =
   2.137 +                        v->arch.hvm_vcpu.xen_port;
   2.138 +                }
   2.139                  break;
   2.140              case HVM_PARAM_BUFIOREQ_PFN:
   2.141 -                if ( d->arch.hvm_domain.buffered_io_va )
   2.142 -                    goto param_fail;
   2.143 -                mfn = gmfn_to_mfn(d, a.value);
   2.144 -                if ( mfn == INVALID_MFN )
   2.145 -                    goto param_fail;
   2.146 -                p = map_domain_page_global(mfn);
   2.147 -                if ( p == NULL )
   2.148 -                    goto param_fail;
   2.149 -                d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
   2.150 +                rc = hvm_set_ioreq_pfn(
   2.151 +                    d, &d->arch.hvm_domain.buffered_io_va, a.value);
   2.152                  break;
   2.153              case HVM_PARAM_CALLBACK_IRQ:
   2.154                  hvm_set_callback_via(d, a.value);
     3.1 --- a/xen/arch/x86/x86_32/domain_page.c	Fri Apr 13 12:01:37 2007 +0100
     3.2 +++ b/xen/arch/x86/x86_32/domain_page.c	Fri Apr 13 12:29:06 2007 +0100
     3.3 @@ -251,3 +251,24 @@ void unmap_domain_page_global(void *va)
     3.4      idx = (__va - IOREMAP_VIRT_START) >> PAGE_SHIFT;
     3.5      set_bit(idx, garbage);
     3.6  }
     3.7 +
     3.8 +unsigned long mfn_from_mapped_domain_page(void *va) 
     3.9 +{
    3.10 +    unsigned long __va = (unsigned long)va;
    3.11 +    l2_pgentry_t *pl2e;
    3.12 +    l1_pgentry_t *pl1e;
    3.13 +    unsigned int idx;
    3.14 +    struct mapcache *cache;
    3.15 +
    3.16 +    if ( (__va >= MAPCACHE_VIRT_START) && (__va < MAPCACHE_VIRT_END) )
    3.17 +    {
    3.18 +        cache = &mapcache_current_vcpu()->domain->arch.mapcache;
    3.19 +        idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
    3.20 +        return l1e_get_pfn(cache->l1tab[idx]);
    3.21 +    }
    3.22 +
    3.23 +    ASSERT(__va >= IOREMAP_VIRT_START);
    3.24 +    pl2e = virt_to_xen_l2e(__va);
    3.25 +    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(__va);
    3.26 +    return l1e_get_pfn(*pl1e);
    3.27 +}
     4.1 --- a/xen/common/domain.c	Fri Apr 13 12:01:37 2007 +0100
     4.2 +++ b/xen/common/domain.c	Fri Apr 13 12:29:06 2007 +0100
     4.3 @@ -314,7 +314,7 @@ void domain_kill(struct domain *d)
     4.4      }
     4.5  
     4.6      /* Tear down state /after/ setting the dying flag. */
     4.7 -    smp_wmb();
     4.8 +    smp_mb();
     4.9  
    4.10      gnttab_release_mappings(d);
    4.11      domain_relinquish_resources(d);
     5.1 --- a/xen/include/asm-x86/hvm/hvm.h	Fri Apr 13 12:01:37 2007 +0100
     5.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Fri Apr 13 12:29:06 2007 +0100
     5.3 @@ -145,6 +145,7 @@ struct hvm_function_table {
     5.4  extern struct hvm_function_table hvm_funcs;
     5.5  
     5.6  int hvm_domain_initialise(struct domain *d);
     5.7 +void hvm_domain_relinquish_resources(struct domain *d);
     5.8  void hvm_domain_destroy(struct domain *d);
     5.9  
    5.10  int hvm_vcpu_initialise(struct vcpu *v);
     6.1 --- a/xen/include/xen/domain_page.h	Fri Apr 13 12:01:37 2007 +0100
     6.2 +++ b/xen/include/xen/domain_page.h	Fri Apr 13 12:29:06 2007 +0100
     6.3 @@ -34,6 +34,13 @@ void unmap_domain_page(void *va);
     6.4  void *map_domain_page_global(unsigned long mfn);
     6.5  void unmap_domain_page_global(void *va);
     6.6  
     6.7 +/* 
     6.8 + * Convert a VA (within a page previously mapped in the context of the
     6.9 + * currently-executing VCPU via a call to map_domain_page(), or via a
    6.10 + * previous call to map_domain_page_global()) to the mapped page frame.
    6.11 + */
    6.12 +unsigned long mfn_from_mapped_domain_page(void *va);
    6.13 +
    6.14  #define DMCACHE_ENTRY_VALID 1U
    6.15  #define DMCACHE_ENTRY_HELD  2U
    6.16  
    6.17 @@ -96,12 +103,14 @@ domain_mmap_cache_destroy(struct domain_
    6.18  
    6.19  #else /* !CONFIG_DOMAIN_PAGE */
    6.20  
    6.21 -#define map_domain_page(mfn)                maddr_to_virt((mfn)<<PAGE_SHIFT)
    6.22 +#define map_domain_page(mfn)                mfn_to_virt(mfn)
    6.23  #define unmap_domain_page(va)               ((void)(va))
    6.24  
    6.25 -#define map_domain_page_global(mfn)         maddr_to_virt((mfn)<<PAGE_SHIFT)
    6.26 +#define map_domain_page_global(mfn)         mfn_to_virt(mfn)
    6.27  #define unmap_domain_page_global(va)        ((void)(va))
    6.28  
    6.29 +#define mfn_from_mapped_domain_page(va)     virt_to_mfn(va)
    6.30 +
    6.31  struct domain_mmap_cache { 
    6.32  };
    6.33
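
A note on the xen/common/domain.c hunk: upgrading smp_wmb() to smp_mb() matters
because domain_kill() and hvm_set_ioreq_pfn() form a store-buffering pattern,
each path storing one location and then loading the other. The sketch below is
illustrative only (condensed from the code above, not a verbatim quote):

    domain_kill()                            hvm_set_ioreq_pfn()
    -------------                            -------------------
    d->is_dying = 1;                         cmpxchg(pva, 0UL, va);   <- stores *pva
    smp_mb();                                (cmpxchg acts as a full barrier)
    hvm_clear_ioreq_pfn():                   if ( d->is_dying )       <- loads the flag
        va = xchg(pva, 0UL);  <- loads *pva      hvm_clear_ioreq_pfn(d, pva);

With only a write barrier on the kill path, the store to is_dying need not be
ordered before the load of *pva, so both sides could read stale values: xchg()
would find *pva == 0 while the setter still sees is_dying == 0, leaving a global
mapping and a page reference that nothing ever drops. With full barriers on both
sides, at least one path is guaranteed to observe the other's store, so the
ioreq page is always unmapped and its reference dropped exactly once.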