ia64/xen-unstable

changeset 12616:b08e7ed94991

[XENOPROFILE] fix share_xenoprof_page_with_guest() and clean up.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author kfraser@localhost.localdomain
date Tue Nov 28 13:43:25 2006 +0000 (2006-11-28)
parents bbcaa0cad3d2
children 7cb4376044b5
files xen/common/xenoprof.c
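
The functional change: share_xenoprof_page_with_guest() now verifies that the previous owner has actually released every page of the sample buffer (no PGC_allocated flag and no outstanding references in count_info) before handing the pages to a new domain, and fails with -EBUSY otherwise, so both callers must propagate the error; unshare_xenoprof_page_with_guest() now takes the struct xenoprof directly, and the trivial alloc_xenoprof_buf() wrapper is folded into alloc_xenoprof_struct(). A minimal sketch of the resulting calling pattern, for illustration only (the helper name example_map_rawbuf is invented and not part of this changeset):

    /* Sketch only: shows the new error-checked contract, not code from the patch. */
    static int example_map_rawbuf(struct domain *d)
    {
        int ret;

        ret = share_xenoprof_page_with_guest(
            d, virt_to_mfn(d->xenoprof->rawbuf), d->xenoprof->npages);
        if ( ret < 0 )
            return ret;   /* -EBUSY: a previous owner still references the pages */

        xenoprof_reset_buf(d);
        return 0;
    }
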
line diff
     1.1 --- a/xen/common/xenoprof.c	Tue Nov 28 13:34:15 2006 +0000
     1.2 +++ b/xen/common/xenoprof.c	Tue Nov 28 13:43:25 2006 +0000
     1.3 @@ -92,19 +92,35 @@ static void xenoprof_reset_buf(struct do
     1.4      }
     1.5  }
     1.6  
     1.7 -static void
     1.8 -share_xenoprof_page_with_guest(struct domain* d, unsigned long mfn, int npages)
     1.9 +static int
    1.10 +share_xenoprof_page_with_guest(struct domain *d, unsigned long mfn, int npages)
    1.11  {
    1.12      int i;
    1.13 -    
    1.14 -    for ( i = 0; i < npages; i++ )
    1.15 -        share_xen_page_with_guest(mfn_to_page(mfn + i), d, XENSHARE_writable);
    1.16 +
    1.17 +    /* Check if previous page owner has released the page. */
    1.18 +    for ( i = 0; i < npages; i++ )
    1.19 +    {
    1.20 +        struct page_info *page = mfn_to_page(mfn + i);
    1.21 +        if ( (page->count_info & (PGC_allocated|PGC_count_mask)) != 0 )
    1.22 +        {
    1.23 +            gdprintk(XENLOG_INFO, "mfn 0x%lx page->count_info 0x%x\n",
    1.24 +                     mfn + i, page->count_info);
    1.25 +            return -EBUSY;
    1.26 +        }
    1.27 +        page_set_owner(page, NULL);
    1.28 +    }
    1.29 +
    1.30 +    for ( i = 0; i < npages; i++ )
    1.31 +        share_xen_page_with_guest(mfn_to_page(mfn + i), d, XENSHARE_writable);
    1.32 +
    1.33 +    return 0;
    1.34  }
    1.35  
    1.36  static void
    1.37 -unshare_xenoprof_page_with_guest(unsigned long mfn, int npages)
    1.38 +unshare_xenoprof_page_with_guest(struct xenoprof *x)
    1.39  {
    1.40 -    int i;
    1.41 +    int i, npages = x->npages;
    1.42 +    unsigned long mfn = virt_to_mfn(x->rawbuf);
    1.43  
    1.44      for ( i = 0; i < npages; i++ )
    1.45      {
    1.46 @@ -117,7 +133,7 @@ unshare_xenoprof_page_with_guest(unsigne
    1.47  
    1.48  static void
    1.49  xenoprof_shared_gmfn_with_guest(
    1.50 -    struct domain* d, unsigned long maddr, unsigned long gmaddr, int npages)
    1.51 +    struct domain *d, unsigned long maddr, unsigned long gmaddr, int npages)
    1.52  {
    1.53      int i;
    1.54      
    1.55 @@ -128,23 +144,6 @@ xenoprof_shared_gmfn_with_guest(
    1.56      }
    1.57  }
    1.58  
    1.59 -static char *alloc_xenoprof_buf(struct domain *d, int npages)
    1.60 -{
    1.61 -    char *rawbuf;
    1.62 -    int order;
    1.63 -
    1.64 -    /* allocate pages to store sample buffer shared with domain */
    1.65 -    order  = get_order_from_pages(npages);
    1.66 -    rawbuf = alloc_xenheap_pages(order);
    1.67 -    if ( rawbuf == NULL )
    1.68 -    {
    1.69 -        printk("alloc_xenoprof_buf(): memory allocation failed\n");
    1.70 -        return 0;
    1.71 -    }
    1.72 -
    1.73 -    return rawbuf;
    1.74 -}
    1.75 -
    1.76  static int alloc_xenoprof_struct(
    1.77      struct domain *d, int max_samples, int is_passive)
    1.78  {
    1.79 @@ -157,8 +156,7 @@ static int alloc_xenoprof_struct(
    1.80  
    1.81      if ( d->xenoprof == NULL )
    1.82      {
    1.83 -        printk ("alloc_xenoprof_struct(): memory "
    1.84 -                "allocation (xmalloc) failed\n");
    1.85 +        printk("alloc_xenoprof_struct(): memory allocation failed\n");
    1.86          return -ENOMEM;
    1.87      }
    1.88  
    1.89 @@ -178,9 +176,8 @@ static int alloc_xenoprof_struct(
    1.90      bufsize = sizeof(struct xenoprof_buf) +
    1.91          (max_samples - 1) * sizeof(struct event_log);
    1.92      npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;
    1.93 -    
    1.94 -    d->xenoprof->rawbuf = alloc_xenoprof_buf(is_passive ? dom0 : d, npages);
    1.95  
    1.96 +    d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages));
    1.97      if ( d->xenoprof->rawbuf == NULL )
    1.98      {
    1.99          xfree(d->xenoprof);
   1.100 @@ -294,14 +291,14 @@ static void reset_passive(struct domain 
   1.101  {
   1.102      struct xenoprof *x;
   1.103  
   1.104 -    if ( d == 0 )
   1.105 +    if ( d == NULL )
   1.106          return;
   1.107  
   1.108      x = d->xenoprof;
   1.109      if ( x == NULL )
   1.110          return;
   1.111  
   1.112 -    unshare_xenoprof_page_with_guest(virt_to_mfn(x->rawbuf), x->npages);
   1.113 +    unshare_xenoprof_page_with_guest(x);
   1.114      x->domain_type = XENOPROF_DOMAIN_IGNORED;
   1.115  }
   1.116  
   1.117 @@ -375,9 +372,14 @@ static int add_passive_list(XEN_GUEST_HA
   1.118          }
   1.119      }
   1.120  
   1.121 -    share_xenoprof_page_with_guest(
   1.122 +    ret = share_xenoprof_page_with_guest(
   1.123          current->domain, virt_to_mfn(d->xenoprof->rawbuf),
   1.124          d->xenoprof->npages);
   1.125 +    if ( ret < 0 )
   1.126 +    {
   1.127 +        put_domain(d);
   1.128 +        return ret;
   1.129 +    }
   1.130  
   1.131      d->xenoprof->domain_type = XENOPROF_DOMAIN_PASSIVE;
   1.132      passive.nbuf = d->xenoprof->nbuf;
   1.133 @@ -512,8 +514,10 @@ static int xenoprof_op_get_buffer(XEN_GU
   1.134              return ret;
   1.135      }
   1.136  
   1.137 -    share_xenoprof_page_with_guest(
   1.138 +    ret = share_xenoprof_page_with_guest(
   1.139          d, virt_to_mfn(d->xenoprof->rawbuf), d->xenoprof->npages);
   1.140 +    if ( ret < 0 )
   1.141 +        return ret;
   1.142  
   1.143      xenoprof_reset_buf(d);
   1.144  
   1.145 @@ -687,7 +691,7 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
   1.146          if ( (ret = reset_active(current->domain)) != 0 )
   1.147              break;
   1.148          x = current->domain->xenoprof;
   1.149 -        unshare_xenoprof_page_with_guest(virt_to_mfn(x->rawbuf), x->npages);
   1.150 +        unshare_xenoprof_page_with_guest(x);
   1.151          break;
   1.152      }
   1.153