direct-io.hg

changeset 11515:0cdac06f1a9d

[POWERPC][XEN] Safety with foreign get_page() calls and RMA

The following patch deals with get_page() possibly failing for H_ENTER
on a foreign page and returning the correct error. We also tag and
check that a put_page() for RMA pages will panic (for now) if the
domain is _not_ dying.

Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Jimi Xenidis <jimix@watson.ibm.com>
date Thu Sep 14 01:41:13 2006 -0400 (2006-09-14)
parents 9bf0fc041e14
children a17aa5e65209
files xen/arch/powerpc/mm.c xen/arch/powerpc/papr/xlate.c xen/include/asm-powerpc/mm.h
line diff
     1.1 --- a/xen/arch/powerpc/mm.c	Thu Sep 14 01:36:39 2006 -0400
     1.2 +++ b/xen/arch/powerpc/mm.c	Thu Sep 14 01:41:13 2006 -0400
     1.3 @@ -287,6 +287,7 @@ int allocate_rma(struct domain *d, unsig
     1.4      struct vcpu *v;
     1.5      ulong rma_base;
     1.6      ulong rma_sz;
     1.7 +    int i;
     1.8  
     1.9      if (d->arch.rma_page)
    1.10          return -EINVAL;
    1.11 @@ -301,11 +302,17 @@ int allocate_rma(struct domain *d, unsig
    1.12  
    1.13      rma_base = page_to_maddr(d->arch.rma_page);
    1.14      rma_sz = rma_size(d->arch.rma_order);
    1.15 +
    1.16      BUG_ON(rma_base & (rma_sz - 1)); /* check alignment */
    1.17  
    1.18 -    /* XXX shouldn't be needed */
    1.19 -    printk("clearing RMA: 0x%lx[0x%lx]\n", rma_base, rma_sz);
    1.20 -    memset((void *)rma_base, 0, rma_sz);
    1.21 +    printk("allocated RMA for Dom[%d]: 0x%lx[0x%lx]\n",
    1.22 +           d->domain_id, rma_base, rma_sz);
    1.23 +
    1.24 +    for (i = 0; i < (1 << d->arch.rma_order); i++ ) {
     1.25 +        /* Tag each RMA page so put_page() can detect it, then clear it. */
    1.26 +        d->arch.rma_page[i].count_info |= PGC_page_RMA;
    1.27 +        clear_page((void *)page_to_maddr(&d->arch.rma_page[i]));
    1.28 +    }
    1.29  
    1.30      d->shared_info = (shared_info_t *)
    1.31          (rma_addr(&d->arch, RMA_SHARED_INFO) + rma_base);
    1.32 @@ -318,6 +325,13 @@ int allocate_rma(struct domain *d, unsig
    1.33  
    1.34      return 0;
    1.35  }
    1.36 +void free_rma_check(struct page_info *page)
    1.37 +{
    1.38 +    if (test_bit(_PGC_page_RMA, &page->count_info) &&
    1.39 +        !test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags))
    1.40 +        panic("Attempt to free an RMA page: 0x%lx\n", page_to_mfn(page));
    1.41 +}
    1.42 +
    1.43  
    1.44  ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
    1.45  {
     2.1 --- a/xen/arch/powerpc/papr/xlate.c	Thu Sep 14 01:36:39 2006 -0400
     2.2 +++ b/xen/arch/powerpc/papr/xlate.c	Thu Sep 14 01:41:13 2006 -0400
     2.3 @@ -123,6 +123,9 @@ static void h_enter(struct cpu_user_regs
     2.4      struct vcpu *v = get_current();
     2.5      struct domain *d = v->domain;
     2.6      int mtype;
     2.7 +    struct page_info *pg = NULL;
     2.8 +    struct domain *f = NULL;
     2.9 +
    2.10  
    2.11      htab = &d->arch.htab;
    2.12      if (ptex > (1UL << htab->log_num_ptes)) {
    2.13 @@ -203,15 +206,39 @@ static void h_enter(struct cpu_user_regs
    2.14      pte.bits.ts = 0x0;
    2.15      pte.bits.res2 = 0x0;
    2.16  
    2.17 +    if (mtype == PFN_TYPE_FOREIGN) {
    2.18 +        pg = mfn_to_page(mfn);
    2.19 +        f = page_get_owner(pg);
    2.20 +        
    2.21 +        BUG_ON(f == d);
    2.22 +
    2.23 +        if (unlikely(!get_domain(f))) {
    2.24 +            regs->gprs[3] = H_Rescinded;
    2.25 +            return;
    2.26 +        }
    2.27 +        if (unlikely(!get_page(pg, f))) {
    2.28 +            put_domain(f);
    2.29 +            regs->gprs[3] = H_Rescinded;
    2.30 +            return;
    2.31 +        }
    2.32 +    }
    2.33 +
    2.34      if ( !(flags & H_EXACT) ) {
    2.35          /* PTEG (not specific PTE); clear 3 lowest bits */
    2.36          ptex &= ~0x7UL;
    2.37          limit = 7;
    2.38      }
    2.39  
    2.40 -        /* data manipulations should be done prior to the pte insertion. */
    2.41 +    /* data manipulations should be done prior to the pte insertion. */
    2.42      if ( flags & H_ZERO_PAGE ) {
    2.43 -        memset((void *)(mfn << PAGE_SHIFT), 0, 1UL << pgshift);
    2.44 +        ulong pg = mfn << PAGE_SHIFT;
    2.45 +        ulong pgs = 1UL << pgshift;
    2.46 +
    2.47 +        while (pgs > 0) {
    2.48 +            clear_page((void *)pg);
    2.49 +            pg += PAGE_SIZE;
    2.50 +            --pgs;
    2.51 +        }
    2.52      }
    2.53  
    2.54      if ( flags & H_ICACHE_INVALIDATE ) {
    2.55 @@ -252,27 +279,6 @@ static void h_enter(struct cpu_user_regs
    2.56              regs->gprs[3] = H_Success;
    2.57              regs->gprs[4] = idx;
    2.58  
    2.59 -            
    2.60 -            switch (mtype) {
    2.61 -            case PFN_TYPE_IO:
    2.62 -                break;
    2.63 -            case PFN_TYPE_FOREIGN:
    2.64 -            {
    2.65 -                struct page_info *pg = mfn_to_page(mfn);
    2.66 -                struct domain *f = page_get_owner(pg);
    2.67 -
    2.68 -                BUG_ON(f == d);
    2.69 -                get_domain(f);
    2.70 -                get_page(pg, f);
    2.71 -            }
    2.72 -                break;
    2.73 -            case PFN_TYPE_RMA:
    2.74 -            case PFN_TYPE_LOGICAL:
    2.75 -                break;
    2.76 -            default:
    2.77 -                BUG();
    2.78 -            }
    2.79 -
    2.80              return;
    2.81          }
    2.82      }
    2.83 @@ -282,6 +288,12 @@ static void h_enter(struct cpu_user_regs
    2.84      printk("%s: PTEG FULL\n", __func__);
    2.85  #endif
    2.86  
    2.87 +    if (pg != NULL)
    2.88 +        put_page(pg);
    2.89 +
    2.90 +    if (f != NULL)
    2.91 +        put_domain(f);
    2.92 +
    2.93      regs->gprs[3] = H_PTEG_Full;
    2.94  }
    2.95  
     3.1 --- a/xen/include/asm-powerpc/mm.h	Thu Sep 14 01:36:39 2006 -0400
     3.2 +++ b/xen/include/asm-powerpc/mm.h	Thu Sep 14 01:41:13 2006 -0400
     3.3 @@ -122,8 +122,11 @@ struct page_extents {
     3.4   /* Set when is using a page as a page table */
     3.5  #define _PGC_page_table      29
     3.6  #define PGC_page_table      (1U<<_PGC_page_table)
     3.7 +/* Set when using page for RMA */
     3.8 +#define _PGC_page_RMA      28
     3.9 +#define PGC_page_RMA      (1U<<_PGC_page_RMA)
    3.10   /* 29-bit count of references to this frame. */
    3.11 -#define PGC_count_mask      ((1U<<29)-1)
    3.12 +#define PGC_count_mask      ((1U<<28)-1)
    3.13  
    3.14  #define IS_XEN_HEAP_FRAME(_pfn) (page_to_maddr(_pfn) < xenheap_phys_end)
    3.15  
    3.16 @@ -142,6 +145,7 @@ extern struct page_info *frame_table;
    3.17  extern unsigned long max_page;
    3.18  extern unsigned long total_pages;
    3.19  void init_frametable(void);
    3.20 +void free_rma_check(struct page_info *page);
    3.21  
    3.22  static inline void put_page(struct page_info *page)
    3.23  {
    3.24 @@ -154,6 +158,8 @@ static inline void put_page(struct page_
    3.25      while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
    3.26  
    3.27      if ( unlikely((nx & PGC_count_mask) == 0) ) {
    3.28 +        /* RMA pages can only be released while the domain is dying */
    3.29 +        free_rma_check(page);
    3.30          free_domheap_page(page);
    3.31      }
    3.32  }