direct-io.hg

changeset 10610:39562deee9b8

[IA64] memory exchange: support memory exchange

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Fri Jun 23 15:16:10 2006 -0600 (2006-06-23)
parents 056622b0f8f7
children a9c1e614c9ff
files xen/arch/ia64/xen/mm.c
line diff
     1.1 --- a/xen/arch/ia64/xen/mm.c	Fri Jun 23 15:13:54 2006 -0600
     1.2 +++ b/xen/arch/ia64/xen/mm.c	Fri Jun 23 15:16:10 2006 -0600
     1.3 @@ -969,7 +969,8 @@ assign_domain_page_replace(struct domain
     1.4          if (mfn != old_mfn) {
     1.5              struct page_info* old_page = mfn_to_page(old_mfn);
     1.6  
     1.7 -            if (page_get_owner(old_page) == d) {
     1.8 +            if (page_get_owner(old_page) == d ||
     1.9 +                page_get_owner(old_page) == NULL) {
    1.10                  BUG_ON(get_gpfn_from_mfn(old_mfn) != (mpaddr >> PAGE_SHIFT));
    1.11                  set_gpfn_from_mfn(old_mfn, INVALID_M2P_ENTRY);
    1.12              }
    1.13 @@ -1068,7 +1069,10 @@ zap_domain_page_one(struct domain *d, un
    1.14          pte_t ret_pte;
    1.15  
    1.16      again:
    1.17 -        BUG_ON(page_get_owner(mfn_to_page(mfn)) != d);
     1.18 +        // memory_exchange() calls guest_physmap_remove_page() with
     1.19 +        // a stolen page, i.e. page owner = NULL.
    1.20 +        BUG_ON(page_get_owner(mfn_to_page(mfn)) != d &&
    1.21 +               page_get_owner(mfn_to_page(mfn)) != NULL);
    1.22          old_arflags = pte_val(*pte) & ~_PAGE_PPN_MASK;
    1.23          old_pte = pfn_pte(mfn, __pgprot(old_arflags));
    1.24          new_pte = __pte(0);
    1.25 @@ -1093,14 +1097,22 @@ zap_domain_page_one(struct domain *d, un
    1.26      page = mfn_to_page(mfn);
    1.27      BUG_ON((page->count_info & PGC_count_mask) == 0);
    1.28  
    1.29 -    if (page_get_owner(page) == d) {
    1.30 +    if (page_get_owner(page) == d ||
    1.31 +        page_get_owner(page) == NULL) {
     1.32 +        // memory_exchange() calls
    1.33 +        //   steal_page()
    1.34 +        //     page owner is set to NULL
    1.35 +        //   guest_physmap_remove_page()
    1.36 +        //     zap_domain_page_one()
    1.37          BUG_ON(get_gpfn_from_mfn(mfn) != (mpaddr >> PAGE_SHIFT));
    1.38          set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
    1.39      }
    1.40  
    1.41      domain_page_flush(d, mpaddr, mfn, INVALID_MFN);
    1.42  
    1.43 -    try_to_clear_PGC_allocate(d, page);
    1.44 +    if (page_get_owner(page) != NULL) {
    1.45 +        try_to_clear_PGC_allocate(d, page);
    1.46 +    }
    1.47      put_page(page);
    1.48  }
    1.49  
    1.50 @@ -1250,6 +1262,11 @@ destroy_grant_host_mapping(unsigned long
    1.51  }
    1.52  
    1.53  // heavily depends on the struct page layout.
    1.54 +// gnttab_transfer() calls steal_page() with memflags = 0
    1.55 +//   For grant table transfer, we must fill the page.
    1.56 +// memory_exchange() calls steal_page() with memflags = MEMF_no_refcount
    1.57 +//   For memory exchange, we don't have to fill the page because
    1.58 +//   memory_exchange() does it.
    1.59  int
    1.60  steal_page(struct domain *d, struct page_info *page, unsigned int memflags)
    1.61  {
    1.62 @@ -1258,40 +1275,49 @@ steal_page(struct domain *d, struct page
    1.63  #endif
    1.64      u32 _d, _nd;
    1.65      u64 x, nx, y;
    1.66 -    unsigned long gpfn;
    1.67 -    struct page_info *new;
    1.68 -    unsigned long new_mfn;
    1.69 -    int ret;
    1.70 -    new = alloc_domheap_page(d);
    1.71 -    if (new == NULL) {
    1.72 -        DPRINTK("alloc_domheap_page() failed\n");
    1.73 +
    1.74 +    if (page_get_owner(page) != d) {
    1.75 +        DPRINTK("%s d 0x%p owner 0x%p\n", __func__, d, page_get_owner(page));
    1.76          return -1;
    1.77      }
    1.78 -    // zero out pages for security reasons
    1.79 -    clear_page(page_to_virt(new));
    1.80 -    // assign_domain_page_cmpxchg_rel() has release semantics
    1.81 -    // so smp_mb() isn't needed.
    1.82 +    
    1.83 +    if (!(memflags & MEMF_no_refcount)) {
    1.84 +        unsigned long gpfn;
    1.85 +        struct page_info *new;
    1.86 +        unsigned long new_mfn;
    1.87 +        int ret;
    1.88  
    1.89 -    ret = get_page(new, d);
    1.90 -    BUG_ON(ret == 0);
    1.91 +        new = alloc_domheap_page(d);
    1.92 +        if (new == NULL) {
    1.93 +            DPRINTK("alloc_domheap_page() failed\n");
    1.94 +            return -1;
    1.95 +        }
    1.96 +        // zero out pages for security reasons
    1.97 +        clear_page(page_to_virt(new));
    1.98 +        // assign_domain_page_cmpxchg_rel() has release semantics
    1.99 +        // so smp_mb() isn't needed.
   1.100  
   1.101 -    gpfn = get_gpfn_from_mfn(page_to_mfn(page));
   1.102 -    if (gpfn == INVALID_M2P_ENTRY) {
   1.103 -        free_domheap_page(new);
   1.104 -        return -1;
   1.105 -    }
   1.106 -    new_mfn = page_to_mfn(new);
   1.107 -    set_gpfn_from_mfn(new_mfn, gpfn);
   1.108 -    // smp_mb() isn't needed because assign_domain_pge_cmpxchg_rel()
   1.109 -    // has release semantics.
   1.110 +        ret = get_page(new, d);
   1.111 +        BUG_ON(ret == 0);
   1.112  
   1.113 -    ret = assign_domain_page_cmpxchg_rel(d, gpfn << PAGE_SHIFT, page, new,
   1.114 -                                         ASSIGN_writable);
   1.115 -    if (ret < 0) {
   1.116 -        DPRINTK("assign_domain_page_cmpxchg_rel failed %d\n", ret);
   1.117 -        set_gpfn_from_mfn(new_mfn, INVALID_M2P_ENTRY);
   1.118 -        free_domheap_page(new);
   1.119 -        return -1;
   1.120 +        gpfn = get_gpfn_from_mfn(page_to_mfn(page));
   1.121 +        if (gpfn == INVALID_M2P_ENTRY) {
   1.122 +            free_domheap_page(new);
   1.123 +            return -1;
   1.124 +        }
   1.125 +        new_mfn = page_to_mfn(new);
   1.126 +        set_gpfn_from_mfn(new_mfn, gpfn);
    1.127 +        // smp_mb() isn't needed because assign_domain_page_cmpxchg_rel()
   1.128 +        // has release semantics.
   1.129 +
   1.130 +        ret = assign_domain_page_cmpxchg_rel(d, gpfn << PAGE_SHIFT, page, new,
   1.131 +                                             ASSIGN_writable);
   1.132 +        if (ret < 0) {
   1.133 +            DPRINTK("assign_domain_page_cmpxchg_rel failed %d\n", ret);
   1.134 +            set_gpfn_from_mfn(new_mfn, INVALID_M2P_ENTRY);
   1.135 +            free_domheap_page(new);
   1.136 +            return -1;
   1.137 +        }
   1.138      }
   1.139  
   1.140      spin_lock(&d->page_alloc_lock);
   1.141 @@ -1310,28 +1336,40 @@ steal_page(struct domain *d, struct page
   1.142          // page->u.inused._domain = 0;
   1.143          _nd = x >> 32;
   1.144  
   1.145 -        if (unlikely((x & (PGC_count_mask | PGC_allocated)) !=
   1.146 -                     (1 | PGC_allocated)) ||
   1.147 +        if (unlikely(!(memflags & MEMF_no_refcount) &&
   1.148 +                     ((x & (PGC_count_mask | PGC_allocated)) !=
   1.149 +                      (1 | PGC_allocated))) ||
   1.150 +
   1.151 +            // when MEMF_no_refcount, page isn't de-assigned from
   1.152 +            // this domain yet. So count_info = 2
   1.153 +            unlikely((memflags & MEMF_no_refcount) &&
   1.154 +                     ((x & (PGC_count_mask | PGC_allocated)) !=
   1.155 +                      (2 | PGC_allocated))) ||
   1.156 +
   1.157              unlikely(_nd != _d)) {
   1.158              struct domain* nd = unpickle_domptr(_nd);
   1.159              if (nd == NULL) {
   1.160                  DPRINTK("gnttab_transfer: Bad page %p: ed=%p(%u) 0x%x, "
   1.161                          "sd=%p 0x%x,"
   1.162 -                        " caf=%016lx, taf=%" PRtype_info "\n",
   1.163 +                        " caf=%016lx, taf=%" PRtype_info
   1.164 +                        " memflags 0x%x\n",
   1.165                          (void *) page_to_mfn(page),
   1.166                          d, d->domain_id, _d,
   1.167                          nd, _nd,
   1.168                          x,
   1.169 -                        page->u.inuse.type_info);
   1.170 +                        page->u.inuse.type_info,
   1.171 +                        memflags);
   1.172              } else {
   1.173                  DPRINTK("gnttab_transfer: Bad page %p: ed=%p(%u) 0x%x, "
   1.174                          "sd=%p(%u) 0x%x,"
   1.175 -                        " caf=%016lx, taf=%" PRtype_info "\n",
   1.176 +                        " caf=%016lx, taf=%" PRtype_info
   1.177 +                        " memflags 0x%x\n",
   1.178                          (void *) page_to_mfn(page),
   1.179                          d, d->domain_id, _d,
   1.180                          nd, nd->domain_id, _nd,
   1.181                          x,
   1.182 -                        page->u.inuse.type_info);
   1.183 +                        page->u.inuse.type_info,
   1.184 +                        memflags);
   1.185              }
   1.186              spin_unlock(&d->page_alloc_lock);
   1.187              return -1;
   1.188 @@ -1361,8 +1399,6 @@ guest_physmap_add_page(struct domain *d,
   1.189      BUG_ON(!mfn_valid(mfn));
   1.190      ret = get_page(mfn_to_page(mfn), d);
   1.191      BUG_ON(ret == 0);
   1.192 -    BUG_ON(page_get_owner(mfn_to_page(mfn)) == d &&
   1.193 -           get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY);
   1.194      set_gpfn_from_mfn(mfn, gpfn);
   1.195      smp_mb();
   1.196      assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, ASSIGN_writable);