direct-io.hg

changeset 12432:371d2837a1fe

[IA64] Preliminary cleanup of ia64 mm.c for blktap dom0 mount support.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Fri Nov 10 11:34:39 2006 -0700 (2006-11-10)
parents 4816a891b3d6
children 3ac52066af81
files xen/arch/ia64/xen/mm.c xen/include/asm-ia64/perfc_defn.h
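
In outline, this change folds the repeated "invalidate the M2P entry, maybe clear PGC_allocated, flush, put_page()" tail of assign_domain_page_replace(), assign_domain_page_cmpxchg_rel(), zap_domain_page_one(), and destroy_grant_host_mapping() into two helpers, domain_put_page() and domain_page_flush_and_put(). Below is a minimal standalone C sketch of the resulting call shape; all Xen types and helpers are stubbed out for illustration, and nothing in it is the real Xen API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stub types standing in for Xen's struct domain / struct page_info. */
    struct domain { int id; };
    struct page_info { struct domain *owner; int count; };

    static void put_page(struct page_info *p) { p->count--; }

    /* Flush, then drop the caller's page reference: callers no longer
     * pair a flush call with an explicit put_page(). */
    static void domain_page_flush_and_put(struct domain *d,
                                          struct page_info *page)
    {
        printf("flush vTLB for domain %d\n", d->id);
        put_page(page);
    }

    /* The common tail of the zap/replace paths: invalidate the M2P
     * entry if the page is ours (or ownerless), optionally clear
     * PGC_allocated, then flush and put. */
    static void domain_put_page(struct domain *d, struct page_info *page,
                                bool clear_pgc_allocated)
    {
        if (page->owner == d || page->owner == NULL)
            printf("set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY)\n");
        if (clear_pgc_allocated)
            printf("try_to_clear_PGC_allocate(d, page)\n");
        domain_page_flush_and_put(d, page);
    }

    int main(void)
    {
        struct domain d = { 0 };
        struct page_info page = { &d, 1 };
        domain_put_page(&d, &page, true); /* as in zap_domain_page_one() */
        return 0;
    }
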
line diff
     1.1 --- a/xen/arch/ia64/xen/mm.c	Fri Nov 10 11:19:57 2006 -0700
     1.2 +++ b/xen/arch/ia64/xen/mm.c	Fri Nov 10 11:34:39 2006 -0700
     1.3 @@ -36,7 +36,7 @@
     1.4   * 
     1.5   *   operations on this structure:
     1.6   *   - global tlb purge
     1.7 - *     vcpu_ptc_g(), vcpu_ptc_ga() and domain_page_flush()
     1.8 + *     vcpu_ptc_g(), vcpu_ptc_ga() and domain_page_flush_and_put()
     1.9   *     I.e. callers of domain_flush_vtlb_range() and domain_flush_vtlb_all()
    1.10   *     These functions invalidate VHPT entry and vcpu->arch.{i, d}tlb
    1.11   * 
    1.12 @@ -179,8 +179,9 @@
    1.13  #include <asm/page.h>
    1.14  #include <public/memory.h>
    1.15  
    1.16 -static void domain_page_flush(struct domain* d, unsigned long mpaddr,
    1.17 -                              volatile pte_t* ptep, pte_t old_pte);
    1.18 +static void domain_page_flush_and_put(struct domain* d, unsigned long mpaddr,
    1.19 +                                      volatile pte_t* ptep, pte_t old_pte, 
    1.20 +                                      struct page_info* page);
    1.21  
    1.22  extern unsigned long ia64_iobase;
    1.23  
    1.24 @@ -1038,6 +1039,25 @@ assign_domain_mach_page(struct domain *d
    1.25      return mpaddr;
    1.26  }
    1.27  
    1.28 +static void
    1.29 +domain_put_page(struct domain* d, unsigned long mpaddr,
    1.30 +                volatile pte_t* ptep, pte_t old_pte, int clear_PGC_allocate)
    1.31 +{
    1.32 +    unsigned long mfn = pte_pfn(old_pte);
    1.33 +    struct page_info* page = mfn_to_page(mfn);
    1.34 +
    1.35 +    if (page_get_owner(page) == d ||
    1.36 +        page_get_owner(page) == NULL) {
    1.37 +        BUG_ON(get_gpfn_from_mfn(mfn) != (mpaddr >> PAGE_SHIFT));
    1.38 +        set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
    1.39 +    }
    1.40 +
    1.41 +    if (clear_PGC_allocate)
    1.42 +        try_to_clear_PGC_allocate(d, page);
    1.43 +
    1.44 +    domain_page_flush_and_put(d, mpaddr, ptep, old_pte, page);
    1.45 +}
    1.46 +
    1.47  // caller must get_page(mfn_to_page(mfn)) before call.
    1.48  // caller must call set_gpfn_from_mfn() before call if necessary.
    1.49  // because set_gpfn_from_mfn() result must be visible before pte xchg
    1.50 @@ -1068,18 +1088,7 @@ assign_domain_page_replace(struct domain
    1.51          //   => create_host_mapping()
    1.52          //      => assign_domain_page_replace()
    1.53          if (mfn != old_mfn) {
    1.54 -            struct page_info* old_page = mfn_to_page(old_mfn);
    1.55 -
    1.56 -            if (page_get_owner(old_page) == d ||
    1.57 -                page_get_owner(old_page) == NULL) {
    1.58 -                BUG_ON(get_gpfn_from_mfn(old_mfn) != (mpaddr >> PAGE_SHIFT));
    1.59 -                set_gpfn_from_mfn(old_mfn, INVALID_M2P_ENTRY);
    1.60 -            }
    1.61 -
    1.62 -            domain_page_flush(d, mpaddr, pte, old_pte);
    1.63 -
    1.64 -            try_to_clear_PGC_allocate(d, old_page);
    1.65 -            put_page(old_page);
    1.66 +            domain_put_page(d, mpaddr, pte, old_pte, 1);
    1.67          }
    1.68      }
    1.69      perfc_incrc(assign_domain_page_replace);
    1.70 @@ -1143,8 +1152,7 @@ assign_domain_page_cmpxchg_rel(struct do
    1.71  
    1.72      set_gpfn_from_mfn(old_mfn, INVALID_M2P_ENTRY);
    1.73  
    1.74 -    domain_page_flush(d, mpaddr, pte, old_pte);
    1.75 -    put_page(old_page);
    1.76 +    domain_page_flush_and_put(d, mpaddr, pte, old_pte, old_page);
    1.77      perfc_incrc(assign_domain_pge_cmpxchg_rel);
    1.78      return 0;
    1.79  }
    1.80 @@ -1201,23 +1209,12 @@ zap_domain_page_one(struct domain *d, un
    1.81      page = mfn_to_page(mfn);
    1.82      BUG_ON((page->count_info & PGC_count_mask) == 0);
    1.83  
    1.84 -    if (page_get_owner(page) == d ||
    1.85 -        page_get_owner(page) == NULL) {
    1.86 -        // exchange_memory() calls
    1.87 -        //   steal_page()
    1.88 -        //     page owner is set to NULL
    1.89 -        //   guest_physmap_remove_page()
    1.90 -        //     zap_domain_page_one()
    1.91 -        BUG_ON(get_gpfn_from_mfn(mfn) != (mpaddr >> PAGE_SHIFT));
    1.92 -        set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
    1.93 -    }
    1.94 -
    1.95 -    domain_page_flush(d, mpaddr, pte, old_pte);
    1.96 -
    1.97 -    if (page_get_owner(page) != NULL) {
    1.98 -        try_to_clear_PGC_allocate(d, page);
    1.99 -    }
   1.100 -    put_page(page);
   1.101 +    // exchange_memory() calls
   1.102 +    //   steal_page()
   1.103 +    //     page owner is set to NULL
   1.104 +    //   guest_physmap_remove_page()
   1.105 +    //     zap_domain_page_one()
   1.106 +    domain_put_page(d, mpaddr, pte, old_pte, (page_get_owner(page) != NULL));
   1.107      perfc_incrc(zap_dcomain_page_one);
   1.108  }
   1.109  
   1.110 @@ -1445,12 +1442,13 @@ destroy_grant_host_mapping(unsigned long
   1.111                 unsigned long mfn, unsigned int flags)
   1.112  {
   1.113      struct domain* d = current->domain;
   1.114 +    unsigned long gpfn = gpaddr >> PAGE_SHIFT;
   1.115      volatile pte_t* pte;
   1.116      unsigned long cur_arflags;
   1.117      pte_t cur_pte;
   1.118      pte_t new_pte;
   1.119      pte_t old_pte;
   1.120 -    struct page_info* page;
   1.121 +    struct page_info* page = mfn_to_page(mfn);
   1.122  
   1.123      if (flags & (GNTMAP_application_map | GNTMAP_contains_pte)) {
   1.124          gdprintk(XENLOG_INFO, "%s: flags 0x%x\n", __func__, flags);
   1.125 @@ -1467,7 +1465,8 @@ destroy_grant_host_mapping(unsigned long
   1.126   again:
   1.127      cur_arflags = pte_val(*pte) & ~_PAGE_PPN_MASK;
   1.128      cur_pte = pfn_pte(mfn, __pgprot(cur_arflags));
   1.129 -    if (!pte_present(cur_pte)) {
   1.130 +    if (!pte_present(cur_pte) ||
   1.131 +        (page_get_owner(page) == d && get_gpfn_from_mfn(mfn) == gpfn)) {
   1.132          gdprintk(XENLOG_INFO, "%s: gpaddr 0x%lx mfn 0x%lx cur_pte 0x%lx\n",
   1.133                  __func__, gpaddr, mfn, pte_val(cur_pte));
   1.134          return GNTST_general_error;
   1.135 @@ -1492,11 +1491,10 @@ destroy_grant_host_mapping(unsigned long
   1.136      }
   1.137      BUG_ON(pte_pfn(old_pte) != mfn);
   1.138  
   1.139 -    domain_page_flush(d, gpaddr, pte, old_pte);
   1.140 -
   1.141 -    page = mfn_to_page(mfn);
   1.142 -    BUG_ON(page_get_owner(page) == d);//try_to_clear_PGC_allocate(d, page) is not needed.
   1.143 -    put_page(page);
   1.144 +    /* try_to_clear_PGC_allocate(d, page) is not needed. */
   1.145 +    BUG_ON(page_get_owner(page) == d &&
   1.146 +           get_gpfn_from_mfn(mfn) == gpfn);
   1.147 +    domain_page_flush_and_put(d, gpaddr, pte, old_pte, page);
   1.148  
   1.149      perfc_incrc(destroy_grant_host_mapping);
   1.150      return GNTST_okay;
   1.151 @@ -1580,10 +1578,12 @@ steal_page(struct domain *d, struct page
   1.152          // page->u.inused._domain = 0;
   1.153          _nd = x >> 32;
   1.154  
   1.155 -        if (unlikely(!(memflags & MEMF_no_refcount) &&
   1.156 +        if (
    1.157 +            // when !MEMF_no_refcount, the page may already have been
    1.158 +            // put_page()'d, or will be put_page()'d later if queued.
   1.159 +            unlikely(!(memflags & MEMF_no_refcount) &&
   1.160                       ((x & (PGC_count_mask | PGC_allocated)) !=
   1.161                        (1 | PGC_allocated))) ||
   1.162 -
   1.163              // when MEMF_no_refcount, page isn't de-assigned from
   1.164              // this domain yet. So count_info = 2
   1.165              unlikely((memflags & MEMF_no_refcount) &&
   1.166 @@ -1664,11 +1664,10 @@ guest_physmap_remove_page(struct domain 
   1.167      perfc_incrc(guest_physmap_remove_page);
   1.168  }
   1.169  
   1.170 -//XXX sledgehammer.
   1.171 -//    flush finer range.
   1.172  static void
   1.173 -domain_page_flush(struct domain* d, unsigned long mpaddr,
   1.174 -                  volatile pte_t* ptep, pte_t old_pte)
   1.175 +domain_page_flush_and_put(struct domain* d, unsigned long mpaddr,
   1.176 +                          volatile pte_t* ptep, pte_t old_pte,
   1.177 +                          struct page_info* page)
   1.178  {
   1.179  #ifdef CONFIG_XEN_IA64_TLB_TRACK
   1.180      struct tlb_track_entry* entry;
   1.181 @@ -1678,26 +1677,63 @@ domain_page_flush(struct domain* d, unsi
   1.182          shadow_mark_page_dirty(d, mpaddr >> PAGE_SHIFT);
   1.183  
   1.184  #ifndef CONFIG_XEN_IA64_TLB_TRACK
   1.185 +    //XXX sledgehammer.
   1.186 +    //    flush finer range.
   1.187      domain_flush_vtlb_all();
   1.188 +    put_page(page);
   1.189  #else
   1.190      switch (tlb_track_search_and_remove(d->arch.tlb_track,
   1.191                                          ptep, old_pte, &entry)) {
   1.192      case TLB_TRACK_NOT_TRACKED:
   1.193          // dprintk(XENLOG_WARNING, "%s TLB_TRACK_NOT_TRACKED\n", __func__);
    1.194 +        /* This page was zapped from this domain
    1.195 +         * by memory decrease, exchange, or dom0vp_zap_physmap.
    1.196 +         * I.e. the page was zapped either to return it to xen
    1.197 +         * (balloon driver or DMA page allocation) or
    1.198 +         * to unmap a foreign-domain page from this domain.
    1.199 +         * In the former case the page is to be freed, so
    1.200 +         * freeing it can be deferred and batched.
    1.201 +         * In the latter case the page is unmapped, so
    1.202 +         * it must be flushed. To optimize this, we
    1.203 +         * queue the page and flush the vTLB only once.
    1.204 +         * I.e. the caller must call dfree_flush() explicitly.
    1.205 +         */
   1.206          domain_flush_vtlb_all();
   1.207 +        put_page(page);
   1.208          break;
   1.209      case TLB_TRACK_NOT_FOUND:
   1.210 +        // dprintk(XENLOG_WARNING, "%s TLB_TRACK_NOT_FOUND\n", __func__);
    1.211 +        /* This page was zapped from this domain
    1.212 +         * by a grant table page unmap.
    1.213 +         * Luckily the domain that mapped this page never
    1.214 +         * accessed it, so no vTLB flush is needed.
    1.215 +         * Probably the domain only used it for DMA.
    1.216 +         */
   1.217          /* do nothing */
   1.218 -        // dprintk(XENLOG_WARNING, "%s TLB_TRACK_NOT_FOUND\n", __func__);
    1.219 +        put_page(page);
   1.220          break;
   1.221      case TLB_TRACK_FOUND:
   1.222          // dprintk(XENLOG_WARNING, "%s TLB_TRACK_FOUND\n", __func__);
    1.223 +        /* This page was zapped from this domain
    1.224 +         * by a grant table page unmap.
    1.225 +         * Fortunately this page was accessed via only one
    1.226 +         * virtual address, so it is easy to flush.
    1.227 +         */
   1.228          domain_flush_vtlb_track_entry(d, entry);
   1.229          tlb_track_free_entry(d->arch.tlb_track, entry);
    1.230 +        put_page(page);
   1.231          break;
   1.232      case TLB_TRACK_MANY:
   1.233          gdprintk(XENLOG_INFO, "%s TLB_TRACK_MANY\n", __func__);
    1.234 +        /* This page was zapped from this domain
    1.235 +         * by a grant table page unmap.
    1.236 +         * Unfortunately this page was accessed via many virtual
    1.237 +         * addresses (or too many times via a single one), so we
    1.238 +         * gave up tracking its virtual addresses and a full
    1.239 +         * vTLB flush is necessary.
    1.240 +         */
   1.241          domain_flush_vtlb_all();
    1.242 +        put_page(page);
   1.243          break;
   1.244      case TLB_TRACK_AGAIN:
   1.245          gdprintk(XENLOG_ERR, "%s TLB_TRACK_AGAIN\n", __func__);
   1.246 @@ -1705,7 +1741,7 @@ domain_page_flush(struct domain* d, unsi
   1.247          break;
   1.248      }
   1.249  #endif
   1.250 -    perfc_incrc(domain_page_flush);
   1.251 +    perfc_incrc(domain_page_flush_and_put);
   1.252  }
   1.253  
   1.254  int
     2.1 --- a/xen/include/asm-ia64/perfc_defn.h	Fri Nov 10 11:19:57 2006 -0700
     2.2 +++ b/xen/include/asm-ia64/perfc_defn.h	Fri Nov 10 11:34:39 2006 -0700
     2.3 @@ -134,7 +134,7 @@ PERFCOUNTER_CPU(steal_page_refcount,    
     2.4  PERFCOUNTER_CPU(steal_page,                     "steal_page")
     2.5  PERFCOUNTER_CPU(guest_physmap_add_page,         "guest_physmap_add_page")
     2.6  PERFCOUNTER_CPU(guest_physmap_remove_page,      "guest_physmap_remove_page")
     2.7 -PERFCOUNTER_CPU(domain_page_flush,              "domain_page_flush")
     2.8 +PERFCOUNTER_CPU(domain_page_flush_and_put,      "domain_page_flush_and_put")
     2.9  
    2.10  // dom0vp
    2.11  PERFCOUNTER_CPU(dom0vp_phystomach,              "dom0vp_phystomach")
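
For reference, the shape of the CONFIG_XEN_IA64_TLB_TRACK switch inside domain_page_flush_and_put() above: the tracking result selects the cheapest sufficient flush, and every outcome ends by dropping the page reference. A compilable sketch under stub types follows; only the TLB_TRACK_* names mirror the patch (the TLB_TRACK_AGAIN error case is omitted), and everything else is illustrative:

    #include <stdio.h>

    struct domain { int id; };
    struct page_info { int count; };

    enum tlb_track_result {
        TLB_TRACK_NOT_TRACKED, /* entry not tracked at all: full flush */
        TLB_TRACK_NOT_FOUND,   /* mapping never entered a TLB: no flush */
        TLB_TRACK_FOUND,       /* one tracked entry: targeted flush */
        TLB_TRACK_MANY,        /* tracking overflowed: full flush */
    };

    static void put_page(struct page_info *p) { p->count--; }

    static void flush_and_put(struct domain *d, struct page_info *page,
                              enum tlb_track_result res)
    {
        switch (res) {
        case TLB_TRACK_FOUND:
            printf("flush the single tracked entry for domain %d\n", d->id);
            break;
        case TLB_TRACK_NOT_FOUND:
            /* The mapping domain never touched the page; nothing to flush. */
            break;
        case TLB_TRACK_NOT_TRACKED:
        case TLB_TRACK_MANY:
            printf("full vTLB flush for domain %d\n", d->id);
            break;
        }
        put_page(page); /* every outcome releases the reference */
    }

    int main(void)
    {
        struct domain d = { 1 };
        struct page_info pg = { 1 };
        flush_and_put(&d, &pg, TLB_TRACK_FOUND);
        return 0;
    }
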