ia64/xen-unstable

changeset 16515:0c234da66b4a

x86: clean up mm.c and cache 'current' where appropriate.

The attached patch caches the current vcpu and current->domain in local
variables where appropriate, and makes the definition of mod_l4_entry()
match the other mod_l?_entry() functions. Since current->domain was
always passed to mod_l4_entry(), this is not a functional change.
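
As an aside, the idiom is easy to demonstrate outside of Xen. The
sketch below is a minimal, self-contained stand-in (the struct layouts
and the get_current() helper are illustrative, not the hypervisor's):
each use of 'current' re-evaluates an accessor, so paths that consult
it repeatedly read it once into a local and reuse both the vcpu and
its domain:

    #include <stdio.h>

    struct domain { int domain_id; };
    struct vcpu   { struct domain *domain; };

    /* Stand-in for the real accessor; imagine it re-derives the vcpu
     * pointer from per-cpu state on every call. */
    static struct vcpu *get_current(void)
    {
        static struct domain dom0 = { 0 };
        static struct vcpu   v0   = { &dom0 };
        return &v0;
    }
    #define current (get_current())

    static void update_entries(int n)
    {
        struct vcpu   *curr = current;       /* evaluate 'current' once */
        struct domain *d    = curr->domain;  /* ... and cache its domain */
        int i;

        for ( i = 0; i < n; i++ )            /* every use hits the locals */
            printf("update %d for domain %d\n", i, d->domain_id);
    }

    int main(void)
    {
        update_entries(3);
        return 0;
    }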

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Dec 04 11:04:57 2007 +0000 (2007-12-04)
parents ebfb3b26010d
children aa430556d33f
files xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c	Tue Dec 04 10:50:28 2007 +0000
+++ b/xen/arch/x86/mm.c	Tue Dec 04 11:04:57 2007 +0000
@@ -620,6 +620,7 @@ get_page_from_l1e(
     unsigned long mfn = l1e_get_pfn(l1e);
     struct page_info *page = mfn_to_page(mfn);
     uint32_t l1f = l1e_get_flags(l1e);
+    struct vcpu *curr = current;
     int okay;
 
     if ( !(l1f & _PAGE_PRESENT) )
@@ -635,7 +636,7 @@ get_page_from_l1e(
     {
         /* DOMID_IO reverts to caller for privilege checks. */
         if ( d == dom_io )
-            d = current->domain;
+            d = curr->domain;
 
         if ( !iomem_access_permitted(d, mfn, mfn) )
         {
@@ -653,7 +654,7 @@ get_page_from_l1e(
      * qemu-dm helper process in dom0 to map the domain's memory without
      * messing up the count of "real" writable mappings.) */
     okay = (((l1f & _PAGE_RW) && 
-             !(unlikely(paging_mode_external(d) && (d != current->domain))))
+             !(unlikely(paging_mode_external(d) && (d != curr->domain))))
             ? get_page_and_type(page, d, PGT_writable_page)
             : get_page(page, d));
     if ( !okay )
@@ -673,7 +674,7 @@ get_page_from_l1e(
         {
             if ( (l1f & _PAGE_RW) &&
                  !(unlikely(paging_mode_external(d) &&
-                            (d != current->domain))) )
+                            (d != curr->domain))) )
                 put_page_type(page);
             put_page(page);
             MEM_LOG("Attempt to change cache attributes of Xen heap page");
@@ -1384,14 +1385,15 @@ static int mod_l1_entry(l1_pgentry_t *pl
                         unsigned long gl1mfn)
 {
     l1_pgentry_t ol1e;
-    struct domain *d = current->domain;
+    struct vcpu *curr = current;
+    struct domain *d = curr->domain;
     unsigned long mfn;
 
     if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
         return 0;
 
     if ( unlikely(paging_mode_refcounts(d)) )
-        return UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, current);
+        return UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr);
 
     if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
     {
@@ -1413,12 +1415,12 @@ static int mod_l1_entry(l1_pgentry_t *pl
 
         /* Fast path for identical mapping, r/w and presence. */
         if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT) )
-            return UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, current);
+            return UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr);
 
         if ( unlikely(!get_page_from_l1e(nl1e, FOREIGNDOM)) )
             return 0;
         
-        if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, current)) )
+        if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr)) )
         {
             put_page_from_l1e(nl1e, d);
             return 0;
@@ -1426,7 +1428,7 @@ static int mod_l1_entry(l1_pgentry_t *pl
     }
     else
     {
-        if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, current)) )
+        if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr)) )
             return 0;
     }
 
@@ -1442,7 +1444,8 @@ static int mod_l2_entry(l2_pgentry_t *pl
                         unsigned long type)
 {
     l2_pgentry_t ol2e;
-    struct domain *d = current->domain;
+    struct vcpu *curr = current;
+    struct domain *d = curr->domain;
 
     if ( unlikely(!is_guest_l2_slot(d, type, pgentry_ptr_to_slot(pl2e))) )
     {
@@ -1466,18 +1469,18 @@ static int mod_l2_entry(l2_pgentry_t *pl
 
         /* Fast path for identical mapping and presence. */
         if ( !l2e_has_changed(ol2e, nl2e, _PAGE_PRESENT))
-            return UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, current);
+            return UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr);
 
         if ( unlikely(!get_page_from_l2e(nl2e, pfn, d)) )
             return 0;
 
-        if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, current)) )
+        if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr)) )
         {
             put_page_from_l2e(nl2e, pfn);
             return 0;
         }
     }
-    else if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, current)) )
+    else if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr)) )
     {
         return 0;
     }
@@ -1494,7 +1497,8 @@ static int mod_l3_entry(l3_pgentry_t *pl
                         unsigned long pfn)
 {
     l3_pgentry_t ol3e;
-    struct domain *d = current->domain;
+    struct vcpu *curr = current;
+    struct domain *d = curr->domain;
     int okay;
 
     if ( unlikely(!is_guest_l3_slot(pgentry_ptr_to_slot(pl3e))) )
@@ -1528,18 +1532,18 @@ static int mod_l3_entry(l3_pgentry_t *pl
 
         /* Fast path for identical mapping and presence. */
         if (!l3e_has_changed(ol3e, nl3e, _PAGE_PRESENT))
-            return UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, current);
+            return UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr);
 
         if ( unlikely(!get_page_from_l3e(nl3e, pfn, d)) )
             return 0;
 
-        if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, current)) )
+        if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr)) )
         {
             put_page_from_l3e(nl3e, pfn);
             return 0;
         }
     }
-    else if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, current)) )
+    else if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr)) )
     {
         return 0;
     }
@@ -1558,11 +1562,12 @@ static int mod_l3_entry(l3_pgentry_t *pl
 #if CONFIG_PAGING_LEVELS >= 4
 
 /* Update the L4 entry at pl4e to new value nl4e. pl4e is within frame pfn. */
-static int mod_l4_entry(struct domain *d,
-                        l4_pgentry_t *pl4e, 
+static int mod_l4_entry(l4_pgentry_t *pl4e, 
                         l4_pgentry_t nl4e, 
                         unsigned long pfn)
 {
+    struct vcpu *curr = current;
+    struct domain *d = curr->domain;
     l4_pgentry_t ol4e;
 
     if ( unlikely(!is_guest_l4_slot(d, pgentry_ptr_to_slot(pl4e))) )
@@ -1583,22 +1588,22 @@ static int mod_l4_entry(struct domain *d
             return 0;
         }
 
-        adjust_guest_l4e(nl4e, current->domain);
+        adjust_guest_l4e(nl4e, d);
 
         /* Fast path for identical mapping and presence. */
         if (!l4e_has_changed(ol4e, nl4e, _PAGE_PRESENT))
-            return UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, current);
-
-        if ( unlikely(!get_page_from_l4e(nl4e, pfn, current->domain)) )
+            return UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr);
+
+        if ( unlikely(!get_page_from_l4e(nl4e, pfn, d)) )
             return 0;
 
-        if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, current)) )
+        if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr)) )
         {
             put_page_from_l4e(nl4e, pfn);
             return 0;
         }
     }
-    else if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, current)) )
+    else if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr)) )
     {
         return 0;
     }
@@ -1937,7 +1942,6 @@ int new_guest_cr3(unsigned long mfn)
         okay = paging_mode_refcounts(d)
             ? 0 /* Old code was broken, but what should it be? */
             : mod_l4_entry(
-                    d,
                     __va(pagetable_get_paddr(v->arch.guest_table)),
                     l4e_from_pfn(
                         mfn,
@@ -2169,7 +2173,7 @@ int do_mmuext_op(
             type = PGT_l4_page_table;
 
         pin_page:
-            rc = xsm_memory_pin_page(current->domain, page);
+            rc = xsm_memory_pin_page(d, page);
             if ( rc )
                 break;
 
@@ -2459,14 +2463,14 @@ int do_mmu_update(
              */
         case MMU_NORMAL_PT_UPDATE:
 
-            rc = xsm_mmu_normal_update(current->domain, req.val);
+            rc = xsm_mmu_normal_update(d, req.val);
             if ( rc )
                 break;
 
            gmfn = req.ptr >> PAGE_SHIFT;
            mfn = gmfn_to_mfn(d, gmfn);
 
-            if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
+            if ( unlikely(!get_page_from_pagenr(mfn, d)) )
            {
                MEM_LOG("Could not get page for normal update");
                break;
@@ -2520,7 +2524,7 @@ int do_mmu_update(
                case PGT_l4_page_table:
                {
                    l4_pgentry_t l4e = l4e_from_intpte(req.val);
-                    okay = mod_l4_entry(d, va, l4e, mfn);
+                    okay = mod_l4_entry(va, l4e, mfn);
                }
                break;
 #endif
@@ -2553,7 +2557,7 @@ int do_mmu_update(
            mfn = req.ptr >> PAGE_SHIFT;
            gpfn = req.val;
 
-            rc = xsm_mmu_machphys_update(current->domain, mfn);
+            rc = xsm_mmu_machphys_update(d, mfn);
            if ( rc )
                break;
 
@@ -2832,6 +2836,7 @@ int create_grant_host_mapping(uint64_t a
 int replace_grant_host_mapping(
     uint64_t addr, unsigned long frame, uint64_t new_addr, unsigned int flags)
 {
+    struct vcpu *curr = current;
     l1_pgentry_t *pl1e, ol1e;
     unsigned long gl1mfn;
     int rc;
@@ -2839,16 +2844,16 @@ int replace_grant_host_mapping(
     if ( flags & GNTMAP_contains_pte )
     {
         if ( !new_addr )
-            return destroy_grant_pte_mapping(addr, frame, current->domain);
+            return destroy_grant_pte_mapping(addr, frame, curr->domain);
         
         MEM_LOG("Unsupported grant table operation");
         return GNTST_general_error;
     }
 
     if ( !new_addr )
-        return destroy_grant_va_mapping(addr, frame, current);
-
-    pl1e = guest_map_l1e(current, new_addr, &gl1mfn);
+        return destroy_grant_va_mapping(addr, frame, curr);
+
+    pl1e = guest_map_l1e(curr, new_addr, &gl1mfn);
     if ( !pl1e )
     {
         MEM_LOG("Could not find L1 PTE for address %lx",
@@ -2857,19 +2862,18 @@ int replace_grant_host_mapping(
     }
     ol1e = *pl1e;
 
-    if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, l1e_empty(),
-                                gl1mfn, current)) )
+    if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, l1e_empty(), gl1mfn, curr)) )
     {
         MEM_LOG("Cannot delete PTE entry at %p", (unsigned long *)pl1e);
-        guest_unmap_l1e(current, pl1e);
+        guest_unmap_l1e(curr, pl1e);
         return GNTST_general_error;
     }
 
-    guest_unmap_l1e(current, pl1e);
-
-    rc = replace_grant_va_mapping(addr, frame, ol1e, current);
-    if ( rc && !paging_mode_refcounts(current->domain) )
-        put_page_from_l1e(ol1e, current->domain);
+    guest_unmap_l1e(curr, pl1e);
+
+    rc = replace_grant_va_mapping(addr, frame, ol1e, curr);
+    if ( rc && !paging_mode_refcounts(curr->domain) )
+        put_page_from_l1e(ol1e, curr->domain);
 
     return rc;
 }
@@ -2983,8 +2987,8 @@ int do_update_va_mapping(unsigned long v
         switch ( (bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK) )
         {
         case UVMF_LOCAL:
-            if ( !paging_mode_enabled(d) 
-                 || (paging_invlpg(current, va) != 0) ) 
+            if ( !paging_mode_enabled(d) ||
+                 (paging_invlpg(v, va) != 0) ) 
                flush_tlb_one_local(va);
            break;
        case UVMF_ALL:
@@ -3093,6 +3097,7 @@ long do_set_gdt(XEN_GUEST_HANDLE(ulong) 
 {
     int nr_pages = (entries + 511) / 512;
     unsigned long frames[16];
+    struct vcpu *curr = current;
     long ret;
 
     /* Rechecked in set_gdt, but ensures a sane limit for copy_from_user(). */
@@ -3102,12 +3107,12 @@ long do_set_gdt(XEN_GUEST_HANDLE(ulong) 
     if ( copy_from_guest(frames, frame_list, nr_pages) )
         return -EFAULT;
 
-    LOCK_BIGLOCK(current->domain);
-
-    if ( (ret = set_gdt(current, frames, entries)) == 0 )
+    LOCK_BIGLOCK(curr->domain);
+
+    if ( (ret = set_gdt(curr, frames, entries)) == 0 )
        flush_tlb_local();
 
-    UNLOCK_BIGLOCK(current->domain);
+    UNLOCK_BIGLOCK(curr->domain);
 
     return ret;
 }
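
For reference, after this change the four entry-update helpers share a
matching shape. A sketch of the resulting declarations follows; the
parameter names are reconstructed from the hunks above, so treat them
as approximate rather than authoritative:

    static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
                            unsigned long gl1mfn);
    static int mod_l2_entry(l2_pgentry_t *pl2e, l2_pgentry_t nl2e,
                            unsigned long pfn, unsigned long type);
    static int mod_l3_entry(l3_pgentry_t *pl3e, l3_pgentry_t nl3e,
                            unsigned long pfn);
    static int mod_l4_entry(l4_pgentry_t *pl4e, l4_pgentry_t nl4e,
                            unsigned long pfn);

Each helper now obtains the acting domain from current internally,
which is why the new_guest_cr3() and do_mmu_update() call sites above
drop the explicit domain argument: mod_l4_entry(d, va, l4e, mfn)
becomes mod_l4_entry(va, l4e, mfn).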