ia64/xen-unstable

changeset 11009:7e9699af7e12

[XEN] Make per-cpu mm information explicitly PER_CPU.
Signed-off-by: Keir Fraser <keir@xensource.com>
author    kaf24@firebug.cl.cam.ac.uk
date      Tue Aug 08 14:18:57 2006 +0100 (2006-08-08)
parents   0caf8d9218cc
children  ce619ad2d139
files     xen/arch/x86/mm.c
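
The change below replaces the open-coded, NR_CPUS-sized percpu_info[] array (indexed by smp_processor_id() or v->processor) with Xen's per-CPU variable machinery: a named struct percpu_mm_info, a DEFINE_PER_CPU() instance, and this_cpu() accesses. The following stand-alone sketch illustrates that conversion only; NR_CPUS, smp_processor_id(), DEFINE_PER_CPU() and this_cpu() are simplified stand-ins here, not Xen's real definitions from its percpu headers.

#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for smp_processor_id(); a real kernel reads a per-CPU register. */
static unsigned int fake_cpu;
#define smp_processor_id() (fake_cpu)

/* Before the patch: a static array indexed by CPU number. */
static struct {
    unsigned int deferred_ops;
} percpu_info[NR_CPUS];

/* After the patch: a named type plus a per-CPU variable.  These two macros
 * are simplified stand-ins for the helpers Xen's percpu headers provide. */
#define DEFINE_PER_CPU(type, name) type __per_cpu_##name[NR_CPUS]
#define this_cpu(name)             (__per_cpu_##name[smp_processor_id()])

struct percpu_mm_info {
    unsigned int deferred_ops;
};
static DEFINE_PER_CPU(struct percpu_mm_info, percpu_mm_info);

int main(void)
{
    percpu_info[smp_processor_id()].deferred_ops |= 1;  /* old access pattern */
    this_cpu(percpu_mm_info).deferred_ops |= 1;         /* new access pattern */
    printf("old=%u new=%u\n",
           percpu_info[0].deferred_ops,
           this_cpu(percpu_mm_info).deferred_ops);
    return 0;
}

Because this_cpu() always refers to the CPU that is currently executing, the patch also drops the explicit cpu/v->processor parameters from set_foreigndom() and process_deferred_ops(), and adds ASSERT(v->processor == smp_processor_id()) at the sites that previously indexed by v->processor.
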
line diff
     1.1 --- a/xen/arch/x86/mm.c	Tue Aug 08 14:02:14 2006 +0100
     1.2 +++ b/xen/arch/x86/mm.c	Tue Aug 08 14:18:57 2006 +0100
     1.3 @@ -139,20 +139,21 @@ static int mod_l2_entry(l2_pgentry_t *, 
     1.4  static int mod_l1_entry(l1_pgentry_t *, l1_pgentry_t);
     1.5  
     1.6  /* Used to defer flushing of memory structures. */
     1.7 -static struct {
     1.8 +struct percpu_mm_info {
     1.9  #define DOP_FLUSH_TLB      (1<<0) /* Flush the local TLB.                    */
    1.10  #define DOP_FLUSH_ALL_TLBS (1<<1) /* Flush TLBs of all VCPUs of current dom. */
    1.11  #define DOP_RELOAD_LDT     (1<<2) /* Reload the LDT shadow mapping.          */
    1.12      unsigned int   deferred_ops;
    1.13      /* If non-NULL, specifies a foreign subject domain for some operations. */
    1.14      struct domain *foreign;
    1.15 -} __cacheline_aligned percpu_info[NR_CPUS];
    1.16 +};
    1.17 +static DEFINE_PER_CPU(struct percpu_mm_info, percpu_mm_info);
    1.18  
    1.19  /*
    1.20   * Returns the current foreign domain; defaults to the currently-executing
    1.21   * domain if a foreign override hasn't been specified.
    1.22   */
    1.23 -#define FOREIGNDOM (percpu_info[smp_processor_id()].foreign ?: current->domain)
    1.24 +#define FOREIGNDOM (this_cpu(percpu_mm_info).foreign ?: current->domain)
    1.25  
    1.26  /* Private domain structs for DOMID_XEN and DOMID_IO. */
    1.27  static struct domain *dom_xen, *dom_io;
    1.28 @@ -190,8 +191,6 @@ void arch_init_memory(void)
    1.29  
    1.30      unsigned long i, pfn, rstart_pfn, rend_pfn;
    1.31  
    1.32 -    memset(percpu_info, 0, sizeof(percpu_info));
    1.33 -
    1.34      /*
    1.35       * Initialise our DOMID_XEN domain.
    1.36       * Any Xen-heap pages that we will allow to be mapped will have
    1.37 @@ -378,7 +377,8 @@ void invalidate_shadow_ldt(struct vcpu *
    1.38      }
    1.39  
    1.40      /* Dispose of the (now possibly invalid) mappings from the TLB.  */
    1.41 -    percpu_info[v->processor].deferred_ops |= DOP_FLUSH_TLB | DOP_RELOAD_LDT;
    1.42 +    ASSERT(v->processor == smp_processor_id());
    1.43 +    this_cpu(percpu_mm_info).deferred_ops |= DOP_FLUSH_TLB | DOP_RELOAD_LDT;
    1.44  }
    1.45  
    1.46  
    1.47 @@ -1503,7 +1503,7 @@ void free_page_type(struct page_info *pa
    1.48           * (e.g., update_va_mapping()) or we could end up modifying a page
    1.49           * that is no longer a page table (and hence screw up ref counts).
    1.50           */
    1.51 -        percpu_info[smp_processor_id()].deferred_ops |= DOP_FLUSH_ALL_TLBS;
    1.52 +        this_cpu(percpu_mm_info).deferred_ops |= DOP_FLUSH_ALL_TLBS;
    1.53  
    1.54          if ( unlikely(shadow_mode_enabled(owner)) )
    1.55          {
    1.56 @@ -1781,7 +1781,8 @@ int new_guest_cr3(unsigned long mfn)
    1.57                  /* Failure here is unrecoverable: the VCPU has no pagetable! */
    1.58                  MEM_LOG("Fatal error while installing new baseptr %lx", mfn);
    1.59                  domain_crash(d);
    1.60 -                percpu_info[v->processor].deferred_ops = 0;
    1.61 +                ASSERT(v->processor == smp_processor_id());
    1.62 +                this_cpu(percpu_mm_info).deferred_ops = 0;
    1.63                  return 0;
    1.64              }
    1.65          }
    1.66 @@ -1817,13 +1818,14 @@ int new_guest_cr3(unsigned long mfn)
    1.67      return 1;
    1.68  }
    1.69  
    1.70 -static void process_deferred_ops(unsigned int cpu)
    1.71 +static void process_deferred_ops(void)
    1.72  {
    1.73      unsigned int deferred_ops;
    1.74      struct domain *d = current->domain;
    1.75 -
    1.76 -    deferred_ops = percpu_info[cpu].deferred_ops;
    1.77 -    percpu_info[cpu].deferred_ops = 0;
    1.78 +    struct percpu_mm_info *info = &this_cpu(percpu_mm_info);
    1.79 +
    1.80 +    deferred_ops = info->deferred_ops;
    1.81 +    info->deferred_ops = 0;
    1.82  
    1.83      if ( deferred_ops & (DOP_FLUSH_ALL_TLBS|DOP_FLUSH_TLB) )
    1.84      {
    1.85 @@ -1838,19 +1840,20 @@ static void process_deferred_ops(unsigne
    1.86      if ( deferred_ops & DOP_RELOAD_LDT )
    1.87          (void)map_ldt_shadow_page(0);
    1.88  
    1.89 -    if ( unlikely(percpu_info[cpu].foreign != NULL) )
    1.90 +    if ( unlikely(info->foreign != NULL) )
    1.91      {
    1.92 -        put_domain(percpu_info[cpu].foreign);
    1.93 -        percpu_info[cpu].foreign = NULL;
    1.94 +        put_domain(info->foreign);
    1.95 +        info->foreign = NULL;
    1.96      }
    1.97  }
    1.98  
    1.99 -static int set_foreigndom(unsigned int cpu, domid_t domid)
   1.100 +static int set_foreigndom(domid_t domid)
   1.101  {
   1.102      struct domain *e, *d = current->domain;
   1.103 +    struct percpu_mm_info *info = &this_cpu(percpu_mm_info);
   1.104      int okay = 1;
   1.105  
   1.106 -    ASSERT(percpu_info[cpu].foreign == NULL);
   1.107 +    ASSERT(info->foreign == NULL);
   1.108  
   1.109      if ( likely(domid == DOMID_SELF) )
   1.110          goto out;
   1.111 @@ -1867,7 +1870,7 @@ static int set_foreigndom(unsigned int c
   1.112          {
   1.113          case DOMID_IO:
   1.114              get_knownalive_domain(dom_io);
   1.115 -            percpu_info[cpu].foreign = dom_io;
   1.116 +            info->foreign = dom_io;
   1.117              break;
   1.118          default:
   1.119              MEM_LOG("Dom %u cannot set foreign dom", d->domain_id);
   1.120 @@ -1877,18 +1880,18 @@ static int set_foreigndom(unsigned int c
   1.121      }
   1.122      else
   1.123      {
   1.124 -        percpu_info[cpu].foreign = e = find_domain_by_id(domid);
   1.125 +        info->foreign = e = find_domain_by_id(domid);
   1.126          if ( e == NULL )
   1.127          {
   1.128              switch ( domid )
   1.129              {
   1.130              case DOMID_XEN:
   1.131                  get_knownalive_domain(dom_xen);
   1.132 -                percpu_info[cpu].foreign = dom_xen;
   1.133 +                info->foreign = dom_xen;
   1.134                  break;
   1.135              case DOMID_IO:
   1.136                  get_knownalive_domain(dom_io);
   1.137 -                percpu_info[cpu].foreign = dom_io;
   1.138 +                info->foreign = dom_io;
   1.139                  break;
   1.140              default:
   1.141                  MEM_LOG("Unknown domain '%u'", domid);
   1.142 @@ -1928,7 +1931,7 @@ int do_mmuext_op(
   1.143      unsigned int foreigndom)
   1.144  {
   1.145      struct mmuext_op op;
   1.146 -    int rc = 0, i = 0, okay, cpu = smp_processor_id();
   1.147 +    int rc = 0, i = 0, okay;
   1.148      unsigned long mfn, type;
   1.149      unsigned int done = 0;
   1.150      struct page_info *page;
   1.151 @@ -1946,7 +1949,7 @@ int do_mmuext_op(
   1.152              (void)copy_from_guest(&done, pdone, 1);
   1.153      }
   1.154  
   1.155 -    if ( !set_foreigndom(cpu, foreigndom) )
   1.156 +    if ( !set_foreigndom(foreigndom) )
   1.157      {
   1.158          rc = -ESRCH;
   1.159          goto out;
   1.160 @@ -2042,7 +2045,7 @@ int do_mmuext_op(
   1.161          case MMUEXT_NEW_BASEPTR:
   1.162              mfn = gmfn_to_mfn(current->domain, mfn);
   1.163              okay = new_guest_cr3(mfn);
   1.164 -            percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
   1.165 +            this_cpu(percpu_mm_info).deferred_ops &= ~DOP_FLUSH_TLB;
   1.166              break;
   1.167          
   1.168  #ifdef __x86_64__
   1.169 @@ -2065,7 +2068,7 @@ int do_mmuext_op(
   1.170  #endif
   1.171          
   1.172          case MMUEXT_TLB_FLUSH_LOCAL:
   1.173 -            percpu_info[cpu].deferred_ops |= DOP_FLUSH_TLB;
   1.174 +            this_cpu(percpu_mm_info).deferred_ops |= DOP_FLUSH_TLB;
   1.175              break;
   1.176      
   1.177          case MMUEXT_INVLPG_LOCAL:
   1.178 @@ -2137,9 +2140,9 @@ int do_mmuext_op(
   1.179                  v->arch.guest_context.ldt_base = ptr;
   1.180                  v->arch.guest_context.ldt_ents = ents;
   1.181                  load_LDT(v);
   1.182 -                percpu_info[cpu].deferred_ops &= ~DOP_RELOAD_LDT;
   1.183 +                this_cpu(percpu_mm_info).deferred_ops &= ~DOP_RELOAD_LDT;
   1.184                  if ( ents != 0 )
   1.185 -                    percpu_info[cpu].deferred_ops |= DOP_RELOAD_LDT;
   1.186 +                    this_cpu(percpu_mm_info).deferred_ops |= DOP_RELOAD_LDT;
   1.187              }
   1.188              break;
   1.189          }
   1.190 @@ -2160,7 +2163,7 @@ int do_mmuext_op(
   1.191      }
   1.192  
   1.193   out:
   1.194 -    process_deferred_ops(cpu);
   1.195 +    process_deferred_ops();
   1.196  
   1.197      /* Add incremental work we have done to the @done output parameter. */
   1.198      done += i;
   1.199 @@ -2181,7 +2184,7 @@ int do_mmu_update(
   1.200      void *va;
   1.201      unsigned long gpfn, gmfn, mfn;
   1.202      struct page_info *page;
   1.203 -    int rc = 0, okay = 1, i = 0, cpu = smp_processor_id();
   1.204 +    int rc = 0, okay = 1, i = 0;
   1.205      unsigned int cmd, done = 0;
   1.206      struct vcpu *v = current;
   1.207      struct domain *d = v->domain;
   1.208 @@ -2205,7 +2208,7 @@ int do_mmu_update(
   1.209      domain_mmap_cache_init(&mapcache);
   1.210      domain_mmap_cache_init(&sh_mapcache);
   1.211  
   1.212 -    if ( !set_foreigndom(cpu, foreigndom) )
   1.213 +    if ( !set_foreigndom(foreigndom) )
   1.214      {
   1.215          rc = -ESRCH;
   1.216          goto out;
   1.217 @@ -2396,7 +2399,7 @@ int do_mmu_update(
   1.218      domain_mmap_cache_destroy(&mapcache);
   1.219      domain_mmap_cache_destroy(&sh_mapcache);
   1.220  
   1.221 -    process_deferred_ops(cpu);
   1.222 +    process_deferred_ops();
   1.223  
   1.224      /* Add incremental work we have done to the @done output parameter. */
   1.225      done += i;
   1.226 @@ -2690,7 +2693,6 @@ int do_update_va_mapping(unsigned long v
   1.227      l1_pgentry_t   val = l1e_from_intpte(val64);
   1.228      struct vcpu   *v   = current;
   1.229      struct domain *d   = v->domain;
   1.230 -    unsigned int   cpu = smp_processor_id();
   1.231      unsigned long  vmask, bmap_ptr;
   1.232      cpumask_t      pmask;
   1.233      int            rc  = 0;
   1.234 @@ -2713,9 +2715,10 @@ int do_update_va_mapping(unsigned long v
   1.235  
   1.236      if ( likely(rc == 0) && unlikely(shadow_mode_enabled(d)) )
   1.237      {
   1.238 -        if ( unlikely(percpu_info[cpu].foreign &&
   1.239 +        if ( unlikely(this_cpu(percpu_mm_info).foreign &&
   1.240                        (shadow_mode_translate(d) ||
   1.241 -                       shadow_mode_translate(percpu_info[cpu].foreign))) )
   1.242 +                       shadow_mode_translate(
   1.243 +                           this_cpu(percpu_mm_info).foreign))) )
   1.244          {
   1.245              /*
   1.246               * The foreign domain's pfn's are in a different namespace. There's
   1.247 @@ -2773,7 +2776,7 @@ int do_update_va_mapping(unsigned long v
   1.248          break;
   1.249      }
   1.250  
   1.251 -    process_deferred_ops(cpu);
   1.252 +    process_deferred_ops();
   1.253      
   1.254      UNLOCK_BIGLOCK(d);
   1.255  
   1.256 @@ -2784,13 +2787,12 @@ int do_update_va_mapping_otherdomain(uns
   1.257                                       unsigned long flags,
   1.258                                       domid_t domid)
   1.259  {
   1.260 -    unsigned int cpu = smp_processor_id();
   1.261      int rc;
   1.262  
   1.263      if ( unlikely(!IS_PRIV(current->domain)) )
   1.264          return -EPERM;
   1.265  
   1.266 -    if ( !set_foreigndom(cpu, domid) )
   1.267 +    if ( !set_foreigndom(domid) )
   1.268          return -ESRCH;
   1.269  
   1.270      rc = do_update_va_mapping(va, val64, flags);
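
For reference, the sketch below is a simplified, single-CPU model of the deferred-ops flow after this patch: hypercall handlers OR flags into the per-CPU percpu_mm_info, and a now parameterless process_deferred_ops() drains them on the way out. The DOP_* names match the patch; the flush and reload bodies are stand-ins for Xen's local_flush_tlb(), flush_tlb_mask() and map_ldt_shadow_page() calls, and mm_info stands in for this_cpu(percpu_mm_info).

#include <stdio.h>

#define DOP_FLUSH_TLB      (1u << 0)  /* Flush the local TLB.                    */
#define DOP_FLUSH_ALL_TLBS (1u << 1)  /* Flush TLBs of all VCPUs of current dom. */
#define DOP_RELOAD_LDT     (1u << 2)  /* Reload the LDT shadow mapping.          */

struct percpu_mm_info {
    unsigned int deferred_ops;
};

/* Stand-in for this_cpu(percpu_mm_info) in a one-CPU model. */
static struct percpu_mm_info mm_info;

static void process_deferred_ops(void)
{
    /* Snapshot and clear first, exactly as the patched function does. */
    unsigned int ops = mm_info.deferred_ops;
    mm_info.deferred_ops = 0;

    if ( ops & (DOP_FLUSH_ALL_TLBS | DOP_FLUSH_TLB) )
        printf("flush TLB(s)\n");
    if ( ops & DOP_RELOAD_LDT )
        printf("reload LDT shadow mapping\n");
}

static void do_mmuext_op_model(void)
{
    /* Handlers defer work rather than flushing in the middle of an op... */
    mm_info.deferred_ops |= DOP_FLUSH_TLB | DOP_RELOAD_LDT;

    /* ...and the exit path applies everything at once. */
    process_deferred_ops();
}

int main(void)
{
    do_mmuext_op_model();
    return 0;
}
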