direct-io.hg

changeset 8504:82eafda1c710

Rename the per-domain cpumask to the more descriptive domain_dirty_cpumask.
Add a new per-vcpu dirty cpumask (vcpu_dirty_cpumask), useful for
state flushing and selective TLB flushing.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Jan 06 17:45:31 2006 +0100 (2006-01-06)
parents 3eeabf448f91
children 3c84ce41d184
files xen/arch/x86/domain.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/arch/x86/shadow32.c xen/common/domain.c xen/common/grant_table.c xen/common/page_alloc.c xen/common/schedule.c xen/include/asm-x86/processor.h xen/include/asm-x86/shadow.h xen/include/xen/sched.h
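
As a quick illustration of what the patch is aiming at, here is a minimal, self-contained C sketch (not hypervisor code): cpumask_t is modelled as a plain unsigned long and printf stands in for flush_tlb_mask(), but the field names mirror the patch. The point is that flushing one VCPU's state only needs to target the CPUs in that VCPU's vcpu_dirty_cpumask, rather than every CPU in the domain-wide domain_dirty_cpumask. In the real diff below, sync_vcpu_execstate() in xen/arch/x86/domain.c is where this narrower mask is used.

/*
 * Minimal standalone sketch (NOT Xen code): cpumask_t is modelled as a
 * plain unsigned long and printf stands in for flush_tlb_mask(); the
 * field names mirror the patch below.
 */
#include <stdio.h>

#define NR_CPUS 8

struct domain {
    unsigned long domain_dirty_cpumask;   /* CPUs holding any of the domain's state */
};

struct vcpu {
    struct domain *domain;
    unsigned long  vcpu_dirty_cpumask;    /* CPUs holding this VCPU's state */
};

/* A context switch onto 'cpu' marks both masks dirty... */
static void mark_dirty(struct vcpu *v, unsigned int cpu)
{
    v->domain->domain_dirty_cpumask |= 1UL << cpu;
    v->vcpu_dirty_cpumask           |= 1UL << cpu;
}

/* ...but flushing one VCPU's state only needs to target its own mask. */
static void sync_vcpu_execstate(struct vcpu *v)
{
    for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
        if (v->vcpu_dirty_cpumask & (1UL << cpu))
            printf("IPI/flush cpu %u\n", cpu);
}

int main(void)
{
    struct domain d = { 0 };
    struct vcpu v0 = { .domain = &d }, v1 = { .domain = &d };

    mark_dirty(&v0, 2);         /* v0 last ran on CPU 2 */
    mark_dirty(&v1, 5);         /* v1 last ran on CPU 5 */

    sync_vcpu_execstate(&v0);   /* prints only "IPI/flush cpu 2" */
    return 0;
}
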
line diff
     1.1 --- a/xen/arch/x86/domain.c	Fri Jan 06 16:47:25 2006 +0100
     1.2 +++ b/xen/arch/x86/domain.c	Fri Jan 06 17:45:31 2006 +0100
     1.3 @@ -94,7 +94,8 @@ void startup_cpu_idle_loop(void)
     1.4  
     1.5      ASSERT(is_idle_domain(v->domain));
     1.6      percpu_ctxt[smp_processor_id()].curr_vcpu = v;
     1.7 -    cpu_set(smp_processor_id(), v->domain->cpumask);
     1.8 +    cpu_set(smp_processor_id(), v->domain->domain_dirty_cpumask);
     1.9 +    cpu_set(smp_processor_id(), v->vcpu_dirty_cpumask);
    1.10      v->arch.schedule_tail = continue_idle_domain;
    1.11  
    1.12      reset_stack_and_jump(idle_loop);
    1.13 @@ -724,7 +725,8 @@ static void __context_switch(void)
    1.14      }
    1.15  
    1.16      if ( p->domain != n->domain )
    1.17 -        cpu_set(cpu, n->domain->cpumask);
    1.18 +        cpu_set(cpu, n->domain->domain_dirty_cpumask);
    1.19 +    cpu_set(cpu, n->vcpu_dirty_cpumask);
    1.20  
    1.21      write_ptbase(n);
    1.22  
    1.23 @@ -737,7 +739,8 @@ static void __context_switch(void)
    1.24      }
    1.25  
    1.26      if ( p->domain != n->domain )
    1.27 -        cpu_clear(cpu, p->domain->cpumask);
    1.28 +        cpu_clear(cpu, p->domain->domain_dirty_cpumask);
     1.29 +    cpu_clear(cpu, p->vcpu_dirty_cpumask);
    1.30  
    1.31      percpu_ctxt[cpu].curr_vcpu = n;
    1.32  }
    1.33 @@ -812,20 +815,11 @@ int __sync_lazy_execstate(void)
    1.34  
    1.35  void sync_vcpu_execstate(struct vcpu *v)
    1.36  {
    1.37 -    unsigned int cpu = v->processor;
    1.38 -
    1.39 -    if ( !cpu_isset(cpu, v->domain->cpumask) )
    1.40 -        return;
    1.41 +    if ( cpu_isset(smp_processor_id(), v->vcpu_dirty_cpumask) )
    1.42 +        (void)__sync_lazy_execstate();
    1.43  
    1.44 -    if ( cpu == smp_processor_id() )
    1.45 -    {
    1.46 -        (void)__sync_lazy_execstate();
    1.47 -    }
    1.48 -    else
    1.49 -    {
    1.50 -        /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
    1.51 -        flush_tlb_mask(cpumask_of_cpu(cpu));
    1.52 -    }
    1.53 +    /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
    1.54 +    flush_tlb_mask(v->vcpu_dirty_cpumask);
    1.55  }
    1.56  
    1.57  unsigned long __hypercall_create_continuation(
    1.58 @@ -951,7 +945,7 @@ void domain_relinquish_resources(struct 
    1.59      struct vcpu *v;
    1.60      unsigned long pfn;
    1.61  
    1.62 -    BUG_ON(!cpus_empty(d->cpumask));
    1.63 +    BUG_ON(!cpus_empty(d->domain_dirty_cpumask));
    1.64  
    1.65      ptwr_destroy(d);
    1.66  
     2.1 --- a/xen/arch/x86/mm.c	Fri Jan 06 16:47:25 2006 +0100
     2.2 +++ b/xen/arch/x86/mm.c	Fri Jan 06 17:45:31 2006 +0100
     2.3 @@ -1457,7 +1457,8 @@ int get_page_type(struct pfn_info *page,
     2.4                       * was GDT/LDT) but those circumstances should be
     2.5                       * very rare.
     2.6                       */
     2.7 -                    cpumask_t mask = page_get_owner(page)->cpumask;
     2.8 +                    cpumask_t mask =
     2.9 +                        page_get_owner(page)->domain_dirty_cpumask;
    2.10                      tlbflush_filter(mask, page->tlbflush_timestamp);
    2.11  
    2.12                      if ( unlikely(!cpus_empty(mask)) )
    2.13 @@ -1619,7 +1620,7 @@ static void process_deferred_ops(unsigne
    2.14          if ( shadow_mode_enabled(d) )
    2.15              shadow_sync_all(d);
    2.16          if ( deferred_ops & DOP_FLUSH_ALL_TLBS )
    2.17 -            flush_tlb_mask(d->cpumask);
    2.18 +            flush_tlb_mask(d->domain_dirty_cpumask);
    2.19          else
    2.20              local_flush_tlb();
    2.21      }
    2.22 @@ -1691,7 +1692,7 @@ static inline cpumask_t vcpumask_to_pcpu
    2.23      struct domain *d, unsigned long vmask)
    2.24  {
    2.25      unsigned int vcpu_id;
    2.26 -    cpumask_t    pmask;
    2.27 +    cpumask_t    pmask = CPU_MASK_NONE;
    2.28      struct vcpu *v;
    2.29  
    2.30      while ( vmask != 0 )
    2.31 @@ -1700,7 +1701,7 @@ static inline cpumask_t vcpumask_to_pcpu
    2.32          vmask &= ~(1UL << vcpu_id);
    2.33          if ( (vcpu_id < MAX_VIRT_CPUS) &&
    2.34               ((v = d->vcpu[vcpu_id]) != NULL) )
    2.35 -            cpu_set(v->processor, pmask);
    2.36 +            cpus_or(pmask, pmask, v->vcpu_dirty_cpumask);
    2.37      }
    2.38  
    2.39      return pmask;
    2.40 @@ -1869,7 +1870,6 @@ int do_mmuext_op(
    2.41                  break;
    2.42              }
    2.43              pmask = vcpumask_to_pcpumask(d, vmask);
    2.44 -            cpus_and(pmask, pmask, d->cpumask);
    2.45              if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
    2.46                  flush_tlb_mask(pmask);
    2.47              else
    2.48 @@ -1878,11 +1878,11 @@ int do_mmuext_op(
    2.49          }
    2.50  
    2.51          case MMUEXT_TLB_FLUSH_ALL:
    2.52 -            flush_tlb_mask(d->cpumask);
    2.53 +            flush_tlb_mask(d->domain_dirty_cpumask);
    2.54              break;
    2.55      
    2.56          case MMUEXT_INVLPG_ALL:
    2.57 -            flush_tlb_one_mask(d->cpumask, op.arg1.linear_addr);
    2.58 +            flush_tlb_one_mask(d->domain_dirty_cpumask, op.arg1.linear_addr);
    2.59              break;
    2.60  
    2.61          case MMUEXT_FLUSH_CACHE:
    2.62 @@ -2548,13 +2548,12 @@ int do_update_va_mapping(unsigned long v
    2.63              local_flush_tlb();
    2.64              break;
    2.65          case UVMF_ALL:
    2.66 -            flush_tlb_mask(d->cpumask);
    2.67 +            flush_tlb_mask(d->domain_dirty_cpumask);
    2.68              break;
    2.69          default:
    2.70              if ( unlikely(get_user(vmask, (unsigned long *)bmap_ptr)) )
    2.71                  rc = -EFAULT;
    2.72              pmask = vcpumask_to_pcpumask(d, vmask);
    2.73 -            cpus_and(pmask, pmask, d->cpumask);
    2.74              flush_tlb_mask(pmask);
    2.75              break;
    2.76          }
    2.77 @@ -2569,13 +2568,12 @@ int do_update_va_mapping(unsigned long v
    2.78              local_flush_tlb_one(va);
    2.79              break;
    2.80          case UVMF_ALL:
    2.81 -            flush_tlb_one_mask(d->cpumask, va);
    2.82 +            flush_tlb_one_mask(d->domain_dirty_cpumask, va);
    2.83              break;
    2.84          default:
    2.85              if ( unlikely(get_user(vmask, (unsigned long *)bmap_ptr)) )
    2.86                  rc = -EFAULT;
    2.87              pmask = vcpumask_to_pcpumask(d, vmask);
    2.88 -            cpus_and(pmask, pmask, d->cpumask);
    2.89              flush_tlb_one_mask(pmask, va);
    2.90              break;
    2.91          }
    2.92 @@ -3018,7 +3016,7 @@ void ptwr_flush(struct domain *d, const 
    2.93  
    2.94      /* Ensure that there are no stale writable mappings in any TLB. */
    2.95      /* NB. INVLPG is a serialising instruction: flushes pending updates. */
    2.96 -    flush_tlb_one_mask(d->cpumask, l1va);
    2.97 +    flush_tlb_one_mask(d->domain_dirty_cpumask, l1va);
    2.98      PTWR_PRINTK("[%c] disconnected_l1va at %p now %"PRIpte"\n",
    2.99                  PTWR_PRINT_WHICH, ptep, pte.l1);
   2.100  
   2.101 @@ -3342,7 +3340,7 @@ int ptwr_do_page_fault(struct domain *d,
   2.102      if ( which == PTWR_PT_ACTIVE )
   2.103      {
   2.104          l2e_remove_flags(*pl2e, _PAGE_PRESENT);
   2.105 -        flush_tlb_mask(d->cpumask);
   2.106 +        flush_tlb_mask(d->domain_dirty_cpumask);
   2.107      }
   2.108      
   2.109      /* Temporarily map the L1 page, and make a copy of it. */
     3.1 --- a/xen/arch/x86/shadow.c	Fri Jan 06 16:47:25 2006 +0100
     3.2 +++ b/xen/arch/x86/shadow.c	Fri Jan 06 17:45:31 2006 +0100
     3.3 @@ -1800,7 +1800,7 @@ static void sync_all(struct domain *d)
     3.4      }
     3.5  
     3.6      /* Other VCPUs mustn't use the revoked writable mappings. */
     3.7 -    other_vcpus_mask = d->cpumask;
     3.8 +    other_vcpus_mask = d->domain_dirty_cpumask;
     3.9      cpu_clear(smp_processor_id(), other_vcpus_mask);
    3.10      flush_tlb_mask(other_vcpus_mask);
    3.11  
     4.1 --- a/xen/arch/x86/shadow32.c	Fri Jan 06 16:47:25 2006 +0100
     4.2 +++ b/xen/arch/x86/shadow32.c	Fri Jan 06 17:45:31 2006 +0100
     4.3 @@ -2586,7 +2586,7 @@ void __shadow_sync_all(struct domain *d)
     4.4      }
     4.5  
     4.6      /* Other VCPUs mustn't use the revoked writable mappings. */
     4.7 -    other_vcpus_mask = d->cpumask;
     4.8 +    other_vcpus_mask = d->domain_dirty_cpumask;
     4.9      cpu_clear(smp_processor_id(), other_vcpus_mask);
    4.10      flush_tlb_mask(other_vcpus_mask);
    4.11  
     5.1 --- a/xen/common/domain.c	Fri Jan 06 16:47:25 2006 +0100
     5.2 +++ b/xen/common/domain.c	Fri Jan 06 17:45:31 2006 +0100
     5.3 @@ -179,7 +179,7 @@ static void domain_shutdown_finalise(voi
     5.4      /* Make sure that every vcpu is descheduled before we finalise. */
     5.5      for_each_vcpu ( d, v )
     5.6          vcpu_sleep_sync(v);
     5.7 -    BUG_ON(!cpus_empty(d->cpumask));
     5.8 +    BUG_ON(!cpus_empty(d->domain_dirty_cpumask));
     5.9  
    5.10      sync_pagetable_state(d);
    5.11  
     6.1 --- a/xen/common/grant_table.c	Fri Jan 06 16:47:25 2006 +0100
     6.2 +++ b/xen/common/grant_table.c	Fri Jan 06 17:45:31 2006 +0100
     6.3 @@ -471,7 +471,7 @@ gnttab_unmap_grant_ref(
     6.4      for ( i = 0; i < count; i++ )
     6.5          (void)__gnttab_unmap_grant_ref(&uop[i]);
     6.6  
     6.7 -    flush_tlb_mask(current->domain->cpumask);
     6.8 +    flush_tlb_mask(current->domain->domain_dirty_cpumask);
     6.9  
    6.10      return 0;
    6.11  }
     7.1 --- a/xen/common/page_alloc.c	Fri Jan 06 16:47:25 2006 +0100
     7.2 +++ b/xen/common/page_alloc.c	Fri Jan 06 17:45:31 2006 +0100
     7.3 @@ -615,7 +615,7 @@ void free_domheap_pages(struct pfn_info 
     7.4              shadow_drop_references(d, &pg[i]);
     7.5              ASSERT((pg[i].u.inuse.type_info & PGT_count_mask) == 0);
     7.6              pg[i].tlbflush_timestamp  = tlbflush_current_time();
     7.7 -            pg[i].u.free.cpumask      = d->cpumask;
     7.8 +            pg[i].u.free.cpumask      = d->domain_dirty_cpumask;
     7.9              list_del(&pg[i].list);
    7.10          }
    7.11  
     8.1 --- a/xen/common/schedule.c	Fri Jan 06 16:47:25 2006 +0100
     8.2 +++ b/xen/common/schedule.c	Fri Jan 06 17:45:31 2006 +0100
     8.3 @@ -339,18 +339,23 @@ long sched_adjdom(struct sched_adjdom_cm
     8.4      do {
     8.5          succ = 0;
     8.6          __clear_cpu_bits(have_lock);
     8.7 -        for_each_vcpu(d, v) {
     8.8 +        for_each_vcpu ( d, v )
     8.9 +        {
    8.10              cpu = v->processor;
    8.11 -            if (!__get_cpu_bit(cpu, have_lock)) {
    8.12 +            if ( !__get_cpu_bit(cpu, have_lock) )
    8.13 +            {
    8.14                  /* if we don't have a lock on this CPU: acquire it*/
    8.15 -                if (spin_trylock(&schedule_data[cpu].schedule_lock)) {
    8.16 +                if ( spin_trylock(&schedule_data[cpu].schedule_lock) )
    8.17 +                {
    8.18                      /*we have this lock!*/
    8.19                      __set_cpu_bit(cpu, have_lock);
    8.20                      succ = 1;
    8.21 -                } else {
    8.22 +                }
    8.23 +                else
    8.24 +                {
    8.25                      /*we didn,t get this lock -> free all other locks too!*/
    8.26 -                    for (cpu = 0; cpu < NR_CPUS; cpu++)
    8.27 -                        if (__get_cpu_bit(cpu, have_lock))
    8.28 +                    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
    8.29 +                        if ( __get_cpu_bit(cpu, have_lock) )
    8.30                              spin_unlock(&schedule_data[cpu].schedule_lock);
    8.31                      /* and start from the beginning! */
    8.32                      succ = 0;
    8.33 @@ -363,8 +368,8 @@ long sched_adjdom(struct sched_adjdom_cm
    8.34  
    8.35      SCHED_OP(adjdom, d, cmd);
    8.36  
    8.37 -    for (cpu = 0; cpu < NR_CPUS; cpu++)
    8.38 -        if (__get_cpu_bit(cpu, have_lock))
    8.39 +    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
    8.40 +        if ( __get_cpu_bit(cpu, have_lock) )
    8.41              spin_unlock(&schedule_data[cpu].schedule_lock);
    8.42      __clear_cpu_bits(have_lock);
    8.43  
    8.44 @@ -380,8 +385,8 @@ long sched_adjdom(struct sched_adjdom_cm
    8.45   */
    8.46  static void __enter_scheduler(void)
    8.47  {
    8.48 -    struct vcpu *prev = current, *next = NULL;
    8.49 -    int                 cpu = prev->processor;
    8.50 +    struct vcpu        *prev = current, *next = NULL;
    8.51 +    int                 cpu = smp_processor_id();
    8.52      s_time_t            now;
    8.53      struct task_slice   next_slice;
    8.54      s32                 r_time;     /* time for new dom to run */
    8.55 @@ -502,7 +507,7 @@ static void s_timer_fn(void *unused)
    8.56  static void t_timer_fn(void *unused)
    8.57  {
    8.58      struct vcpu  *v  = current;
    8.59 -    unsigned int  cpu = v->processor;
    8.60 +    unsigned int  cpu = smp_processor_id();
    8.61  
    8.62      schedule_data[cpu].tick++;
    8.63  
     9.1 --- a/xen/include/asm-x86/processor.h	Fri Jan 06 16:47:25 2006 +0100
     9.2 +++ b/xen/include/asm-x86/processor.h	Fri Jan 06 17:45:31 2006 +0100
     9.3 @@ -190,7 +190,7 @@ extern void dodgy_tsc(void);
     9.4  #ifdef CONFIG_X86_HT
     9.5  extern void detect_ht(struct cpuinfo_x86 *c);
     9.6  #else
     9.7 -static inline void detect_ht(struct cpuinfo_x86 *c) {}
     9.8 +static always_inline void detect_ht(struct cpuinfo_x86 *c) {}
     9.9  #endif
    9.10  
    9.11  /*
    9.12 @@ -209,7 +209,7 @@ static inline void detect_ht(struct cpui
    9.13  /*
    9.14   * CPUID functions returning a single datum
    9.15   */
    9.16 -static inline unsigned int cpuid_eax(unsigned int op)
    9.17 +static always_inline unsigned int cpuid_eax(unsigned int op)
    9.18  {
    9.19      unsigned int eax;
    9.20  
    9.21 @@ -219,7 +219,7 @@ static inline unsigned int cpuid_eax(uns
    9.22              : "bx", "cx", "dx");
    9.23      return eax;
    9.24  }
    9.25 -static inline unsigned int cpuid_ebx(unsigned int op)
    9.26 +static always_inline unsigned int cpuid_ebx(unsigned int op)
    9.27  {
    9.28      unsigned int eax, ebx;
    9.29  
    9.30 @@ -229,7 +229,7 @@ static inline unsigned int cpuid_ebx(uns
    9.31              : "cx", "dx" );
    9.32      return ebx;
    9.33  }
    9.34 -static inline unsigned int cpuid_ecx(unsigned int op)
    9.35 +static always_inline unsigned int cpuid_ecx(unsigned int op)
    9.36  {
    9.37      unsigned int eax, ecx;
    9.38  
    9.39 @@ -239,7 +239,7 @@ static inline unsigned int cpuid_ecx(uns
    9.40              : "bx", "dx" );
    9.41      return ecx;
    9.42  }
    9.43 -static inline unsigned int cpuid_edx(unsigned int op)
    9.44 +static always_inline unsigned int cpuid_edx(unsigned int op)
    9.45  {
    9.46      unsigned int eax, edx;
    9.47  
    9.48 @@ -281,7 +281,7 @@ static inline unsigned int cpuid_edx(uns
    9.49   */
    9.50  extern unsigned long mmu_cr4_features;
    9.51  
    9.52 -static inline void set_in_cr4 (unsigned long mask)
    9.53 +static always_inline void set_in_cr4 (unsigned long mask)
    9.54  {
    9.55      unsigned long dummy;
    9.56      mmu_cr4_features |= mask;
    9.57 @@ -292,7 +292,7 @@ static inline void set_in_cr4 (unsigned 
    9.58          : "=&r" (dummy) : "irg" (mask) );
    9.59  }
    9.60  
    9.61 -static inline void clear_in_cr4 (unsigned long mask)
    9.62 +static always_inline void clear_in_cr4 (unsigned long mask)
    9.63  {
    9.64      unsigned long dummy;
    9.65      mmu_cr4_features &= ~mask;
    9.66 @@ -334,7 +334,7 @@ static inline void clear_in_cr4 (unsigne
    9.67  	outb((data), 0x23); \
    9.68  } while (0)
    9.69  
    9.70 -static inline void __monitor(const void *eax, unsigned long ecx,
    9.71 +static always_inline void __monitor(const void *eax, unsigned long ecx,
    9.72  		unsigned long edx)
    9.73  {
    9.74  	/* "monitor %eax,%ecx,%edx;" */
    9.75 @@ -343,7 +343,7 @@ static inline void __monitor(const void 
    9.76  		: :"a" (eax), "c" (ecx), "d"(edx));
    9.77  }
    9.78  
    9.79 -static inline void __mwait(unsigned long eax, unsigned long ecx)
    9.80 +static always_inline void __mwait(unsigned long eax, unsigned long ecx)
    9.81  {
    9.82  	/* "mwait %eax,%ecx;" */
    9.83  	asm volatile(
    9.84 @@ -460,7 +460,7 @@ struct extended_sigtable {
    9.85  };
    9.86  
    9.87  /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
    9.88 -static inline void rep_nop(void)
    9.89 +static always_inline void rep_nop(void)
    9.90  {
    9.91      __asm__ __volatile__ ( "rep;nop" : : : "memory" );
    9.92  }
    9.93 @@ -471,7 +471,7 @@ static inline void rep_nop(void)
    9.94  #ifdef 	CONFIG_MPENTIUMIII
    9.95  
    9.96  #define ARCH_HAS_PREFETCH
    9.97 -extern inline void prefetch(const void *x)
    9.98 +extern always_inline void prefetch(const void *x)
    9.99  {
   9.100      __asm__ __volatile__ ("prefetchnta (%0)" : : "r"(x));
   9.101  }
   9.102 @@ -482,12 +482,12 @@ extern inline void prefetch(const void *
   9.103  #define ARCH_HAS_PREFETCHW
   9.104  #define ARCH_HAS_SPINLOCK_PREFETCH
   9.105  
   9.106 -extern inline void prefetch(const void *x)
   9.107 +extern always_inline void prefetch(const void *x)
   9.108  {
   9.109      __asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
   9.110  }
   9.111  
   9.112 -extern inline void prefetchw(const void *x)
   9.113 +extern always_inline void prefetchw(const void *x)
   9.114  {
   9.115      __asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
   9.116  }
    10.1 --- a/xen/include/asm-x86/shadow.h	Fri Jan 06 16:47:25 2006 +0100
    10.2 +++ b/xen/include/asm-x86/shadow.h	Fri Jan 06 17:45:31 2006 +0100
    10.3 @@ -591,7 +591,7 @@ update_hl2e(struct vcpu *v, unsigned lon
    10.4          if ( need_flush )
    10.5          {
    10.6              perfc_incrc(update_hl2e_invlpg);
    10.7 -            flush_tlb_one_mask(v->domain->cpumask,
    10.8 +            flush_tlb_one_mask(v->domain->domain_dirty_cpumask,
    10.9                                 &linear_pg_table[l1_linear_offset(va)]);
   10.10          }
   10.11      }
    11.1 --- a/xen/include/xen/sched.h	Fri Jan 06 16:47:25 2006 +0100
    11.2 +++ b/xen/include/xen/sched.h	Fri Jan 06 17:45:31 2006 +0100
    11.3 @@ -78,8 +78,12 @@ struct vcpu
    11.4  
    11.5      atomic_t         pausecnt;
    11.6  
    11.7 +    /* Bitmask of CPUs on which this VCPU may run. */
    11.8      cpumask_t        cpu_affinity;
    11.9  
   11.10 +    /* Bitmask of CPUs which are holding onto this VCPU's state. */
   11.11 +    cpumask_t        vcpu_dirty_cpumask;
   11.12 +
   11.13      struct arch_vcpu arch;
   11.14  };
   11.15  
   11.16 @@ -139,7 +143,7 @@ struct domain
   11.17      struct vcpu *vcpu[MAX_VIRT_CPUS];
   11.18  
   11.19      /* Bitmask of CPUs which are holding onto this domain's state. */
   11.20 -    cpumask_t        cpumask;
   11.21 +    cpumask_t        domain_dirty_cpumask;
   11.22  
   11.23      struct arch_domain arch;
   11.24