ia64/xen-unstable

changeset 3677:d93748c50893

bitkeeper revision 1.1159.212.100 (42050e5fWLAKCQAvoZ3CPmyAaL-51g)

Reorganise 'struct domain' and 'struct exec_domain' to each have an
architecture-specific portion, and remove 'mm_struct'.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@viper.(none)
date Sat Feb 05 18:20:15 2005 +0000 (2005-02-05)
parents 4ba67049f771
children 87ad759770a2
files xen/arch/x86/dom0_ops.c xen/arch/x86/domain.c xen/arch/x86/i387.c xen/arch/x86/idle0_task.c xen/arch/x86/memory.c xen/arch/x86/setup.c xen/arch/x86/shadow.c xen/arch/x86/smpboot.c xen/arch/x86/traps.c xen/arch/x86/vmx.c xen/arch/x86/vmx_io.c xen/arch/x86/vmx_platform.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_32/asm-offsets.c xen/arch/x86/x86_32/domain_build.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_32/seg_fixup.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/asm-offsets.c xen/arch/x86/x86_64/domain_build.c xen/arch/x86/x86_64/mm.c xen/common/domain.c xen/common/physdev.c xen/include/asm-x86/domain.h xen/include/asm-x86/ldt.h xen/include/asm-x86/processor.h xen/include/asm-x86/shadow.h xen/include/asm-x86/vmx_vmcs.h xen/include/asm-x86/x86_32/current.h xen/include/asm-x86/x86_64/current.h xen/include/xen/sched.h
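
For orientation, a minimal sketch of the layout this changeset introduces, inferred from the 'd->arch.*' and 'ed->arch.*' accessors in the hunks below. The real definitions land in xen/include/asm-x86/domain.h and xen/include/xen/sched.h, whose hunks are not part of this excerpt, so any field not visible in the diff is an assumption:

    /* Sketch only -- not copied from the (unshown) asm-x86/domain.h hunk. */

    struct arch_domain {                 /* per-domain state */
        l1_pgentry_t *mm_perdomain_pt;   /* was d->mm_perdomain_pt */
        unsigned int  shadow_mode;       /* was ed->mm.shadow_mode; now domain-wide */
        /* ... shadow hash table, dirty bitmap, shadow stats ... */
    };

    struct arch_exec_domain {            /* per-vcpu state: replaces 'thread' + most of 'mm' */
        execution_context_t user_ctxt;   /* was ed->thread.user_ctxt */
        pagetable_t   pagetable;         /* was ed->mm.pagetable */
        pagetable_t   monitor_table;     /* was ed->mm.monitor_table */
        unsigned long ldt_base, ldt_ents;/* were ed->mm.ldt_base / ldt_ents */
        l1_pgentry_t *perdomain_ptes;    /* was ed->mm.perdomain_ptes */
        /* ... i387, traps, guestos_ss/sp, debugreg, io_bitmap, VMX state ... */
    };

    struct domain {
        /* ... common fields ... */
        struct arch_domain arch;         /* architecture-specific portion */
    };

    struct exec_domain {
        /* ... common fields ... */
        struct arch_exec_domain arch;    /* architecture-specific portion */
    };

The per-vcpu/per-domain split is the substantive change: shadow state (shadow_mode, the shadow hash table, the dirty bitmap and counters) moves from the old per-exec_domain 'mm_struct' to the domain, while pagetables, LDT and register state stay per vcpu.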
line diff
     1.1 --- a/xen/arch/x86/dom0_ops.c	Sat Feb 05 15:42:48 2005 +0000
     1.2 +++ b/xen/arch/x86/dom0_ops.c	Sat Feb 05 18:20:15 2005 +0000
     1.3 @@ -340,49 +340,50 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
     1.4      return ret;
     1.5  }
     1.6  
     1.7 -void arch_getdomaininfo_ctxt(struct exec_domain *d, full_execution_context_t *c)
     1.8 +void arch_getdomaininfo_ctxt(
     1.9 +    struct exec_domain *ed, full_execution_context_t *c)
    1.10  { 
    1.11      int i;
    1.12  
    1.13      c->flags = 0;
    1.14      memcpy(&c->cpu_ctxt, 
    1.15 -           &d->thread.user_ctxt,
    1.16 -           sizeof(d->thread.user_ctxt));
    1.17 -    if ( test_bit(EDF_DONEFPUINIT, &d->ed_flags) )
    1.18 +           &ed->arch.user_ctxt,
    1.19 +           sizeof(ed->arch.user_ctxt));
    1.20 +    if ( test_bit(EDF_DONEFPUINIT, &ed->ed_flags) )
    1.21          c->flags |= ECF_I387_VALID;
    1.22      memcpy(&c->fpu_ctxt,
    1.23 -           &d->thread.i387,
    1.24 -           sizeof(d->thread.i387));
    1.25 +           &ed->arch.i387,
    1.26 +           sizeof(ed->arch.i387));
    1.27      memcpy(&c->trap_ctxt,
    1.28 -           d->thread.traps,
    1.29 -           sizeof(d->thread.traps));
    1.30 +           ed->arch.traps,
    1.31 +           sizeof(ed->arch.traps));
    1.32  #ifdef ARCH_HAS_FAST_TRAP
    1.33 -    if ( (d->thread.fast_trap_desc.a == 0) &&
    1.34 -         (d->thread.fast_trap_desc.b == 0) )
    1.35 +    if ( (ed->arch.fast_trap_desc.a == 0) &&
    1.36 +         (ed->arch.fast_trap_desc.b == 0) )
    1.37          c->fast_trap_idx = 0;
    1.38      else
    1.39          c->fast_trap_idx = 
    1.40 -            d->thread.fast_trap_idx;
    1.41 +            ed->arch.fast_trap_idx;
    1.42  #endif
    1.43 -    c->ldt_base = d->mm.ldt_base;
    1.44 -    c->ldt_ents = d->mm.ldt_ents;
    1.45 +    c->ldt_base = ed->arch.ldt_base;
    1.46 +    c->ldt_ents = ed->arch.ldt_ents;
    1.47      c->gdt_ents = 0;
    1.48 -    if ( GET_GDT_ADDRESS(d) == GDT_VIRT_START(d) )
    1.49 +    if ( GET_GDT_ADDRESS(ed) == GDT_VIRT_START(ed) )
    1.50      {
    1.51          for ( i = 0; i < 16; i++ )
    1.52              c->gdt_frames[i] = 
    1.53 -                l1_pgentry_to_pagenr(d->mm.perdomain_ptes[i]);
    1.54 -        c->gdt_ents = GET_GDT_ENTRIES(d);
    1.55 +                l1_pgentry_to_pagenr(ed->arch.perdomain_ptes[i]);
    1.56 +        c->gdt_ents = GET_GDT_ENTRIES(ed);
    1.57      }
    1.58 -    c->guestos_ss  = d->thread.guestos_ss;
    1.59 -    c->guestos_esp = d->thread.guestos_sp;
    1.60 +    c->guestos_ss  = ed->arch.guestos_ss;
    1.61 +    c->guestos_esp = ed->arch.guestos_sp;
    1.62      c->pt_base   = 
    1.63 -        pagetable_val(d->mm.pagetable);
    1.64 +        pagetable_val(ed->arch.pagetable);
    1.65      memcpy(c->debugreg, 
    1.66 -           d->thread.debugreg, 
    1.67 -           sizeof(d->thread.debugreg));
    1.68 -    c->event_callback_cs     = d->thread.event_selector;
    1.69 -    c->event_callback_eip    = d->thread.event_address;
    1.70 -    c->failsafe_callback_cs  = d->thread.failsafe_selector;
    1.71 -    c->failsafe_callback_eip = d->thread.failsafe_address;
    1.72 +           ed->arch.debugreg, 
    1.73 +           sizeof(ed->arch.debugreg));
    1.74 +    c->event_callback_cs     = ed->arch.event_selector;
    1.75 +    c->event_callback_eip    = ed->arch.event_address;
    1.76 +    c->failsafe_callback_cs  = ed->arch.failsafe_selector;
    1.77 +    c->failsafe_callback_eip = ed->arch.failsafe_address;
    1.78  }
     2.1 --- a/xen/arch/x86/domain.c	Sat Feb 05 15:42:48 2005 +0000
     2.2 +++ b/xen/arch/x86/domain.c	Sat Feb 05 18:20:15 2005 +0000
     2.3 @@ -1,3 +1,4 @@
     2.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     2.5  /******************************************************************************
     2.6   * arch/x86/domain.c
     2.7   * 
     2.8 @@ -231,7 +232,7 @@ void arch_free_exec_domain_struct(struct
     2.9  
    2.10  void free_perdomain_pt(struct domain *d)
    2.11  {
    2.12 -    free_xenheap_page((unsigned long)d->mm_perdomain_pt);
    2.13 +    free_xenheap_page((unsigned long)d->arch.mm_perdomain_pt);
    2.14  }
    2.15  
    2.16  static void continue_idle_task(struct exec_domain *ed)
    2.17 @@ -248,15 +249,15 @@ void arch_do_createdomain(struct exec_do
    2.18  {
    2.19      struct domain *d = ed->domain;
    2.20  
    2.21 -    SET_DEFAULT_FAST_TRAP(&ed->thread);
    2.22 +    SET_DEFAULT_FAST_TRAP(&ed->arch);
    2.23  
    2.24      if ( d->id == IDLE_DOMAIN_ID )
    2.25      {
    2.26 -        ed->thread.schedule_tail = continue_idle_task;
    2.27 +        ed->arch.schedule_tail = continue_idle_task;
    2.28      }
    2.29      else
    2.30      {
    2.31 -        ed->thread.schedule_tail = continue_nonidle_task;
    2.32 +        ed->arch.schedule_tail = continue_nonidle_task;
    2.33  
    2.34          d->shared_info = (void *)alloc_xenheap_page();
    2.35          memset(d->shared_info, 0, PAGE_SIZE);
    2.36 @@ -265,36 +266,37 @@ void arch_do_createdomain(struct exec_do
    2.37          machine_to_phys_mapping[virt_to_phys(d->shared_info) >> 
    2.38                                 PAGE_SHIFT] = INVALID_P2M_ENTRY;
    2.39  
    2.40 -        d->mm_perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
    2.41 -        memset(d->mm_perdomain_pt, 0, PAGE_SIZE);
    2.42 -        machine_to_phys_mapping[virt_to_phys(d->mm_perdomain_pt) >> 
    2.43 +        d->arch.mm_perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
    2.44 +        memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE);
    2.45 +        machine_to_phys_mapping[virt_to_phys(d->arch.mm_perdomain_pt) >> 
    2.46                                 PAGE_SHIFT] = INVALID_P2M_ENTRY;
    2.47 -        ed->mm.perdomain_ptes = d->mm_perdomain_pt;
    2.48 +        ed->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
    2.49      }
    2.50  }
    2.51  
    2.52  void arch_do_boot_vcpu(struct exec_domain *ed)
    2.53  {
    2.54      struct domain *d = ed->domain;
    2.55 -    ed->thread.schedule_tail = d->exec_domain[0]->thread.schedule_tail;
    2.56 -    ed->mm.perdomain_ptes = d->mm_perdomain_pt + (ed->eid << PDPT_VCPU_SHIFT);
    2.57 +    ed->arch.schedule_tail = d->exec_domain[0]->arch.schedule_tail;
    2.58 +    ed->arch.perdomain_ptes = 
    2.59 +        d->arch.mm_perdomain_pt + (ed->eid << PDPT_VCPU_SHIFT);
    2.60  }
    2.61  
    2.62  #ifdef CONFIG_VMX
    2.63  void arch_vmx_do_resume(struct exec_domain *ed) 
    2.64  {
    2.65 -    u64 vmcs_phys_ptr = (u64) virt_to_phys(ed->thread.arch_vmx.vmcs);
    2.66 +    u64 vmcs_phys_ptr = (u64) virt_to_phys(ed->arch.arch_vmx.vmcs);
    2.67  
    2.68 -    load_vmcs(&ed->thread.arch_vmx, vmcs_phys_ptr);
    2.69 +    load_vmcs(&ed->arch.arch_vmx, vmcs_phys_ptr);
    2.70      vmx_do_resume(ed);
    2.71      reset_stack_and_jump(vmx_asm_do_resume);
    2.72  }
    2.73  
    2.74  void arch_vmx_do_launch(struct exec_domain *ed) 
    2.75  {
    2.76 -    u64 vmcs_phys_ptr = (u64) virt_to_phys(ed->thread.arch_vmx.vmcs);
    2.77 +    u64 vmcs_phys_ptr = (u64) virt_to_phys(ed->arch.arch_vmx.vmcs);
    2.78  
    2.79 -    load_vmcs(&ed->thread.arch_vmx, vmcs_phys_ptr);
    2.80 +    load_vmcs(&ed->arch.arch_vmx, vmcs_phys_ptr);
    2.81      vmx_do_launch(ed);
    2.82      reset_stack_and_jump(vmx_asm_do_launch);
    2.83  }
    2.84 @@ -304,7 +306,6 @@ static void monitor_mk_pagetable(struct 
    2.85      unsigned long mpfn;
    2.86      l2_pgentry_t *mpl2e;
    2.87      struct pfn_info *mpfn_info;
    2.88 -    struct mm_struct *m = &ed->mm;
    2.89      struct domain *d = ed->domain;
    2.90  
    2.91      mpfn_info = alloc_domheap_page(NULL);
    2.92 @@ -318,11 +319,11 @@ static void monitor_mk_pagetable(struct 
    2.93             &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
    2.94             HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
    2.95  
    2.96 -    m->monitor_table = mk_pagetable(mpfn << L1_PAGETABLE_SHIFT);
    2.97 -    m->shadow_mode = SHM_full_32;
    2.98 +    ed->arch.monitor_table = mk_pagetable(mpfn << L1_PAGETABLE_SHIFT);
    2.99 +    d->arch.shadow_mode = SHM_full_32;
   2.100  
   2.101      mpl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
   2.102 -        mk_l2_pgentry((__pa(d->mm_perdomain_pt) & PAGE_MASK) 
   2.103 +        mk_l2_pgentry((__pa(d->arch.mm_perdomain_pt) & PAGE_MASK) 
   2.104                        | __PAGE_HYPERVISOR);
   2.105  
   2.106      unmap_domain_mem(mpl2e);
   2.107 @@ -333,13 +334,12 @@ static void monitor_mk_pagetable(struct 
   2.108   */
   2.109  static void monitor_rm_pagetable(struct exec_domain *ed)
   2.110  {
   2.111 -    struct mm_struct *m = &ed->mm;
   2.112      l2_pgentry_t *mpl2e;
   2.113      unsigned long mpfn;
   2.114  
   2.115 -    ASSERT( pagetable_val(m->monitor_table) );
   2.116 +    ASSERT( pagetable_val(ed->arch.monitor_table) );
   2.117      
   2.118 -    mpl2e = (l2_pgentry_t *) map_domain_mem(pagetable_val(m->monitor_table));
   2.119 +    mpl2e = (l2_pgentry_t *) map_domain_mem(pagetable_val(ed->arch.monitor_table));
   2.120      /*
   2.121       * First get the pfn for guest_pl2e_cache by looking at monitor_table
   2.122       */
   2.123 @@ -352,10 +352,10 @@ static void monitor_rm_pagetable(struct 
   2.124      /*
   2.125       * Then free monitor_table.
   2.126       */
   2.127 -    mpfn = (pagetable_val(m->monitor_table)) >> PAGE_SHIFT;
   2.128 +    mpfn = (pagetable_val(ed->arch.monitor_table)) >> PAGE_SHIFT;
   2.129      free_domheap_page(&frame_table[mpfn]);
   2.130  
   2.131 -    m->monitor_table = mk_pagetable(0);
   2.132 +    ed->arch.monitor_table = mk_pagetable(0);
   2.133  }
   2.134  
   2.135  static int vmx_final_setup_guestos(struct exec_domain *ed,
   2.136 @@ -375,21 +375,21 @@ static int vmx_final_setup_guestos(struc
   2.137          return -ENOMEM;
   2.138      }
   2.139  
   2.140 -    memset(&ed->thread.arch_vmx, 0, sizeof (struct arch_vmx_struct));
   2.141 +    memset(&ed->arch.arch_vmx, 0, sizeof (struct arch_vmx_struct));
   2.142  
   2.143 -    ed->thread.arch_vmx.vmcs = vmcs;
   2.144 -    error = construct_vmcs(&ed->thread.arch_vmx, context, full_context, VMCS_USE_HOST_ENV);
   2.145 +    ed->arch.arch_vmx.vmcs = vmcs;
   2.146 +    error = construct_vmcs(&ed->arch.arch_vmx, context, full_context, VMCS_USE_HOST_ENV);
   2.147      if (error < 0) {
   2.148          printk("Failed to construct a new VMCS\n");
   2.149          goto out;
   2.150      }
   2.151  
   2.152      monitor_mk_pagetable(ed);
   2.153 -    ed->thread.schedule_tail = arch_vmx_do_launch;
   2.154 -    clear_bit(VMX_CPU_STATE_PG_ENABLED, &ed->thread.arch_vmx.cpu_state);
   2.155 +    ed->arch.schedule_tail = arch_vmx_do_launch;
   2.156 +    clear_bit(VMX_CPU_STATE_PG_ENABLED, &ed->arch.arch_vmx.cpu_state);
   2.157  
   2.158  #if defined (__i386)
   2.159 -    ed->thread.arch_vmx.vmx_platform.real_mode_data = 
   2.160 +    ed->arch.arch_vmx.vmx_platform.real_mode_data = 
   2.161          (unsigned long *) context->esi;
   2.162  #endif
   2.163  
   2.164 @@ -406,12 +406,13 @@ static int vmx_final_setup_guestos(struc
   2.165  
   2.166  out:
   2.167      free_vmcs(vmcs);
   2.168 -    ed->thread.arch_vmx.vmcs = 0;
   2.169 +    ed->arch.arch_vmx.vmcs = 0;
   2.170      return error;
   2.171  }
   2.172  #endif
   2.173  
   2.174 -int arch_final_setup_guestos(struct exec_domain *d, full_execution_context_t *c)
   2.175 +int arch_final_setup_guestos(
   2.176 +    struct exec_domain *d, full_execution_context_t *c)
   2.177  {
   2.178      unsigned long phys_basetab;
   2.179      int i, rc;
   2.180 @@ -420,13 +421,13 @@ int arch_final_setup_guestos(struct exec
   2.181      if ( c->flags & ECF_I387_VALID )
   2.182          set_bit(EDF_DONEFPUINIT, &d->ed_flags);
   2.183  
   2.184 -    memcpy(&d->thread.user_ctxt,
   2.185 +    memcpy(&d->arch.user_ctxt,
   2.186             &c->cpu_ctxt,
   2.187 -           sizeof(d->thread.user_ctxt));
   2.188 +           sizeof(d->arch.user_ctxt));
   2.189  
   2.190      /* Clear IOPL for unprivileged domains. */
   2.191      if (!IS_PRIV(d->domain))
   2.192 -        d->thread.user_ctxt.eflags &= 0xffffcfff;
   2.193 +        d->arch.user_ctxt.eflags &= 0xffffcfff;
   2.194  
   2.195      /*
   2.196       * This is sufficient! If the descriptor DPL differs from CS RPL then we'll
   2.197 @@ -434,37 +435,37 @@ int arch_final_setup_guestos(struct exec
   2.198       * If SS RPL or DPL differs from CS RPL then we'll #GP.
   2.199       */
   2.200      if (!(c->flags & ECF_VMX_GUEST)) 
   2.201 -        if ( ((d->thread.user_ctxt.cs & 3) == 0) ||
   2.202 -             ((d->thread.user_ctxt.ss & 3) == 0) )
   2.203 +        if ( ((d->arch.user_ctxt.cs & 3) == 0) ||
   2.204 +             ((d->arch.user_ctxt.ss & 3) == 0) )
   2.205                  return -EINVAL;
   2.206  
   2.207 -    memcpy(&d->thread.i387,
   2.208 +    memcpy(&d->arch.i387,
   2.209             &c->fpu_ctxt,
   2.210 -           sizeof(d->thread.i387));
   2.211 +           sizeof(d->arch.i387));
   2.212  
   2.213 -    memcpy(d->thread.traps,
   2.214 +    memcpy(d->arch.traps,
   2.215             &c->trap_ctxt,
   2.216 -           sizeof(d->thread.traps));
   2.217 +           sizeof(d->arch.traps));
   2.218  
   2.219      if ( (rc = (int)set_fast_trap(d, c->fast_trap_idx)) != 0 )
   2.220          return rc;
   2.221  
   2.222 -    d->mm.ldt_base = c->ldt_base;
   2.223 -    d->mm.ldt_ents = c->ldt_ents;
   2.224 +    d->arch.ldt_base = c->ldt_base;
   2.225 +    d->arch.ldt_ents = c->ldt_ents;
   2.226  
   2.227 -    d->thread.guestos_ss = c->guestos_ss;
   2.228 -    d->thread.guestos_sp = c->guestos_esp;
   2.229 +    d->arch.guestos_ss = c->guestos_ss;
   2.230 +    d->arch.guestos_sp = c->guestos_esp;
   2.231  
   2.232      for ( i = 0; i < 8; i++ )
   2.233          (void)set_debugreg(d, i, c->debugreg[i]);
   2.234  
   2.235 -    d->thread.event_selector    = c->event_callback_cs;
   2.236 -    d->thread.event_address     = c->event_callback_eip;
   2.237 -    d->thread.failsafe_selector = c->failsafe_callback_cs;
   2.238 -    d->thread.failsafe_address  = c->failsafe_callback_eip;
   2.239 +    d->arch.event_selector    = c->event_callback_cs;
   2.240 +    d->arch.event_address     = c->event_callback_eip;
   2.241 +    d->arch.failsafe_selector = c->failsafe_callback_cs;
   2.242 +    d->arch.failsafe_address  = c->failsafe_callback_eip;
   2.243      
   2.244      phys_basetab = c->pt_base;
   2.245 -    d->mm.pagetable = mk_pagetable(phys_basetab);
   2.246 +    d->arch.pagetable = mk_pagetable(phys_basetab);
   2.247      if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d->domain, 
   2.248                              PGT_base_page_table) )
   2.249          return -EINVAL;
   2.250 @@ -494,7 +495,7 @@ void new_thread(struct exec_domain *d,
   2.251                  unsigned long start_stack,
   2.252                  unsigned long start_info)
   2.253  {
   2.254 -    execution_context_t *ec = &d->thread.user_ctxt;
   2.255 +    execution_context_t *ec = &d->arch.user_ctxt;
   2.256  
   2.257      /*
   2.258       * Initial register values:
   2.259 @@ -519,19 +520,18 @@ void new_thread(struct exec_domain *d,
   2.260  /*
   2.261   * This special macro can be used to load a debugging register
   2.262   */
   2.263 -#define loaddebug(thread,register) \
   2.264 -		__asm__("mov %0,%%db" #register  \
   2.265 +#define loaddebug(_ed,_reg) \
   2.266 +		__asm__("mov %0,%%db" #_reg  \
   2.267  			: /* no output */ \
   2.268 -			:"r" (thread->debugreg[register]))
   2.269 +			:"r" ((_ed)->debugreg[_reg]))
   2.270  
   2.271  void switch_to(struct exec_domain *prev_p, struct exec_domain *next_p)
   2.272  {
   2.273 -    struct thread_struct *next = &next_p->thread;
   2.274      struct tss_struct *tss = init_tss + smp_processor_id();
   2.275      execution_context_t *stack_ec = get_execution_context();
   2.276      int i;
   2.277  #ifdef CONFIG_VMX
   2.278 -    unsigned long vmx_domain = next_p->thread.arch_vmx.flags; 
   2.279 +    unsigned long vmx_domain = next_p->arch.arch_vmx.flags; 
   2.280  #endif
   2.281  
   2.282      __cli();
   2.283 @@ -539,73 +539,73 @@ void switch_to(struct exec_domain *prev_
   2.284      /* Switch guest general-register state. */
   2.285      if ( !is_idle_task(prev_p->domain) )
   2.286      {
   2.287 -        memcpy(&prev_p->thread.user_ctxt,
   2.288 +        memcpy(&prev_p->arch.user_ctxt,
   2.289                 stack_ec, 
   2.290                 sizeof(*stack_ec));
   2.291          unlazy_fpu(prev_p);
   2.292 -        CLEAR_FAST_TRAP(&prev_p->thread);
   2.293 +        CLEAR_FAST_TRAP(&prev_p->arch);
   2.294      }
   2.295  
   2.296      if ( !is_idle_task(next_p->domain) )
   2.297      {
   2.298          memcpy(stack_ec,
   2.299 -               &next_p->thread.user_ctxt,
   2.300 +               &next_p->arch.user_ctxt,
   2.301                 sizeof(*stack_ec));
   2.302  
   2.303          /* Maybe switch the debug registers. */
   2.304 -        if ( unlikely(next->debugreg[7]) )
   2.305 +        if ( unlikely(next_p->arch.debugreg[7]) )
   2.306          {
   2.307 -            loaddebug(next, 0);
   2.308 -            loaddebug(next, 1);
   2.309 -            loaddebug(next, 2);
   2.310 -            loaddebug(next, 3);
   2.311 +            loaddebug(&next_p->arch, 0);
   2.312 +            loaddebug(&next_p->arch, 1);
   2.313 +            loaddebug(&next_p->arch, 2);
   2.314 +            loaddebug(&next_p->arch, 3);
   2.315              /* no 4 and 5 */
   2.316 -            loaddebug(next, 6);
   2.317 -            loaddebug(next, 7);
   2.318 +            loaddebug(&next_p->arch, 6);
   2.319 +            loaddebug(&next_p->arch, 7);
   2.320          }
   2.321  
   2.322  #ifdef CONFIG_VMX
   2.323          if ( vmx_domain )
   2.324          {
   2.325              /* Switch page tables. */
   2.326 -            write_ptbase(&next_p->mm);
   2.327 +            write_ptbase(next_p);
   2.328   
   2.329              set_current(next_p);
   2.330              /* Switch GDT and LDT. */
   2.331 -            __asm__ __volatile__ ("lgdt %0" : "=m" (*next_p->mm.gdt));
   2.332 +            __asm__ __volatile__ ("lgdt %0" : "=m" (*next_p->arch.gdt));
   2.333  
   2.334              __sti();
   2.335              return;
   2.336          }
   2.337  #endif
   2.338   
   2.339 -        SET_FAST_TRAP(&next_p->thread);
   2.340 +        SET_FAST_TRAP(&next_p->arch);
   2.341  
   2.342  #ifdef __i386__
   2.343          /* Switch the guest OS ring-1 stack. */
   2.344 -        tss->esp1 = next->guestos_sp;
   2.345 -        tss->ss1  = next->guestos_ss;
   2.346 +        tss->esp1 = next_p->arch.guestos_sp;
   2.347 +        tss->ss1  = next_p->arch.guestos_ss;
   2.348  #endif
   2.349  
   2.350          /* Switch page tables. */
   2.351 -        write_ptbase(&next_p->mm);
   2.352 +        write_ptbase(next_p);
   2.353      }
   2.354  
   2.355 -    if ( unlikely(prev_p->thread.io_bitmap != NULL) )
   2.356 +    if ( unlikely(prev_p->arch.io_bitmap != NULL) )
   2.357      {
   2.358 -        for ( i = 0; i < sizeof(prev_p->thread.io_bitmap_sel) * 8; i++ )
   2.359 -            if ( !test_bit(i, &prev_p->thread.io_bitmap_sel) )
   2.360 +        for ( i = 0; i < sizeof(prev_p->arch.io_bitmap_sel) * 8; i++ )
   2.361 +            if ( !test_bit(i, &prev_p->arch.io_bitmap_sel) )
   2.362                  memset(&tss->io_bitmap[i * IOBMP_BYTES_PER_SELBIT],
   2.363                         ~0U, IOBMP_BYTES_PER_SELBIT);
   2.364          tss->bitmap = IOBMP_INVALID_OFFSET;
   2.365      }
   2.366  
   2.367 -    if ( unlikely(next_p->thread.io_bitmap != NULL) )
   2.368 +    if ( unlikely(next_p->arch.io_bitmap != NULL) )
   2.369      {
   2.370 -        for ( i = 0; i < sizeof(next_p->thread.io_bitmap_sel) * 8; i++ )
   2.371 -            if ( !test_bit(i, &next_p->thread.io_bitmap_sel) )
   2.372 +        for ( i = 0; i < sizeof(next_p->arch.io_bitmap_sel) * 8; i++ )
   2.373 +            if ( !test_bit(i, &next_p->arch.io_bitmap_sel) )
   2.374                  memcpy(&tss->io_bitmap[i * IOBMP_BYTES_PER_SELBIT],
   2.375 -                       &next_p->thread.io_bitmap[i * IOBMP_BYTES_PER_SELBIT],
   2.376 +                       &next_p->arch.io_bitmap[i * IOBMP_BYTES_PER_SELBIT],
   2.377                         IOBMP_BYTES_PER_SELBIT);
   2.378          tss->bitmap = IOBMP_OFFSET;
   2.379      }
   2.380 @@ -613,7 +613,7 @@ void switch_to(struct exec_domain *prev_
   2.381      set_current(next_p);
   2.382  
   2.383      /* Switch GDT and LDT. */
   2.384 -    __asm__ __volatile__ ("lgdt %0" : "=m" (*next_p->mm.gdt));
   2.385 +    __asm__ __volatile__ ("lgdt %0" : "=m" (*next_p->arch.gdt));
   2.386      load_LDT(next_p);
   2.387  
   2.388      __sti();
   2.389 @@ -731,9 +731,9 @@ static void vmx_domain_relinquish_memory
   2.390      /*
   2.391       * Free VMCS
   2.392       */
   2.393 -    ASSERT(ed->thread.arch_vmx.vmcs);
   2.394 -    free_vmcs(ed->thread.arch_vmx.vmcs);
   2.395 -    ed->thread.arch_vmx.vmcs = 0;
   2.396 +    ASSERT(ed->arch.arch_vmx.vmcs);
   2.397 +    free_vmcs(ed->arch.arch_vmx.vmcs);
   2.398 +    ed->arch.arch_vmx.vmcs = 0;
   2.399      
   2.400      monitor_rm_pagetable(ed);
   2.401  
   2.402 @@ -744,7 +744,7 @@ static void vmx_domain_relinquish_memory
   2.403          for (i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++) {
   2.404              unsigned long l1e;
   2.405              
   2.406 -            l1e = l1_pgentry_val(d->mm_perdomain_pt[i]);
   2.407 +            l1e = l1_pgentry_val(d->arch.mm_perdomain_pt[i]);
   2.408              if (l1e & _PAGE_PRESENT) {
   2.409                  pfn = l1e >> PAGE_SHIFT;
   2.410                  free_domheap_page(&frame_table[pfn]);
   2.411 @@ -768,8 +768,8 @@ void domain_relinquish_memory(struct dom
   2.412      /* Drop the in-use reference to the page-table base. */
   2.413      for_each_exec_domain ( d, ed )
   2.414      {
   2.415 -        if ( pagetable_val(ed->mm.pagetable) != 0 )
   2.416 -            put_page_and_type(&frame_table[pagetable_val(ed->mm.pagetable) >>
   2.417 +        if ( pagetable_val(ed->arch.pagetable) != 0 )
   2.418 +            put_page_and_type(&frame_table[pagetable_val(ed->arch.pagetable) >>
   2.419                                             PAGE_SHIFT]);
   2.420      }
   2.421  
     3.1 --- a/xen/arch/x86/i387.c	Sat Feb 05 15:42:48 2005 +0000
     3.2 +++ b/xen/arch/x86/i387.c	Sat Feb 05 18:20:15 2005 +0000
     3.3 @@ -1,3 +1,4 @@
     3.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     3.5  /*
     3.6   *  linux/arch/i386/kernel/i387.c
     3.7   *
     3.8 @@ -24,10 +25,10 @@ static inline void __save_init_fpu( stru
     3.9  {
    3.10      if ( cpu_has_fxsr ) {
    3.11          asm volatile( "fxsave %0 ; fnclex"
    3.12 -                      : "=m" (tsk->thread.i387) );
    3.13 +                      : "=m" (tsk->arch.i387) );
    3.14      } else {
    3.15          asm volatile( "fnsave %0 ; fwait"
    3.16 -                      : "=m" (tsk->thread.i387) );
    3.17 +                      : "=m" (tsk->arch.i387) );
    3.18      }
    3.19      clear_bit(EDF_USEDFPU, &tsk->ed_flags);
    3.20  }
    3.21 @@ -48,9 +49,9 @@ void restore_fpu( struct exec_domain *ts
    3.22  {
    3.23      if ( cpu_has_fxsr ) {
    3.24          asm volatile( "fxrstor %0"
    3.25 -                      : : "m" (tsk->thread.i387) );
    3.26 +                      : : "m" (tsk->arch.i387) );
    3.27      } else {
    3.28          asm volatile( "frstor %0"
    3.29 -                      : : "m" (tsk->thread.i387) );
    3.30 +                      : : "m" (tsk->arch.i387) );
    3.31      }
    3.32  }
     4.1 --- a/xen/arch/x86/idle0_task.c	Sat Feb 05 15:42:48 2005 +0000
     4.2 +++ b/xen/arch/x86/idle0_task.c	Sat Feb 05 18:20:15 2005 +0000
     4.3 @@ -1,24 +1,19 @@
     4.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     4.5 +
     4.6  #include <xen/config.h>
     4.7  #include <xen/sched.h>
     4.8  #include <asm/desc.h>
     4.9  
    4.10 -#define IDLE0_EXEC_DOMAIN(_ed,_d)    \
    4.11 -{                                    \
    4.12 -    processor:   0,                  \
    4.13 -    mm:          IDLE0_MM,           \
    4.14 -    thread:      INIT_THREAD,        \
    4.15 -    domain:      (_d)                \
    4.16 -}
    4.17 +struct domain idle0_domain = {
    4.18 +    id:          IDLE_DOMAIN_ID,
    4.19 +    d_flags:     1<<DF_IDLETASK,
    4.20 +    refcnt:      ATOMIC_INIT(1)
    4.21 +};
    4.22  
    4.23 -#define IDLE0_DOMAIN(_t)             \
    4.24 -{                                    \
    4.25 -    id:          IDLE_DOMAIN_ID,     \
    4.26 -    d_flags:     1<<DF_IDLETASK,     \
    4.27 -    refcnt:      ATOMIC_INIT(1)      \
    4.28 -}
    4.29 -
    4.30 -struct domain idle0_domain = IDLE0_DOMAIN(idle0_domain);
    4.31 -struct exec_domain idle0_exec_domain = IDLE0_EXEC_DOMAIN(idle0_exec_domain,
    4.32 -                                                         &idle0_domain);
    4.33 +struct exec_domain idle0_exec_domain = {
    4.34 +    processor:   0,
    4.35 +    domain:      &idle0_domain,
    4.36 +    arch:        IDLE0_ARCH_EXEC_DOMAIN
    4.37 +};
    4.38  
    4.39  struct tss_struct init_tss[NR_CPUS];
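
An aside on the idle0_task.c hunk above: the new initializers keep the old GNU 'field:' designated-initializer spelling already used in this file; in standard C99 syntax the same definition reads as below. 'IDLE0_ARCH_EXEC_DOMAIN' is presumably supplied by the reworked asm-x86/domain.h (listed in the files above but not shown in this excerpt).

    /* C99 equivalent of the GNU 'field:' initializers above; illustration only.
     * IDLE0_ARCH_EXEC_DOMAIN is assumed to come from asm-x86/domain.h. */
    struct exec_domain idle0_exec_domain = {
        .processor = 0,
        .domain    = &idle0_domain,
        .arch      = IDLE0_ARCH_EXEC_DOMAIN,
    };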
     5.1 --- a/xen/arch/x86/memory.c	Sat Feb 05 15:42:48 2005 +0000
     5.2 +++ b/xen/arch/x86/memory.c	Sat Feb 05 18:20:15 2005 +0000
     5.3 @@ -1,3 +1,4 @@
     5.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     5.5  /******************************************************************************
     5.6   * arch/x86/memory.c
     5.7   * 
     5.8 @@ -193,19 +194,41 @@ void arch_init_memory(void)
     5.9      subarch_init_memory(dom_xen);
    5.10  }
    5.11  
    5.12 +void write_ptbase(struct exec_domain *ed)
    5.13 +{
    5.14 +    struct domain *d = ed->domain;
    5.15 +    unsigned long pa;
    5.16 +
    5.17 +#ifdef CONFIG_VMX
    5.18 +    if ( unlikely(d->arch.shadow_mode) )
    5.19 +        pa = ((d->arch.shadow_mode == SHM_full_32) ?
    5.20 +              pagetable_val(ed->arch.monitor_table) :
    5.21 +              pagetable_val(ed->arch.shadow_table));
    5.22 +    else
    5.23 +        pa = pagetable_val(ed->arch.pagetable);
    5.24 +#else
    5.25 +    if ( unlikely(d->arch.shadow_mode) )
    5.26 +        pa = pagetable_val(ed->arch.shadow_table);    
    5.27 +    else
    5.28 +        pa = pagetable_val(ed->arch.pagetable);
    5.29 +#endif
    5.30 +
    5.31 +    write_cr3(pa);
    5.32 +}
    5.33 +
    5.34  static void __invalidate_shadow_ldt(struct exec_domain *d)
    5.35  {
    5.36      int i;
    5.37      unsigned long pfn;
    5.38      struct pfn_info *page;
    5.39      
    5.40 -    d->mm.shadow_ldt_mapcnt = 0;
    5.41 +    d->arch.shadow_ldt_mapcnt = 0;
    5.42  
    5.43      for ( i = 16; i < 32; i++ )
    5.44      {
    5.45 -        pfn = l1_pgentry_to_pagenr(d->mm.perdomain_ptes[i]);
    5.46 +        pfn = l1_pgentry_to_pagenr(d->arch.perdomain_ptes[i]);
    5.47          if ( pfn == 0 ) continue;
    5.48 -        d->mm.perdomain_ptes[i] = mk_l1_pgentry(0);
    5.49 +        d->arch.perdomain_ptes[i] = mk_l1_pgentry(0);
    5.50          page = &frame_table[pfn];
    5.51          ASSERT_PAGE_IS_TYPE(page, PGT_ldt_page);
    5.52          ASSERT_PAGE_IS_DOMAIN(page, d->domain);
    5.53 @@ -219,7 +242,7 @@ static void __invalidate_shadow_ldt(stru
    5.54  
    5.55  static inline void invalidate_shadow_ldt(struct exec_domain *d)
    5.56  {
    5.57 -    if ( d->mm.shadow_ldt_mapcnt != 0 )
    5.58 +    if ( d->arch.shadow_ldt_mapcnt != 0 )
    5.59          __invalidate_shadow_ldt(d);
    5.60  }
    5.61  
    5.62 @@ -252,7 +275,7 @@ int map_ldt_shadow_page(unsigned int off
    5.63      if ( unlikely(in_irq()) )
    5.64          BUG();
    5.65  
    5.66 -    __get_user(l1e, (unsigned long *)&linear_pg_table[(ed->mm.ldt_base >> 
    5.67 +    __get_user(l1e, (unsigned long *)&linear_pg_table[(ed->arch.ldt_base >> 
    5.68                                                         PAGE_SHIFT) + off]);
    5.69  
    5.70      if ( unlikely(!(l1e & _PAGE_PRESENT)) ||
    5.71 @@ -260,8 +283,8 @@ int map_ldt_shadow_page(unsigned int off
    5.72                                       d, PGT_ldt_page)) )
    5.73          return 0;
    5.74  
    5.75 -    ed->mm.perdomain_ptes[off + 16] = mk_l1_pgentry(l1e | _PAGE_RW);
    5.76 -    ed->mm.shadow_ldt_mapcnt++;
    5.77 +    ed->arch.perdomain_ptes[off + 16] = mk_l1_pgentry(l1e | _PAGE_RW);
    5.78 +    ed->arch.shadow_ldt_mapcnt++;
    5.79  
    5.80      return 1;
    5.81  }
    5.82 @@ -512,7 +535,7 @@ static int alloc_l2_table(struct pfn_inf
    5.83      pl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
    5.84          mk_l2_pgentry((page_nr << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    5.85      pl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
    5.86 -        mk_l2_pgentry(__pa(page_get_owner(page)->mm_perdomain_pt) | 
    5.87 +        mk_l2_pgentry(__pa(page_get_owner(page)->arch.mm_perdomain_pt) | 
    5.88                        __PAGE_HYPERVISOR);
    5.89  #endif
    5.90  
    5.91 @@ -747,11 +770,11 @@ void free_page_type(struct pfn_info *pag
    5.92          BUG();
    5.93      }
    5.94  
    5.95 -    if ( unlikely(d->exec_domain[0]->mm.shadow_mode) && 
    5.96 -         (get_shadow_status(&d->exec_domain[0]->mm, page_to_pfn(page)) & PSH_shadowed) )
    5.97 +    if ( unlikely(d->arch.shadow_mode) && 
    5.98 +         (get_shadow_status(d, page_to_pfn(page)) & PSH_shadowed) )
    5.99      {
   5.100          unshadow_table(page_to_pfn(page), type);
   5.101 -        put_shadow_status(&d->exec_domain[0]->mm);
   5.102 +        put_shadow_status(d);
   5.103      }
   5.104  }
   5.105  
   5.106 @@ -922,12 +945,12 @@ int new_guest_cr3(unsigned long pfn)
   5.107          invalidate_shadow_ldt(ed);
   5.108  
   5.109          percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
   5.110 -        old_base_pfn = pagetable_val(ed->mm.pagetable) >> PAGE_SHIFT;
   5.111 -        ed->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
   5.112 +        old_base_pfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT;
   5.113 +        ed->arch.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
   5.114  
   5.115 -        shadow_mk_pagetable(&ed->mm);
   5.116 +        shadow_mk_pagetable(ed);
   5.117  
   5.118 -        write_ptbase(&ed->mm);
   5.119 +        write_ptbase(ed);
   5.120  
   5.121          put_page_and_type(&frame_table[old_base_pfn]);
   5.122      }
   5.123 @@ -1038,12 +1061,12 @@ static int do_extended_command(unsigned 
   5.124              okay = 0;
   5.125              MEM_LOG("Bad args to SET_LDT: ptr=%08lx, ents=%08lx", ptr, ents);
   5.126          }
   5.127 -        else if ( (ed->mm.ldt_ents != ents) || 
   5.128 -                  (ed->mm.ldt_base != ptr) )
   5.129 +        else if ( (ed->arch.ldt_ents != ents) || 
   5.130 +                  (ed->arch.ldt_base != ptr) )
   5.131          {
   5.132              invalidate_shadow_ldt(ed);
   5.133 -            ed->mm.ldt_base = ptr;
   5.134 -            ed->mm.ldt_ents = ents;
   5.135 +            ed->arch.ldt_base = ptr;
   5.136 +            ed->arch.ldt_ents = ents;
   5.137              load_LDT(ed);
   5.138              percpu_info[cpu].deferred_ops &= ~DOP_RELOAD_LDT;
   5.139              if ( ents != 0 )
   5.140 @@ -1409,13 +1432,13 @@ int do_mmu_update(
   5.141                      okay = mod_l1_entry((l1_pgentry_t *)va, 
   5.142                                          mk_l1_pgentry(req.val)); 
   5.143  
   5.144 -                    if ( unlikely(ed->mm.shadow_mode) && okay &&
   5.145 -                         (get_shadow_status(&ed->mm, page-frame_table) &
   5.146 +                    if ( unlikely(d->arch.shadow_mode) && okay &&
   5.147 +                         (get_shadow_status(d, page-frame_table) &
   5.148                            PSH_shadowed) )
   5.149                      {
   5.150                          shadow_l1_normal_pt_update(
   5.151                              req.ptr, req.val, &prev_spfn, &prev_spl1e);
   5.152 -                        put_shadow_status(&ed->mm);
   5.153 +                        put_shadow_status(d);
   5.154                      }
   5.155  
   5.156                      put_page_type(page);
   5.157 @@ -1428,12 +1451,12 @@ int do_mmu_update(
   5.158                                          mk_l2_pgentry(req.val),
   5.159                                          pfn); 
   5.160  
   5.161 -                    if ( unlikely(ed->mm.shadow_mode) && okay &&
   5.162 -                         (get_shadow_status(&ed->mm, page-frame_table) & 
   5.163 +                    if ( unlikely(d->arch.shadow_mode) && okay &&
   5.164 +                         (get_shadow_status(d, page-frame_table) & 
   5.165                            PSH_shadowed) )
   5.166                      {
   5.167                          shadow_l2_normal_pt_update(req.ptr, req.val);
   5.168 -                        put_shadow_status(&ed->mm);
   5.169 +                        put_shadow_status(d);
   5.170                      }
   5.171  
   5.172                      put_page_type(page);
   5.173 @@ -1466,9 +1489,9 @@ int do_mmu_update(
   5.174               * If in log-dirty mode, mark the corresponding pseudo-physical
   5.175               * page as dirty.
   5.176               */
   5.177 -            if ( unlikely(ed->mm.shadow_mode == SHM_logdirty) && 
   5.178 -                 mark_dirty(&ed->mm, pfn) )
   5.179 -                ed->mm.shadow_dirty_block_count++;
   5.180 +            if ( unlikely(d->arch.shadow_mode == SHM_logdirty) && 
   5.181 +                 mark_dirty(d, pfn) )
   5.182 +                d->arch.shadow_dirty_block_count++;
   5.183  
   5.184              put_page(&frame_table[pfn]);
   5.185              break;
   5.186 @@ -1555,11 +1578,11 @@ int do_update_va_mapping(unsigned long p
   5.187                                  mk_l1_pgentry(val))) )
   5.188          err = -EINVAL;
   5.189  
   5.190 -    if ( unlikely(ed->mm.shadow_mode) )
   5.191 +    if ( unlikely(d->arch.shadow_mode) )
   5.192      {
   5.193          unsigned long sval;
   5.194  
   5.195 -        l1pte_propagate_from_guest(&ed->mm, &val, &sval);
   5.196 +        l1pte_propagate_from_guest(d, &val, &sval);
   5.197  
   5.198          if ( unlikely(__put_user(sval, ((unsigned long *)(
   5.199              &shadow_linear_pg_table[page_nr])))) )
   5.200 @@ -1576,10 +1599,10 @@ int do_update_va_mapping(unsigned long p
   5.201           * the PTE in the PT-holding page. We need the machine frame number
   5.202           * for this.
   5.203           */
   5.204 -        if ( ed->mm.shadow_mode == SHM_logdirty )
   5.205 -            mark_dirty(&current->mm, va_to_l1mfn(page_nr << PAGE_SHIFT));  
   5.206 +        if ( d->arch.shadow_mode == SHM_logdirty )
   5.207 +            mark_dirty(d, va_to_l1mfn(page_nr << PAGE_SHIFT));  
   5.208    
   5.209 -        check_pagetable(&ed->mm, ed->mm.pagetable, "va"); /* debug */
   5.210 +        check_pagetable(d, ed->arch.pagetable, "va"); /* debug */
   5.211      }
   5.212  
   5.213      deferred_ops = percpu_info[cpu].deferred_ops;
   5.214 @@ -1673,15 +1696,15 @@ void ptwr_flush(const int which)
   5.215                  PTWR_PRINT_WHICH, ptep, pte);
   5.216      pte &= ~_PAGE_RW;
   5.217  
   5.218 -    if ( unlikely(ed->mm.shadow_mode) )
   5.219 +    if ( unlikely(d->arch.shadow_mode) )
   5.220      {
   5.221          /* Write-protect the p.t. page in the shadow page table. */
   5.222 -        l1pte_propagate_from_guest(&ed->mm, &pte, &spte);
   5.223 +        l1pte_propagate_from_guest(d, &pte, &spte);
   5.224          __put_user(
   5.225              spte, (unsigned long *)&shadow_linear_pg_table[l1va>>PAGE_SHIFT]);
   5.226  
   5.227          /* Is the p.t. page itself shadowed? Map it into Xen space if so. */
   5.228 -        sstat = get_shadow_status(&ed->mm, pte >> PAGE_SHIFT);
   5.229 +        sstat = get_shadow_status(d, pte >> PAGE_SHIFT);
   5.230          if ( sstat & PSH_shadowed )
   5.231              sl1e = map_domain_mem((sstat & PSH_pfn_mask) << PAGE_SHIFT);
   5.232      }
   5.233 @@ -1730,7 +1753,7 @@ void ptwr_flush(const int which)
   5.234              {
   5.235                  if ( unlikely(sl1e != NULL) )
   5.236                      l1pte_propagate_from_guest(
   5.237 -                        &ed->mm, &l1_pgentry_val(nl1e), 
   5.238 +                        d, &l1_pgentry_val(nl1e), 
   5.239                          &l1_pgentry_val(sl1e[i]));
   5.240                  put_page_type(&frame_table[l1_pgentry_to_pagenr(nl1e)]);
   5.241              }
   5.242 @@ -1754,7 +1777,7 @@ void ptwr_flush(const int which)
   5.243          
   5.244          if ( unlikely(sl1e != NULL) )
   5.245              l1pte_propagate_from_guest(
   5.246 -                &ed->mm, &l1_pgentry_val(nl1e), &l1_pgentry_val(sl1e[i]));
   5.247 +                d, &l1_pgentry_val(nl1e), &l1_pgentry_val(sl1e[i]));
   5.248  
   5.249          if ( unlikely(l1_pgentry_val(ol1e) & _PAGE_PRESENT) )
   5.250              put_page_from_l1e(ol1e, d);
   5.251 @@ -1765,7 +1788,7 @@ void ptwr_flush(const int which)
   5.252       * STEP 3. Reattach the L1 p.t. page into the current address space.
   5.253       */
   5.254  
   5.255 -    if ( (which == PTWR_PT_ACTIVE) && likely(!ed->mm.shadow_mode) )
   5.256 +    if ( (which == PTWR_PT_ACTIVE) && likely(!d->arch.shadow_mode) )
   5.257      {
   5.258          pl2e = &linear_l2_table[ptwr_info[cpu].ptinfo[which].l2_idx];
   5.259          *pl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT); 
   5.260 @@ -1780,7 +1803,7 @@ void ptwr_flush(const int which)
   5.261      if ( unlikely(sl1e != NULL) )
   5.262      {
   5.263          unmap_domain_mem(sl1e);
   5.264 -        put_shadow_status(&ed->mm);
   5.265 +        put_shadow_status(d);
   5.266      }
   5.267  }
   5.268  
   5.269 @@ -1868,7 +1891,8 @@ int ptwr_do_page_fault(unsigned long add
   5.270      ptwr_info[cpu].ptinfo[which].l2_idx = l2_idx;
   5.271      
   5.272      /* For safety, disconnect the L1 p.t. page from current space. */
   5.273 -    if ( (which == PTWR_PT_ACTIVE) && likely(!current->mm.shadow_mode) )
   5.274 +    if ( (which == PTWR_PT_ACTIVE) && 
   5.275 +         likely(!current->domain->arch.shadow_mode) )
   5.276      {
   5.277          *pl2e = mk_l2_pgentry(l2e & ~_PAGE_PRESENT);
   5.278  #if 1
   5.279 @@ -2059,7 +2083,7 @@ void audit_domain(struct domain *d)
   5.280      synchronise_pagetables(~0UL);
   5.281  
   5.282      printk("pt base=%lx sh_info=%x\n",
   5.283 -           pagetable_val(d->exec_domain[0]->mm.pagetable)>>PAGE_SHIFT,
   5.284 +           pagetable_val(d->exec_domain[0]->arch.pagetable)>>PAGE_SHIFT,
   5.285             virt_to_page(d->shared_info)-frame_table);
   5.286             
   5.287      spin_lock(&d->page_alloc_lock);
   5.288 @@ -2109,7 +2133,7 @@ void audit_domain(struct domain *d)
   5.289  
   5.290      /* PHASE 1 */
   5.291  
   5.292 -    adjust(&frame_table[pagetable_val(d->exec_domain[0]->mm.pagetable)>>PAGE_SHIFT], -1, 1);
   5.293 +    adjust(&frame_table[pagetable_val(d->exec_domain[0]->arch.pagetable)>>PAGE_SHIFT], -1, 1);
   5.294  
   5.295      list_ent = d->page_list.next;
   5.296      for ( i = 0; (list_ent != &d->page_list); i++ )
   5.297 @@ -2353,7 +2377,8 @@ void audit_domain(struct domain *d)
   5.298  
   5.299      spin_unlock(&d->page_alloc_lock);
   5.300  
   5.301 -    adjust(&frame_table[pagetable_val(d->exec_domain[0]->mm.pagetable)>>PAGE_SHIFT], 1, 1);
   5.302 +    adjust(&frame_table[pagetable_val(
   5.303 +        d->exec_domain[0]->arch.pagetable)>>PAGE_SHIFT], 1, 1);
   5.304  
   5.305      printk("Audit %d: Done. ctot=%d ttot=%d\n", d->id, ctot, ttot );
   5.306  
     6.1 --- a/xen/arch/x86/setup.c	Sat Feb 05 15:42:48 2005 +0000
     6.2 +++ b/xen/arch/x86/setup.c	Sat Feb 05 18:20:15 2005 +0000
     6.3 @@ -1,3 +1,4 @@
     6.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     6.5  
     6.6  #include <xen/config.h>
     6.7  #include <xen/init.h>
     6.8 @@ -308,7 +309,7 @@ void __init cpu_init(void)
     6.9      /* Set up GDT and IDT. */
    6.10      SET_GDT_ENTRIES(current, DEFAULT_GDT_ENTRIES);
    6.11      SET_GDT_ADDRESS(current, DEFAULT_GDT_ADDRESS);
    6.12 -    __asm__ __volatile__ ( "lgdt %0" : "=m" (*current->mm.gdt) );
    6.13 +    __asm__ __volatile__ ( "lgdt %0" : "=m" (*current->arch.gdt) );
    6.14      __asm__ __volatile__ ( "lidt %0" : "=m" (idt_descr) );
    6.15  
    6.16      /* No nested task. */
    6.17 @@ -338,7 +339,7 @@ void __init cpu_init(void)
    6.18      percpu_traps_init();
    6.19  
    6.20      /* Install correct page table. */
    6.21 -    write_ptbase(&current->mm);
    6.22 +    write_ptbase(current);
    6.23  
    6.24      init_idle_task();
    6.25  }
     7.1 --- a/xen/arch/x86/shadow.c	Sat Feb 05 15:42:48 2005 +0000
     7.2 +++ b/xen/arch/x86/shadow.c	Sat Feb 05 18:20:15 2005 +0000
     7.3 @@ -28,9 +28,9 @@ hypercall lock anyhow (at least initiall
     7.4  ********/
     7.5  
     7.6  static inline void free_shadow_page(
     7.7 -    struct mm_struct *m, struct pfn_info *page)
     7.8 +    struct domain *d, struct pfn_info *page)
     7.9  {
    7.10 -    m->shadow_page_count--;
    7.11 +    d->arch.shadow_page_count--;
    7.12  
    7.13      switch ( page->u.inuse.type_info & PGT_type_mask )
    7.14      {
    7.15 @@ -51,7 +51,7 @@ static inline void free_shadow_page(
    7.16      free_domheap_page(page);
    7.17  }
    7.18  
    7.19 -static void free_shadow_state(struct mm_struct *m)
    7.20 +static void free_shadow_state(struct domain *d)
    7.21  {
    7.22      int                   i, free = 0;
    7.23      struct shadow_status *x, *n;
    7.24 @@ -61,19 +61,19 @@ static void free_shadow_state(struct mm_
    7.25       * e.g., You are expected to have paused the domain and synchronized CR3.
    7.26       */
    7.27  
    7.28 -    shadow_audit(m, 1);
    7.29 +    shadow_audit(d, 1);
    7.30  
    7.31      /* Free each hash chain in turn. */
    7.32      for ( i = 0; i < shadow_ht_buckets; i++ )
    7.33      {
    7.34          /* Skip empty buckets. */
    7.35 -        x = &m->shadow_ht[i];
    7.36 +        x = &d->arch.shadow_ht[i];
    7.37          if ( x->pfn == 0 )
    7.38              continue;
    7.39  
    7.40          /* Free the head page. */
    7.41          free_shadow_page(
    7.42 -            m, &frame_table[x->spfn_and_flags & PSH_pfn_mask]);
    7.43 +            d, &frame_table[x->spfn_and_flags & PSH_pfn_mask]);
    7.44  
    7.45          /* Reinitialise the head node. */
    7.46          x->pfn            = 0;
    7.47 @@ -88,7 +88,7 @@ static void free_shadow_state(struct mm_
    7.48          { 
    7.49              /* Free the shadow page. */
    7.50              free_shadow_page(
    7.51 -                m, &frame_table[x->spfn_and_flags & PSH_pfn_mask]);
    7.52 +                d, &frame_table[x->spfn_and_flags & PSH_pfn_mask]);
    7.53  
    7.54              /* Re-initialise the chain node. */
    7.55              x->pfn            = 0;
    7.56 @@ -96,20 +96,20 @@ static void free_shadow_state(struct mm_
    7.57  
    7.58              /* Add to the free list. */
    7.59              n                 = x->next;
    7.60 -            x->next           = m->shadow_ht_free;
    7.61 -            m->shadow_ht_free = x;
    7.62 +            x->next           = d->arch.shadow_ht_free;
    7.63 +            d->arch.shadow_ht_free = x;
    7.64  
    7.65              free++;
    7.66          }
    7.67  
    7.68 -        shadow_audit(m, 0);
    7.69 +        shadow_audit(d, 0);
    7.70      }
    7.71  
    7.72      SH_LOG("Free shadow table. Freed=%d.", free);
    7.73  }
    7.74  
    7.75  static inline int clear_shadow_page(
    7.76 -    struct mm_struct *m, struct shadow_status *x)
    7.77 +    struct domain *d, struct shadow_status *x)
    7.78  {
    7.79      unsigned long   *p;
    7.80      int              restart = 0;
    7.81 @@ -120,7 +120,7 @@ static inline int clear_shadow_page(
    7.82          /* We clear L2 pages by zeroing the guest entries. */
    7.83      case PGT_l2_page_table:
    7.84          p = map_domain_mem((spage - frame_table) << PAGE_SHIFT);
    7.85 -        if (m->shadow_mode == SHM_full_32)
    7.86 +        if (d->arch.shadow_mode == SHM_full_32)
    7.87              memset(p, 0, ENTRIES_PER_L2_PAGETABLE * sizeof(*p));
    7.88          else 
    7.89              memset(p, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(*p));
    7.90 @@ -129,8 +129,8 @@ static inline int clear_shadow_page(
    7.91  
    7.92          /* We clear L1 pages by freeing them: no benefit from zeroing them. */
    7.93      case PGT_l1_page_table:
    7.94 -        delete_shadow_status(m, x->pfn);
    7.95 -        free_shadow_page(m, spage);
    7.96 +        delete_shadow_status(d, x->pfn);
    7.97 +        free_shadow_page(d, spage);
    7.98          restart = 1; /* We need to go to start of list again. */
    7.99          break;
   7.100      }
   7.101 @@ -138,29 +138,29 @@ static inline int clear_shadow_page(
   7.102      return restart;
   7.103  }
   7.104  
   7.105 -static void clear_shadow_state(struct mm_struct *m)
   7.106 +static void clear_shadow_state(struct domain *d)
   7.107  {
   7.108      int                   i;
   7.109      struct shadow_status *x;
   7.110   
   7.111 -    shadow_audit(m, 1);
   7.112 +    shadow_audit(d, 1);
   7.113  
   7.114      for ( i = 0; i < shadow_ht_buckets; i++ )
   7.115      {
   7.116      retry:
   7.117          /* Skip empty buckets. */
   7.118 -        x = &m->shadow_ht[i];
   7.119 +        x = &d->arch.shadow_ht[i];
   7.120          if ( x->pfn == 0 )
   7.121              continue;
   7.122  
   7.123 -        if ( clear_shadow_page(m, x) )
   7.124 +        if ( clear_shadow_page(d, x) )
   7.125              goto retry;
   7.126  
   7.127          for ( x = x->next; x != NULL; x = x->next )
   7.128 -            if ( clear_shadow_page(m, x) )
   7.129 +            if ( clear_shadow_page(d, x) )
   7.130                  goto retry;
   7.131  
   7.132 -        shadow_audit(m, 0);
   7.133 +        shadow_audit(d, 0);
   7.134      }
   7.135  
   7.136      SH_VLOG("Scan shadow table. l1=%d l2=%d",
   7.137 @@ -172,119 +172,118 @@ void shadow_mode_init(void)
   7.138  {
   7.139  }
   7.140  
   7.141 -int shadow_mode_enable(struct domain *p, unsigned int mode)
   7.142 +int shadow_mode_enable(struct domain *d, unsigned int mode)
   7.143  {
   7.144 -    struct mm_struct *m = &p->exec_domain[0]->mm;
   7.145 -
   7.146 -    m->shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets);
   7.147 -    if ( m->shadow_ht == NULL )
   7.148 +    d->arch.shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets);
   7.149 +    if ( d->arch.shadow_ht == NULL )
   7.150          goto nomem;
   7.151 -    memset(m->shadow_ht, 0, shadow_ht_buckets * sizeof(struct shadow_status));
   7.152 +    memset(d->arch.shadow_ht, 0,
   7.153 +           shadow_ht_buckets * sizeof(struct shadow_status));
   7.154  
   7.155      if ( mode == SHM_logdirty )
   7.156      {
   7.157 -        m->shadow_dirty_bitmap_size = (p->max_pages + 63) & ~63;
   7.158 -        m->shadow_dirty_bitmap = 
   7.159 -            xmalloc_array(unsigned long, m->shadow_dirty_bitmap_size /
   7.160 +        d->arch.shadow_dirty_bitmap_size = (d->max_pages + 63) & ~63;
   7.161 +        d->arch.shadow_dirty_bitmap = 
   7.162 +            xmalloc_array(unsigned long, d->arch.shadow_dirty_bitmap_size /
   7.163                                           (8 * sizeof(unsigned long)));
   7.164 -        if ( m->shadow_dirty_bitmap == NULL )
   7.165 +        if ( d->arch.shadow_dirty_bitmap == NULL )
   7.166          {
   7.167 -            m->shadow_dirty_bitmap_size = 0;
   7.168 +            d->arch.shadow_dirty_bitmap_size = 0;
   7.169              goto nomem;
   7.170          }
   7.171 -        memset(m->shadow_dirty_bitmap, 0, m->shadow_dirty_bitmap_size/8);
   7.172 +        memset(d->arch.shadow_dirty_bitmap, 0, 
   7.173 +               d->arch.shadow_dirty_bitmap_size/8);
   7.174      }
   7.175  
   7.176 -    m->shadow_mode = mode;
   7.177 +    d->arch.shadow_mode = mode;
   7.178  
   7.179 -    __shadow_mk_pagetable(m);
   7.180 +    __shadow_mk_pagetable(d->exec_domain[0]); /* XXX SMP */
   7.181      return 0;
   7.182  
   7.183   nomem:
   7.184 -    if ( m->shadow_ht != NULL )
   7.185 -        xfree( m->shadow_ht );
   7.186 -    m->shadow_ht = NULL;
   7.187 +    if ( d->arch.shadow_ht != NULL )
   7.188 +        xfree(d->arch.shadow_ht);
   7.189 +    d->arch.shadow_ht = NULL;
   7.190      return -ENOMEM;
   7.191  }
   7.192  
   7.193  void __shadow_mode_disable(struct domain *d)
   7.194  {
   7.195 -    struct mm_struct *m = &d->exec_domain[0]->mm;
   7.196      struct shadow_status *x, *n;
   7.197  
   7.198 -    free_shadow_state(m);
   7.199 -    m->shadow_mode = 0;
   7.200 +    free_shadow_state(d);
   7.201 +    d->arch.shadow_mode = 0;
   7.202  
   7.203      SH_VLOG("freed tables count=%d l1=%d l2=%d",
   7.204 -            m->shadow_page_count, perfc_value(shadow_l1_pages), 
   7.205 +            d->arch.shadow_page_count, perfc_value(shadow_l1_pages), 
   7.206              perfc_value(shadow_l2_pages));
   7.207  
   7.208 -    n = m->shadow_ht_extras;
   7.209 +    n = d->arch.shadow_ht_extras;
   7.210      while ( (x = n) != NULL )
   7.211      {
   7.212 -        m->shadow_extras_count--;
   7.213 +        d->arch.shadow_extras_count--;
   7.214          n = *((struct shadow_status **)(&x[shadow_ht_extra_size]));
   7.215          xfree(x);
   7.216      }
   7.217  
   7.218 -    m->shadow_ht_extras = NULL;
   7.219 -    ASSERT(m->shadow_extras_count == 0);
   7.220 -    SH_LOG("freed extras, now %d", m->shadow_extras_count);
   7.221 +    d->arch.shadow_ht_extras = NULL;
   7.222 +    ASSERT(d->arch.shadow_extras_count == 0);
   7.223 +    SH_LOG("freed extras, now %d", d->arch.shadow_extras_count);
   7.224  
   7.225 -    if ( m->shadow_dirty_bitmap != NULL )
   7.226 +    if ( d->arch.shadow_dirty_bitmap != NULL )
   7.227      {
   7.228 -        xfree(m->shadow_dirty_bitmap);
   7.229 -        m->shadow_dirty_bitmap = 0;
   7.230 -        m->shadow_dirty_bitmap_size = 0;
   7.231 +        xfree(d->arch.shadow_dirty_bitmap);
   7.232 +        d->arch.shadow_dirty_bitmap = 0;
   7.233 +        d->arch.shadow_dirty_bitmap_size = 0;
   7.234      }
   7.235  
   7.236 -    xfree(m->shadow_ht);
   7.237 -    m->shadow_ht = NULL;
   7.238 +    xfree(d->arch.shadow_ht);
   7.239 +    d->arch.shadow_ht = NULL;
   7.240  }
   7.241  
   7.242  static int shadow_mode_table_op(
   7.243      struct domain *d, dom0_shadow_control_t *sc)
   7.244  {
   7.245      unsigned int      op = sc->op;
   7.246 -    struct mm_struct *m = &d->exec_domain[0]->mm;
   7.247      int               i, rc = 0;
   7.248  
   7.249 -    ASSERT(spin_is_locked(&m->shadow_lock));
   7.250 +    ASSERT(spin_is_locked(&d->arch.shadow_lock));
   7.251  
   7.252      SH_VLOG("shadow mode table op %08lx %08lx count %d",
   7.253 -            pagetable_val(m->pagetable), pagetable_val(m->shadow_table),
   7.254 -            m->shadow_page_count);
   7.255 +            pagetable_val(d->exec_domain[0]->arch.pagetable),    /* XXX SMP */
   7.256 +            pagetable_val(d->exec_domain[0]->arch.shadow_table), /* XXX SMP */
   7.257 +            d->arch.shadow_page_count);
   7.258  
   7.259 -    shadow_audit(m, 1);
   7.260 +    shadow_audit(d, 1);
   7.261  
   7.262      switch ( op )
   7.263      {
   7.264      case DOM0_SHADOW_CONTROL_OP_FLUSH:
   7.265 -        free_shadow_state(m);
   7.266 +        free_shadow_state(d);
   7.267  
   7.268 -        m->shadow_fault_count       = 0;
   7.269 -        m->shadow_dirty_count       = 0;
   7.270 -        m->shadow_dirty_net_count   = 0;
   7.271 -        m->shadow_dirty_block_count = 0;
   7.272 +        d->arch.shadow_fault_count       = 0;
   7.273 +        d->arch.shadow_dirty_count       = 0;
   7.274 +        d->arch.shadow_dirty_net_count   = 0;
   7.275 +        d->arch.shadow_dirty_block_count = 0;
   7.276  
   7.277          break;
   7.278     
   7.279      case DOM0_SHADOW_CONTROL_OP_CLEAN:
   7.280 -        clear_shadow_state(m);
   7.281 +        clear_shadow_state(d);
   7.282  
   7.283 -        sc->stats.fault_count       = m->shadow_fault_count;
   7.284 -        sc->stats.dirty_count       = m->shadow_dirty_count;
   7.285 -        sc->stats.dirty_net_count   = m->shadow_dirty_net_count;
   7.286 -        sc->stats.dirty_block_count = m->shadow_dirty_block_count;
   7.287 +        sc->stats.fault_count       = d->arch.shadow_fault_count;
   7.288 +        sc->stats.dirty_count       = d->arch.shadow_dirty_count;
   7.289 +        sc->stats.dirty_net_count   = d->arch.shadow_dirty_net_count;
   7.290 +        sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count;
   7.291  
   7.292 -        m->shadow_fault_count       = 0;
   7.293 -        m->shadow_dirty_count       = 0;
   7.294 -        m->shadow_dirty_net_count   = 0;
   7.295 -        m->shadow_dirty_block_count = 0;
   7.296 +        d->arch.shadow_fault_count       = 0;
   7.297 +        d->arch.shadow_dirty_count       = 0;
   7.298 +        d->arch.shadow_dirty_net_count   = 0;
   7.299 +        d->arch.shadow_dirty_block_count = 0;
   7.300   
   7.301          if ( (d->max_pages > sc->pages) || 
   7.302               (sc->dirty_bitmap == NULL) || 
   7.303 -             (m->shadow_dirty_bitmap == NULL) )
   7.304 +             (d->arch.shadow_dirty_bitmap == NULL) )
   7.305          {
   7.306              rc = -EINVAL;
   7.307              break;
   7.308 @@ -300,34 +299,35 @@ static int shadow_mode_table_op(
   7.309       
   7.310              if (copy_to_user(
   7.311                      sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
   7.312 -                    m->shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
   7.313 +                    d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
   7.314                      bytes))
   7.315              {
   7.316                  // copy_to_user can fail when copying to guest app memory.
   7.317                  // app should zero buffer after mallocing, and pin it
   7.318                  rc = -EINVAL;
   7.319                  memset(
   7.320 -                    m->shadow_dirty_bitmap + (i/(8*sizeof(unsigned long))),
   7.321 +                    d->arch.shadow_dirty_bitmap + 
   7.322 +                    (i/(8*sizeof(unsigned long))),
   7.323                      0, (d->max_pages/8) - (i/(8*sizeof(unsigned long))));
   7.324                  break;
   7.325              }
   7.326  
   7.327              memset(
   7.328 -                m->shadow_dirty_bitmap + (i/(8*sizeof(unsigned long))),
   7.329 +                d->arch.shadow_dirty_bitmap + (i/(8*sizeof(unsigned long))),
   7.330                  0, bytes);
   7.331          }
   7.332  
   7.333          break;
   7.334  
   7.335      case DOM0_SHADOW_CONTROL_OP_PEEK:
   7.336 -        sc->stats.fault_count       = m->shadow_fault_count;
   7.337 -        sc->stats.dirty_count       = m->shadow_dirty_count;
   7.338 -        sc->stats.dirty_net_count   = m->shadow_dirty_net_count;
   7.339 -        sc->stats.dirty_block_count = m->shadow_dirty_block_count;
   7.340 +        sc->stats.fault_count       = d->arch.shadow_fault_count;
   7.341 +        sc->stats.dirty_count       = d->arch.shadow_dirty_count;
   7.342 +        sc->stats.dirty_net_count   = d->arch.shadow_dirty_net_count;
   7.343 +        sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count;
   7.344   
   7.345          if ( (d->max_pages > sc->pages) || 
   7.346               (sc->dirty_bitmap == NULL) || 
   7.347 -             (m->shadow_dirty_bitmap == NULL) )
   7.348 +             (d->arch.shadow_dirty_bitmap == NULL) )
   7.349          {
   7.350              rc = -EINVAL;
   7.351              break;
   7.352 @@ -335,7 +335,7 @@ static int shadow_mode_table_op(
   7.353   
   7.354          sc->pages = d->max_pages;
   7.355          if (copy_to_user(
   7.356 -            sc->dirty_bitmap, m->shadow_dirty_bitmap, (d->max_pages+7)/8))
   7.357 +            sc->dirty_bitmap, d->arch.shadow_dirty_bitmap, (d->max_pages+7)/8))
   7.358          {
   7.359              rc = -EINVAL;
   7.360              break;
   7.361 @@ -348,9 +348,9 @@ static int shadow_mode_table_op(
   7.362          break;
   7.363      }
   7.364  
   7.365 -    SH_VLOG("shadow mode table op : page count %d", m->shadow_page_count);
   7.366 -    shadow_audit(m, 1);
   7.367 -    __shadow_mk_pagetable(m);
   7.368 +    SH_VLOG("shadow mode table op : page count %d", d->arch.shadow_page_count);
   7.369 +    shadow_audit(d, 1);
   7.370 +    __shadow_mk_pagetable(d->exec_domain[0]); /* XXX SMP */
   7.371      return rc;
   7.372  }
   7.373  
   7.374 @@ -368,7 +368,7 @@ int shadow_mode_control(struct domain *d
   7.375      domain_pause(d);
   7.376      synchronise_pagetables(~0UL);
   7.377  
   7.378 -    shadow_lock(&d->exec_domain[0]->mm);
   7.379 +    shadow_lock(d);
   7.380  
   7.381      switch ( op )
   7.382      {
   7.383 @@ -387,27 +387,27 @@ int shadow_mode_control(struct domain *d
   7.384          break;
   7.385  
   7.386      default:
   7.387 -        rc = shadow_mode(d->exec_domain[0]) ? shadow_mode_table_op(d, sc) : -EINVAL;
   7.388 +        rc = shadow_mode(d) ? shadow_mode_table_op(d, sc) : -EINVAL;
   7.389          break;
   7.390      }
   7.391  
   7.392 -    shadow_unlock(&d->exec_domain[0]->mm);
   7.393 +    shadow_unlock(d);
   7.394  
   7.395      domain_unpause(d);
   7.396  
   7.397      return rc;
   7.398  }
   7.399  
   7.400 -static inline struct pfn_info *alloc_shadow_page(struct mm_struct *m)
   7.401 +static inline struct pfn_info *alloc_shadow_page(struct domain *d)
   7.402  {
   7.403      struct pfn_info *page = alloc_domheap_page(NULL);
   7.404  
   7.405 -    m->shadow_page_count++;
   7.406 +    d->arch.shadow_page_count++;
   7.407  
   7.408      if ( unlikely(page == NULL) )
   7.409      {
   7.410          printk("Couldn't alloc shadow page! count=%d\n",
   7.411 -               m->shadow_page_count);
   7.412 +               d->arch.shadow_page_count);
   7.413          SH_VLOG("Shadow tables l1=%d l2=%d",
   7.414                  perfc_value(shadow_l1_pages), 
   7.415                  perfc_value(shadow_l2_pages));
   7.416 @@ -431,35 +431,35 @@ void unshadow_table(unsigned long gpfn, 
   7.417       * guests there won't be a race here as this CPU was the one that 
   7.418       * cmpxchg'ed the page to invalid.
   7.419       */
   7.420 -    spfn = __shadow_status(&d->exec_domain[0]->mm, gpfn) & PSH_pfn_mask;
   7.421 -    delete_shadow_status(&d->exec_domain[0]->mm, gpfn);
   7.422 -    free_shadow_page(&d->exec_domain[0]->mm, &frame_table[spfn]);
   7.423 +    spfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
   7.424 +    delete_shadow_status(d, gpfn);
   7.425 +    free_shadow_page(d, &frame_table[spfn]);
   7.426  }
   7.427  
   7.428  #ifdef CONFIG_VMX
   7.429 -void vmx_shadow_clear_state(struct mm_struct *m) 
   7.430 +void vmx_shadow_clear_state(struct domain *d)
   7.431  {
   7.432      SH_VVLOG("vmx_clear_shadow_state: \n");
   7.433 -    clear_shadow_state(m);
   7.434 +    clear_shadow_state(d);
   7.435  }
   7.436  #endif
   7.437  
   7.438  
   7.439  unsigned long shadow_l2_table( 
   7.440 -    struct mm_struct *m, unsigned long gpfn)
   7.441 +    struct domain *d, unsigned long gpfn)
   7.442  {
   7.443      struct pfn_info *spfn_info;
   7.444      unsigned long    spfn;
   7.445      l2_pgentry_t    *spl2e = 0;
   7.446      unsigned long guest_gpfn;
   7.447  
   7.448 -    __get_machine_to_phys(m, guest_gpfn, gpfn);
   7.449 +    __get_machine_to_phys(d, guest_gpfn, gpfn);
   7.450  
   7.451      SH_VVLOG("shadow_l2_table( %08lx )", gpfn);
   7.452  
   7.453      perfc_incrc(shadow_l2_table_count);
   7.454  
   7.455 -    if ( (spfn_info = alloc_shadow_page(m)) == NULL )
   7.456 +    if ( (spfn_info = alloc_shadow_page(d)) == NULL )
   7.457          BUG(); /* XXX Deal gracefully with failure. */
   7.458  
   7.459      spfn_info->u.inuse.type_info = PGT_l2_page_table;
   7.460 @@ -467,13 +467,13 @@ unsigned long shadow_l2_table(
   7.461  
   7.462      spfn = spfn_info - frame_table;
   7.463    /* Mark pfn as being shadowed; update field to point at shadow. */
   7.464 -    set_shadow_status(m, guest_gpfn, spfn | PSH_shadowed);
   7.465 +    set_shadow_status(d, guest_gpfn, spfn | PSH_shadowed);
   7.466   
   7.467  #ifdef __i386__
    7.468      /* Install hypervisor and 2x linear p.t. mappings. */
   7.469 -    if ( m->shadow_mode == SHM_full_32 )
   7.470 +    if ( d->arch.shadow_mode == SHM_full_32 )
   7.471      {
   7.472 -        vmx_update_shadow_state(m, gpfn, spfn);
   7.473 +        vmx_update_shadow_state(d->exec_domain[0], gpfn, spfn);
   7.474      }
   7.475      else
   7.476      {
   7.477 @@ -494,12 +494,12 @@ unsigned long shadow_l2_table(
   7.478          spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
   7.479              mk_l2_pgentry((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   7.480          spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
   7.481 -            mk_l2_pgentry(__pa(page_get_owner(&frame_table[gpfn])->mm_perdomain_pt) |
   7.482 +            mk_l2_pgentry(__pa(page_get_owner(&frame_table[gpfn])->arch.mm_perdomain_pt) |
   7.483                            __PAGE_HYPERVISOR);
   7.484      }
   7.485  #endif
   7.486  
   7.487 -    if ( m->shadow_mode != SHM_full_32 ) 
   7.488 +    if ( d->arch.shadow_mode != SHM_full_32 ) 
   7.489          unmap_domain_mem(spl2e);
   7.490  
   7.491      SH_VLOG("shadow_l2_table( %08lx -> %08lx)", gpfn, spfn);
   7.492 @@ -508,22 +508,23 @@ unsigned long shadow_l2_table(
   7.493  
   7.494  static void shadow_map_l1_into_current_l2(unsigned long va)
   7.495  { 
   7.496 -    struct mm_struct *m = &current->mm;
   7.497 +    struct exec_domain *ed = current;
   7.498 +    struct domain *d = ed->domain;
   7.499      unsigned long    *gpl1e, *spl1e, gpl2e, spl2e, gl1pfn, sl1pfn=0, sl1ss;
   7.500      struct pfn_info  *sl1pfn_info;
   7.501      int               i;
   7.502  
   7.503 -    __guest_get_pl2e(m, va, &gpl2e);
   7.504 +    __guest_get_pl2e(ed, va, &gpl2e);
   7.505  
   7.506      gl1pfn = gpl2e >> PAGE_SHIFT;
   7.507  
   7.508 -    sl1ss = __shadow_status(m, gl1pfn);
   7.509 +    sl1ss = __shadow_status(d, gl1pfn);
   7.510      if ( !(sl1ss & PSH_shadowed) )
   7.511      {
   7.512          /* This L1 is NOT already shadowed so we need to shadow it. */
   7.513          SH_VVLOG("4a: l1 not shadowed ( %08lx )", sl1pfn);
   7.514  
   7.515 -        sl1pfn_info = alloc_shadow_page(m);
   7.516 +        sl1pfn_info = alloc_shadow_page(d);
   7.517          sl1pfn_info->u.inuse.type_info = PGT_l1_page_table;
   7.518     
   7.519          sl1pfn = sl1pfn_info - frame_table;
   7.520 @@ -531,12 +532,12 @@ static void shadow_map_l1_into_current_l
   7.521          perfc_incrc(shadow_l1_table_count);
   7.522          perfc_incr(shadow_l1_pages);
   7.523  
   7.524 -        set_shadow_status(m, gl1pfn, PSH_shadowed | sl1pfn);
   7.525 +        set_shadow_status(d, gl1pfn, PSH_shadowed | sl1pfn);
   7.526  
   7.527 -        l2pde_general(m, &gpl2e, &spl2e, sl1pfn);
   7.528 +        l2pde_general(d, &gpl2e, &spl2e, sl1pfn);
   7.529  
   7.530 -        __guest_set_pl2e(m, va, gpl2e);
   7.531 -        __shadow_set_pl2e(m, va, spl2e);
   7.532 +        __guest_set_pl2e(ed, va, gpl2e);
   7.533 +        __shadow_set_pl2e(ed, va, spl2e);
   7.534  
   7.535          gpl1e = (unsigned long *) &(linear_pg_table[
   7.536              (va>>L1_PAGETABLE_SHIFT) & ~(ENTRIES_PER_L1_PAGETABLE-1)]);
   7.537 @@ -545,7 +546,7 @@ static void shadow_map_l1_into_current_l
   7.538              (va>>L1_PAGETABLE_SHIFT) & ~(ENTRIES_PER_L1_PAGETABLE-1)]);
   7.539  
   7.540          for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
   7.541 -            l1pte_propagate_from_guest(m, &gpl1e[i], &spl1e[i]);
   7.542 +            l1pte_propagate_from_guest(d, &gpl1e[i], &spl1e[i]);
   7.543      }
   7.544      else
   7.545      {
   7.546 @@ -553,20 +554,20 @@ static void shadow_map_l1_into_current_l
   7.547          SH_VVLOG("4b: was shadowed, l2 missing ( %08lx )", sl1pfn);
   7.548  
   7.549          sl1pfn = sl1ss & PSH_pfn_mask;
   7.550 -        l2pde_general(m, &gpl2e, &spl2e, sl1pfn);
   7.551 -        __guest_set_pl2e(m, va, gpl2e);
   7.552 -        __shadow_set_pl2e(m, va, spl2e);
   7.553 +        l2pde_general(d, &gpl2e, &spl2e, sl1pfn);
   7.554 +        __guest_set_pl2e(ed, va, gpl2e);
   7.555 +        __shadow_set_pl2e(ed, va, spl2e);
   7.556      }              
   7.557  }
   7.558  
   7.559  #ifdef CONFIG_VMX
   7.560 -void vmx_shadow_invlpg(struct mm_struct *m, unsigned long va)
   7.561 +void vmx_shadow_invlpg(struct domain *d, unsigned long va)
   7.562  {
   7.563      unsigned long gpte, spte, host_pfn;
   7.564  
   7.565      if (__put_user(0L, (unsigned long *)
   7.566                     &shadow_linear_pg_table[va >> PAGE_SHIFT])) {
   7.567 -        vmx_shadow_clear_state(m);
   7.568 +        vmx_shadow_clear_state(d);
   7.569          return;
   7.570      }
   7.571  
   7.572 @@ -588,11 +589,12 @@ void vmx_shadow_invlpg(struct mm_struct 
   7.573  int shadow_fault(unsigned long va, long error_code)
   7.574  {
   7.575      unsigned long gpte, spte;
   7.576 -    struct mm_struct *m = &current->mm;
   7.577 +    struct exec_domain *ed = current;
   7.578 +    struct domain *d = ed->domain;
   7.579  
   7.580      SH_VVLOG("shadow_fault( va=%08lx, code=%ld )", va, error_code );
   7.581  
   7.582 -    check_pagetable(m, current->mm.pagetable, "pre-sf");
   7.583 +    check_pagetable(d, ed->arch.pagetable, "pre-sf");
   7.584  
   7.585      /*
   7.586       * STEP 1. A fast-reject set of checks with no locking.
   7.587 @@ -621,20 +623,20 @@ int shadow_fault(unsigned long va, long 
   7.588       * STEP 2. Take the shadow lock and re-check the guest PTE.
   7.589       */
   7.590  
   7.591 -    shadow_lock(m);
   7.592 +    shadow_lock(d);
   7.593   
   7.594      if ( unlikely(__get_user(gpte, (unsigned long *)
   7.595                               &linear_pg_table[va >> PAGE_SHIFT])) )
   7.596      {
   7.597          SH_VVLOG("shadow_fault - EXIT: read gpte faulted" );
   7.598 -        shadow_unlock(m);
   7.599 +        shadow_unlock(d);
   7.600          return 0;
   7.601      }
   7.602  
   7.603      if ( unlikely(!(gpte & _PAGE_PRESENT)) )
   7.604      {
   7.605          SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte );
   7.606 -        shadow_unlock(m);
   7.607 +        shadow_unlock(d);
   7.608          return 0;
   7.609      }
   7.610  
   7.611 @@ -645,15 +647,15 @@ int shadow_fault(unsigned long va, long 
   7.612          {
   7.613              /* Write fault on a read-only mapping. */
   7.614              SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%lx)", gpte);
   7.615 -            shadow_unlock(m);
   7.616 +            shadow_unlock(d);
   7.617              return 0;
   7.618          }
   7.619  
   7.620 -        l1pte_write_fault(m, &gpte, &spte);
   7.621 +        l1pte_write_fault(d, &gpte, &spte);
   7.622      }
   7.623      else
   7.624      {
   7.625 -        l1pte_read_fault(m, &gpte, &spte);
   7.626 +        l1pte_read_fault(d, &gpte, &spte);
   7.627      }
   7.628  
   7.629      /*
   7.630 @@ -678,11 +680,11 @@ int shadow_fault(unsigned long va, long 
   7.631      }
   7.632  
   7.633      perfc_incrc(shadow_fixup_count);
   7.634 -    m->shadow_fault_count++;
   7.635 +    d->arch.shadow_fault_count++;
   7.636  
   7.637 -    shadow_unlock(m);
   7.638 +    shadow_unlock(d);
   7.639  
   7.640 -    check_pagetable(m, current->mm.pagetable, "post-sf");
   7.641 +    check_pagetable(d, ed->arch.pagetable, "post-sf");
   7.642      return EXCRET_fault_fixed;
   7.643  }
   7.644  
   7.645 @@ -700,7 +702,7 @@ void shadow_l1_normal_pt_update(
   7.646               "prev_spfn=%08lx, prev_spl1e=%p\n",
   7.647               pa, gpte, prev_spfn, prev_spl1e);
   7.648  
   7.649 -    spfn = __shadow_status(&current->mm, pa >> PAGE_SHIFT) & PSH_pfn_mask;
   7.650 +    spfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) & PSH_pfn_mask;
   7.651  
   7.652      if ( spfn == prev_spfn )
   7.653      {
   7.654 @@ -715,7 +717,7 @@ void shadow_l1_normal_pt_update(
   7.655          *prev_spl1e_ptr = spl1e;
   7.656      }
   7.657  
   7.658 -    l1pte_propagate_from_guest(&current->mm, &gpte, &spte);
   7.659 +    l1pte_propagate_from_guest(current->domain, &gpte, &spte);
   7.660      spl1e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)] = mk_l1_pgentry(spte);
   7.661  }
   7.662  
   7.663 @@ -728,13 +730,13 @@ void shadow_l2_normal_pt_update(unsigned
   7.664      /* N.B. To get here, we know the l2 page *must* be shadowed. */
   7.665      SH_VVLOG("shadow_l2_normal_pt_update pa=%08lx, gpte=%08lx",pa,gpte);
   7.666  
   7.667 -    spfn = __shadow_status(&current->mm, pa >> PAGE_SHIFT) & PSH_pfn_mask;
   7.668 +    spfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) & PSH_pfn_mask;
   7.669  
   7.670      s_sh = (gpte & _PAGE_PRESENT) ?
   7.671 -        __shadow_status(&current->mm, gpte >> PAGE_SHIFT) : 0;
   7.672 +        __shadow_status(current->domain, gpte >> PAGE_SHIFT) : 0;
   7.673  
   7.674      /* XXXX Should mark guest pte as DIRTY and ACCESSED too! */
   7.675 -    l2pde_general(&current->mm, &gpte, &spte, s_sh);
   7.676 +    l2pde_general(current->domain, &gpte, &spte, s_sh);
   7.677      spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT);
   7.678      spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)] = mk_l2_pgentry(spte);
   7.679      unmap_domain_mem(spl2e);
   7.680 @@ -761,13 +763,11 @@ char * sh_check_name;
   7.681      } while ( 0 )
   7.682  
   7.683  static int check_pte(
   7.684 -    struct mm_struct *m, unsigned long gpte, unsigned long spte, 
   7.685 +    struct domain *d, unsigned long gpte, unsigned long spte, 
   7.686      int level, int i)
   7.687  {
   7.688      unsigned long mask, gpfn, spfn;
   7.689 -#ifdef CONFIG_VMX
   7.690      unsigned long guest_gpfn;
   7.691 -#endif
   7.692  
   7.693      if ( (spte == 0) || (spte == 0xdeadface) || (spte == 0x00000E00) )
   7.694          return 1;  /* always safe */
   7.695 @@ -811,18 +811,18 @@ static int check_pte(
   7.696          if ( level < 2 )
   7.697              FAIL("Shadow in L1 entry?");
   7.698  
   7.699 -        if (m->shadow_mode == SHM_full_32) {
   7.700 +        if (d->arch.shadow_mode == SHM_full_32) {
   7.701  
   7.702              guest_gpfn = phys_to_machine_mapping[gpfn];
   7.703  
   7.704 -            if ( __shadow_status(m, guest_gpfn) != (PSH_shadowed | spfn) )
   7.705 +            if ( __shadow_status(d, guest_gpfn) != (PSH_shadowed | spfn) )
   7.706                  FAIL("spfn problem g.sf=%08lx", 
   7.707 -                     __shadow_status(m, guest_gpfn) );
   7.708 +                     __shadow_status(d, guest_gpfn) );
   7.709              
   7.710          } else {
   7.711 -            if ( __shadow_status(m, gpfn) != (PSH_shadowed | spfn) )
   7.712 +            if ( __shadow_status(d, gpfn) != (PSH_shadowed | spfn) )
   7.713                  FAIL("spfn problem g.sf=%08lx", 
   7.714 -                     __shadow_status(m, gpfn) );
   7.715 +                     __shadow_status(d, gpfn) );
   7.716          }
   7.717  
   7.718      }
   7.719 @@ -832,7 +832,7 @@ static int check_pte(
   7.720  
   7.721  
   7.722  static int check_l1_table(
   7.723 -    struct mm_struct *m, unsigned long va, 
   7.724 +    struct domain *d, unsigned long va, 
   7.725      unsigned long g2, unsigned long s2)
   7.726  {
   7.727      int i;
   7.728 @@ -842,7 +842,7 @@ static int check_l1_table(
   7.729      spl1e = map_domain_mem(s2 << PAGE_SHIFT);
   7.730  
   7.731      for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
   7.732 -        check_pte(m, gpl1e[i], spl1e[i], 1, i);
   7.733 +        check_pte(d, gpl1e[i], spl1e[i], 1, i);
   7.734   
   7.735      unmap_domain_mem(spl1e);
   7.736      unmap_domain_mem(gpl1e);
   7.737 @@ -856,11 +856,11 @@ static int check_l1_table(
   7.738          BUG();                                                 \
   7.739      } while ( 0 )
   7.740  
   7.741 -int check_pagetable(struct mm_struct *m, pagetable_t pt, char *s)
   7.742 +int check_pagetable(struct domain *d, pagetable_t pt, char *s)
   7.743  {
   7.744      unsigned long gptbase = pagetable_val(pt);
   7.745      unsigned long gpfn, spfn;
   7.746 -    int           i;
   7.747 +    unsigned long i;
   7.748      l2_pgentry_t *gpl2e, *spl2e;
   7.749      unsigned long host_gpfn = 0;
   7.750  
   7.751 @@ -872,22 +872,22 @@ int check_pagetable(struct mm_struct *m,
   7.752  
   7.753      gpfn = gptbase >> PAGE_SHIFT;
   7.754  
   7.755 -    __get_phys_to_machine(m, host_gpfn, gpfn);
   7.756 +    __get_phys_to_machine(d, host_gpfn, gpfn);
   7.757    
   7.758 -    if ( ! (__shadow_status(m, gpfn) & PSH_shadowed) )
   7.759 +    if ( ! (__shadow_status(d, gpfn) & PSH_shadowed) )
   7.760      {
   7.761          printk("%s-PT %08lx not shadowed\n", s, gptbase);
   7.762  
   7.763 -        if( __shadow_status(m, gpfn) != 0 ) BUG();
   7.764 +        if( __shadow_status(d, gpfn) != 0 ) BUG();
   7.765              return 0;
   7.766      }   
   7.767   
   7.768 -    spfn = __shadow_status(m, gpfn) & PSH_pfn_mask;
   7.769 +    spfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
   7.770  
   7.771 -    if ( ! __shadow_status(m, gpfn) == (PSH_shadowed | spfn) )
    7.772 +    if ( __shadow_status(d, gpfn) != (PSH_shadowed | spfn) )
   7.773              FAILPT("ptbase shadow inconsistent1");
   7.774  
   7.775 -    if (m->shadow_mode == SHM_full_32) 
   7.776 +    if (d->arch.shadow_mode == SHM_full_32) 
   7.777      {
   7.778          host_gpfn = phys_to_machine_mapping[gpfn];
   7.779          gpl2e = (l2_pgentry_t *) map_domain_mem( host_gpfn << PAGE_SHIFT );
   7.780 @@ -922,23 +922,23 @@ int check_pagetable(struct mm_struct *m,
   7.781                                     L2_PAGETABLE_SHIFT]),
   7.782                 (spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   7.783  
   7.784 -    if (m->shadow_mode != SHM_full_32) {
   7.785 +    if (d->arch.shadow_mode != SHM_full_32) {
   7.786          if ( (l2_pgentry_val(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
   7.787 -              ((__pa(page_get_owner(&frame_table[gpfn])->mm.perdomain_pt) | 
   7.788 +              ((__pa(page_get_owner(&frame_table[gpfn])->arch.mm_perdomain_pt) | 
   7.789              __PAGE_HYPERVISOR))) )
   7.790              FAILPT("hypervisor per-domain map inconsistent");
   7.791      }
   7.792  
   7.793      /* Check the whole L2. */
   7.794      for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
   7.795 -        check_pte(m, l2_pgentry_val(gpl2e[i]), l2_pgentry_val(spl2e[i]), 2, i);
   7.796 +        check_pte(d, l2_pgentry_val(gpl2e[i]), l2_pgentry_val(spl2e[i]), 2, i);
   7.797  
   7.798      /* Go back and recurse. */
   7.799      for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
   7.800      {
   7.801          if ( l2_pgentry_val(spl2e[i]) != 0 )
   7.802              check_l1_table(
   7.803 -                m, i << L2_PAGETABLE_SHIFT,
   7.804 +                d, i << L2_PAGETABLE_SHIFT,
   7.805                  l2_pgentry_val(gpl2e[i]) >> PAGE_SHIFT, 
   7.806                  l2_pgentry_val(spl2e[i]) >> PAGE_SHIFT);
   7.807      }
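
Note: the shadow.c hunks above all apply one mechanical transformation --
shadow-pagetable state that previously lived in each exec_domain's mm_struct
now lives in the per-domain architecture portion, so the shadow entry points
(shadow_lock, free_shadow_state, __shadow_status, alloc_shadow_page, ...)
take a struct domain * instead of a struct mm_struct *. A minimal sketch of
the resulting shape, using simplified stand-in types rather than the real
Xen headers:

    /* Simplified stand-ins; the real structures carry much more state. */
    struct arch_domain_sketch {
        unsigned int   shadow_mode;          /* SHM_* flags           */
        unsigned int   shadow_fault_count;   /* stats formerly in mm  */
        unsigned int   shadow_dirty_count;
        unsigned long *shadow_dirty_bitmap;
    };

    struct domain_sketch {
        struct arch_domain_sketch arch;      /* arch-specific portion */
    };

    /* Stats reset through the domain, as in the FLUSH/CLEAN ops above. */
    static void reset_shadow_stats(struct domain_sketch *d)
    {
        d->arch.shadow_fault_count = 0;
        d->arch.shadow_dirty_count = 0;
    }
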
     8.1 --- a/xen/arch/x86/smpboot.c	Sat Feb 05 15:42:48 2005 +0000
     8.2 +++ b/xen/arch/x86/smpboot.c	Sat Feb 05 18:20:15 2005 +0000
     8.3 @@ -1,3 +1,4 @@
     8.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     8.5  /*
     8.6   *	x86 SMP booting functions
     8.7   *
     8.8 @@ -662,7 +663,7 @@ static void __init do_boot_cpu (int apic
     8.9  
    8.10      set_bit(DF_IDLETASK, &idle->d_flags);
    8.11  
    8.12 -    ed->mm.pagetable = mk_pagetable(__pa(idle_pg_table));
    8.13 +    ed->arch.pagetable = mk_pagetable(__pa(idle_pg_table));
    8.14  
    8.15      map_cpu_to_boot_apicid(cpu, apicid);
    8.16  
     9.1 --- a/xen/arch/x86/traps.c	Sat Feb 05 15:42:48 2005 +0000
     9.2 +++ b/xen/arch/x86/traps.c	Sat Feb 05 18:20:15 2005 +0000
     9.3 @@ -1,3 +1,4 @@
     9.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     9.5  /******************************************************************************
     9.6   * arch/x86/traps.c
     9.7   * 
     9.8 @@ -139,7 +140,7 @@ static inline int do_trap(int trapnr, ch
     9.9                            int use_error_code)
    9.10  {
    9.11      struct exec_domain *ed = current;
    9.12 -    struct trap_bounce *tb = &ed->thread.trap_bounce;
    9.13 +    struct trap_bounce *tb = &ed->arch.trap_bounce;
    9.14      trap_info_t *ti;
    9.15      unsigned long fixup;
    9.16  
    9.17 @@ -148,7 +149,7 @@ static inline int do_trap(int trapnr, ch
    9.18      if ( !GUEST_FAULT(regs) )
    9.19          goto xen_fault;
    9.20  
    9.21 -    ti = current->thread.traps + trapnr;
    9.22 +    ti = current->arch.traps + trapnr;
    9.23      tb->flags = TBF_EXCEPTION;
    9.24      tb->cs    = ti->cs;
    9.25      tb->eip   = ti->address;
    9.26 @@ -206,7 +207,7 @@ DO_ERROR_NOCODE(19, "simd error", simd_c
    9.27  asmlinkage int do_int3(struct xen_regs *regs)
    9.28  {
    9.29      struct exec_domain *ed = current;
    9.30 -    struct trap_bounce *tb = &ed->thread.trap_bounce;
    9.31 +    struct trap_bounce *tb = &ed->arch.trap_bounce;
    9.32      trap_info_t *ti;
    9.33  
    9.34      DEBUGGER_trap_entry(TRAP_int3, regs);
    9.35 @@ -218,7 +219,7 @@ asmlinkage int do_int3(struct xen_regs *
    9.36          panic("CPU%d FATAL TRAP: vector = 3 (Int3)\n", smp_processor_id());
    9.37      }
    9.38  
    9.39 -    ti = current->thread.traps + 3;
    9.40 +    ti = current->arch.traps + 3;
    9.41      tb->flags = TBF_EXCEPTION;
    9.42      tb->cs    = ti->cs;
    9.43      tb->eip   = ti->address;
    9.44 @@ -237,9 +238,9 @@ void propagate_page_fault(unsigned long 
    9.45  {
    9.46      trap_info_t *ti;
    9.47      struct exec_domain *ed = current;
    9.48 -    struct trap_bounce *tb = &ed->thread.trap_bounce;
    9.49 +    struct trap_bounce *tb = &ed->arch.trap_bounce;
    9.50  
    9.51 -    ti = ed->thread.traps + 14;
    9.52 +    ti = ed->arch.traps + 14;
    9.53      tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE | TBF_EXCEPTION_CR2;
    9.54      tb->cr2        = addr;
    9.55      tb->error_code = error_code;
    9.56 @@ -248,7 +249,7 @@ void propagate_page_fault(unsigned long 
    9.57      if ( TI_GET_IF(ti) )
    9.58          ed->vcpu_info->evtchn_upcall_mask = 1;
    9.59  
    9.60 -    ed->mm.guest_cr2 = addr;
    9.61 +    ed->arch.guest_cr2 = addr;
    9.62  }
    9.63  
    9.64  asmlinkage int do_page_fault(struct xen_regs *regs)
    9.65 @@ -282,7 +283,7 @@ asmlinkage int do_page_fault(struct xen_
    9.66               ((regs->error_code & 3) == 3) && /* write-protection fault */
    9.67               ptwr_do_page_fault(addr) )
    9.68          {
    9.69 -            if ( unlikely(ed->mm.shadow_mode) )
    9.70 +            if ( unlikely(d->arch.shadow_mode) )
    9.71                  (void)shadow_fault(addr, regs->error_code);
    9.72              UNLOCK_BIGLOCK(d);
    9.73              return EXCRET_fault_fixed;
    9.74 @@ -290,12 +291,12 @@ asmlinkage int do_page_fault(struct xen_
    9.75          UNLOCK_BIGLOCK(d);
    9.76      }
    9.77  
    9.78 -    if ( unlikely(ed->mm.shadow_mode) && 
    9.79 +    if ( unlikely(d->arch.shadow_mode) && 
    9.80           (addr < PAGE_OFFSET) && shadow_fault(addr, regs->error_code) )
    9.81          return EXCRET_fault_fixed;
    9.82  
    9.83      if ( unlikely(addr >= LDT_VIRT_START(ed)) && 
    9.84 -         (addr < (LDT_VIRT_START(ed) + (ed->mm.ldt_ents*LDT_ENTRY_SIZE))) )
    9.85 +         (addr < (LDT_VIRT_START(ed) + (ed->arch.ldt_ents*LDT_ENTRY_SIZE))) )
    9.86      {
    9.87          /*
    9.88           * Copy a mapping from the guest's LDT, if it is valid. Otherwise we
    9.89 @@ -303,7 +304,7 @@ asmlinkage int do_page_fault(struct xen_
    9.90           */
    9.91          LOCK_BIGLOCK(d);
    9.92          off  = addr - LDT_VIRT_START(ed);
    9.93 -        addr = ed->mm.ldt_base + off;
    9.94 +        addr = ed->arch.ldt_base + off;
    9.95          ret = map_ldt_shadow_page(off >> PAGE_SHIFT);
    9.96          UNLOCK_BIGLOCK(d);
    9.97          if ( likely(ret) )
    9.98 @@ -321,7 +322,7 @@ asmlinkage int do_page_fault(struct xen_
    9.99      if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
   9.100      {
   9.101          perfc_incrc(copy_user_faults);
   9.102 -        if ( !ed->mm.shadow_mode )
   9.103 +        if ( !d->arch.shadow_mode )
   9.104              DPRINTK("Page fault: %p -> %p\n", regs->eip, fixup);
   9.105          regs->eip = fixup;
   9.106          return 0;
   9.107 @@ -388,11 +389,11 @@ static int emulate_privileged_op(struct 
   9.108              break;
   9.109  
   9.110          case 2: /* Read CR2 */
   9.111 -            *reg = ed->mm.guest_cr2;
   9.112 +            *reg = ed->arch.guest_cr2;
   9.113              break;
   9.114              
   9.115          case 3: /* Read CR3 */
   9.116 -            *reg = pagetable_val(ed->mm.pagetable);
   9.117 +            *reg = pagetable_val(ed->arch.pagetable);
   9.118              break;
   9.119  
   9.120          default:
   9.121 @@ -415,7 +416,7 @@ static int emulate_privileged_op(struct 
   9.122              break;
   9.123  
   9.124          case 2: /* Write CR2 */
   9.125 -            ed->mm.guest_cr2 = *reg;
   9.126 +            ed->arch.guest_cr2 = *reg;
   9.127              break;
   9.128              
   9.129          case 3: /* Write CR3 */
   9.130 @@ -465,7 +466,7 @@ static int emulate_privileged_op(struct 
   9.131  asmlinkage int do_general_protection(struct xen_regs *regs)
   9.132  {
   9.133      struct exec_domain *ed = current;
   9.134 -    struct trap_bounce *tb = &ed->thread.trap_bounce;
   9.135 +    struct trap_bounce *tb = &ed->arch.trap_bounce;
   9.136      trap_info_t *ti;
   9.137      unsigned long fixup;
   9.138  
   9.139 @@ -500,7 +501,7 @@ asmlinkage int do_general_protection(str
   9.140      if ( (regs->error_code & 3) == 2 )
   9.141      {
   9.142          /* This fault must be due to <INT n> instruction. */
   9.143 -        ti = current->thread.traps + (regs->error_code>>3);
   9.144 +        ti = current->arch.traps + (regs->error_code>>3);
   9.145          if ( TI_GET_DPL(ti) >= (VM86_MODE(regs) ? 3 : (regs->cs & 3)) )
   9.146          {
   9.147              tb->flags = TBF_EXCEPTION;
   9.148 @@ -523,7 +524,7 @@ asmlinkage int do_general_protection(str
   9.149  #endif
   9.150  
   9.151      /* Pass on GPF as is. */
   9.152 -    ti = current->thread.traps + 13;
   9.153 +    ti = current->arch.traps + 13;
   9.154      tb->flags      = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
   9.155      tb->error_code = regs->error_code;
   9.156   finish_propagation:
   9.157 @@ -615,10 +616,10 @@ asmlinkage int math_state_restore(struct
   9.158  
   9.159      if ( test_and_clear_bit(EDF_GUEST_STTS, &current->ed_flags) )
   9.160      {
   9.161 -        struct trap_bounce *tb = &current->thread.trap_bounce;
   9.162 +        struct trap_bounce *tb = &current->arch.trap_bounce;
   9.163          tb->flags      = TBF_EXCEPTION;
   9.164 -        tb->cs         = current->thread.traps[7].cs;
   9.165 -        tb->eip        = current->thread.traps[7].address;
   9.166 +        tb->cs         = current->arch.traps[7].cs;
   9.167 +        tb->eip        = current->arch.traps[7].address;
   9.168      }
   9.169  
   9.170      return EXCRET_fault_fixed;
   9.171 @@ -628,7 +629,7 @@ asmlinkage int do_debug(struct xen_regs 
   9.172  {
   9.173      unsigned long condition;
   9.174      struct exec_domain *d = current;
   9.175 -    struct trap_bounce *tb = &d->thread.trap_bounce;
   9.176 +    struct trap_bounce *tb = &d->arch.trap_bounce;
   9.177  
   9.178      DEBUGGER_trap_entry(TRAP_debug, regs);
   9.179  
   9.180 @@ -636,7 +637,7 @@ asmlinkage int do_debug(struct xen_regs 
   9.181  
   9.182      /* Mask out spurious debug traps due to lazy DR7 setting */
   9.183      if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
   9.184 -         (d->thread.debugreg[7] == 0) )
   9.185 +         (d->arch.debugreg[7] == 0) )
   9.186      {
   9.187          __asm__("mov %0,%%db7" : : "r" (0UL));
   9.188          goto out;
   9.189 @@ -656,11 +657,11 @@ asmlinkage int do_debug(struct xen_regs 
   9.190      }
   9.191  
   9.192      /* Save debug status register where guest OS can peek at it */
   9.193 -    d->thread.debugreg[6] = condition;
   9.194 +    d->arch.debugreg[6] = condition;
   9.195  
   9.196      tb->flags = TBF_EXCEPTION;
   9.197 -    tb->cs    = d->thread.traps[1].cs;
   9.198 -    tb->eip   = d->thread.traps[1].address;
   9.199 +    tb->cs    = d->arch.traps[1].cs;
   9.200 +    tb->eip   = d->arch.traps[1].address;
   9.201  
   9.202   out:
   9.203      return EXCRET_not_a_fault;
   9.204 @@ -759,7 +760,7 @@ void __init trap_init(void)
   9.205  long do_set_trap_table(trap_info_t *traps)
   9.206  {
   9.207      trap_info_t cur;
   9.208 -    trap_info_t *dst = current->thread.traps;
   9.209 +    trap_info_t *dst = current->arch.traps;
   9.210  
   9.211      LOCK_BIGLOCK(current->domain);
   9.212  
   9.213 @@ -798,10 +799,10 @@ long do_set_callbacks(unsigned long even
   9.214      if ( !VALID_CODESEL(event_selector) || !VALID_CODESEL(failsafe_selector) )
   9.215          return -EPERM;
   9.216  
   9.217 -    d->thread.event_selector    = event_selector;
   9.218 -    d->thread.event_address     = event_address;
   9.219 -    d->thread.failsafe_selector = failsafe_selector;
   9.220 -    d->thread.failsafe_address  = failsafe_address;
   9.221 +    d->arch.event_selector    = event_selector;
   9.222 +    d->arch.event_address     = event_address;
   9.223 +    d->arch.failsafe_selector = failsafe_selector;
   9.224 +    d->arch.failsafe_address  = failsafe_address;
   9.225  
   9.226      return 0;
   9.227  }
   9.228 @@ -876,7 +877,7 @@ long set_debugreg(struct exec_domain *p,
   9.229          return -EINVAL;
   9.230      }
   9.231  
   9.232 -    p->thread.debugreg[reg] = value;
   9.233 +    p->arch.debugreg[reg] = value;
   9.234      return 0;
   9.235  }
   9.236  
   9.237 @@ -888,5 +889,5 @@ long do_set_debugreg(int reg, unsigned l
   9.238  unsigned long do_get_debugreg(int reg)
   9.239  {
   9.240      if ( (reg < 0) || (reg > 7) ) return -EINVAL;
   9.241 -    return current->thread.debugreg[reg];
   9.242 +    return current->arch.debugreg[reg];
   9.243  }
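
Note: in traps.c the per-vcpu pieces (trap table, trap_bounce, debug
registers, guest_cr2) move from ed->thread to ed->arch, while shadow_mode is
now consulted through the owning domain (d->arch.shadow_mode). A minimal
sketch of the exception-bounce pattern the hunks repeat, with simplified
stand-in types for trap_info_t and struct trap_bounce:

    struct trap_info_sketch   { unsigned short cs; unsigned long address; };
    struct trap_bounce_sketch { unsigned long flags; unsigned short cs;
                                unsigned long eip; };

    #define TBF_EXCEPTION 1

    struct exec_domain_sketch {
        struct {
            struct trap_info_sketch   traps[256];  /* was thread.traps       */
            struct trap_bounce_sketch trap_bounce; /* was thread.trap_bounce */
        } arch;
    };

    /* Queue an exception for delivery to the guest, as do_trap() and
     * do_int3() now do via ed->arch. */
    static void bounce_exception(struct exec_domain_sketch *ed, int trapnr)
    {
        struct trap_info_sketch   *ti = &ed->arch.traps[trapnr];
        struct trap_bounce_sketch *tb = &ed->arch.trap_bounce;

        tb->flags = TBF_EXCEPTION;
        tb->cs    = ti->cs;
        tb->eip   = ti->address;
    }
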
    10.1 --- a/xen/arch/x86/vmx.c	Sat Feb 05 15:42:48 2005 +0000
    10.2 +++ b/xen/arch/x86/vmx.c	Sat Feb 05 18:20:15 2005 +0000
    10.3 @@ -1,3 +1,4 @@
    10.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    10.5  /*
    10.6   * vmx.c: handling VMX architecture-related VM exits
    10.7   * Copyright (c) 2004, Intel Corporation.
    10.8 @@ -110,7 +111,6 @@ static int vmx_do_page_fault(unsigned lo
    10.9      unsigned long gpde = 0, gpte, gpa;
   10.10      int result;
   10.11      struct exec_domain *ed = current;
   10.12 -    struct mm_struct *m = &ed->mm;
   10.13  
   10.14  #if VMX_DEBUG
   10.15      {
   10.16 @@ -123,18 +123,18 @@ static int vmx_do_page_fault(unsigned lo
   10.17      /*
   10.18       * Set up guest page directory cache to make linear_pt_table[] work.
   10.19       */
   10.20 -    __guest_get_pl2e(m, va, &gpde);
   10.21 +    __guest_get_pl2e(ed, va, &gpde);
   10.22      if (!(gpde & _PAGE_PRESENT))
   10.23          return 0;
   10.24  
   10.25      index = (va >> L2_PAGETABLE_SHIFT);
   10.26 -    if (!l2_pgentry_val(m->guest_pl2e_cache[index])) {
   10.27 +    if (!l2_pgentry_val(ed->arch.guest_pl2e_cache[index])) {
   10.28          pfn = phys_to_machine_mapping[gpde >> PAGE_SHIFT];
   10.29  
   10.30          VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_do_page_fault: pagetable = %lx\n",
   10.31 -                pagetable_val(m->pagetable));
   10.32 +                pagetable_val(ed->arch.pagetable));
   10.33  
   10.34 -        m->guest_pl2e_cache[index] = 
   10.35 +        ed->arch.guest_pl2e_cache[index] = 
   10.36              mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   10.37      }
   10.38      
   10.39 @@ -246,18 +246,18 @@ static void vmx_dr_access (unsigned long
   10.40      case TYPE_MOV_TO_DR: 
   10.41          /* don't need to check the range */
   10.42          if (reg != REG_ESP)
   10.43 -            ed->thread.debugreg[reg] = *reg_p; 
   10.44 +            ed->arch.debugreg[reg] = *reg_p; 
   10.45          else {
   10.46              unsigned long value;
   10.47              __vmread(GUEST_ESP, &value);
   10.48 -            ed->thread.debugreg[reg] = value;
   10.49 +            ed->arch.debugreg[reg] = value;
   10.50          }
   10.51          break;
   10.52      case TYPE_MOV_FROM_DR:
   10.53          if (reg != REG_ESP)
   10.54 -            *reg_p = ed->thread.debugreg[reg];
   10.55 +            *reg_p = ed->arch.debugreg[reg];
   10.56          else {
   10.57 -            __vmwrite(GUEST_ESP, ed->thread.debugreg[reg]);
   10.58 +            __vmwrite(GUEST_ESP, ed->arch.debugreg[reg]);
   10.59          }
   10.60          break;
   10.61      }
   10.62 @@ -270,7 +270,7 @@ static void vmx_dr_access (unsigned long
   10.63  static void vmx_vmexit_do_invlpg(unsigned long va) 
   10.64  {
   10.65      unsigned long eip;
   10.66 -    struct exec_domain *d = current;
   10.67 +    struct exec_domain *ed = current;
   10.68      unsigned int index;
   10.69  
   10.70      __vmread(GUEST_EIP, &eip);
   10.71 @@ -282,31 +282,31 @@ static void vmx_vmexit_do_invlpg(unsigne
   10.72       * We do the safest things first, then try to update the shadow
   10.73       * copying from guest
   10.74       */
   10.75 -    vmx_shadow_invlpg(&d->mm, va);
   10.76 +    vmx_shadow_invlpg(ed->domain, va);
   10.77      index = (va >> L2_PAGETABLE_SHIFT);
   10.78 -    d->mm.guest_pl2e_cache[index] = mk_l2_pgentry(0); /* invalidate pgd cache */
   10.79 +    ed->arch.guest_pl2e_cache[index] = 
   10.80 +        mk_l2_pgentry(0); /* invalidate pgd cache */
   10.81  }
   10.82  
   10.83 -static inline void guest_pl2e_cache_invalidate(struct mm_struct *m) 
   10.84 +static inline void guest_pl2e_cache_invalidate(struct exec_domain *ed)
   10.85  {
   10.86      /*
   10.87       * Need to optimize this
   10.88       */
   10.89 -    memset(m->guest_pl2e_cache, 0, PAGE_SIZE);
   10.90 +    memset(ed->arch.guest_pl2e_cache, 0, PAGE_SIZE);
   10.91  }
   10.92  
   10.93  inline unsigned long gva_to_gpa(unsigned long gva)
   10.94  {
   10.95      unsigned long gpde, gpte, pfn, index;
   10.96 -    struct exec_domain *d = current;
   10.97 -    struct mm_struct *m = &d->mm;
   10.98 +    struct exec_domain *ed = current;
   10.99  
  10.100 -    __guest_get_pl2e(m, gva, &gpde);
  10.101 +    __guest_get_pl2e(ed, gva, &gpde);
  10.102      index = (gva >> L2_PAGETABLE_SHIFT);
  10.103  
  10.104      pfn = phys_to_machine_mapping[gpde >> PAGE_SHIFT];
  10.105  
  10.106 -    m->guest_pl2e_cache[index] = 
  10.107 +    ed->arch.guest_pl2e_cache[index] = 
  10.108              mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
  10.109  
  10.110      if ( unlikely(__get_user(gpte, (unsigned long *)
  10.111 @@ -350,14 +350,14 @@ static void vmx_io_instruction(struct xe
  10.112          return;
  10.113      }
  10.114  
  10.115 -    vio = (vcpu_iodata_t *) d->thread.arch_vmx.vmx_platform.shared_page_va;
  10.116 +    vio = (vcpu_iodata_t *) d->arch.arch_vmx.vmx_platform.shared_page_va;
  10.117      if (vio == 0) {
  10.118          VMX_DBG_LOG(DBG_LEVEL_1, "bad shared page: %lx\n", (unsigned long) vio);
  10.119          domain_crash(); 
  10.120      }
  10.121      p = &vio->vp_ioreq;
  10.122      p->dir = test_bit(3, &exit_qualification);  
  10.123 -    set_bit(ARCH_VMX_IO_WAIT, &d->thread.arch_vmx.flags);
  10.124 +    set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags);
  10.125  
  10.126      p->pdata_valid = 0;
  10.127      p->count = 1;
  10.128 @@ -443,40 +443,40 @@ static void mov_to_cr(int gp, int cr, st
  10.129          __vmwrite(CR0_READ_SHADOW, value);
  10.130  
  10.131          if (value & (X86_CR0_PE | X86_CR0_PG) &&
  10.132 -            !test_bit(VMX_CPU_STATE_PG_ENABLED, &d->thread.arch_vmx.cpu_state)) {
  10.133 +            !test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state)) {
  10.134              /*
  10.135               * Enable paging
  10.136               */
  10.137 -            set_bit(VMX_CPU_STATE_PG_ENABLED, &d->thread.arch_vmx.cpu_state);
  10.138 +            set_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state);
  10.139              /*
  10.140               * The guest CR3 must be pointing to the guest physical.
  10.141               */
  10.142              if (!(pfn = phys_to_machine_mapping[
  10.143 -                      d->thread.arch_vmx.cpu_cr3 >> PAGE_SHIFT])) 
  10.144 +                      d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT])) 
  10.145              {
  10.146                  VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value = %lx\n", 
  10.147 -                        d->thread.arch_vmx.cpu_cr3);
  10.148 +                        d->arch.arch_vmx.cpu_cr3);
  10.149                  domain_crash(); /* need to take a clean path */
  10.150              }
  10.151 -            old_base_pfn = pagetable_val(d->mm.pagetable) >> PAGE_SHIFT;
  10.152 +            old_base_pfn = pagetable_val(d->arch.pagetable) >> PAGE_SHIFT;
  10.153              /*
   10.154               * Now arch.pagetable points to machine physical.
  10.155               */
  10.156 -            d->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
  10.157 +            d->arch.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
  10.158  
  10.159              VMX_DBG_LOG(DBG_LEVEL_VMMU, "New mm.pagetable = %lx\n", 
  10.160                      (unsigned long) (pfn << PAGE_SHIFT));
  10.161  
  10.162 -            shadow_lock(&d->mm);
  10.163 +            shadow_lock(d->domain);
  10.164              shadow_mode_enable(d->domain, SHM_full_32); 
  10.165 -            shadow_unlock(&d->mm);
  10.166 +            shadow_unlock(d->domain);
  10.167  
  10.168 -            __vmwrite(GUEST_CR3, pagetable_val(d->mm.shadow_table));
  10.169 +            __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
  10.170              /* 
   10.171               * arch.shadow_table should hold the next CR3 for the shadow
  10.172               */
  10.173              VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, pfn = %lx\n", 
  10.174 -                    d->thread.arch_vmx.cpu_cr3, pfn);
  10.175 +                    d->arch.arch_vmx.cpu_cr3, pfn);
  10.176              put_page_and_type(&frame_table[old_base_pfn]);
  10.177  
  10.178          }
  10.179 @@ -489,26 +489,26 @@ static void mov_to_cr(int gp, int cr, st
  10.180          /*
   10.181           * If paging is not enabled yet, simply copy the value to CR3.
  10.182           */
  10.183 -        if (!test_bit(VMX_CPU_STATE_PG_ENABLED, &d->thread.arch_vmx.cpu_state)) {
  10.184 -            d->thread.arch_vmx.cpu_cr3 = value;
  10.185 +        if (!test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state)) {
  10.186 +            d->arch.arch_vmx.cpu_cr3 = value;
  10.187              return;
  10.188          }
  10.189          
  10.190 -        guest_pl2e_cache_invalidate(&d->mm);
  10.191 +        guest_pl2e_cache_invalidate(d);
  10.192          /*
  10.193           * We make a new one if the shadow does not exist.
  10.194           */
  10.195 -        if (value == d->thread.arch_vmx.cpu_cr3) {
  10.196 +        if (value == d->arch.arch_vmx.cpu_cr3) {
  10.197              /* 
   10.198               * This is a simple TLB flush, implying the guest has 
  10.199               * removed some translation or changed page attributes.
  10.200               * We simply invalidate the shadow.
  10.201               */
  10.202              pfn = phys_to_machine_mapping[value >> PAGE_SHIFT];
  10.203 -            if ((pfn << PAGE_SHIFT) != pagetable_val(d->mm.pagetable))
  10.204 +            if ((pfn << PAGE_SHIFT) != pagetable_val(d->arch.pagetable))
  10.205                  __vmx_bug(regs);
  10.206 -            vmx_shadow_clear_state(&d->mm);
  10.207 -            shadow_invalidate(&d->mm);
  10.208 +            vmx_shadow_clear_state(d->domain);
  10.209 +            shadow_invalidate(d);
  10.210          } else {
  10.211              /*
  10.212               * If different, make a shadow. Check if the PDBR is valid
  10.213 @@ -522,16 +522,16 @@ static void mov_to_cr(int gp, int cr, st
  10.214                  domain_crash(); /* need to take a clean path */
  10.215              }
  10.216              pfn = phys_to_machine_mapping[value >> PAGE_SHIFT];
  10.217 -            vmx_shadow_clear_state(&d->mm);
  10.218 -            d->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
  10.219 -            shadow_mk_pagetable(&d->mm);
  10.220 +            vmx_shadow_clear_state(d->domain);
  10.221 +            d->arch.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
  10.222 +            shadow_mk_pagetable(d);
  10.223              /* 
   10.224               * arch.shadow_table should hold the next CR3 for the shadow
  10.225               */
  10.226 -            d->thread.arch_vmx.cpu_cr3 = value;
  10.227 +            d->arch.arch_vmx.cpu_cr3 = value;
  10.228              VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx\n", 
  10.229                      value);
  10.230 -            __vmwrite(GUEST_CR3, pagetable_val(d->mm.shadow_table));
  10.231 +            __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
  10.232          }
  10.233          break;
  10.234      }
  10.235 @@ -549,9 +549,9 @@ static void mov_to_cr(int gp, int cr, st
  10.236           * all TLB entries except global entries.
  10.237           */
  10.238          if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) {
  10.239 -            vmx_shadow_clear_state(&d->mm);
  10.240 -            shadow_invalidate(&d->mm);
  10.241 -            guest_pl2e_cache_invalidate(&d->mm);
  10.242 +            vmx_shadow_clear_state(d->domain);
  10.243 +            shadow_invalidate(d);
  10.244 +            guest_pl2e_cache_invalidate(d);
  10.245          }
  10.246          break;
  10.247      default:
  10.248 @@ -576,7 +576,7 @@ static void mov_from_cr(int cr, int gp, 
  10.249      if (cr != 3)
  10.250          __vmx_bug(regs);
  10.251  
  10.252 -    value = (unsigned long) d->thread.arch_vmx.cpu_cr3;
  10.253 +    value = (unsigned long) d->arch.arch_vmx.cpu_cr3;
  10.254      ASSERT(value);
  10.255  
  10.256      switch (gp) {
  10.257 @@ -799,7 +799,7 @@ asmlinkage void vmx_vmexit_handler(struc
  10.258                      "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx\n",
  10.259                          regs.eax, regs.ebx, regs.ecx, regs.edx, regs.esi,
  10.260                          regs.edi);
  10.261 -            d->thread.arch_vmx.vmx_platform.mpci.inst_decoder_regs = &regs;
  10.262 +            d->arch.arch_vmx.vmx_platform.mpci.inst_decoder_regs = &regs;
  10.263  
  10.264              if (!(error = vmx_do_page_fault(va, error_code))) {
  10.265                  /*
  10.266 @@ -813,7 +813,7 @@ asmlinkage void vmx_vmexit_handler(struc
  10.267                             VECTOR_PG);
  10.268                  __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
  10.269                  __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
  10.270 -                d->thread.arch_vmx.cpu_cr2 = va;
  10.271 +                d->arch.arch_vmx.cpu_cr2 = va;
  10.272              }
  10.273              break;
  10.274          }
  10.275 @@ -935,5 +935,5 @@ asmlinkage void load_cr2(void)
  10.276      struct exec_domain *d = current;
  10.277  
  10.278      local_irq_disable();        
  10.279 -    asm volatile("movl %0,%%cr2": :"r" (d->thread.arch_vmx.cpu_cr2));
  10.280 +    asm volatile("movl %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
  10.281  }
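
Note: vmx.c drops its local mm_struct pointers entirely -- per-vcpu VMX state
is reached as ed->arch.arch_vmx, the cached guest page directory as
ed->arch.guest_pl2e_cache, and the shadow calls take the domain. A sketch of
the per-vcpu PDE-cache invalidation performed on CR3/CR4 writes above, with
a stand-in type (the real cache is one page of l2 entries):

    #include <string.h>

    #define PAGE_SIZE_SKETCH 4096

    struct exec_domain_sketch {
        struct {
            unsigned long *guest_pl2e_cache;  /* was mm.guest_pl2e_cache */
        } arch;
    };

    /* Wipe the cached guest page-directory entries, as
     * guest_pl2e_cache_invalidate() does above. */
    static void pl2e_cache_invalidate(struct exec_domain_sketch *ed)
    {
        memset(ed->arch.guest_pl2e_cache, 0, PAGE_SIZE_SKETCH);
    }
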
    11.1 --- a/xen/arch/x86/vmx_io.c	Sat Feb 05 15:42:48 2005 +0000
    11.2 +++ b/xen/arch/x86/vmx_io.c	Sat Feb 05 18:20:15 2005 +0000
    11.3 @@ -1,3 +1,4 @@
    11.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    11.5  /*
    11.6   * vmx_io.c: handling I/O, interrupts related VMX entry/exit 
    11.7   * Copyright (c) 2004, Intel Corporation.
    11.8 @@ -178,7 +179,7 @@ void vmx_io_assist(struct exec_domain *e
    11.9      struct mi_per_cpu_info *mpci_p;
   11.10      struct xen_regs *inst_decoder_regs;
   11.11  
   11.12 -    mpci_p = &ed->thread.arch_vmx.vmx_platform.mpci;
   11.13 +    mpci_p = &ed->arch.arch_vmx.vmx_platform.mpci;
   11.14      inst_decoder_regs = mpci_p->inst_decoder_regs;
   11.15  
   11.16      /* clear the pending event */
   11.17 @@ -187,7 +188,7 @@ void vmx_io_assist(struct exec_domain *e
   11.18      clear_bit(IOPACKET_PORT>>5, &ed->vcpu_info->evtchn_pending_sel);
   11.19      clear_bit(IOPACKET_PORT, &d->shared_info->evtchn_pending[0]);
   11.20  
   11.21 -    vio = (vcpu_iodata_t *) ed->thread.arch_vmx.vmx_platform.shared_page_va;
   11.22 +    vio = (vcpu_iodata_t *) ed->arch.arch_vmx.vmx_platform.shared_page_va;
   11.23      if (vio == 0) {
   11.24          VMX_DBG_LOG(DBG_LEVEL_1, 
   11.25                      "bad shared page: %lx\n", (unsigned long) vio);
   11.26 @@ -195,14 +196,14 @@ void vmx_io_assist(struct exec_domain *e
   11.27      }
   11.28      p = &vio->vp_ioreq;
   11.29      /* clear IO wait VMX flag */
   11.30 -    if (test_bit(ARCH_VMX_IO_WAIT, &ed->thread.arch_vmx.flags)) {
   11.31 +    if (test_bit(ARCH_VMX_IO_WAIT, &ed->arch.arch_vmx.flags)) {
   11.32          if (p->state != STATE_IORESP_READY) {
    11.33              printk("got a false I/O response\n");
   11.34              do_block();
   11.35          } else {
   11.36              p->state = STATE_INVALID;
   11.37          }
   11.38 -        clear_bit(ARCH_VMX_IO_WAIT, &ed->thread.arch_vmx.flags);
   11.39 +        clear_bit(ARCH_VMX_IO_WAIT, &ed->arch.arch_vmx.flags);
   11.40      } else {
   11.41          return;
   11.42      }
   11.43 @@ -218,10 +219,10 @@ void vmx_io_assist(struct exec_domain *e
   11.44              }
   11.45              int size = -1, index = -1;
   11.46  
   11.47 -            size = operand_size(ed->thread.arch_vmx.vmx_platform.mpci.mmio_target);
   11.48 -            index = operand_index(ed->thread.arch_vmx.vmx_platform.mpci.mmio_target);
   11.49 +            size = operand_size(ed->arch.arch_vmx.vmx_platform.mpci.mmio_target);
   11.50 +            index = operand_index(ed->arch.arch_vmx.vmx_platform.mpci.mmio_target);
   11.51  
   11.52 -            if (ed->thread.arch_vmx.vmx_platform.mpci.mmio_target & WZEROEXTEND) {
   11.53 +            if (ed->arch.arch_vmx.vmx_platform.mpci.mmio_target & WZEROEXTEND) {
   11.54                  p->u.data = p->u.data & 0xffff;
   11.55              }        
   11.56              set_reg_value(size, index, 0, (struct xen_regs *)ec, p->u.data);
   11.57 @@ -301,7 +302,7 @@ static inline int find_highest_pending_i
   11.58  {
   11.59      vcpu_iodata_t *vio;
   11.60  
   11.61 -    vio = (vcpu_iodata_t *) d->thread.arch_vmx.vmx_platform.shared_page_va;
   11.62 +    vio = (vcpu_iodata_t *) d->arch.arch_vmx.vmx_platform.shared_page_va;
   11.63      if (vio == 0) {
   11.64          VMX_DBG_LOG(DBG_LEVEL_1, 
   11.65                      "bad shared page: %lx\n", (unsigned long) vio);
   11.66 @@ -315,7 +316,7 @@ static inline void clear_highest_bit(str
   11.67  {
   11.68      vcpu_iodata_t *vio;
   11.69  
   11.70 -    vio = (vcpu_iodata_t *) d->thread.arch_vmx.vmx_platform.shared_page_va;
   11.71 +    vio = (vcpu_iodata_t *) d->arch.arch_vmx.vmx_platform.shared_page_va;
   11.72      if (vio == 0) {
   11.73          VMX_DBG_LOG(DBG_LEVEL_1, 
   11.74                      "bad shared page: %lx\n", (unsigned long) vio);
   11.75 @@ -363,15 +364,15 @@ void vmx_intr_assist(struct exec_domain 
   11.76  
   11.77  void vmx_do_resume(struct exec_domain *d) 
   11.78  {
   11.79 -    __vmwrite(HOST_CR3, pagetable_val(d->mm.monitor_table));
   11.80 -    __vmwrite(GUEST_CR3, pagetable_val(d->mm.shadow_table));
   11.81 +    __vmwrite(HOST_CR3, pagetable_val(d->arch.monitor_table));
   11.82 +    __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
   11.83      __vmwrite(HOST_ESP, (unsigned long) get_stack_top());
   11.84  
   11.85      if (event_pending(d)) {
   11.86          if (test_bit(IOPACKET_PORT, &d->domain->shared_info->evtchn_pending[0])) 
   11.87              vmx_io_assist(d);
   11.88  
   11.89 -        else if (test_bit(ARCH_VMX_IO_WAIT, &d->thread.arch_vmx.flags)) {
   11.90 +        else if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)) {
   11.91              printk("got an event while blocked on I/O\n");
   11.92              do_block();
   11.93          }
   11.94 @@ -382,6 +383,6 @@ void vmx_do_resume(struct exec_domain *d
   11.95           * a response to ioreq_t is not ok.
   11.96           */
   11.97      }
   11.98 -    if (!test_bit(ARCH_VMX_IO_WAIT, &d->thread.arch_vmx.flags))
   11.99 +    if (!test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags))
  11.100          vmx_intr_assist(d);
  11.101  }
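
Note: vmx_io.c follows the same rename (ed->arch.arch_vmx) and preserves the
invariant that interrupts are injected only when the vcpu is not blocked on
an in-flight I/O request. A sketch of that resume-time check, with a plain
bit test standing in for Xen's test_bit() and a hypothetical helper name:

    #define ARCH_VMX_IO_WAIT_BIT 0

    struct exec_domain_sketch {
        struct {
            unsigned long flags;   /* was thread.arch_vmx.flags */
        } arch;
    };

    /* Mirror of the tail of vmx_do_resume(): only call the interrupt
     * assist when no ioreq is outstanding. */
    static int may_inject_interrupts(struct exec_domain_sketch *ed)
    {
        return !(ed->arch.flags & (1UL << ARCH_VMX_IO_WAIT_BIT));
    }
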
    12.1 --- a/xen/arch/x86/vmx_platform.c	Sat Feb 05 15:42:48 2005 +0000
    12.2 +++ b/xen/arch/x86/vmx_platform.c	Sat Feb 05 18:20:15 2005 +0000
    12.3 @@ -1,3 +1,4 @@
    12.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    12.5  /*
    12.6   * vmx_platform.c: handling x86 platform related MMIO instructions
    12.7   * Copyright (c) 2004, Intel Corporation.
    12.8 @@ -420,9 +421,9 @@ static void send_mmio_req(unsigned long 
    12.9      extern long evtchn_send(int lport);
   12.10      extern long do_block(void);
   12.11  
   12.12 -    mpci_p = &current->thread.arch_vmx.vmx_platform.mpci;
   12.13 +    mpci_p = &current->arch.arch_vmx.vmx_platform.mpci;
   12.14      inst_decoder_regs = mpci_p->inst_decoder_regs;
   12.15 -    vio = (vcpu_iodata_t *) d->thread.arch_vmx.vmx_platform.shared_page_va;
   12.16 +    vio = (vcpu_iodata_t *) d->arch.arch_vmx.vmx_platform.shared_page_va;
   12.17          
   12.18      if (vio == NULL) {
   12.19          printk("bad shared page\n");
   12.20 @@ -430,7 +431,7 @@ static void send_mmio_req(unsigned long 
   12.21      }
   12.22      p = &vio->vp_ioreq;
   12.23          
   12.24 -    set_bit(ARCH_VMX_IO_WAIT, &d->thread.arch_vmx.flags);
   12.25 +    set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags);
   12.26      p->dir = dir;
   12.27      p->pdata_valid = pvalid;
   12.28      p->count = 1;
   12.29 @@ -470,7 +471,7 @@ void handle_mmio(unsigned long va, unsig
   12.30      unsigned char inst[MAX_INST_LEN];
   12.31      int ret;
   12.32       
   12.33 -    mpci_p = &current->thread.arch_vmx.vmx_platform.mpci;
   12.34 +    mpci_p = &current->arch.arch_vmx.vmx_platform.mpci;
   12.35      inst_decoder_regs = mpci_p->inst_decoder_regs;
   12.36  
   12.37      __vmread(GUEST_EIP, &eip);
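
Note: vmx_platform.c reaches the per-vcpu MMIO decoder state and the shared
ioreq page through ed->arch.arch_vmx as well. The handshake in
send_mmio_req() is: mark the vcpu as waiting, fill in the request, then
notify the device model and block. A sketch of that ordering with simplified
stand-in types (field names abridged from the vp_ioreq usage above):

    struct ioreq_sketch {
        unsigned char dir;          /* read or write        */
        unsigned char pdata_valid;
        unsigned long count;
    };

    #define ARCH_VMX_IO_WAIT (1UL << 0)

    struct exec_domain_sketch {
        struct {
            unsigned long        flags;  /* ARCH_VMX_IO_WAIT lives here */
            struct ioreq_sketch *ioreq;  /* points into the shared page */
        } arch;
    };

    static void send_mmio_req_sketch(struct exec_domain_sketch *ed, int dir)
    {
        struct ioreq_sketch *p = ed->arch.ioreq;

        /* Block further interrupt injection until the device model replies. */
        ed->arch.flags |= ARCH_VMX_IO_WAIT;

        p->dir         = (unsigned char)dir;
        p->pdata_valid = 0;
        p->count       = 1;
        /* ...evtchn_send()/do_block() would follow in the real code... */
    }
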
    13.1 --- a/xen/arch/x86/vmx_vmcs.c	Sat Feb 05 15:42:48 2005 +0000
    13.2 +++ b/xen/arch/x86/vmx_vmcs.c	Sat Feb 05 18:20:15 2005 +0000
    13.3 @@ -1,3 +1,4 @@
    13.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    13.5  /*
    13.6   * vmx_vmcs.c: VMCS management
    13.7   * Copyright (c) 2004, Intel Corporation.
    13.8 @@ -137,7 +138,7 @@ int vmx_setup_platform(struct exec_domai
    13.9  
   13.10      mpfn = phys_to_machine_mapping[gpfn];
   13.11      p = map_domain_mem(mpfn << PAGE_SHIFT);
   13.12 -    d->thread.arch_vmx.vmx_platform.shared_page_va = (unsigned long) p;
   13.13 +    d->arch.arch_vmx.vmx_platform.shared_page_va = (unsigned long) p;
   13.14  
   13.15      return 0;
   13.16  }
   13.17 @@ -159,7 +160,7 @@ static int add_mapping_perdomain(struct 
   13.18      if (gpfn > ENTRIES_PER_L2_PAGETABLE * ENTRIES_PER_L1_PAGETABLE)
   13.19          return -1;
   13.20  
   13.21 -    if (!(l1_pgentry_val(d->domain->mm_perdomain_pt[
   13.22 +    if (!(l1_pgentry_val(d->domain->arch.mm_perdomain_pt[
   13.23              gpfn >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)]) & _PAGE_PRESENT))
   13.24      {
   13.25          page = (struct pfn_info *) alloc_domheap_page(NULL);
   13.26 @@ -168,7 +169,7 @@ static int add_mapping_perdomain(struct 
   13.27          }
   13.28  
   13.29          pfn = (unsigned long) (page - frame_table);
   13.30 -        d->domain->mm_perdomain_pt[gpfn >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)] = 
   13.31 +        d->domain->arch.mm_perdomain_pt[gpfn >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)] = 
   13.32              mk_l1_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   13.33      }
   13.34      phys_to_machine_mapping[gpfn] = mpfn;
   13.35 @@ -190,18 +191,18 @@ void vmx_do_launch(struct exec_domain *e
   13.36      struct domain *d = ed->domain;
   13.37  
   13.38      cpu =  smp_processor_id();
   13.39 -    ed->mm.min_pfn = ed->mm.max_pfn = 0;
   13.40 +    d->arch.min_pfn = d->arch.max_pfn = 0;
   13.41  
   13.42      spin_lock(&d->page_alloc_lock);
   13.43      list_ent = d->page_list.next;
   13.44  
   13.45 -    mpl2e = (l2_pgentry_t *)map_domain_mem(pagetable_val(ed->mm.monitor_table));
   13.46 +    mpl2e = (l2_pgentry_t *)map_domain_mem(pagetable_val(ed->arch.monitor_table));
   13.47  
   13.48      for ( i = 0; list_ent != &d->page_list; i++ )
   13.49      {
   13.50          pfn = list_entry(list_ent, struct pfn_info, list) - frame_table;
   13.51 -        ed->mm.min_pfn = min(ed->mm.min_pfn, pfn);
   13.52 -        ed->mm.max_pfn = max(ed->mm.max_pfn, pfn);
   13.53 +        d->arch.min_pfn = min(d->arch.min_pfn, pfn);
   13.54 +        d->arch.max_pfn = max(d->arch.max_pfn, pfn);
   13.55          list_ent = frame_table[pfn].list.next;
   13.56          add_mapping_perdomain(ed, i, pfn);
   13.57      }
   13.58 @@ -219,7 +220,7 @@ void vmx_do_launch(struct exec_domain *e
   13.59  
   13.60      guest_pl2e_cache = map_domain_mem(pfn << PAGE_SHIFT);
   13.61      memset(guest_pl2e_cache, 0, PAGE_SIZE); /* clean it up */
   13.62 -    ed->mm.guest_pl2e_cache = guest_pl2e_cache; 
   13.63 +    ed->arch.guest_pl2e_cache = guest_pl2e_cache; 
   13.64          
   13.65      unmap_domain_mem(mpl2e);
   13.66  
   13.67 @@ -245,12 +246,12 @@ void vmx_do_launch(struct exec_domain *e
   13.68      error |= __vmwrite(GUEST_TR_BASE, 0);
   13.69      error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
   13.70  
   13.71 -    ed->mm.shadow_table = ed->mm.pagetable;
   13.72 -    __vmwrite(GUEST_CR3, pagetable_val(ed->mm.pagetable));
   13.73 -    __vmwrite(HOST_CR3, pagetable_val(ed->mm.monitor_table));
   13.74 +    ed->arch.shadow_table = ed->arch.pagetable;
   13.75 +    __vmwrite(GUEST_CR3, pagetable_val(ed->arch.pagetable));
   13.76 +    __vmwrite(HOST_CR3, pagetable_val(ed->arch.monitor_table));
   13.77      __vmwrite(HOST_ESP, (unsigned long) get_stack_top());
   13.78  
   13.79 -    ed->thread.schedule_tail = arch_vmx_do_resume;
   13.80 +    ed->arch.schedule_tail = arch_vmx_do_resume;
   13.81  }
   13.82  
   13.83  /*
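
[Editor's note: every hunk in this changeset applies the same mechanical
split: per-VCPU state moves from ed->thread.* / ed->mm.* into ed->arch,
and genuinely per-domain state (shadow mode, the per-domain page table,
pfn ranges) into d->arch. A minimal sketch of the resulting layering,
with both arch structs cut down to one illustrative field each -- the
real definitions live in asm-x86/domain.h and xen/sched.h:

    /* Sketch only; not the real struct contents. */
    struct arch_exec_domain { unsigned long guestos_sp; /* ... */ };
    struct arch_domain      { unsigned long min_pfn;    /* ... */ };

    struct exec_domain {
        struct domain          *domain; /* back-pointer to container   */
        struct arch_exec_domain arch;   /* was 'thread' plus most 'mm' */
    };

    struct domain {
        struct arch_domain arch;        /* was loose mm_* fields       */
    };
]
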
    14.1 --- a/xen/arch/x86/x86_32/asm-offsets.c	Sat Feb 05 15:42:48 2005 +0000
    14.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c	Sat Feb 05 18:20:15 2005 +0000
    14.3 @@ -39,12 +39,12 @@ void __dummy__(void)
    14.4  
    14.5      OFFSET(EDOMAIN_processor, struct exec_domain, processor);
    14.6      OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info);
    14.7 -    OFFSET(EDOMAIN_event_sel, struct exec_domain, thread.event_selector);
    14.8 -    OFFSET(EDOMAIN_event_addr, struct exec_domain, thread.event_address);
    14.9 -    OFFSET(EDOMAIN_failsafe_sel, struct exec_domain, thread.failsafe_selector);
   14.10 -    OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, thread.failsafe_address);
   14.11 -    OFFSET(EDOMAIN_trap_bounce, struct exec_domain, thread.trap_bounce);
   14.12 -    OFFSET(EDOMAIN_thread_flags, struct exec_domain, thread.flags);
   14.13 +    OFFSET(EDOMAIN_event_sel, struct exec_domain, arch.event_selector);
   14.14 +    OFFSET(EDOMAIN_event_addr, struct exec_domain, arch.event_address);
   14.15 +    OFFSET(EDOMAIN_failsafe_sel, struct exec_domain, arch.failsafe_selector);
   14.16 +    OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, arch.failsafe_address);
   14.17 +    OFFSET(EDOMAIN_trap_bounce, struct exec_domain, arch.trap_bounce);
   14.18 +    OFFSET(EDOMAIN_thread_flags, struct exec_domain, arch.flags);
   14.19      BLANK();
   14.20  
   14.21      OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
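
[Editor's note: asm-offsets.c never runs; it is compiled to assembly so
that each OFFSET() invocation leaves a marker line holding a
compile-time constant, which the build then scrapes into a header for
the assembly entry code. A freestanding sketch of the idiom -- the
struct and symbol names here are invented for illustration:

    #include <stddef.h>

    struct ed_sketch {
        int           processor;
        unsigned long arch_flags;
    };

    /* Emits "->SYM <value>" into the generated .s file; a later sed
     * pass turns each such line into "#define SYM <value>". */
    #define DEFINE(sym, val) \
        __asm__ __volatile__ ( "\n->" #sym " %0" : : "i" (val) )
    #define OFFSET(sym, str, mem) DEFINE(sym, offsetof(str, mem))

    void __dummy__(void)
    {
        OFFSET(ED_processor, struct ed_sketch, processor);
        OFFSET(ED_arch_flags, struct ed_sketch, arch_flags);
    }
]
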
    15.1 --- a/xen/arch/x86/x86_32/domain_build.c	Sat Feb 05 15:42:48 2005 +0000
    15.2 +++ b/xen/arch/x86/x86_32/domain_build.c	Sat Feb 05 18:20:15 2005 +0000
    15.3 @@ -1,3 +1,4 @@
    15.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    15.5  /******************************************************************************
    15.6   * domain_build.c
    15.7   * 
    15.8 @@ -216,11 +217,11 @@ int construct_dom0(struct domain *d,
    15.9       * We're basically forcing default RPLs to 1, so that our "what privilege
   15.10       * level are we returning to?" logic works.
   15.11       */
   15.12 -    ed->thread.failsafe_selector = FLAT_GUESTOS_CS;
   15.13 -    ed->thread.event_selector    = FLAT_GUESTOS_CS;
   15.14 -    ed->thread.guestos_ss = FLAT_GUESTOS_SS;
   15.15 +    ed->arch.failsafe_selector = FLAT_GUESTOS_CS;
   15.16 +    ed->arch.event_selector    = FLAT_GUESTOS_CS;
   15.17 +    ed->arch.guestos_ss = FLAT_GUESTOS_SS;
   15.18      for ( i = 0; i < 256; i++ ) 
   15.19 -        ed->thread.traps[i].cs = FLAT_GUESTOS_CS;
   15.20 +        ed->arch.traps[i].cs = FLAT_GUESTOS_CS;
   15.21  
   15.22      /* WARNING: The new domain must have its 'processor' field filled in! */
   15.23      l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
   15.24 @@ -228,8 +229,8 @@ int construct_dom0(struct domain *d,
   15.25      l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
   15.26          mk_l2_pgentry((unsigned long)l2start | __PAGE_HYPERVISOR);
   15.27      l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
   15.28 -        mk_l2_pgentry(__pa(d->mm_perdomain_pt) | __PAGE_HYPERVISOR);
   15.29 -    ed->mm.pagetable = mk_pagetable((unsigned long)l2start);
   15.30 +        mk_l2_pgentry(__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR);
   15.31 +    ed->arch.pagetable = mk_pagetable((unsigned long)l2start);
   15.32  
   15.33      l2tab += l2_table_offset(dsi.v_start);
   15.34      mfn = alloc_start >> PAGE_SHIFT;
   15.35 @@ -307,7 +308,7 @@ int construct_dom0(struct domain *d,
   15.36  
   15.37      /* Install the new page tables. */
   15.38      __cli();
   15.39 -    write_ptbase(&ed->mm);
   15.40 +    write_ptbase(ed);
   15.41  
   15.42      /* Copy the OS image. */
   15.43      (void)loadelfimage(image_start);
   15.44 @@ -360,7 +361,7 @@ int construct_dom0(struct domain *d,
   15.45      *dst = '\0';
   15.46  
   15.47      /* Reinstate the caller's page tables. */
   15.48 -    write_ptbase(&current->mm);
   15.49 +    write_ptbase(current);
   15.50      __sti();
   15.51  
   15.52      /* Destroy low mappings - they were only for our convenience. */
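
[Editor's note: write_ptbase() changes signature from struct mm_struct *
to struct exec_domain *; its old inline body is visible in the
processor.h hunk below, and the new out-of-line version presumably picks
the CR3 value from the relocated fields in the same way. A hedged sketch
under the new layout:

    /* Sketch; mirrors the removed inline from processor.h. */
    void write_ptbase(struct exec_domain *ed)
    {
        struct domain *d = ed->domain;
        unsigned long pa;

        if ( unlikely(d->arch.shadow_mode) )
            pa = (d->arch.shadow_mode == SHM_full_32) ?
                pagetable_val(ed->arch.monitor_table) :
                pagetable_val(ed->arch.shadow_table);
        else
            pa = pagetable_val(ed->arch.pagetable);

        write_cr3(pa);
    }
]
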
    16.1 --- a/xen/arch/x86/x86_32/mm.c	Sat Feb 05 15:42:48 2005 +0000
    16.2 +++ b/xen/arch/x86/x86_32/mm.c	Sat Feb 05 18:20:15 2005 +0000
    16.3 @@ -1,3 +1,4 @@
    16.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    16.5  /******************************************************************************
    16.6   * arch/x86/x86_32/mm.c
    16.7   * 
    16.8 @@ -184,7 +185,7 @@ static void __synchronise_pagetables(voi
    16.9      struct exec_domain *ed = current;
   16.10      if ( ((unsigned long)mask & (1 << ed->processor)) &&
   16.11           is_idle_task(ed->domain) )
   16.12 -        write_ptbase(&ed->mm);
   16.13 +        write_ptbase(ed);
   16.14  }
   16.15  void synchronise_pagetables(unsigned long cpu_mask)
   16.16  {
   16.17 @@ -201,8 +202,8 @@ long do_stack_switch(unsigned long ss, u
   16.18      if ( (ss & 3) == 0 )
   16.19          return -EPERM;
   16.20  
   16.21 -    current->thread.guestos_ss = ss;
   16.22 -    current->thread.guestos_sp = esp;
   16.23 +    current->arch.guestos_ss = ss;
   16.24 +    current->arch.guestos_sp = esp;
   16.25      t->ss1  = ss;
   16.26      t->esp1 = esp;
   16.27  
   16.28 @@ -316,9 +317,9 @@ void destroy_gdt(struct exec_domain *ed)
   16.29  
   16.30      for ( i = 0; i < 16; i++ )
   16.31      {
   16.32 -        if ( (pfn = l1_pgentry_to_pagenr(ed->mm.perdomain_ptes[i])) != 0 )
   16.33 +        if ( (pfn = l1_pgentry_to_pagenr(ed->arch.perdomain_ptes[i])) != 0 )
   16.34              put_page_and_type(&frame_table[pfn]);
   16.35 -        ed->mm.perdomain_ptes[i] = mk_l1_pgentry(0);
   16.36 +        ed->arch.perdomain_ptes[i] = mk_l1_pgentry(0);
   16.37      }
   16.38  }
   16.39  
   16.40 @@ -372,7 +373,7 @@ long set_gdt(struct exec_domain *ed,
   16.41  
   16.42      /* Install the new GDT. */
   16.43      for ( i = 0; i < nr_pages; i++ )
   16.44 -        ed->mm.perdomain_ptes[i] =
   16.45 +        ed->arch.perdomain_ptes[i] =
   16.46              mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   16.47  
   16.48      SET_GDT_ADDRESS(ed, GDT_VIRT_START(ed));
   16.49 @@ -404,7 +405,7 @@ long do_set_gdt(unsigned long *frame_lis
   16.50      if ( (ret = set_gdt(current, frames, entries)) == 0 )
   16.51      {
   16.52          local_flush_tlb();
   16.53 -        __asm__ __volatile__ ("lgdt %0" : "=m" (*current->mm.gdt));
   16.54 +        __asm__ __volatile__ ("lgdt %0" : "=m" (*current->arch.gdt));
   16.55      }
   16.56  
   16.57      UNLOCK_BIGLOCK(current->domain);
   16.58 @@ -443,7 +444,7 @@ long do_update_descriptor(
   16.59      case PGT_gdt_page:
   16.60          /* Disallow updates of Xen-reserved descriptors in the current GDT. */
   16.61          for_each_exec_domain(current->domain, ed) {
   16.62 -            if ( (l1_pgentry_to_pagenr(ed->mm.perdomain_ptes[0]) == pfn) &&
   16.63 +            if ( (l1_pgentry_to_pagenr(ed->arch.perdomain_ptes[0]) == pfn) &&
   16.64                   (((pa&(PAGE_SIZE-1))>>3) >= FIRST_RESERVED_GDT_ENTRY) &&
   16.65                   (((pa&(PAGE_SIZE-1))>>3) <= LAST_RESERVED_GDT_ENTRY) )
   16.66                  goto out;
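
[Editor's note: the guest GDT is never referenced by machine address in
this code; its frames are threaded through the first 16 slots of the
exec_domain's per-domain L1 table, so GDT_VIRT_START(ed) stays fixed
while the backing frames change. A cut-down sketch of the install step;
install_gdt_frames is a made-up helper name, and the loop clearing the
remaining slots is an assumption, not part of the hunk above:

    /* Sketch: point reserved per-domain PTEs at validated GDT frames. */
    static void install_gdt_frames(struct exec_domain *ed,
                                   unsigned long *frames, int nr_pages)
    {
        int i;
        for ( i = 0; i < nr_pages; i++ )
            ed->arch.perdomain_ptes[i] =
                mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR);
        for ( ; i < 16; i++ )
            ed->arch.perdomain_ptes[i] = mk_l1_pgentry(0); /* stale slots */
    }
]
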
    17.1 --- a/xen/arch/x86/x86_32/seg_fixup.c	Sat Feb 05 15:42:48 2005 +0000
    17.2 +++ b/xen/arch/x86/x86_32/seg_fixup.c	Sat Feb 05 18:20:15 2005 +0000
    17.3 @@ -1,3 +1,4 @@
    17.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    17.5  /******************************************************************************
    17.6   * arch/x86/x86_32/seg_fixup.c
    17.7   * 
    17.8 @@ -114,7 +115,7 @@ int get_baselimit(u16 seg, unsigned long
    17.9      if ( ldt )
   17.10      {
   17.11          table = (unsigned long *)LDT_VIRT_START(d);
   17.12 -        if ( idx >= d->mm.ldt_ents )
   17.13 +        if ( idx >= d->arch.ldt_ents )
   17.14              goto fail;
   17.15      }
   17.16      else /* gdt */
   17.17 @@ -180,10 +181,10 @@ int fixup_seg(u16 seg, unsigned long off
   17.18      if ( ldt )
   17.19      {
   17.20          table = (unsigned long *)LDT_VIRT_START(d);
   17.21 -        if ( idx >= d->mm.ldt_ents )
   17.22 +        if ( idx >= d->arch.ldt_ents )
   17.23          {
   17.24              DPRINTK("Segment %04x out of LDT range (%ld)\n",
   17.25 -                    seg, d->mm.ldt_ents);
   17.26 +                    seg, d->arch.ldt_ents);
   17.27              goto fail;
   17.28          }
   17.29      }
   17.30 @@ -466,8 +467,8 @@ int gpf_emulate_4gb(struct xen_regs *reg
   17.31      /* If requested, give a callback on otherwise unused vector 15. */
   17.32      if ( VM_ASSIST(d->domain, VMASST_TYPE_4gb_segments_notify) )
   17.33      {
   17.34 -        ti  = &d->thread.traps[15];
   17.35 -        tb = &d->thread.trap_bounce;
   17.36 +        ti  = &d->arch.traps[15];
   17.37 +        tb  = &d->arch.trap_bounce;
   17.38          tb->flags      = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
   17.39          tb->error_code = pb - eip;
   17.40          tb->cs         = ti->cs;
    18.1 --- a/xen/arch/x86/x86_32/traps.c	Sat Feb 05 15:42:48 2005 +0000
    18.2 +++ b/xen/arch/x86/x86_32/traps.c	Sat Feb 05 18:20:15 2005 +0000
    18.3 @@ -1,3 +1,4 @@
    18.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    18.5  
    18.6  #include <xen/config.h>
    18.7  #include <xen/init.h>
    18.8 @@ -208,8 +209,8 @@ long set_fast_trap(struct exec_domain *p
    18.9      if ( idx == 0 )
   18.10      {
   18.11          if ( p == current )
   18.12 -            CLEAR_FAST_TRAP(&p->thread);
   18.13 -        SET_DEFAULT_FAST_TRAP(&p->thread);
   18.14 +            CLEAR_FAST_TRAP(&p->arch);
   18.15 +        SET_DEFAULT_FAST_TRAP(&p->arch);
   18.16          return 0;
   18.17      }
   18.18  
   18.19 @@ -221,7 +222,7 @@ long set_fast_trap(struct exec_domain *p
   18.20      if ( (idx != 0x80) && ((idx < 0x20) || (idx > 0x2f)) ) 
   18.21          return -1;
   18.22  
   18.23 -    ti = p->thread.traps + idx;
   18.24 +    ti = p->arch.traps + idx;
   18.25  
   18.26      /*
   18.27       * We can't virtualise interrupt gates, as there's no way to get
   18.28 @@ -231,15 +232,15 @@ long set_fast_trap(struct exec_domain *p
   18.29          return -1;
   18.30  
   18.31      if ( p == current )
   18.32 -        CLEAR_FAST_TRAP(&p->thread);
   18.33 +        CLEAR_FAST_TRAP(&p->arch);
   18.34  
   18.35 -    p->thread.fast_trap_idx    = idx;
   18.36 -    p->thread.fast_trap_desc.a = (ti->cs << 16) | (ti->address & 0xffff);
   18.37 -    p->thread.fast_trap_desc.b = 
   18.38 +    p->arch.fast_trap_idx    = idx;
   18.39 +    p->arch.fast_trap_desc.a = (ti->cs << 16) | (ti->address & 0xffff);
   18.40 +    p->arch.fast_trap_desc.b = 
   18.41          (ti->address & 0xffff0000) | 0x8f00 | (TI_GET_DPL(ti)&3)<<13;
   18.42  
   18.43      if ( p == current )
   18.44 -        SET_FAST_TRAP(&p->thread);
   18.45 +        SET_FAST_TRAP(&p->arch);
   18.46  
   18.47      return 0;
   18.48  }
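
[Editor's note: the a/b words assembled in set_fast_trap() are a raw
i386 trap-gate descriptor: selector and offset[15:0] in 'a';
offset[31:16], the present bit and gate type 0xF, plus the DPL, in 'b'.
The same packing as a standalone sketch:

    #include <stdint.h>

    /* Sketch: pack selector/handler/DPL into an i386 trap gate. */
    static void pack_trap_gate(uint32_t *a, uint32_t *b,
                               uint16_t cs, uint32_t addr, unsigned dpl)
    {
        *a = ((uint32_t)cs << 16) | (addr & 0xffff);
        *b = (addr & 0xffff0000)    /* offset[31:16]              */
           | 0x8f00                 /* P=1, 32-bit trap gate      */
           | ((dpl & 3) << 13);     /* descriptor privilege level */
    }
]
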
    19.1 --- a/xen/arch/x86/x86_64/asm-offsets.c	Sat Feb 05 15:42:48 2005 +0000
    19.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c	Sat Feb 05 18:20:15 2005 +0000
    19.3 @@ -41,12 +41,12 @@ void __dummy__(void)
    19.4  
    19.5      OFFSET(EDOMAIN_processor, struct exec_domain, processor);
    19.6      OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info);
    19.7 -    OFFSET(EDOMAIN_event_sel, struct exec_domain, thread.event_selector);
    19.8 -    OFFSET(EDOMAIN_event_addr, struct exec_domain, thread.event_address);
    19.9 -    OFFSET(EDOMAIN_failsafe_sel, struct exec_domain, thread.failsafe_selector);
   19.10 -    OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, thread.failsafe_address);
   19.11 -    OFFSET(EDOMAIN_trap_bounce, struct exec_domain, thread.trap_bounce);
   19.12 -    OFFSET(EDOMAIN_thread_flags, struct exec_domain, thread.flags);
   19.13 +    OFFSET(EDOMAIN_event_sel, struct exec_domain, arch.event_selector);
   19.14 +    OFFSET(EDOMAIN_event_addr, struct exec_domain, arch.event_address);
   19.15 +    OFFSET(EDOMAIN_failsafe_sel, struct exec_domain, arch.failsafe_selector);
   19.16 +    OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, arch.failsafe_address);
   19.17 +    OFFSET(EDOMAIN_trap_bounce, struct exec_domain, arch.trap_bounce);
   19.18 +    OFFSET(EDOMAIN_thread_flags, struct exec_domain, arch.flags);
   19.19      BLANK();
   19.20  
   19.21      OFFSET(SHINFO_upcall_pending, shared_info_t, 
    20.1 --- a/xen/arch/x86/x86_64/domain_build.c	Sat Feb 05 15:42:48 2005 +0000
    20.2 +++ b/xen/arch/x86/x86_64/domain_build.c	Sat Feb 05 18:20:15 2005 +0000
    20.3 @@ -1,3 +1,4 @@
    20.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    20.5  /******************************************************************************
    20.6   * domain_build.c
    20.7   * 
    20.8 @@ -224,11 +225,11 @@ int construct_dom0(struct domain *d,
    20.9       * We're basically forcing default RPLs to 1, so that our "what privilege
   20.10       * level are we returning to?" logic works.
   20.11       */
   20.12 -    ed->thread.failsafe_selector = FLAT_GUESTOS_CS;
   20.13 -    ed->thread.event_selector    = FLAT_GUESTOS_CS;
   20.14 -    ed->thread.guestos_ss = FLAT_GUESTOS_SS;
   20.15 +    ed->arch.failsafe_selector = FLAT_GUESTOS_CS;
   20.16 +    ed->arch.event_selector    = FLAT_GUESTOS_CS;
   20.17 +    ed->arch.guestos_ss = FLAT_GUESTOS_SS;
   20.18      for ( i = 0; i < 256; i++ ) 
   20.19 -        ed->thread.traps[i].cs = FLAT_GUESTOS_CS;
   20.20 +        ed->arch.traps[i].cs = FLAT_GUESTOS_CS;
   20.21  
   20.22      /* WARNING: The new domain must have its 'processor' field filled in! */
   20.23      phys_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
   20.24 @@ -237,8 +238,8 @@ int construct_dom0(struct domain *d,
   20.25      l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
   20.26          mk_l4_pgentry(__pa(l4start) | __PAGE_HYPERVISOR);
   20.27      l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
   20.28 -        mk_l4_pgentry(__pa(d->mm_perdomain_pt) | __PAGE_HYPERVISOR);
   20.29 -    ed->mm.pagetable = mk_pagetable(__pa(l4start));
   20.30 +        mk_l4_pgentry(__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR);
   20.31 +    ed->arch.pagetable = mk_pagetable(__pa(l4start));
   20.32  
   20.33      l4tab += l4_table_offset(dsi.v_start);
   20.34      mfn = alloc_start >> PAGE_SHIFT;
   20.35 @@ -329,7 +330,7 @@ int construct_dom0(struct domain *d,
   20.36  
   20.37      /* Install the new page tables. */
   20.38      __cli();
   20.39 -    write_ptbase(&ed->mm);
   20.40 +    write_ptbase(ed);
   20.41  
   20.42      /* Copy the OS image. */
   20.43      (void)loadelfimage(image_start);
   20.44 @@ -382,7 +383,7 @@ int construct_dom0(struct domain *d,
   20.45      *dst = '\0';
   20.46  
   20.47      /* Reinstate the caller's page tables. */
   20.48 -    write_ptbase(&current->mm);
   20.49 +    write_ptbase(current);
   20.50      __sti();
   20.51  
   20.52      /* DOM0 gets access to everything. */
    21.1 --- a/xen/arch/x86/x86_64/mm.c	Sat Feb 05 15:42:48 2005 +0000
    21.2 +++ b/xen/arch/x86/x86_64/mm.c	Sat Feb 05 18:20:15 2005 +0000
    21.3 @@ -1,3 +1,4 @@
    21.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    21.5  /******************************************************************************
    21.6   * arch/x86/x86_64/mm.c
    21.7   * 
    21.8 @@ -220,7 +221,7 @@ static void __synchronise_pagetables(voi
    21.9      struct exec_domain *ed = current;
   21.10      if ( ((unsigned long)mask & (1 << ed->processor)) &&
   21.11           is_idle_task(ed->domain) )
   21.12 -        write_ptbase(&ed->mm);
   21.13 +        write_ptbase(ed);
   21.14  }
   21.15  void synchronise_pagetables(unsigned long cpu_mask)
   21.16  {
   21.17 @@ -232,8 +233,8 @@ long do_stack_switch(unsigned long ss, u
   21.18  {
   21.19      if ( (ss & 3) != 3 )
   21.20          return -EPERM;
   21.21 -    current->thread.guestos_ss = ss;
   21.22 -    current->thread.guestos_sp = esp;
   21.23 +    current->arch.guestos_ss = ss;
   21.24 +    current->arch.guestos_sp = esp;
   21.25      return 0;
   21.26  }
   21.27  
   21.28 @@ -346,9 +347,9 @@ void destroy_gdt(struct exec_domain *ed)
   21.29  
   21.30      for ( i = 0; i < 16; i++ )
   21.31      {
   21.32 -        if ( (pfn = l1_pgentry_to_pagenr(ed->mm.perdomain_ptes[i])) != 0 )
   21.33 +        if ( (pfn = l1_pgentry_to_pagenr(ed->arch.perdomain_ptes[i])) != 0 )
   21.34              put_page_and_type(&frame_table[pfn]);
   21.35 -        ed->mm.perdomain_ptes[i] = mk_l1_pgentry(0);
   21.36 +        ed->arch.perdomain_ptes[i] = mk_l1_pgentry(0);
   21.37      }
   21.38  }
   21.39  
   21.40 @@ -402,7 +403,7 @@ long set_gdt(struct exec_domain *ed,
   21.41  
   21.42      /* Install the new GDT. */
   21.43      for ( i = 0; i < nr_pages; i++ )
   21.44 -        ed->mm.perdomain_ptes[i] =
   21.45 +        ed->arch.perdomain_ptes[i] =
   21.46              mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   21.47  
   21.48      SET_GDT_ADDRESS(ed, GDT_VIRT_START(ed));
   21.49 @@ -432,7 +433,7 @@ long do_set_gdt(unsigned long *frame_lis
   21.50      if ( (ret = set_gdt(current, frames, entries)) == 0 )
   21.51      {
   21.52          local_flush_tlb();
   21.53 -        __asm__ __volatile__ ("lgdt %0" : "=m" (*current->mm.gdt));
   21.54 +        __asm__ __volatile__ ("lgdt %0" : "=m" (*current->arch.gdt));
   21.55      }
   21.56  
   21.57      return ret;
   21.58 @@ -461,7 +462,7 @@ long do_update_descriptor(
   21.59      {
   21.60      case PGT_gdt_page:
   21.61          /* Disallow updates of Xen-reserved descriptors in the current GDT. */
   21.62 -        if ( (l1_pgentry_to_pagenr(current->mm.perdomain_ptes[0]) == pfn) &&
   21.63 +        if ( (l1_pgentry_to_pagenr(current->arch.perdomain_ptes[0]) == pfn) &&
   21.64               (((pa&(PAGE_SIZE-1))>>3) >= FIRST_RESERVED_GDT_ENTRY) &&
   21.65               (((pa&(PAGE_SIZE-1))>>3) <= LAST_RESERVED_GDT_ENTRY) )
   21.66              goto out;
    22.1 --- a/xen/common/domain.c	Sat Feb 05 15:42:48 2005 +0000
    22.2 +++ b/xen/common/domain.c	Sat Feb 05 18:20:15 2005 +0000
    22.3 @@ -39,13 +39,13 @@ struct domain *do_createdomain(domid_t d
    22.4      atomic_set(&d->refcnt, 1);
    22.5      atomic_set(&ed->pausecnt, 0);
    22.6  
    22.7 -    shadow_lock_init(ed);
    22.8 +    shadow_lock_init(d);
    22.9  
   22.10      d->id          = dom_id;
   22.11 -    ed->processor   = cpu;
   22.12 +    ed->processor  = cpu;
   22.13      d->create_time = NOW();
   22.14   
   22.15 -    memcpy(&ed->thread, &idle0_exec_domain.thread, sizeof(ed->thread));
   22.16 +    memcpy(&ed->arch, &idle0_exec_domain.arch, sizeof(ed->arch));
   22.17  
   22.18      spin_lock_init(&d->time_lock);
   22.19  
   22.20 @@ -327,9 +327,9 @@ long do_boot_vcpu(unsigned long vcpu, fu
   22.21      ed = d->exec_domain[vcpu];
   22.22  
   22.23      atomic_set(&ed->pausecnt, 0);
   22.24 -    shadow_lock_init(ed);
   22.25 +    shadow_lock_init(d);
   22.26  
   22.27 -    memcpy(&ed->thread, &idle0_exec_domain.thread, sizeof(ed->thread));
   22.28 +    memcpy(&ed->arch, &idle0_exec_domain.arch, sizeof(ed->arch));
   22.29  
   22.30      arch_do_boot_vcpu(ed);
   22.31  
    23.1 --- a/xen/common/physdev.c	Sat Feb 05 15:42:48 2005 +0000
    23.2 +++ b/xen/common/physdev.c	Sat Feb 05 18:20:15 2005 +0000
    23.3 @@ -172,21 +172,21 @@ int physdev_pci_access_modify(
    23.4  
    23.5      /* Now, setup access to the IO ports and memory regions for the device. */
    23.6  
    23.7 -    if ( ed->thread.io_bitmap == NULL )
    23.8 +    if ( ed->arch.io_bitmap == NULL )
    23.9      {
   23.10 -        if ( (ed->thread.io_bitmap = xmalloc_array(u8, IOBMP_BYTES)) == NULL )
   23.11 +        if ( (ed->arch.io_bitmap = xmalloc_array(u8, IOBMP_BYTES)) == NULL )
   23.12          {
   23.13              rc = -ENOMEM;
   23.14              goto out;
   23.15          }
   23.16 -        memset(ed->thread.io_bitmap, 0xFF, IOBMP_BYTES);
   23.17 +        memset(ed->arch.io_bitmap, 0xFF, IOBMP_BYTES);
   23.18  
   23.19 -        ed->thread.io_bitmap_sel = ~0ULL;
   23.20 +        ed->arch.io_bitmap_sel = ~0ULL;
   23.21  
   23.22          for_each_exec_domain(p, edc) {
   23.23              if (edc == ed)
   23.24                  continue;
   23.25 -            edc->thread.io_bitmap = ed->thread.io_bitmap;
   23.26 +            edc->arch.io_bitmap = ed->arch.io_bitmap;
   23.27          }
   23.28      }
   23.29  
   23.30 @@ -204,8 +204,8 @@ int physdev_pci_access_modify(
   23.31                   "for device %s\n", dom, r->start, r->end, pdev->slot_name);
   23.32              for ( j = r->start; j < r->end + 1; j++ )
   23.33              {
   23.34 -                clear_bit(j, ed->thread.io_bitmap);
   23.35 -                clear_bit(j / IOBMP_BITS_PER_SELBIT, &ed->thread.io_bitmap_sel);
   23.36 +                clear_bit(j, ed->arch.io_bitmap);
   23.37 +                clear_bit(j / IOBMP_BITS_PER_SELBIT, &ed->arch.io_bitmap_sel);
   23.38              }
   23.39          }
   23.40  
   23.41 @@ -215,7 +215,7 @@ int physdev_pci_access_modify(
   23.42      for_each_exec_domain(p, edc) {
   23.43          if (edc == ed)
   23.44              continue;
   23.45 -        edc->thread.io_bitmap_sel = ed->thread.io_bitmap_sel;
   23.46 +        edc->arch.io_bitmap_sel = ed->arch.io_bitmap_sel;
   23.47      }
   23.48  
   23.49   out:
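
[Editor's note: io_bitmap_sel is a 64-bit summary of the much larger IO
bitmap: one bit per IOBMP_BITS_PER_SELBIT-sized chunk, left set while
the chunk is still all-ones (nothing granted) and cleared once any port
inside it is opened, so uninteresting chunks can be skipped on context
switch. A sketch of the two-level update performed above, with the
bitmap size assumed rather than taken from the real headers:

    #include <stdint.h>

    #define IOBMP_BITS_SKETCH      65536               /* assumed size */
    #define BITS_PER_SELBIT_SKETCH (IOBMP_BITS_SKETCH / 64)

    /* Sketch: grant one port -- clear its bitmap bit, then mark the
     * covering chunk "interesting" in the selector word. */
    static void grant_io_port(uint8_t *io_bitmap, uint64_t *sel,
                              unsigned int port)
    {
        io_bitmap[port / 8] &= (uint8_t)~(1u << (port % 8));
        *sel &= ~(1ULL << (port / BITS_PER_SELBIT_SKETCH));
    }
]
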
    24.1 --- a/xen/include/asm-x86/domain.h	Sat Feb 05 15:42:48 2005 +0000
    24.2 +++ b/xen/include/asm-x86/domain.h	Sat Feb 05 18:20:15 2005 +0000
    24.3 @@ -1,11 +1,119 @@
    24.4 -
    24.5 -#ifndef __X86_DOMAIN_H__
    24.6 -#define __X86_DOMAIN_H__
    24.7 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    24.8  
    24.9 -typedef struct {
   24.10 -} arch_domain_t;
   24.11 +#ifndef __ASM_DOMAIN_H__
   24.12 +#define __ASM_DOMAIN_H__
   24.13  
   24.14 -typedef struct {
   24.15 -} arch_exec_domain_t;
   24.16 +struct trap_bounce {
   24.17 +    unsigned long  error_code;
   24.18 +    unsigned long  cr2;
   24.19 +    unsigned short flags; /* TBF_ */
   24.20 +    unsigned short cs;
   24.21 +    unsigned long  eip;
   24.22 +};
   24.23  
   24.24 -#endif /* __X86_DOMAIN_H__ */
   24.25 +struct arch_domain
   24.26 +{
   24.27 +    l1_pgentry_t *mm_perdomain_pt;
   24.28 +
   24.29 +    /* shadow mode status and controls */
   24.30 +    unsigned int shadow_mode;  /* flags to control shadow table operation */
   24.31 +    spinlock_t   shadow_lock;
   24.32 +    unsigned long min_pfn;     /* min host physical */
   24.33 +    unsigned long max_pfn;     /* max host physical */
   24.34 +
   24.35 +    /* shadow hashtable */
   24.36 +    struct shadow_status *shadow_ht;
   24.37 +    struct shadow_status *shadow_ht_free;
   24.38 +    struct shadow_status *shadow_ht_extras; /* extra allocation units */
   24.39 +    unsigned int shadow_extras_count;
   24.40 +
   24.41 +    /* shadow dirty bitmap */
   24.42 +    unsigned long *shadow_dirty_bitmap;
   24.43 +    unsigned int shadow_dirty_bitmap_size;  /* in pages, bit per page */
   24.44 +
   24.45 +    /* shadow mode stats */
   24.46 +    unsigned int shadow_page_count;     
   24.47 +    unsigned int shadow_fault_count;     
   24.48 +    unsigned int shadow_dirty_count;     
   24.49 +    unsigned int shadow_dirty_net_count;     
   24.50 +    unsigned int shadow_dirty_block_count;     
   24.51 +} __cacheline_aligned;
   24.52 +
   24.53 +struct arch_exec_domain
   24.54 +{
   24.55 +    unsigned long      guestos_sp;
   24.56 +    unsigned long      guestos_ss;
   24.57 +
   24.58 +    unsigned long      flags; /* TF_ */
   24.59 +
   24.60 +    /* Hardware debugging registers */
   24.61 +    unsigned long      debugreg[8];  /* %%db0-7 debug registers */
   24.62 +
   24.63 +    /* floating point info */
   24.64 +    struct i387_state  i387;
   24.65 +
   24.66 +    /* general user-visible register state */
   24.67 +    execution_context_t user_ctxt;
   24.68 +
   24.69 +    void (*schedule_tail) (struct exec_domain *);
   24.70 +
   24.71 +    /*
   24.72 +     * Return vectors pushed to us by guest OS.
   24.73 +     * The stack frame for events is exactly that of an x86 hardware interrupt.
   24.74 +     * The stack frame for a failsafe callback is augmented with saved values
   24.75 +     * for segment registers %ds, %es, %fs and %gs:
    24.76 +     *  %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
   24.77 +     */
   24.78 +    unsigned long event_selector;    /* entry CS  */
   24.79 +    unsigned long event_address;     /* entry EIP */
   24.80 +
   24.81 +    unsigned long failsafe_selector; /* entry CS  */
   24.82 +    unsigned long failsafe_address;  /* entry EIP */
   24.83 +
   24.84 +    /* Bounce information for propagating an exception to guest OS. */
   24.85 +    struct trap_bounce trap_bounce;
   24.86 +
   24.87 +    /* I/O-port access bitmap. */
    24.88 +    u64 io_bitmap_sel; /* Selector to tell us which parts of the IO
    24.89 +                        * bitmap are "interesting" (i.e. have clear bits) */
   24.90 +    u8 *io_bitmap; /* Pointer to task's IO bitmap or NULL */
   24.91 +
   24.92 +    /* Trap info. */
   24.93 +#ifdef ARCH_HAS_FAST_TRAP
   24.94 +    int                fast_trap_idx;
   24.95 +    struct desc_struct fast_trap_desc;
   24.96 +#endif
   24.97 +    trap_info_t        traps[256];
   24.98 +#ifdef CONFIG_VMX
   24.99 +    struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
  24.100 +#endif
  24.101 +
  24.102 +    /*
   24.103 +     * Every domain has an L1 pagetable of its own. Per-domain mappings
   24.104 +     * are put in this table (e.g. the current GDT is mapped here).
  24.105 +     */
  24.106 +    l1_pgentry_t *perdomain_ptes;
  24.107 +    pagetable_t  pagetable;
  24.108 +
  24.109 +    pagetable_t  monitor_table;
  24.110 +    pagetable_t  shadow_table;
   24.111 +    l2_pgentry_t *vpagetable;        /* virtual address of pagetable */
   24.112 +    l2_pgentry_t *shadow_vtable;     /* virtual address of shadow_table */
   24.113 +    l2_pgentry_t *guest_pl2e_cache;  /* guest page directory cache */
  24.114 +
  24.115 +    /* Virtual CR2 value. Can be read/written by guest. */
  24.116 +    unsigned long guest_cr2;
  24.117 +
  24.118 +    /* Current LDT details. */
  24.119 +    unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt;
  24.120 +    /* Next entry is passed to LGDT on domain switch. */
  24.121 +    char gdt[10]; /* NB. 10 bytes needed for x86_64. Use 6 bytes for x86_32. */
  24.122 +} __cacheline_aligned;
  24.123 +
  24.124 +#define IDLE0_ARCH_EXEC_DOMAIN                                      \
  24.125 +{                                                                   \
  24.126 +    perdomain_ptes: 0,                                              \
  24.127 +    pagetable:      mk_pagetable(__pa(idle_pg_table))               \
  24.128 +}
  24.129 +
  24.130 +#endif /* __ASM_DOMAIN_H__ */
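
[Editor's note: struct trap_bounce, moved here from processor.h, is
per-VCPU scratch state: C code records an exception to propagate, and
the assembly exit path builds the matching guest stack frame from it.
A sketch of the producer side, mirroring the seg_fixup.c hunk above;
the final eip assignment is inferred, since that hunk is cut off just
after the cs field:

    /* Sketch: queue an exception, with error code, for the guest. */
    static void bounce_exception(struct trap_bounce *tb, trap_info_t *ti,
                                 unsigned long error_code)
    {
        tb->flags      = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
        tb->error_code = error_code;
        tb->cs         = ti->cs;
        tb->eip        = ti->address;
    }
]
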
    25.1 --- a/xen/include/asm-x86/ldt.h	Sat Feb 05 15:42:48 2005 +0000
    25.2 +++ b/xen/include/asm-x86/ldt.h	Sat Feb 05 18:20:15 2005 +0000
    25.3 @@ -1,25 +1,27 @@
    25.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    25.5 +
    25.6  #ifndef __ARCH_LDT_H
    25.7  #define __ARCH_LDT_H
    25.8  
    25.9  #ifndef __ASSEMBLY__
   25.10  
   25.11 -static inline void load_LDT(struct exec_domain *p)
   25.12 +static inline void load_LDT(struct exec_domain *ed)
   25.13  {
   25.14      unsigned int cpu;
   25.15      struct desc_struct *desc;
   25.16      unsigned long ents;
   25.17 -                                                                                                
   25.18 -    if ( (ents = p->mm.ldt_ents) == 0 )
   25.19 +
   25.20 +    if ( (ents = ed->arch.ldt_ents) == 0 )
   25.21      {
   25.22          __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );
   25.23      }
   25.24      else
   25.25      {
   25.26          cpu = smp_processor_id();
   25.27 -        desc = (struct desc_struct *)GET_GDT_ADDRESS(p) + __LDT(cpu);
   25.28 -        desc->a = ((LDT_VIRT_START(p)&0xffff)<<16) | (ents*8-1);
   25.29 -        desc->b = (LDT_VIRT_START(p)&(0xff<<24)) | 0x8200 |
   25.30 -            ((LDT_VIRT_START(p)&0xff0000)>>16);
   25.31 +        desc = (struct desc_struct *)GET_GDT_ADDRESS(ed) + __LDT(cpu);
   25.32 +        desc->a = ((LDT_VIRT_START(ed)&0xffff)<<16) | (ents*8-1);
   25.33 +        desc->b = (LDT_VIRT_START(ed)&(0xff<<24)) | 0x8200 |
   25.34 +            ((LDT_VIRT_START(ed)&0xff0000)>>16);
   25.35          __asm__ __volatile__ ( "lldt %%ax" : : "a" (__LDT(cpu)<<3) );
   25.36      }
   25.37  }
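
[Editor's note: load_LDT() hand-assembles an LDT segment descriptor
whose base is the fixed virtual address LDT_VIRT_START(ed) and whose
byte-granular limit is ents*8-1; 0x8200 supplies present=1 and system
type 2 (LDT). The same packing with the shifts spelled out -- limit
bits 19:16 are omitted because ents <= 8192 keeps the limit under 64K:

    #include <stdint.h>

    /* Sketch: build the two 32-bit words of an i386 LDT descriptor. */
    static void pack_ldt_desc(uint32_t *a, uint32_t *b,
                              uint32_t base, uint32_t nr_ents)
    {
        uint32_t limit = nr_ents * 8 - 1;
        *a = ((base & 0x0000ffff) << 16) | (limit & 0xffff);
        *b = (base & 0xff000000)          /* base[31:24]       */
           | 0x8200                       /* P=1, type=2 (LDT) */
           | ((base & 0x00ff0000) >> 16); /* base[23:16]       */
    }
]
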
    26.1 --- a/xen/include/asm-x86/processor.h	Sat Feb 05 15:42:48 2005 +0000
    26.2 +++ b/xen/include/asm-x86/processor.h	Sat Feb 05 18:20:15 2005 +0000
    26.3 @@ -1,8 +1,6 @@
    26.4 -/*
    26.5 - * include/asm-x86/processor.h
    26.6 - *
    26.7 - * Copyright (C) 1994 Linus Torvalds
    26.8 - */
    26.9 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
   26.10 +
   26.11 +/* Portions are: Copyright (c) 1994 Linus Torvalds */
   26.12  
   26.13  #ifndef __ASM_X86_PROCESSOR_H
   26.14  #define __ASM_X86_PROCESSOR_H
   26.15 @@ -380,63 +378,6 @@ struct tss_struct {
   26.16      u8 __cacheline_filler[23];
   26.17  } __cacheline_aligned PACKED;
   26.18  
   26.19 -struct trap_bounce {
   26.20 -    unsigned long  error_code;
   26.21 -    unsigned long  cr2;
   26.22 -    unsigned short flags; /* TBF_ */
   26.23 -    unsigned short cs;
   26.24 -    unsigned long  eip;
   26.25 -};
   26.26 -
   26.27 -struct thread_struct {
   26.28 -    unsigned long      guestos_sp;
   26.29 -    unsigned long      guestos_ss;
   26.30 -
   26.31 -    unsigned long      flags; /* TF_ */
   26.32 -
   26.33 -    /* Hardware debugging registers */
   26.34 -    unsigned long      debugreg[8];  /* %%db0-7 debug registers */
   26.35 -
   26.36 -    /* floating point info */
   26.37 -    struct i387_state  i387;
   26.38 -
   26.39 -    /* general user-visible register state */
   26.40 -    execution_context_t user_ctxt;
   26.41 -
   26.42 -    void (*schedule_tail) (struct exec_domain *);
   26.43 -
   26.44 -    /*
   26.45 -     * Return vectors pushed to us by guest OS.
   26.46 -     * The stack frame for events is exactly that of an x86 hardware interrupt.
   26.47 -     * The stack frame for a failsafe callback is augmented with saved values
   26.48 -     * for segment registers %ds, %es, %fs and %gs:
   26.49 -     * 	%ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
   26.50 -     */
   26.51 -    unsigned long event_selector;    /* entry CS  */
   26.52 -    unsigned long event_address;     /* entry EIP */
   26.53 -
   26.54 -    unsigned long failsafe_selector; /* entry CS  */
   26.55 -    unsigned long failsafe_address;  /* entry EIP */
   26.56 -
   26.57 -    /* Bounce information for propagating an exception to guest OS. */
   26.58 -    struct trap_bounce trap_bounce;
   26.59 -
   26.60 -    /* I/O-port access bitmap. */
   26.61 -    u64 io_bitmap_sel; /* Selector to tell us which part of the IO bitmap are
   26.62 -                        * "interesting" (i.e. have clear bits) */
   26.63 -    u8 *io_bitmap; /* Pointer to task's IO bitmap or NULL */
   26.64 -
   26.65 -    /* Trap info. */
   26.66 -#ifdef ARCH_HAS_FAST_TRAP
   26.67 -    int                fast_trap_idx;
   26.68 -    struct desc_struct fast_trap_desc;
   26.69 -#endif
   26.70 -    trap_info_t        traps[256];
   26.71 -#ifdef CONFIG_VMX
   26.72 -    struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
   26.73 -#endif
   26.74 -} __cacheline_aligned;
   26.75 -
   26.76  #define IDT_ENTRIES 256
   26.77  extern idt_entry_t idt_table[];
   26.78  extern idt_entry_t *idt_tables[];
   26.79 @@ -467,91 +408,18 @@ long set_fast_trap(struct exec_domain *p
   26.80  
   26.81  #endif
   26.82  
   26.83 -#define INIT_THREAD { 0 }
   26.84 -
   26.85  extern int gpf_emulate_4gb(struct xen_regs *regs);
   26.86  
   26.87 -struct mm_struct {
   26.88 -    /*
   26.89 -     * Every domain has a L1 pagetable of its own. Per-domain mappings
   26.90 -     * are put in this table (eg. the current GDT is mapped here).
   26.91 -     */
   26.92 -    l1_pgentry_t *perdomain_ptes;
   26.93 -    pagetable_t  pagetable;
   26.94 -
   26.95 -    pagetable_t  monitor_table;
   26.96 -    l2_pgentry_t *vpagetable;	/* virtual address of pagetable */
   26.97 -    l2_pgentry_t *shadow_vtable;	/* virtual address of shadow_table */
   26.98 -    l2_pgentry_t *guest_pl2e_cache;	/* guest page directory cache */
   26.99 -    unsigned long min_pfn;		/* min host physical */
  26.100 -    unsigned long max_pfn;		/* max host physical */
  26.101 -
  26.102 -    /* Virtual CR2 value. Can be read/written by guest. */
  26.103 -    unsigned long guest_cr2;
  26.104 -
  26.105 -    /* shadow mode status and controls */
  26.106 -    unsigned int shadow_mode;  /* flags to control shadow table operation */
  26.107 -    pagetable_t  shadow_table;
  26.108 -    spinlock_t   shadow_lock;
  26.109 -    unsigned int shadow_max_page_count; // currently unused
  26.110 -
  26.111 -    /* shadow hashtable */
  26.112 -    struct shadow_status *shadow_ht;
  26.113 -    struct shadow_status *shadow_ht_free;
  26.114 -    struct shadow_status *shadow_ht_extras; /* extra allocation units */
  26.115 -    unsigned int shadow_extras_count;
  26.116 -
  26.117 -    /* shadow dirty bitmap */
  26.118 -    unsigned long *shadow_dirty_bitmap;
  26.119 -    unsigned int shadow_dirty_bitmap_size;  /* in pages, bit per page */
  26.120 +extern void write_ptbase(struct exec_domain *ed);
  26.121  
  26.122 -    /* shadow mode stats */
  26.123 -    unsigned int shadow_page_count;     
  26.124 -    unsigned int shadow_fault_count;     
  26.125 -    unsigned int shadow_dirty_count;     
  26.126 -    unsigned int shadow_dirty_net_count;     
  26.127 -    unsigned int shadow_dirty_block_count;     
  26.128 -
  26.129 -    /* Current LDT details. */
  26.130 -    unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt;
  26.131 -    /* Next entry is passed to LGDT on domain switch. */
  26.132 -    char gdt[10]; /* NB. 10 bytes needed for x86_64. Use 6 bytes for x86_32. */
  26.133 -};
  26.134 -
  26.135 -#define SHM_full_32     (8) /* full virtualization for 32-bit */
  26.136 -
  26.137 -static inline void write_ptbase(struct mm_struct *mm)
  26.138 -{
  26.139 -    unsigned long pa;
  26.140 -
  26.141 -#ifdef CONFIG_VMX
  26.142 -    if ( unlikely(mm->shadow_mode) ) {
  26.143 -            if (mm->shadow_mode == SHM_full_32)
  26.144 -                    pa = pagetable_val(mm->monitor_table);
  26.145 -            else
  26.146 -                    pa = pagetable_val(mm->shadow_table);   
  26.147 -    }
  26.148 -#else
  26.149 -    if ( unlikely(mm->shadow_mode) )
  26.150 -            pa = pagetable_val(mm->shadow_table);    
  26.151 -#endif
  26.152 -    else
  26.153 -            pa = pagetable_val(mm->pagetable);
  26.154 -
  26.155 -    write_cr3(pa);
  26.156 -}
  26.157 -
  26.158 -#define IDLE0_MM                                                    \
  26.159 -{                                                                   \
  26.160 -    perdomain_ptes: 0,                                              \
  26.161 -    pagetable:      mk_pagetable(__pa(idle_pg_table))               \
  26.162 -}
  26.163 -
  26.164 -/* Convenient accessor for mm.gdt. */
  26.165 -#define SET_GDT_ENTRIES(_p, _e) ((*(u16 *)((_p)->mm.gdt + 0)) = (((_e)<<3)-1))
  26.166 -#define SET_GDT_ADDRESS(_p, _a) ((*(unsigned long *)((_p)->mm.gdt + 2)) = (_a))
  26.167 -#define GET_GDT_ENTRIES(_p)     (((*(u16 *)((_p)->mm.gdt + 0))+1)>>3)
  26.168 -#define GET_GDT_ADDRESS(_p)     (*(unsigned long *)((_p)->mm.gdt + 2))
  26.169 +#define SET_GDT_ENTRIES(_p, _e) \
  26.170 +    ((*(u16 *)((_p)->arch.gdt + 0)) = (((_e)<<3)-1))
  26.171 +#define SET_GDT_ADDRESS(_p, _a) \
  26.172 +    ((*(unsigned long *)((_p)->arch.gdt + 2)) = (_a))
  26.173 +#define GET_GDT_ENTRIES(_p)     \
  26.174 +    (((*(u16 *)((_p)->arch.gdt + 0))+1)>>3)
  26.175 +#define GET_GDT_ADDRESS(_p)     \
  26.176 +    (*(unsigned long *)((_p)->arch.gdt + 2))
  26.177  
  26.178  void destroy_gdt(struct exec_domain *d);
  26.179  long set_gdt(struct exec_domain *d, 
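
[Editor's note: the gdt[10] byte array these macros poke at is exactly
the operand LGDT expects -- a 16-bit limit followed by a 32- or 64-bit
linear base, hence the magic byte offsets 0 and 2. An equivalent sketch
using a packed struct instead of pointer casts:

    #include <stdint.h>

    /* Sketch: the pseudo-descriptor layout consumed by LGDT;
     * 'packed' prevents padding between limit and base. */
    struct gdt_ptr_sketch {
        uint16_t      limit;  /* (entries << 3) - 1          */
        unsigned long base;   /* linear address of the table */
    } __attribute__((packed));

    static void set_gdt_ptr(struct gdt_ptr_sketch *g,
                            unsigned int entries, unsigned long base)
    {
        g->limit = (uint16_t)((entries << 3) - 1);
        g->base  = base;
    }
]
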
    27.1 --- a/xen/include/asm-x86/shadow.h	Sat Feb 05 15:42:48 2005 +0000
    27.2 +++ b/xen/include/asm-x86/shadow.h	Sat Feb 05 18:20:15 2005 +0000
    27.3 @@ -1,4 +1,4 @@
    27.4 -/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*- */
    27.5 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    27.6  
    27.7  #ifndef _XEN_SHADOW_H
    27.8  #define _XEN_SHADOW_H
    27.9 @@ -12,7 +12,7 @@
   27.10  #define PSH_shadowed    (1<<31) /* page has a shadow. PFN points to shadow */
   27.11  #define PSH_pfn_mask    ((1<<21)-1)
   27.12  
   27.13 -/* Shadow PT operation mode : shadowmode variable in mm_struct */
    27.14 +/* Shadow PT operation mode: the shadow_mode variable in arch_domain. */
   27.15  #define SHM_test        (1) /* just run domain on shadow PTs */
   27.16  #define SHM_logdirty    (2) /* log pages that are dirtied */
   27.17  #define SHM_translate   (3) /* lookup machine pages in translation table */
   27.18 @@ -23,10 +23,10 @@
   27.19  #define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \
   27.20       (SH_LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))
   27.21  
   27.22 -#define shadow_mode(_d)      ((_d)->mm.shadow_mode)
   27.23 -#define shadow_lock_init(_d) spin_lock_init(&(_d)->mm.shadow_lock)
   27.24 -#define shadow_lock(_m)      spin_lock(&(_m)->shadow_lock)
   27.25 -#define shadow_unlock(_m)    spin_unlock(&(_m)->shadow_lock)
   27.26 +#define shadow_mode(_d)      ((_d)->arch.shadow_mode)
   27.27 +#define shadow_lock_init(_d) spin_lock_init(&(_d)->arch.shadow_lock)
   27.28 +#define shadow_lock(_d)      spin_lock(&(_d)->arch.shadow_lock)
   27.29 +#define shadow_unlock(_d)    spin_unlock(&(_d)->arch.shadow_lock)
   27.30  
   27.31  extern void shadow_mode_init(void);
   27.32  extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
   27.33 @@ -39,18 +39,18 @@ extern void unshadow_table(unsigned long
   27.34  extern int shadow_mode_enable(struct domain *p, unsigned int mode);
   27.35  
   27.36  #ifdef CONFIG_VMX
   27.37 -extern void vmx_shadow_clear_state(struct mm_struct *);
   27.38 -extern void vmx_shadow_invlpg(struct mm_struct *, unsigned long);
   27.39 +extern void vmx_shadow_clear_state(struct domain *);
   27.40 +extern void vmx_shadow_invlpg(struct domain *, unsigned long);
   27.41  #endif
   27.42  
   27.43 -#define  __get_machine_to_phys(m, guest_gpfn, gpfn)     \
   27.44 -    if ((m)->shadow_mode == SHM_full_32)                \
   27.45 +#define  __get_machine_to_phys(_d, guest_gpfn, gpfn)    \
   27.46 +    if ((_d)->arch.shadow_mode == SHM_full_32)          \
   27.47          (guest_gpfn) = machine_to_phys_mapping[(gpfn)]; \
   27.48      else                                                \
   27.49          (guest_gpfn) = (gpfn);
   27.50  
   27.51 -#define  __get_phys_to_machine(m, host_gpfn, gpfn)     \
   27.52 -    if ((m)->shadow_mode == SHM_full_32)               \
   27.53 +#define  __get_phys_to_machine(_d, host_gpfn, gpfn)    \
   27.54 +    if ((_d)->arch.shadow_mode == SHM_full_32)         \
   27.55          (host_gpfn) = phys_to_machine_mapping[(gpfn)]; \
   27.56      else                                               \
   27.57          (host_gpfn) = (gpfn);
   27.58 @@ -58,21 +58,21 @@ extern void vmx_shadow_invlpg(struct mm_
   27.59  extern void __shadow_mode_disable(struct domain *d);
   27.60  static inline void shadow_mode_disable(struct domain *d)
   27.61  {
   27.62 -    if ( shadow_mode(d->exec_domain[0]) )
   27.63 +    if ( shadow_mode(d) )
   27.64          __shadow_mode_disable(d);
   27.65  }
   27.66  
   27.67  extern unsigned long shadow_l2_table( 
   27.68 -    struct mm_struct *m, unsigned long gpfn);
   27.69 +    struct domain *d, unsigned long gpfn);
   27.70    
   27.71 -static inline void shadow_invalidate(struct mm_struct *m) {
   27.72 -    if (m->shadow_mode != SHM_full_32)
   27.73 +static inline void shadow_invalidate(struct exec_domain *ed) {
   27.74 +    if ( ed->domain->arch.shadow_mode != SHM_full_32 )
   27.75          BUG();
   27.76 -    memset(m->shadow_vtable, 0, PAGE_SIZE);
   27.77 +    memset(ed->arch.shadow_vtable, 0, PAGE_SIZE);
   27.78  }
   27.79  
   27.80 -#define SHADOW_DEBUG 0
   27.81 -#define SHADOW_HASH_DEBUG 0
   27.82 +#define SHADOW_DEBUG 1
   27.83 +#define SHADOW_HASH_DEBUG 1
   27.84  
   27.85  struct shadow_status {
   27.86      unsigned long pfn;            /* Guest pfn.             */
    27.87 @@ -94,7 +94,7 @@ printk("DOM%u: (file=shadow.c, line=%d) 
   27.88  #if SHADOW_DEBUG
   27.89  #define SH_VLOG(_f, _a...)                             \
   27.90      printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \
   27.91 -           current->id , __LINE__ , ## _a )
   27.92 +           current->domain->id , __LINE__ , ## _a )
   27.93  #else
   27.94  #define SH_VLOG(_f, _a...) 
   27.95  #endif
   27.96 @@ -102,67 +102,64 @@ printk("DOM%u: (file=shadow.c, line=%d) 
   27.97  #if 0
   27.98  #define SH_VVLOG(_f, _a...)                             \
   27.99      printk("DOM%u: (file=shadow.c, line=%d) " _f "\n",  \
  27.100 -           current->id , __LINE__ , ## _a )
  27.101 +           current->domain->id , __LINE__ , ## _a )
  27.102  #else
  27.103  #define SH_VVLOG(_f, _a...)
  27.104  #endif
  27.105  
  27.106 -static inline void __shadow_get_pl2e(struct mm_struct *m, 
  27.107 -                                unsigned long va, unsigned long *sl2e)
  27.108 +static inline void __shadow_get_pl2e(
  27.109 +    struct exec_domain *ed, unsigned long va, unsigned long *sl2e)
  27.110  {
  27.111 -    if (m->shadow_mode == SHM_full_32) {
  27.112 -        *sl2e = l2_pgentry_val(m->shadow_vtable[va >> L2_PAGETABLE_SHIFT]);
  27.113 -    }
  27.114 -    else
  27.115 -        *sl2e = l2_pgentry_val(linear_l2_table[va >> L2_PAGETABLE_SHIFT]);
  27.116 +    *sl2e = (ed->domain->arch.shadow_mode == SHM_full_32) ?
  27.117 +        l2_pgentry_val(ed->arch.shadow_vtable[l2_table_offset(va)]) :
  27.118 +        l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
  27.119  }
  27.120  
  27.121 -static inline void __shadow_set_pl2e(struct mm_struct *m, 
  27.122 -                                unsigned long va, unsigned long value)
  27.123 +static inline void __shadow_set_pl2e(
  27.124 +    struct exec_domain *ed, unsigned long va, unsigned long value)
  27.125  {
  27.126 -    if (m->shadow_mode == SHM_full_32) {
  27.127 -        m->shadow_vtable[va >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(value);
  27.128 -    }
  27.129 +    if ( ed->domain->arch.shadow_mode == SHM_full_32 )
  27.130 +        ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
  27.131      else
  27.132 -        linear_l2_table[va >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(value);
  27.133 +        linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
  27.134  }
  27.135  
  27.136 -static inline void __guest_get_pl2e(struct mm_struct *m, 
  27.137 -                                unsigned long va, unsigned long *l2e)
  27.138 +static inline void __guest_get_pl2e(
  27.139 +    struct exec_domain *ed, unsigned long va, unsigned long *l2e)
  27.140  {
  27.141 -    if (m->shadow_mode == SHM_full_32) {
  27.142 -        *l2e = l2_pgentry_val(m->vpagetable[va >> L2_PAGETABLE_SHIFT]);
  27.143 -    }
  27.144 -    else
  27.145 -        *l2e = l2_pgentry_val(linear_l2_table[va >> L2_PAGETABLE_SHIFT]);
  27.146 +    *l2e = (ed->domain->arch.shadow_mode == SHM_full_32) ?
  27.147 +        l2_pgentry_val(ed->arch.vpagetable[l2_table_offset(va)]) :
  27.148 +        l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
  27.149  }
  27.150  
  27.151 -static inline void __guest_set_pl2e(struct mm_struct *m, 
  27.152 -                                unsigned long va, unsigned long value)
  27.153 +static inline void __guest_set_pl2e(
  27.154 +    struct exec_domain *ed, unsigned long va, unsigned long value)
  27.155  {
  27.156 -    if (m->shadow_mode == SHM_full_32) {
  27.157 +    if ( ed->domain->arch.shadow_mode == SHM_full_32 )
  27.158 +    {
  27.159          unsigned long pfn;
  27.160  
  27.161          pfn = phys_to_machine_mapping[value >> PAGE_SHIFT];
  27.162 -                m->guest_pl2e_cache[va >> L2_PAGETABLE_SHIFT] =
  27.163 -                        mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
  27.164 +        ed->arch.guest_pl2e_cache[l2_table_offset(va)] =
  27.165 +            mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
  27.166  
  27.167 -        m->vpagetable[va >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(value);
  27.168 +        ed->arch.vpagetable[l2_table_offset(va)] = mk_l2_pgentry(value);
  27.169      }
  27.170      else
  27.171 -        linear_l2_table[va >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(value);
  27.172 -
  27.173 +    {
  27.174 +        linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
  27.175 +    }
  27.176  }
  27.177  
  27.178  /************************************************************************/
  27.179  
  27.180 -static inline int __mark_dirty( struct mm_struct *m, unsigned int mfn)
  27.181 +static inline int __mark_dirty(struct domain *d, unsigned int mfn)
  27.182  {
  27.183      unsigned long pfn;
  27.184      int           rc = 0;
  27.185  
  27.186 -    ASSERT(spin_is_locked(&m->shadow_lock));
  27.187 -    ASSERT(m->shadow_dirty_bitmap != NULL);
  27.188 +    ASSERT(spin_is_locked(&d->arch.shadow_lock));
  27.189 +    ASSERT(d->arch.shadow_dirty_bitmap != NULL);
  27.190  
  27.191      pfn = machine_to_phys_mapping[mfn];
  27.192  
  27.193 @@ -174,20 +171,20 @@ static inline int __mark_dirty( struct m
  27.194      if ( unlikely(pfn & 0x80000000UL) )
  27.195          return rc;
  27.196  
  27.197 -    if ( likely(pfn < m->shadow_dirty_bitmap_size) )
  27.198 +    if ( likely(pfn < d->arch.shadow_dirty_bitmap_size) )
  27.199      {
  27.200          /* N.B. Can use non-atomic TAS because protected by shadow_lock. */
  27.201 -        if ( !__test_and_set_bit(pfn, m->shadow_dirty_bitmap) )
  27.202 +        if ( !__test_and_set_bit(pfn, d->arch.shadow_dirty_bitmap) )
  27.203          {
  27.204 -            m->shadow_dirty_count++;
  27.205 +            d->arch.shadow_dirty_count++;
  27.206              rc = 1;
  27.207          }
  27.208      }
  27.209  #ifndef NDEBUG
  27.210      else if ( mfn < max_page )
  27.211      {
  27.212 -        SH_LOG("mark_dirty OOR! mfn=%x pfn=%lx max=%x (mm %p)",
  27.213 -               mfn, pfn, m->shadow_dirty_bitmap_size, m );
  27.214 +        SH_LOG("mark_dirty OOR! mfn=%x pfn=%lx max=%x (dom %p)",
  27.215 +               mfn, pfn, d->arch.shadow_dirty_bitmap_size, d);
  27.216          SH_LOG("dom=%p caf=%08x taf=%08x\n", 
  27.217                 page_get_owner(&frame_table[mfn]),
  27.218                 frame_table[mfn].count_info, 
  27.219 @@ -199,12 +196,12 @@ static inline int __mark_dirty( struct m
  27.220  }
  27.221  
  27.222  
  27.223 -static inline int mark_dirty(struct mm_struct *m, unsigned int mfn)
  27.224 +static inline int mark_dirty(struct domain *d, unsigned int mfn)
  27.225  {
  27.226      int rc;
  27.227 -    shadow_lock(m);
  27.228 -    rc = __mark_dirty(m, mfn);
  27.229 -    shadow_unlock(m);
  27.230 +    shadow_lock(d);
  27.231 +    rc = __mark_dirty(d, mfn);
  27.232 +    shadow_unlock(d);
  27.233      return rc;
  27.234  }
  27.235  
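
[Editor's note: __mark_dirty()/mark_dirty() are the log-dirty core --
translate machine frame to guest pfn, then test-and-set one bit per
page under the (now per-domain) shadow_lock, counting each page once
per round. The bitmap logic in isolation, with locking and the m2p
translation stripped out:

    /* Sketch: one dirty bit per guest pfn. */
    struct logdirty_sketch {
        unsigned long *bitmap;       /* bit per page  */
        unsigned long  size;         /* pages covered */
        unsigned int   dirty_count;
    };

    static int mark_dirty_sketch(struct logdirty_sketch *ld,
                                 unsigned long pfn)
    {
        unsigned long bits = 8 * sizeof(unsigned long);
        unsigned long mask = 1UL << (pfn % bits);

        if ( pfn >= ld->size || (ld->bitmap[pfn / bits] & mask) )
            return 0;                /* out of range, or seen already */
        ld->bitmap[pfn / bits] |= mask;
        ld->dirty_count++;
        return 1;                    /* newly dirtied */
    }
]
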
  27.236 @@ -212,7 +209,7 @@ static inline int mark_dirty(struct mm_s
  27.237  /************************************************************************/
  27.238  
  27.239  static inline void l1pte_write_fault(
  27.240 -    struct mm_struct *m, unsigned long *gpte_p, unsigned long *spte_p)
  27.241 +    struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
  27.242  { 
  27.243      unsigned long gpte = *gpte_p;
  27.244      unsigned long spte = *spte_p;
  27.245 @@ -220,7 +217,7 @@ static inline void l1pte_write_fault(
  27.246      ASSERT(gpte & _PAGE_RW);
  27.247      gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
  27.248  
  27.249 -    switch ( m->shadow_mode )
  27.250 +    switch ( d->arch.shadow_mode )
  27.251      {
  27.252      case SHM_test:
  27.253          spte = gpte | _PAGE_RW;
  27.254 @@ -228,7 +225,7 @@ static inline void l1pte_write_fault(
  27.255  
  27.256      case SHM_logdirty:
  27.257          spte = gpte | _PAGE_RW;
  27.258 -        __mark_dirty(m, gpte >> PAGE_SHIFT);
  27.259 +        __mark_dirty(d, gpte >> PAGE_SHIFT);
  27.260  
  27.261      case SHM_full_32:
  27.262      {
  27.263 @@ -247,14 +244,14 @@ static inline void l1pte_write_fault(
  27.264  }
  27.265  
  27.266  static inline void l1pte_read_fault(
  27.267 -    struct mm_struct *m, unsigned long *gpte_p, unsigned long *spte_p)
  27.268 +    struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
  27.269  { 
  27.270      unsigned long gpte = *gpte_p;
  27.271      unsigned long spte = *spte_p;
  27.272  
  27.273      gpte |= _PAGE_ACCESSED;
  27.274  
  27.275 -    switch ( m->shadow_mode )
  27.276 +    switch ( d->arch.shadow_mode )
  27.277      {
  27.278      case SHM_test:
  27.279          spte = (gpte & _PAGE_DIRTY) ? gpte : (gpte & ~_PAGE_RW);
  27.280 @@ -281,12 +278,13 @@ static inline void l1pte_read_fault(
  27.281  }
  27.282  
  27.283  static inline void l1pte_propagate_from_guest(
  27.284 -    struct mm_struct *m, unsigned long *gpte_p, unsigned long *spte_p)
  27.285 +    struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
  27.286  { 
  27.287      unsigned long gpte = *gpte_p;
  27.288      unsigned long spte = *spte_p;
  27.289 +    unsigned long host_pfn, host_gpte;
  27.290  
  27.291 -    switch ( m->shadow_mode )
  27.292 +    switch ( d->arch.shadow_mode )
  27.293      {
  27.294      case SHM_test:
  27.295          spte = 0;
  27.296 @@ -303,11 +301,10 @@ static inline void l1pte_propagate_from_
  27.297          break;
  27.298  
  27.299      case SHM_full_32:
  27.300 -    {
  27.301 -        unsigned long host_pfn, host_gpte;
  27.302          spte = 0;
  27.303  
  27.304 -        if (mmio_space(gpte & 0xFFFFF000)) {
  27.305 +        if ( mmio_space(gpte & 0xFFFFF000) )
  27.306 +        {
  27.307              *spte_p = spte;
  27.308              return;
  27.309          }
  27.310 @@ -317,8 +314,9 @@ static inline void l1pte_propagate_from_
  27.311  
  27.312          if ( (host_gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == 
  27.313               (_PAGE_PRESENT|_PAGE_ACCESSED) )
  27.314 -            spte = (host_gpte & _PAGE_DIRTY) ? host_gpte : (host_gpte & ~_PAGE_RW);
  27.315 -    }
  27.316 +            spte = (host_gpte & _PAGE_DIRTY) ? 
  27.317 +                host_gpte : (host_gpte & ~_PAGE_RW);
  27.318 +
  27.319          break;
  27.320      }
  27.321  
  27.322 @@ -327,7 +325,7 @@ static inline void l1pte_propagate_from_
  27.323  }
  27.324  
  27.325  static inline void l2pde_general(
  27.326 -    struct mm_struct *m,
  27.327 +    struct domain *d,
  27.328      unsigned long *gpde_p,
  27.329      unsigned long *spde_p,
  27.330      unsigned long sl1pfn)
  27.331 @@ -347,7 +345,7 @@ static inline void l2pde_general(
  27.332          if ( (frame_table[sl1pfn].u.inuse.type_info & PGT_type_mask) ==
  27.333               PGT_l2_page_table ) 
  27.334          {
  27.335 -            if (m->shadow_mode != SHM_full_32)
  27.336 +            if ( d->arch.shadow_mode != SHM_full_32 )
  27.337                  spde = gpde & ~_PAGE_RW;
  27.338  
  27.339          }
  27.340 @@ -360,14 +358,14 @@ static inline void l2pde_general(
  27.341  /*********************************************************************/
  27.342  
  27.343  #if SHADOW_HASH_DEBUG
  27.344 -static void shadow_audit(struct mm_struct *m, int print)
  27.345 +static void shadow_audit(struct domain *d, int print)
  27.346  {
  27.347      int live = 0, free = 0, j = 0, abs;
  27.348      struct shadow_status *a;
  27.349  
  27.350      for ( j = 0; j < shadow_ht_buckets; j++ )
  27.351      {
  27.352 -        a = &m->shadow_ht[j];        
  27.353 +        a = &d->arch.shadow_ht[j];        
  27.354          if ( a->pfn ) { live++; ASSERT(a->spfn_and_flags & PSH_pfn_mask); }
  27.355          ASSERT(a->pfn < 0x00100000UL);
  27.356          a = a->next;
  27.357 @@ -387,7 +385,7 @@ static void shadow_audit(struct mm_struc
  27.358          ASSERT(live < 9999);
  27.359      }
  27.360  
  27.361 -    for ( a = m->shadow_ht_free; a != NULL; a = a->next )
  27.362 +    for ( a = d->arch.shadow_ht_free; a != NULL; a = a->next )
  27.363          free++; 
  27.364  
  27.365      if ( print)
  27.366 @@ -406,24 +404,23 @@ static void shadow_audit(struct mm_struc
  27.367  #endif
  27.368  
  27.369  
  27.370 -
  27.371  static inline struct shadow_status *hash_bucket(
  27.372 -    struct mm_struct *m, unsigned int gpfn)
  27.373 +    struct domain *d, unsigned int gpfn)
  27.374  {
  27.375 -    return &m->shadow_ht[gpfn % shadow_ht_buckets];
  27.376 +    return &d->arch.shadow_ht[gpfn % shadow_ht_buckets];
  27.377  }
  27.378  
  27.379  
  27.380  static inline unsigned long __shadow_status(
  27.381 -    struct mm_struct *m, unsigned int gpfn)
  27.382 +    struct domain *d, unsigned int gpfn)
  27.383  {
  27.384      struct shadow_status *p, *x, *head;
  27.385  
  27.386 -    x = head = hash_bucket(m, gpfn);
  27.387 +    x = head = hash_bucket(d, gpfn);
  27.388      p = NULL;
  27.389  
  27.390      SH_VVLOG("lookup gpfn=%08x bucket=%p", gpfn, x);
  27.391 -    shadow_audit(m, 0);
  27.392 +    shadow_audit(d, 0);
  27.393  
  27.394      do
  27.395      {
  27.396 @@ -461,11 +458,11 @@ static inline unsigned long __shadow_sta
  27.397   * anyway it's probably not worth being too clever.
  27.398   */
  27.399  static inline unsigned long get_shadow_status(
  27.400 -    struct mm_struct *m, unsigned int gpfn )
  27.401 +    struct domain *d, unsigned int gpfn )
  27.402  {
  27.403      unsigned long res;
  27.404  
  27.405 -    ASSERT(m->shadow_mode);
  27.406 +    ASSERT(d->arch.shadow_mode);
  27.407  
  27.408      /*
  27.409       * If we get here we know that some sort of update has happened to the
  27.410 @@ -475,37 +472,37 @@ static inline unsigned long get_shadow_s
  27.411       * N.B. The VA update path doesn't use this and is handled independently. 
  27.412       */
  27.413  
  27.414 -    shadow_lock(m);
  27.415 +    shadow_lock(d);
  27.416  
  27.417 -    if ( m->shadow_mode == SHM_logdirty )
  27.418 -        __mark_dirty( m, gpfn );
  27.419 +    if ( d->arch.shadow_mode == SHM_logdirty )
  27.420 +        __mark_dirty(d, gpfn);
  27.421  
  27.422 -    if ( !(res = __shadow_status(m, gpfn)) )
  27.423 -        shadow_unlock(m);
  27.424 +    if ( !(res = __shadow_status(d, gpfn)) )
  27.425 +        shadow_unlock(d);
  27.426  
  27.427      return res;
  27.428  }
  27.429  
  27.430  
  27.431  static inline void put_shadow_status(
  27.432 -    struct mm_struct *m)
  27.433 +    struct domain *d)
  27.434  {
  27.435 -    shadow_unlock(m);
  27.436 +    shadow_unlock(d);
  27.437  }
  27.438  
  27.439  
  27.440  static inline void delete_shadow_status( 
  27.441 -    struct mm_struct *m, unsigned int gpfn)
  27.442 +    struct domain *d, unsigned int gpfn)
  27.443  {
  27.444      struct shadow_status *p, *x, *n, *head;
  27.445  
  27.446 -    ASSERT(spin_is_locked(&m->shadow_lock));
  27.447 +    ASSERT(spin_is_locked(&d->arch.shadow_lock));
  27.448      ASSERT(gpfn != 0);
  27.449  
  27.450 -    head = hash_bucket(m, gpfn);
  27.451 +    head = hash_bucket(d, gpfn);
  27.452  
  27.453      SH_VVLOG("delete gpfn=%08x bucket=%p", gpfn, head);
  27.454 -    shadow_audit(m, 0);
  27.455 +    shadow_audit(d, 0);
  27.456  
  27.457      /* Match on head item? */
  27.458      if ( head->pfn == gpfn )
  27.459 @@ -522,8 +519,8 @@ static inline void delete_shadow_status(
  27.460              /* Add deleted node to the free list. */
  27.461              n->pfn            = 0;
  27.462              n->spfn_and_flags = 0;
  27.463 -            n->next           = m->shadow_ht_free;
  27.464 -            m->shadow_ht_free = n;
  27.465 +            n->next           = d->arch.shadow_ht_free;
  27.466 +            d->arch.shadow_ht_free = n;
  27.467          }
  27.468          else
  27.469          {
  27.470 @@ -548,8 +545,8 @@ static inline void delete_shadow_status(
  27.471              /* Add deleted node to the free list. */
  27.472              x->pfn            = 0;
  27.473              x->spfn_and_flags = 0;
  27.474 -            x->next           = m->shadow_ht_free;
  27.475 -            m->shadow_ht_free = x;
  27.476 +            x->next           = d->arch.shadow_ht_free;
  27.477 +            d->arch.shadow_ht_free = x;
  27.478  
  27.479              goto found;
  27.480          }
  27.481 @@ -563,24 +560,24 @@ static inline void delete_shadow_status(
  27.482      BUG();
  27.483  
  27.484   found:
  27.485 -    shadow_audit(m, 0);
  27.486 +    shadow_audit(d, 0);
  27.487  }
  27.488  
  27.489  
  27.490  static inline void set_shadow_status(
  27.491 -    struct mm_struct *m, unsigned int gpfn, unsigned long s)
  27.492 +    struct domain *d, unsigned int gpfn, unsigned long s)
  27.493  {
  27.494      struct shadow_status *x, *head, *extra;
  27.495      int i;
  27.496  
  27.497 -    ASSERT(spin_is_locked(&m->shadow_lock));
  27.498 +    ASSERT(spin_is_locked(&d->arch.shadow_lock));
  27.499      ASSERT(gpfn != 0);
  27.500      ASSERT(s & PSH_shadowed);
  27.501  
  27.502 -    x = head = hash_bucket(m, gpfn);
  27.503 +    x = head = hash_bucket(d, gpfn);
  27.504     
  27.505      SH_VVLOG("set gpfn=%08x s=%08lx bucket=%p(%p)", gpfn, s, x, x->next);
  27.506 -    shadow_audit(m, 0);
  27.507 +    shadow_audit(d, 0);
  27.508  
  27.509      /*
  27.510       * STEP 1. If page is already in the table, update it in place.
  27.511 @@ -612,7 +609,7 @@ static inline void set_shadow_status(
  27.512      }
  27.513  
  27.514      /* We need to allocate a new node. Ensure the quicklist is non-empty. */
  27.515 -    if ( unlikely(m->shadow_ht_free == NULL) )
  27.516 +    if ( unlikely(d->arch.shadow_ht_free == NULL) )
  27.517      {
  27.518          SH_LOG("Allocate more shadow hashtable blocks.");
  27.519  
  27.520 @@ -626,10 +623,10 @@ static inline void set_shadow_status(
  27.521          memset(extra, 0, sizeof(void *) + (shadow_ht_extra_size * sizeof(*x)));
  27.522  
  27.523          /* Record the allocation block so it can be correctly freed later. */
  27.524 -        m->shadow_extras_count++;
  27.525 +        d->arch.shadow_extras_count++;
  27.526          *((struct shadow_status **)&extra[shadow_ht_extra_size]) = 
  27.527 -            m->shadow_ht_extras;
  27.528 -        m->shadow_ht_extras = &extra[0];
  27.529 +            d->arch.shadow_ht_extras;
  27.530 +        d->arch.shadow_ht_extras = &extra[0];
  27.531  
  27.532          /* Thread a free chain through the newly-allocated nodes. */
  27.533          for ( i = 0; i < (shadow_ht_extra_size - 1); i++ )
  27.534 @@ -637,12 +634,12 @@ static inline void set_shadow_status(
  27.535          extra[i].next = NULL;
  27.536  
  27.537          /* Add the new nodes to the free list. */
  27.538 -        m->shadow_ht_free = &extra[0];
  27.539 +        d->arch.shadow_ht_free = &extra[0];
  27.540      }
  27.541  
  27.542      /* Allocate a new node from the quicklist. */
  27.543 -    x                 = m->shadow_ht_free;
  27.544 -    m->shadow_ht_free = x->next;
  27.545 +    x                      = d->arch.shadow_ht_free;
  27.546 +    d->arch.shadow_ht_free = x->next;
  27.547  
  27.548      /* Initialise the new node and insert directly after the head item. */
  27.549      x->pfn            = gpfn;
  27.550 @@ -651,50 +648,51 @@ static inline void set_shadow_status(
  27.551      head->next        = x;
  27.552  
  27.553   done:
  27.554 -    shadow_audit(m, 0);
  27.555 +    shadow_audit(d, 0);
  27.556  }
  27.557    
  27.558  #ifdef CONFIG_VMX
  27.559  #include <asm/domain_page.h>
  27.560  
  27.561  static inline void vmx_update_shadow_state(
  27.562 -    struct mm_struct *mm, unsigned long gpfn, unsigned long spfn)
  27.563 +    struct exec_domain *ed, unsigned long gpfn, unsigned long spfn)
  27.564  {
  27.565  
  27.566      l2_pgentry_t *mpl2e = 0;
  27.567      l2_pgentry_t *gpl2e, *spl2e;
  27.568  
  27.569      /* unmap the old mappings */
  27.570 -    if (mm->shadow_vtable)
  27.571 -        unmap_domain_mem(mm->shadow_vtable);
  27.572 -    if (mm->vpagetable)
  27.573 -        unmap_domain_mem(mm->vpagetable);
  27.574 +    if ( ed->arch.shadow_vtable )
  27.575 +        unmap_domain_mem(ed->arch.shadow_vtable);
  27.576 +    if ( ed->arch.vpagetable )
  27.577 +        unmap_domain_mem(ed->arch.vpagetable);
  27.578  
  27.579      /* new mapping */
  27.580 -    mpl2e = (l2_pgentry_t *) 
  27.581 -        map_domain_mem(pagetable_val(mm->monitor_table));
  27.582 +    mpl2e = (l2_pgentry_t *)
  27.583 +        map_domain_mem(pagetable_val(ed->arch.monitor_table));
  27.584  
  27.585 -    mpl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
  27.586 +    mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
  27.587          mk_l2_pgentry((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
  27.588      __flush_tlb_one(SH_LINEAR_PT_VIRT_START);
  27.589  
  27.590 -    spl2e = (l2_pgentry_t *) map_domain_mem(spfn << PAGE_SHIFT);
  27.591 -    gpl2e = (l2_pgentry_t *) map_domain_mem(gpfn << PAGE_SHIFT);
  27.592 +    spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT);
  27.593 +    gpl2e = (l2_pgentry_t *)map_domain_mem(gpfn << PAGE_SHIFT);
  27.594      memset(spl2e, 0, ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
  27.595  
  27.596 -    mm->shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
  27.597 -    mm->shadow_vtable = spl2e;
  27.598 -    mm->vpagetable = gpl2e; /* expect the guest did clean this up */
  27.599 +    ed->arch.shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
  27.600 +    ed->arch.shadow_vtable = spl2e;
  27.601 +    ed->arch.vpagetable = gpl2e; /* we expect the guest has cleaned this up */
  27.602      unmap_domain_mem(mpl2e);
  27.603  }
  27.604  
  27.605 -static inline void __shadow_mk_pagetable( struct mm_struct *mm )
  27.606 +static inline void __shadow_mk_pagetable(struct exec_domain *ed)
  27.607  {
  27.608 -    unsigned long gpfn = pagetable_val(mm->pagetable) >> PAGE_SHIFT;
  27.609 +    struct domain *d = ed->domain;
  27.610 +    unsigned long gpfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT;
  27.611      unsigned long spfn;
  27.612      SH_VLOG("0: __shadow_mk_pagetable(gpfn=%08lx)\n", gpfn);
  27.613  
  27.614 -    if (mm->shadow_mode == SHM_full_32) 
  27.615 +    if ( d->arch.shadow_mode == SHM_full_32 )
  27.616      {
  27.617          unsigned long guest_gpfn;
  27.618          guest_gpfn = machine_to_phys_mapping[gpfn];
  27.619 @@ -702,59 +700,59 @@ static inline void __shadow_mk_pagetable
  27.620          SH_VVLOG("__shadow_mk_pagetable(guest_gpfn=%08lx, gpfn=%08lx)\n",
  27.621                   guest_gpfn, gpfn);
  27.622  
  27.623 -        spfn = __shadow_status(mm, guest_gpfn) & PSH_pfn_mask;
  27.624 +        spfn = __shadow_status(d, guest_gpfn) & PSH_pfn_mask;
  27.625          if ( unlikely(spfn == 0) ) {
  27.626 -            spfn = shadow_l2_table(mm, gpfn);
  27.627 -            mm->shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
  27.628 +            spfn = shadow_l2_table(d, gpfn);
  27.629 +            ed->arch.shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
  27.630          } else {
  27.631 -            vmx_update_shadow_state(mm, gpfn, spfn);
  27.632 +            vmx_update_shadow_state(ed, gpfn, spfn);
  27.633          }
  27.634      } else {
  27.635 -        spfn = __shadow_status(mm, gpfn) & PSH_pfn_mask;
  27.636 +        spfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
  27.637  
  27.638          if ( unlikely(spfn == 0) ) {
  27.639 -            spfn = shadow_l2_table(mm, gpfn);
  27.640 +            spfn = shadow_l2_table(d, gpfn);
  27.641          }
  27.642 -        mm->shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
  27.643 +        ed->arch.shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
  27.644      }
  27.645  }
  27.646  #else
  27.647 -static inline void __shadow_mk_pagetable(struct mm_struct *mm)
  27.648 +static inline void __shadow_mk_pagetable(struct exec_domain *ed)
  27.649  {
  27.650 -    unsigned long gpfn = pagetable_val(mm->pagetable) >> PAGE_SHIFT;
  27.651 -    unsigned long spfn = __shadow_status(mm, gpfn);
  27.652 +    unsigned long gpfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT;
  27.653 +    unsigned long spfn = __shadow_status(ed->domain, gpfn);
  27.654  
  27.655      if ( unlikely(spfn == 0) )
  27.656 -        spfn = shadow_l2_table(mm, gpfn);
  27.657 +        spfn = shadow_l2_table(ed->domain, gpfn);
  27.658  
  27.659 -    mm->shadow_table = mk_pagetable(spfn << PAGE_SHIFT);
  27.660 +    ed->arch.shadow_table = mk_pagetable(spfn << PAGE_SHIFT);
  27.661  }
  27.662  #endif /* CONFIG_VMX */
  27.663  
  27.664 -static inline void shadow_mk_pagetable(struct mm_struct *mm)
  27.665 +static inline void shadow_mk_pagetable(struct exec_domain *ed)
  27.666  {
  27.667 -     if ( unlikely(mm->shadow_mode) )
  27.668 +     if ( unlikely(ed->domain->arch.shadow_mode) )
  27.669       {
  27.670           SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
  27.671 -             pagetable_val(mm->pagetable), mm->shadow_mode ); 
  27.672 -
  27.673 -         shadow_lock(mm);
  27.674 -         __shadow_mk_pagetable(mm);
  27.675 -         shadow_unlock(mm);
  27.676 +                  pagetable_val(ed->arch.pagetable),
  27.677 +                  ed->domain->arch.shadow_mode);
  27.678  
  27.679 -     SH_VVLOG("leaving shadow_mk_pagetable:\n");
  27.680 - 
  27.681 -     SH_VVLOG("( gptbase=%08lx, mode=%d ) sh=%08lx",
  27.682 -              pagetable_val(mm->pagetable), mm->shadow_mode, 
  27.683 -              pagetable_val(mm->shadow_table) );
  27.684 - 
  27.685 -     } 
  27.686 +         shadow_lock(ed->domain);
  27.687 +         __shadow_mk_pagetable(ed);
  27.688 +         shadow_unlock(ed->domain);
  27.689 +
  27.690 +         SH_VVLOG("leaving shadow_mk_pagetable:\n"
  27.691 +                  "( gptbase=%08lx, mode=%d ) sh=%08lx",
  27.692 +                  pagetable_val(ed->arch.pagetable),
  27.693 +                  ed->domain->arch.shadow_mode,
  27.694 +                  pagetable_val(ed->arch.shadow_table) );
  27.695 +     }
  27.696  }
  27.697  
  27.698  #if SHADOW_DEBUG
  27.699 -extern int check_pagetable(struct mm_struct *m, pagetable_t pt, char *s);
  27.700 +extern int check_pagetable(struct domain *d, pagetable_t pt, char *s);
  27.701  #else
  27.702 -#define check_pagetable(m, pt, s) ((void)0)
  27.703 +#define check_pagetable(d, pt, s) ((void)0)
  27.704  #endif
  27.705  
  27.706  #endif /* XEN_SHADOW_H */
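
A note on the shadow-status hash table that these hunks rekey from struct mm_struct to struct domain: head items live directly in the d->arch.shadow_ht[] array, overflow entries are chained off them, and deleted overflow nodes are recycled onto d->arch.shadow_ht_free. The following is a minimal, self-contained sketch of that shape, not the real implementation: the toy_domain type, the bucket count, and the shadow_status_lookup()/shadow_status_set() names are illustrative, and the real code additionally holds d->arch.shadow_lock across updates, audits each chain, and allocates overflow nodes in shadow_ht_extra_size blocks rather than one at a time.

    #include <stdlib.h>

    #define TOY_HT_BUCKETS 256              /* illustrative bucket count */

    /* Simplified analogue of struct shadow_status. */
    struct shadow_status {
        unsigned long spfn_and_flags;       /* shadow pfn plus PSH_* flag bits */
        unsigned int  pfn;                  /* guest pfn; 0 means "slot unused" */
        struct shadow_status *next;         /* hash chain / free-list link */
    };

    /* Simplified analogue of the fields now kept in struct arch_domain. */
    struct toy_domain {
        struct shadow_status  shadow_ht[TOY_HT_BUCKETS]; /* head items in-table */
        struct shadow_status *shadow_ht_free;            /* recycled overflow nodes */
    };

    static struct shadow_status *hash_bucket(struct toy_domain *d, unsigned int gpfn)
    {
        return &d->shadow_ht[gpfn % TOY_HT_BUCKETS];
    }

    /* Lookup: walk the chain rooted at the in-table head item. */
    static unsigned long shadow_status_lookup(struct toy_domain *d, unsigned int gpfn)
    {
        struct shadow_status *x;
        for ( x = hash_bucket(d, gpfn); x != NULL; x = x->next )
            if ( x->pfn == gpfn )
                return x->spfn_and_flags;
        return 0;
    }

    /* Insert or update, reusing free-list nodes for overflow entries. */
    static void shadow_status_set(struct toy_domain *d, unsigned int gpfn,
                                  unsigned long s)
    {
        struct shadow_status *head = hash_bucket(d, gpfn), *x;

        for ( x = head; x != NULL; x = x->next )
            if ( x->pfn == gpfn ) { x->spfn_and_flags = s; return; }

        if ( head->pfn == 0 )               /* head item free: fill in place */
        {
            head->pfn = gpfn;
            head->spfn_and_flags = s;
            return;
        }

        if ( d->shadow_ht_free == NULL )    /* refill quicklist (one node here) */
            if ( (d->shadow_ht_free = calloc(1, sizeof(*x))) == NULL )
                return;                     /* toy OOM handling */

        x = d->shadow_ht_free;
        d->shadow_ht_free = x->next;

        /* Initialise the new node and insert directly after the head item. */
        x->pfn            = gpfn;
        x->spfn_and_flags = s;
        x->next           = head->next;
        head->next        = x;
    }

    int main(void)
    {
        static struct toy_domain d;         /* zero-initialised */
        shadow_status_set(&d, 0x1234, 0x5678);
        return shadow_status_lookup(&d, 0x1234) == 0x5678 ? 0 : 1;
    }

Note also the locking convention these hunks preserve: get_shadow_status() returns with the shadow lock held whenever it finds a nonzero status, and the caller releases it with put_shadow_status(); on a miss the lock has already been dropped.
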
    28.1 --- a/xen/include/asm-x86/vmx_vmcs.h	Sat Feb 05 15:42:48 2005 +0000
    28.2 +++ b/xen/include/asm-x86/vmx_vmcs.h	Sat Feb 05 18:20:15 2005 +0000
    28.3 @@ -1,3 +1,4 @@
    28.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    28.5  /*
    28.6   * vmx_vmcs.h: VMCS related definitions
    28.7   * Copyright (c) 2004, Intel Corporation.
    28.8 @@ -59,7 +60,7 @@ struct arch_vmx_struct {
    28.9  #define vmx_schedule_tail(next)         \
   28.10      (next)->thread.arch_vmx.arch_vmx_schedule_tail((next))
   28.11  
   28.12 -#define VMX_DOMAIN(d)   d->thread.arch_vmx.flags
   28.13 +#define VMX_DOMAIN(d)   d->arch.arch_vmx.flags
   28.14  
   28.15  #define ARCH_VMX_VMCS_LOADED    0       /* VMCS has been loaded and active */
   28.16  #define ARCH_VMX_VMCS_LAUNCH    1       /* Needs VMCS launch */
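
In vmx_vmcs.h only the VMX_DOMAIN() accessor is repointed from ->thread to ->arch in this changeset; note that the vmx_schedule_tail() macro a few lines above still reads (next)->thread.arch_vmx here. Below is a hedged standalone model of the relocated per-VCPU flags word; treating the ARCH_VMX_VMCS_* constants as bit indices into that word is an assumption of the sketch, not something this hunk shows.

    #include <stdio.h>

    /* Toy model of the per-VCPU VMX flags word, as relocated into the
     * arch-specific state. Bit-index semantics are assumed for illustration. */
    enum { TOY_VMCS_LOADED = 0, TOY_VMCS_LAUNCH = 1 };

    struct toy_arch_vmx { unsigned long flags; };
    struct toy_exec_dom { struct { struct toy_arch_vmx arch_vmx; } arch; };

    #define TOY_VMX_DOMAIN(d) ((d)->arch.arch_vmx.flags)  /* mirrors VMX_DOMAIN */

    int main(void)
    {
        struct toy_exec_dom ed = { { { 0 } } };

        TOY_VMX_DOMAIN(&ed) |= 1UL << TOY_VMCS_LAUNCH;    /* first entry pending */
        if ( TOY_VMX_DOMAIN(&ed) & (1UL << TOY_VMCS_LAUNCH) )
            printf("first VM entry still owed a VMLAUNCH\n");
        return 0;
    }

The LOADED/LAUNCH distinction matters because VMX requires the first entry to a fresh VMCS to use VMLAUNCH, with VMRESUME only legal thereafter.
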
    29.1 --- a/xen/include/asm-x86/x86_32/current.h	Sat Feb 05 15:42:48 2005 +0000
    29.2 +++ b/xen/include/asm-x86/x86_32/current.h	Sat Feb 05 18:20:15 2005 +0000
    29.3 @@ -1,3 +1,5 @@
    29.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    29.5 +
    29.6  #ifndef _X86_CURRENT_H
    29.7  #define _X86_CURRENT_H
    29.8  
    29.9 @@ -50,6 +52,6 @@ static inline unsigned long get_stack_to
   29.10          "movl %0,%%esp; jmp "STR(__fn)                            \
   29.11          : : "r" (get_execution_context()) )
   29.12  
   29.13 -#define schedule_tail(_d) ((_d)->thread.schedule_tail)(_d)
   29.14 +#define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed)
   29.15  
   29.16  #endif /* _X86_CURRENT_H */
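
The schedule_tail() change is the same pattern in miniature: the context-switch continuation is now fetched from the arch-specific portion of the exec_domain. A small self-contained model of that dispatch (names prefixed toy_ are illustrative; the real macro runs at the tail of a context switch, after the stack has been reset):

    #include <stdio.h>

    /* Toy model: each VCPU carries an arch-specific tail function that
     * finishes its context switch. */
    struct toy_exec_domain;

    struct toy_arch_exec_domain {
        void (*schedule_tail)(struct toy_exec_domain *);
    };

    struct toy_exec_domain {
        int id;
        struct toy_arch_exec_domain arch;
    };

    /* Mirrors the new definition: dispatch through ->arch, not ->thread. */
    #define toy_schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed)

    static void pv_tail(struct toy_exec_domain *ed)
    {
        printf("vcpu %d: resuming paravirtual guest\n", ed->id);
    }

    int main(void)
    {
        struct toy_exec_domain next = { 1, { pv_tail } };
        toy_schedule_tail(&next);
        return 0;
    }
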
    30.1 --- a/xen/include/asm-x86/x86_64/current.h	Sat Feb 05 15:42:48 2005 +0000
    30.2 +++ b/xen/include/asm-x86/x86_64/current.h	Sat Feb 05 18:20:15 2005 +0000
    30.3 @@ -1,3 +1,5 @@
    30.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    30.5 +
    30.6  #ifndef _X86_64_CURRENT_H
    30.7  #define _X86_64_CURRENT_H
    30.8  
    30.9 @@ -44,6 +46,6 @@ static inline unsigned long get_stack_to
   30.10          "movq %0,%%rsp; jmp "STR(__fn)                            \
   30.11          : : "r" (get_execution_context()) )
   30.12  
   30.13 -#define schedule_tail(_d) ((_d)->thread.schedule_tail)(_d)
   30.14 +#define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed)
   30.15  
   30.16  #endif /* !(_X86_64_CURRENT_H) */
    31.1 --- a/xen/include/xen/sched.h	Sat Feb 05 15:42:48 2005 +0000
    31.2 +++ b/xen/include/xen/sched.h	Sat Feb 05 18:20:15 2005 +0000
    31.3 @@ -1,3 +1,5 @@
    31.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
    31.5 +
    31.6  #ifndef __SCHED_H__
    31.7  #define __SCHED_H__
    31.8  
    31.9 @@ -70,12 +72,8 @@ struct exec_domain
   31.10  
   31.11  #ifdef ARCH_HAS_EXEC_DOMAIN_MM_PTR
   31.12      struct mm_struct *mm;
   31.13 -#else
   31.14 -    struct mm_struct mm;
   31.15  #endif
   31.16  
   31.17 -    struct thread_struct thread;
   31.18 -
   31.19      struct ac_timer  timer;         /* one-shot timer for timeout values */
   31.20  
   31.21      s_time_t         lastschd;      /* time this domain was last scheduled */
   31.22 @@ -89,8 +87,8 @@ struct exec_domain
   31.23      u16 virq_to_evtchn[NR_VIRQS];
   31.24  
   31.25      atomic_t pausecnt;
   31.26 -    arch_exec_domain_t arch;
   31.27  
   31.28 +    struct arch_exec_domain arch;
   31.29  };
   31.30  
   31.31  /*
   31.32 @@ -107,7 +105,8 @@ struct exec_domain
   31.33  #define UNLOCK_BIGLOCK(_d) spin_unlock_recursive(&(_d)->big_lock)
   31.34  #endif
   31.35  
   31.36 -struct domain {
   31.37 +struct domain
   31.38 +{
   31.39      domid_t          id;
   31.40      s_time_t         create_time;
   31.41  
   31.42 @@ -116,8 +115,6 @@ struct domain {
   31.43  
   31.44      spinlock_t       big_lock;
   31.45  
   31.46 -    l1_pgentry_t    *mm_perdomain_pt;
   31.47 -
   31.48      spinlock_t       page_alloc_lock; /* protects all the following fields  */
   31.49      struct list_head page_list;       /* linked list, of size tot_pages     */
   31.50      struct list_head xenpage_list;    /* linked list, of size xenheap_pages */
   31.51 @@ -157,7 +154,8 @@ struct domain {
   31.52      atomic_t refcnt;
   31.53  
   31.54      struct exec_domain *exec_domain[MAX_VIRT_CPUS];
   31.55 -    arch_domain_t arch;
   31.56 +
   31.57 +    struct arch_domain arch;
   31.58  };
   31.59  
   31.60  struct domain_setup_info
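
These sched.h hunks are the point of the whole changeset: struct exec_domain loses its embedded mm_struct and thread_struct, struct domain loses mm_perdomain_pt, and each instead ends with a by-value architecture struct. A minimal sketch of the resulting layering follows; the field sets shown are illustrative stand-ins, not the real definitions.

    #include <stdio.h>

    /* Illustrative stand-ins for the arch-specific portions. */
    struct arch_exec_domain { unsigned long pagetable; }; /* per-VCPU state */
    struct arch_domain      { int shadow_mode; };         /* per-domain state */

    struct exec_domain {
        int processor;
        struct arch_exec_domain arch;     /* replaces mm_struct/thread_struct */
    };

    struct domain {
        unsigned int id;
        struct exec_domain *exec_domain[4];   /* MAX_VIRT_CPUS stand-in */
        struct arch_domain arch;              /* replaces e.g. mm_perdomain_pt */
    };

    int main(void)
    {
        struct exec_domain ed = { 0, { 0x00100000UL } };
        struct domain d = { 7, { &ed }, { 0 } };

        /* Common code passes d/ed around; only arch code reads ->arch. */
        printf("dom%u vcpu on cpu%d: pagetable=%#lx shadow_mode=%d\n",
               d.id, d.exec_domain[0]->processor,
               d.exec_domain[0]->arch.pagetable, d.arch.shadow_mode);
        return 0;
    }

Embedding the arch struct by value keeps per-arch state allocated and freed together with the common structure, while common scheduler code compiles without seeing any x86-specific fields.
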