ia64/xen-unstable

changeset 2919:fe5933507ca5

bitkeeper revision 1.1159.1.393 (4190a145cbZFKzGdkH5xPlOlxNNPnw)

Split struct exec_domain out of struct domain.

author cl349@freefall.cl.cam.ac.uk
date Tue Nov 09 10:51:49 2004 +0000 (2004-11-09)
parents a049a5fcefc4
children 90b094417ff3
files tools/libxc/xc_linux_build.c xen/arch/x86/dom0_ops.c xen/arch/x86/domain.c xen/arch/x86/i387.c xen/arch/x86/idle0_task.c xen/arch/x86/irq.c xen/arch/x86/memory.c xen/arch/x86/setup.c xen/arch/x86/shadow.c xen/arch/x86/smpboot.c xen/arch/x86/traps.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_32/seg_fixup.c xen/common/Makefile xen/common/dom0_ops.c xen/common/dom_mem_ops.c xen/common/domain.c xen/common/event_channel.c xen/common/grant_table.c xen/common/kernel.c xen/common/keyhandler.c xen/common/page_alloc.c xen/common/physdev.c xen/common/sched_bvt.c xen/common/schedule.c xen/drivers/char/console.c xen/include/asm-x86/domain.h xen/include/asm-x86/i387.h xen/include/asm-x86/ldt.h xen/include/asm-x86/processor.h xen/include/asm-x86/shadow.h xen/include/asm-x86/x86_32/current.h xen/include/public/dom0_ops.h xen/include/public/xen.h xen/include/xen/event.h xen/include/xen/sched-if.h xen/include/xen/sched.h
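
A rough sketch of the resulting structures, reconstructed from the hunks below. The real definitions live in xen/include/xen/sched.h and xen/include/asm-x86/domain.h and carry many more fields; the array bound shown is an illustrative assumption based on the MAX_VIRT_CPUS loops in this changeset:

    /* Per-vcpu execution state, split out of the old struct domain. */
    struct exec_domain {
        int                   processor;    /* physical CPU this vcpu runs on */
        unsigned long         ed_flags;     /* EDF_DONEFPUINIT, EDF_USEDFPU, ... */
        struct mm_struct      mm;           /* page tables, LDT, shadow state */
        struct thread_struct  thread;       /* saved registers, traps, selectors */
        shared_info_t        *shared_info;  /* hangs off the vcpu in this changeset */
        struct domain        *domain;       /* back-pointer to owning domain */
    };

    /* Domain-wide state: identity, lifecycle, memory accounting. */
    struct domain {
        domid_t               id;
        unsigned long         d_flags;      /* DF_DYING, DF_CONSTRUCTED, ... */
        atomic_t              refcnt;
        struct exec_domain   *exec_domain[MAX_VIRT_CPUS];
    };

Call sites change accordingly: current now names the running exec_domain rather than the domain, so domain-wide properties are reached through the back-pointer while per-vcpu state stays local. The recurring conversion pattern in the hunks below looks like this:

    struct exec_domain *ed = current;
    struct domain      *d  = ed->domain;

    if ( !IS_PRIV(d) )            /* privilege is a domain-wide property */
        return -EPERM;
    write_ptbase(&ed->mm);        /* page tables are per-vcpu            */
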
line diff
     1.1 --- a/tools/libxc/xc_linux_build.c	Mon Nov 08 15:29:51 2004 +0000
     1.2 +++ b/tools/libxc/xc_linux_build.c	Tue Nov 09 10:51:49 2004 +0000
     1.3 @@ -335,6 +335,7 @@ static int setup_guestos(int xc_handle,
     1.4      /* Mask all upcalls... */
     1.5      for ( i = 0; i < MAX_VIRT_CPUS; i++ )
     1.6          shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
     1.7 +    shared_info->n_vcpu = 2;
     1.8      munmap(shared_info, PAGE_SIZE);
     1.9  
    1.10      /* Send the page update requests down to the hypervisor. */
     2.1 --- a/xen/arch/x86/dom0_ops.c	Mon Nov 08 15:29:51 2004 +0000
     2.2 +++ b/xen/arch/x86/dom0_ops.c	Tue Nov 09 10:51:49 2004 +0000
     2.3 @@ -47,7 +47,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
     2.4  {
     2.5      long ret = 0;
     2.6  
     2.7 -    if ( !IS_PRIV(current) )
     2.8 +    if ( !IS_PRIV(current->domain) )
     2.9          return -EPERM;
    2.10  
    2.11      switch ( op->cmd )
    2.12 @@ -101,7 +101,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
    2.13      return ret;
    2.14  }
    2.15  
    2.16 -void arch_getdomaininfo_ctxt(struct domain *d, full_execution_context_t *c)
    2.17 +void arch_getdomaininfo_ctxt(struct exec_domain *d, full_execution_context_t *c)
    2.18  { 
    2.19      int i;
    2.20  
    2.21 @@ -109,7 +109,7 @@ void arch_getdomaininfo_ctxt(struct doma
    2.22      memcpy(&c->cpu_ctxt, 
    2.23             &d->thread.user_ctxt,
    2.24             sizeof(d->thread.user_ctxt));
    2.25 -    if ( test_bit(DF_DONEFPUINIT, &d->flags) )
    2.26 +    if ( test_bit(EDF_DONEFPUINIT, &d->ed_flags) )
    2.27          c->flags |= ECF_I387_VALID;
    2.28      memcpy(&c->fpu_ctxt,
    2.29             &d->thread.i387,
     3.1 --- a/xen/arch/x86/domain.c	Mon Nov 08 15:29:51 2004 +0000
     3.2 +++ b/xen/arch/x86/domain.c	Tue Nov 09 10:51:49 2004 +0000
     3.3 @@ -91,7 +91,7 @@ void startup_cpu_idle_loop(void)
     3.4  {
     3.5      /* Just some sanity to ensure that the scheduler is set up okay. */
     3.6      ASSERT(current->id == IDLE_DOMAIN_ID);
     3.7 -    domain_unpause_by_systemcontroller(current);
     3.8 +    domain_unpause_by_systemcontroller(current->domain);
     3.9      __enter_scheduler();
    3.10  
    3.11      /*
    3.12 @@ -210,18 +210,18 @@ void machine_halt(void)
    3.13      __machine_halt(NULL);
    3.14  }
    3.15  
    3.16 -void free_perdomain_pt(struct domain *d)
    3.17 +void free_perdomain_pt(struct exec_domain *d)
    3.18  {
    3.19      free_xenheap_page((unsigned long)d->mm.perdomain_pt);
    3.20  }
    3.21  
    3.22 -void arch_do_createdomain(struct domain *d)
    3.23 +void arch_do_createdomain(struct exec_domain *d)
    3.24  {
    3.25      d->shared_info = (void *)alloc_xenheap_page();
    3.26      memset(d->shared_info, 0, PAGE_SIZE);
    3.27      d->shared_info->arch.mfn_to_pfn_start = 
    3.28  	virt_to_phys(&machine_to_phys_mapping[0])>>PAGE_SHIFT;
    3.29 -    SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
    3.30 +    SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d->domain);
    3.31      machine_to_phys_mapping[virt_to_phys(d->shared_info) >> 
    3.32                             PAGE_SHIFT] = INVALID_P2M_ENTRY;
    3.33  
    3.34 @@ -231,14 +231,14 @@ void arch_do_createdomain(struct domain 
    3.35                             PAGE_SHIFT] = INVALID_P2M_ENTRY;
    3.36  }
    3.37  
    3.38 -int arch_final_setup_guestos(struct domain *d, full_execution_context_t *c)
    3.39 +int arch_final_setup_guestos(struct exec_domain *d, full_execution_context_t *c)
    3.40  {
    3.41      unsigned long phys_basetab;
    3.42      int i, rc;
    3.43  
    3.44 -    clear_bit(DF_DONEFPUINIT, &d->flags);
    3.45 +    clear_bit(EDF_DONEFPUINIT, &d->ed_flags);
    3.46      if ( c->flags & ECF_I387_VALID )
    3.47 -        set_bit(DF_DONEFPUINIT, &d->flags);
    3.48 +        set_bit(EDF_DONEFPUINIT, &d->ed_flags);
    3.49  
    3.50      memcpy(&d->thread.user_ctxt,
    3.51             &c->cpu_ctxt,
    3.52 @@ -283,7 +283,7 @@ int arch_final_setup_guestos(struct doma
    3.53      
    3.54      phys_basetab = c->pt_base;
    3.55      d->mm.pagetable = mk_pagetable(phys_basetab);
    3.56 -    if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d, 
    3.57 +    if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d->domain, 
    3.58                              PGT_base_page_table) )
    3.59          return -EINVAL;
    3.60  
    3.61 @@ -304,7 +304,7 @@ int arch_final_setup_guestos(struct doma
    3.62  
    3.63  #if defined(__i386__)
    3.64  
    3.65 -void new_thread(struct domain *d,
    3.66 +void new_thread(struct exec_domain *d,
    3.67                  unsigned long start_pc,
    3.68                  unsigned long start_stack,
    3.69                  unsigned long start_info)
    3.70 @@ -342,7 +342,7 @@ void new_thread(struct domain *d,
    3.71  			:"r" (thread->debugreg[register]))
    3.72  
    3.73  
    3.74 -void switch_to(struct domain *prev_p, struct domain *next_p)
    3.75 +void switch_to(struct exec_domain *prev_p, struct exec_domain *next_p)
    3.76  {
    3.77      struct thread_struct *next = &next_p->thread;
    3.78      struct tss_struct *tss = init_tss + smp_processor_id();
    3.79 @@ -352,7 +352,7 @@ void switch_to(struct domain *prev_p, st
    3.80      __cli();
    3.81  
    3.82      /* Switch guest general-register state. */
    3.83 -    if ( !is_idle_task(prev_p) )
    3.84 +    if ( !is_idle_task(prev_p->domain) )
    3.85      {
    3.86          memcpy(&prev_p->thread.user_ctxt,
    3.87                 stack_ec, 
    3.88 @@ -361,7 +361,7 @@ void switch_to(struct domain *prev_p, st
    3.89          CLEAR_FAST_TRAP(&prev_p->thread);
    3.90      }
    3.91  
    3.92 -    if ( !is_idle_task(next_p) )
    3.93 +    if ( !is_idle_task(next_p->domain) )
    3.94      {
    3.95          memcpy(stack_ec,
    3.96                 &next_p->thread.user_ctxt,
    3.97 @@ -389,36 +389,36 @@ void switch_to(struct domain *prev_p, st
    3.98          write_ptbase(&next_p->mm);
    3.99      }
   3.100  
   3.101 -    if ( unlikely(prev_p->io_bitmap != NULL) || 
   3.102 -         unlikely(next_p->io_bitmap != NULL) )
   3.103 +    if ( unlikely(prev_p->domain->io_bitmap != NULL) || 
   3.104 +         unlikely(next_p->domain->io_bitmap != NULL) )
   3.105      {
   3.106 -        if ( next_p->io_bitmap != NULL )
   3.107 +        if ( next_p->domain->io_bitmap != NULL )
   3.108          {
   3.109              /* Copy in the appropriate parts of the IO bitmap.  We use the
   3.110               * selector to copy only the interesting parts of the bitmap. */
   3.111  
   3.112              u64 old_sel = ~0ULL; /* IO bitmap selector for previous task. */
   3.113  
   3.114 -            if ( prev_p->io_bitmap != NULL)
   3.115 +            if ( prev_p->domain->io_bitmap != NULL)
   3.116              {
   3.117 -                old_sel = prev_p->io_bitmap_sel;
   3.118 +                old_sel = prev_p->domain->io_bitmap_sel;
   3.119  
   3.120                  /* Replace any areas of the IO bitmap that had bits cleared. */
   3.121 -                for ( i = 0; i < sizeof(prev_p->io_bitmap_sel) * 8; i++ )
   3.122 -                    if ( !test_bit(i, &prev_p->io_bitmap_sel) )
   3.123 +                for ( i = 0; i < sizeof(prev_p->domain->io_bitmap_sel) * 8; i++ )
   3.124 +                    if ( !test_bit(i, &prev_p->domain->io_bitmap_sel) )
   3.125                          memcpy(&tss->io_bitmap[i * IOBMP_SELBIT_LWORDS],
   3.126 -                               &next_p->io_bitmap[i * IOBMP_SELBIT_LWORDS],
   3.127 +                               &next_p->domain->io_bitmap[i * IOBMP_SELBIT_LWORDS],
   3.128                                 IOBMP_SELBIT_LWORDS * sizeof(unsigned long));
   3.129              }
   3.130  
   3.131              /* Copy in any regions of the new task's bitmap that have bits
   3.132               * clear and we haven't already dealt with. */
   3.133 -            for ( i = 0; i < sizeof(prev_p->io_bitmap_sel) * 8; i++ )
   3.134 +            for ( i = 0; i < sizeof(prev_p->domain->io_bitmap_sel) * 8; i++ )
   3.135              {
   3.136                  if ( test_bit(i, &old_sel)
   3.137 -                     && !test_bit(i, &next_p->io_bitmap_sel) )
   3.138 +                     && !test_bit(i, &next_p->domain->io_bitmap_sel) )
   3.139                      memcpy(&tss->io_bitmap[i * IOBMP_SELBIT_LWORDS],
   3.140 -                           &next_p->io_bitmap[i * IOBMP_SELBIT_LWORDS],
   3.141 +                           &next_p->domain->io_bitmap[i * IOBMP_SELBIT_LWORDS],
   3.142                             IOBMP_SELBIT_LWORDS * sizeof(unsigned long));
   3.143              }
   3.144  
   3.145 @@ -430,8 +430,8 @@ void switch_to(struct domain *prev_p, st
   3.146              /* In this case, we're switching FROM a task with IO port access,
   3.147               * to a task that doesn't use the IO bitmap.  We set any TSS bits
   3.148               * that might have been cleared, ready for future use. */
   3.149 -            for ( i = 0; i < sizeof(prev_p->io_bitmap_sel) * 8; i++ )
   3.150 -                if ( !test_bit(i, &prev_p->io_bitmap_sel) )
   3.151 +            for ( i = 0; i < sizeof(prev_p->domain->io_bitmap_sel) * 8; i++ )
   3.152 +                if ( !test_bit(i, &prev_p->domain->io_bitmap_sel) )
   3.153                      memset(&tss->io_bitmap[i * IOBMP_SELBIT_LWORDS],
   3.154                             0xFF, IOBMP_SELBIT_LWORDS * sizeof(unsigned long));
   3.155  
   3.156 @@ -536,8 +536,8 @@ void domain_relinquish_memory(struct dom
   3.157      shadow_mode_disable(d);
   3.158  
   3.159      /* Drop the in-use reference to the page-table base. */
   3.160 -    if ( pagetable_val(d->mm.pagetable) != 0 )
   3.161 -        put_page_and_type(&frame_table[pagetable_val(d->mm.pagetable) >>
   3.162 +    if ( pagetable_val(d->exec_domain[0]->mm.pagetable) != 0 )
   3.163 +        put_page_and_type(&frame_table[pagetable_val(d->exec_domain[0]->mm.pagetable) >>
   3.164                                        PAGE_SHIFT]);
   3.165  
   3.166      /*
   3.167 @@ -569,6 +569,7 @@ int construct_dom0(struct domain *p,
   3.168      l1_pgentry_t *l1tab = NULL, *l1start = NULL;
   3.169      struct pfn_info *page = NULL;
   3.170      start_info_t *si;
   3.171 +    struct exec_domain *ed = p->exec_domain[0];
   3.172  
   3.173      /*
   3.174       * This fully describes the memory layout of the initial domain. All 
   3.175 @@ -596,7 +597,7 @@ int construct_dom0(struct domain *p,
   3.176      /* Sanity! */
   3.177      if ( p->id != 0 ) 
   3.178          BUG();
   3.179 -    if ( test_bit(DF_CONSTRUCTED, &p->flags) ) 
   3.180 +    if ( test_bit(DF_CONSTRUCTED, &p->d_flags) ) 
   3.181          BUG();
   3.182  
   3.183      memset(&dsi, 0, sizeof(struct domain_setup_info));
   3.184 @@ -734,18 +735,18 @@ int construct_dom0(struct domain *p,
   3.185  
   3.186      mpt_alloc = (vpt_start - dsi.v_start) + alloc_start;
   3.187  
   3.188 -    SET_GDT_ENTRIES(p, DEFAULT_GDT_ENTRIES);
   3.189 -    SET_GDT_ADDRESS(p, DEFAULT_GDT_ADDRESS);
   3.190 +    SET_GDT_ENTRIES(ed, DEFAULT_GDT_ENTRIES);
   3.191 +    SET_GDT_ADDRESS(ed, DEFAULT_GDT_ADDRESS);
   3.192  
   3.193      /*
   3.194       * We're basically forcing default RPLs to 1, so that our "what privilege
   3.195       * level are we returning to?" logic works.
   3.196       */
   3.197 -    p->failsafe_selector = FLAT_GUESTOS_CS;
   3.198 -    p->event_selector    = FLAT_GUESTOS_CS;
   3.199 -    p->thread.guestos_ss = FLAT_GUESTOS_DS;
   3.200 +    ed->failsafe_selector = FLAT_GUESTOS_CS;
   3.201 +    ed->event_selector    = FLAT_GUESTOS_CS;
   3.202 +    ed->thread.guestos_ss = FLAT_GUESTOS_DS;
   3.203      for ( i = 0; i < 256; i++ ) 
   3.204 -        p->thread.traps[i].cs = FLAT_GUESTOS_CS;
   3.205 +        ed->thread.traps[i].cs = FLAT_GUESTOS_CS;
   3.206  
   3.207      /* WARNING: The new domain must have its 'processor' field filled in! */
   3.208      l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
   3.209 @@ -753,8 +754,8 @@ int construct_dom0(struct domain *p,
   3.210      l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
   3.211          mk_l2_pgentry((unsigned long)l2start | __PAGE_HYPERVISOR);
   3.212      l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
   3.213 -        mk_l2_pgentry(__pa(p->mm.perdomain_pt) | __PAGE_HYPERVISOR);
   3.214 -    p->mm.pagetable = mk_pagetable((unsigned long)l2start);
   3.215 +        mk_l2_pgentry(__pa(ed->mm.perdomain_pt) | __PAGE_HYPERVISOR);
   3.216 +    ed->mm.pagetable = mk_pagetable((unsigned long)l2start);
   3.217  
   3.218      l2tab += l2_table_offset(dsi.v_start);
   3.219      mfn = alloc_start >> PAGE_SHIFT;
   3.220 @@ -825,15 +826,16 @@ int construct_dom0(struct domain *p,
   3.221      }
   3.222  
   3.223      /* Set up shared-info area. */
   3.224 -    update_dom_time(p->shared_info);
   3.225 -    p->shared_info->domain_time = 0;
   3.226 +    update_dom_time(ed->shared_info);
   3.227 +    ed->shared_info->domain_time = 0;
   3.228      /* Mask all upcalls... */
   3.229      for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   3.230 -        p->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
   3.231 +        ed->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
   3.232 +    ed->shared_info->n_vcpu = 1;
   3.233  
   3.234      /* Install the new page tables. */
   3.235      __cli();
   3.236 -    write_ptbase(&p->mm);
   3.237 +    write_ptbase(&ed->mm);
   3.238  
   3.239      /* Copy the OS image. */
   3.240      (void)loadelfimage(image_start);
   3.241 @@ -846,7 +848,7 @@ int construct_dom0(struct domain *p,
   3.242      si = (start_info_t *)vstartinfo_start;
   3.243      memset(si, 0, PAGE_SIZE);
   3.244      si->nr_pages     = p->tot_pages;
   3.245 -    si->shared_info  = virt_to_phys(p->shared_info);
   3.246 +    si->shared_info  = virt_to_phys(ed->shared_info);
   3.247      si->flags        = SIF_PRIVILEGED | SIF_INITDOMAIN;
   3.248      si->pt_base      = vpt_start;
   3.249      si->nr_pt_frames = nr_pt_pages;
   3.250 @@ -898,9 +900,9 @@ int construct_dom0(struct domain *p,
   3.251      /* DOM0 gets access to everything. */
   3.252      physdev_init_dom0(p);
   3.253  
   3.254 -    set_bit(DF_CONSTRUCTED, &p->flags);
   3.255 +    set_bit(DF_CONSTRUCTED, &p->d_flags);
   3.256  
   3.257 -    new_thread(p, dsi.v_kernentry, vstack_end, vstartinfo_start);
   3.258 +    new_thread(ed, dsi.v_kernentry, vstack_end, vstartinfo_start);
   3.259  
   3.260  #if 0 /* XXXXX DO NOT CHECK IN ENABLED !!! (but useful for testing so leave) */
   3.261      shadow_lock(&p->mm);
     4.1 --- a/xen/arch/x86/i387.c	Mon Nov 08 15:29:51 2004 +0000
     4.2 +++ b/xen/arch/x86/i387.c	Tue Nov 09 10:51:49 2004 +0000
     4.3 @@ -17,10 +17,10 @@ void init_fpu(void)
     4.4  {
     4.5      __asm__("fninit");
     4.6      if ( cpu_has_xmm ) load_mxcsr(0x1f80);
     4.7 -    set_bit(DF_DONEFPUINIT, &current->flags);
     4.8 +    set_bit(EDF_DONEFPUINIT, &current->ed_flags);
     4.9  }
    4.10  
    4.11 -static inline void __save_init_fpu( struct domain *tsk )
    4.12 +static inline void __save_init_fpu( struct exec_domain *tsk )
    4.13  {
    4.14      if ( cpu_has_fxsr ) {
    4.15          asm volatile( "fxsave %0 ; fnclex"
    4.16 @@ -29,22 +29,22 @@ static inline void __save_init_fpu( stru
    4.17          asm volatile( "fnsave %0 ; fwait"
    4.18                        : "=m" (tsk->thread.i387) );
    4.19      }
    4.20 -    clear_bit(DF_USEDFPU, &tsk->flags);
    4.21 +    clear_bit(EDF_USEDFPU, &tsk->ed_flags);
    4.22  }
    4.23  
    4.24 -void save_init_fpu( struct domain *tsk )
    4.25 +void save_init_fpu( struct exec_domain *tsk )
    4.26  {
    4.27      /*
    4.28       * The guest OS may have set the 'virtual STTS' flag.
    4.29       * This causes us to set the real flag, so we'll need
    4.30       * to temporarily clear it while saving f-p state.
    4.31       */
    4.32 -    if ( test_bit(DF_GUEST_STTS, &tsk->flags) ) clts();
    4.33 +    if ( test_bit(EDF_GUEST_STTS, &tsk->ed_flags) ) clts();
    4.34      __save_init_fpu(tsk);
    4.35      stts();
    4.36  }
    4.37  
    4.38 -void restore_fpu( struct domain *tsk )
    4.39 +void restore_fpu( struct exec_domain *tsk )
    4.40  {
    4.41      if ( cpu_has_fxsr ) {
    4.42          asm volatile( "fxrstor %0"
     5.1 --- a/xen/arch/x86/idle0_task.c	Mon Nov 08 15:29:51 2004 +0000
     5.2 +++ b/xen/arch/x86/idle0_task.c	Tue Nov 09 10:51:49 2004 +0000
     5.3 @@ -2,17 +2,24 @@
     5.4  #include <xen/sched.h>
     5.5  #include <asm/desc.h>
     5.6  
     5.7 -#define IDLE0_TASK(_t)           \
     5.8 -{                                \
     5.9 -    processor:   0,              \
    5.10 -    id:          IDLE_DOMAIN_ID, \
    5.11 -    mm:          IDLE0_MM,       \
    5.12 -    thread:      INIT_THREAD,    \
    5.13 -    flags:       1<<DF_IDLETASK, \
    5.14 -    refcnt:      ATOMIC_INIT(1)  \
    5.15 +#define IDLE0_EXEC_DOMAIN(_ed,_d)    \
    5.16 +{                                    \
    5.17 +    processor:   0,                  \
    5.18 +    mm:          IDLE0_MM,           \
    5.19 +    thread:      INIT_THREAD,        \
    5.20 +    domain:      (_d)                \
    5.21  }
    5.22  
    5.23 -struct domain idle0_task = IDLE0_TASK(idle0_task);
    5.24 +#define IDLE0_DOMAIN(_t)             \
    5.25 +{                                    \
    5.26 +    id:          IDLE_DOMAIN_ID,     \
    5.27 +    d_flags:     1<<DF_IDLETASK,     \
    5.28 +    refcnt:      ATOMIC_INIT(1)      \
    5.29 +}
    5.30 +
    5.31 +struct domain idle0_domain = IDLE0_DOMAIN(idle0_domain);
    5.32 +struct exec_domain idle0_exec_domain = IDLE0_EXEC_DOMAIN(idle0_exec_domain,
    5.33 +                                                         &idle0_domain);
    5.34  
    5.35  /*
    5.36   * per-CPU TSS segments. Threads are completely 'soft' on Linux,
     6.1 --- a/xen/arch/x86/irq.c	Mon Nov 08 15:29:51 2004 +0000
     6.2 +++ b/xen/arch/x86/irq.c	Tue Nov 09 10:51:49 2004 +0000
     6.3 @@ -212,7 +212,7 @@ int pirq_guest_unmask(struct domain *d)
     6.4      irq_desc_t    *desc;
     6.5      unsigned int   i, j, pirq;
     6.6      u32            m;
     6.7 -    shared_info_t *s = d->shared_info;
     6.8 +    shared_info_t *s = d->exec_domain[0]->shared_info;
     6.9  
    6.10      for ( i = 0; i < ARRAY_SIZE(d->pirq_mask); i++ )
    6.11      {
    6.12 @@ -279,7 +279,7 @@ int pirq_guest_bind(struct domain *d, in
    6.13          /* Attempt to bind the interrupt target to the correct CPU. */
    6.14          if ( desc->handler->set_affinity != NULL )
    6.15              desc->handler->set_affinity(
    6.16 -                irq, apicid_to_phys_cpu_present(d->processor));
    6.17 +                irq, apicid_to_phys_cpu_present(d->exec_domain[0]->processor));
    6.18      }
    6.19      else if ( !will_share || !action->shareable )
    6.20      {
     7.1 --- a/xen/arch/x86/memory.c	Mon Nov 08 15:29:51 2004 +0000
     7.2 +++ b/xen/arch/x86/memory.c	Tue Nov 09 10:51:49 2004 +0000
     7.3 @@ -104,7 +104,7 @@
     7.4  #ifdef VERBOSE
     7.5  #define MEM_LOG(_f, _a...)                           \
     7.6    printk("DOM%u: (file=memory.c, line=%d) " _f "\n", \
     7.7 -         current->id , __LINE__ , ## _a )
     7.8 +         current->domain->id , __LINE__ , ## _a )
     7.9  #else
    7.10  #define MEM_LOG(_f, _a...) ((void)0)
    7.11  #endif
    7.12 @@ -136,7 +136,7 @@ static struct {
    7.13   * Returns the current foreign domain; defaults to the currently-executing
    7.14   * domain if a foreign override hasn't been specified.
    7.15   */
    7.16 -#define FOREIGNDOM (percpu_info[smp_processor_id()].foreign ? : current)
    7.17 +#define FOREIGNDOM (percpu_info[smp_processor_id()].foreign ? : current->domain)
    7.18  
    7.19  /* Private domain structs for DOMID_XEN and DOMID_IO. */
    7.20  static struct domain *dom_xen, *dom_io;
    7.21 @@ -196,7 +196,7 @@ void arch_init_memory(void)
    7.22      }
    7.23  }
    7.24  
    7.25 -static void __invalidate_shadow_ldt(struct domain *d)
    7.26 +static void __invalidate_shadow_ldt(struct exec_domain *d)
    7.27  {
    7.28      int i;
    7.29      unsigned long pfn;
    7.30 @@ -220,7 +220,7 @@ static void __invalidate_shadow_ldt(stru
    7.31  }
    7.32  
    7.33  
    7.34 -static inline void invalidate_shadow_ldt(struct domain *d)
    7.35 +static inline void invalidate_shadow_ldt(struct exec_domain *d)
    7.36  {
    7.37      if ( d->mm.shadow_ldt_mapcnt != 0 )
    7.38          __invalidate_shadow_ldt(d);
    7.39 @@ -248,13 +248,14 @@ static int alloc_segdesc_page(struct pfn
    7.40  /* Map shadow page at offset @off. */
    7.41  int map_ldt_shadow_page(unsigned int off)
    7.42  {
    7.43 -    struct domain *d = current;
    7.44 +    struct exec_domain *ed = current;
    7.45 +    struct domain *d = ed->domain;
    7.46      unsigned long l1e;
    7.47  
    7.48      if ( unlikely(in_irq()) )
    7.49          BUG();
    7.50  
    7.51 -    __get_user(l1e, (unsigned long *)&linear_pg_table[(d->mm.ldt_base >> 
    7.52 +    __get_user(l1e, (unsigned long *)&linear_pg_table[(ed->mm.ldt_base >> 
    7.53                                                         PAGE_SHIFT) + off]);
    7.54  
    7.55      if ( unlikely(!(l1e & _PAGE_PRESENT)) ||
    7.56 @@ -262,8 +263,8 @@ int map_ldt_shadow_page(unsigned int off
    7.57                                       d, PGT_ldt_page)) )
    7.58          return 0;
    7.59  
    7.60 -    d->mm.perdomain_pt[off + 16] = mk_l1_pgentry(l1e | _PAGE_RW);
    7.61 -    d->mm.shadow_ldt_mapcnt++;
    7.62 +    ed->mm.perdomain_pt[off + 16] = mk_l1_pgentry(l1e | _PAGE_RW);
    7.63 +    ed->mm.shadow_ldt_mapcnt++;
    7.64  
    7.65      return 1;
    7.66  }
    7.67 @@ -389,7 +390,7 @@ get_page_from_l1e(
    7.68      {
    7.69          /* Revert to caller privileges if FD == DOMID_IO. */
    7.70          if ( d == dom_io )
    7.71 -            d = current;
    7.72 +            d = current->domain;
    7.73  
    7.74          if ( IS_PRIV(d) )
    7.75              return 1;
    7.76 @@ -474,7 +475,7 @@ static void put_page_from_l1e(l1_pgentry
    7.77          if ( unlikely(((page->u.inuse.type_info & PGT_type_mask) == 
    7.78                         PGT_ldt_page)) &&
    7.79               unlikely(((page->u.inuse.type_info & PGT_count_mask) != 0)) )
    7.80 -            invalidate_shadow_ldt(e);
    7.81 +            invalidate_shadow_ldt(e->exec_domain[0]);
    7.82          put_page(page);
    7.83      }
    7.84  }
    7.85 @@ -514,7 +515,7 @@ static int alloc_l2_table(struct pfn_inf
    7.86      pl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
    7.87          mk_l2_pgentry((page_nr << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    7.88      pl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
    7.89 -        mk_l2_pgentry(__pa(page->u.inuse.domain->mm.perdomain_pt) | 
    7.90 +        mk_l2_pgentry(__pa(page->u.inuse.domain->exec_domain[0]->mm.perdomain_pt) | 
    7.91                        __PAGE_HYPERVISOR);
    7.92  #endif
    7.93  
    7.94 @@ -625,7 +626,7 @@ static int mod_l2_entry(l2_pgentry_t *pl
    7.95          if ( ((l2_pgentry_val(ol2e) ^ l2_pgentry_val(nl2e)) & ~0xffe) == 0 )
    7.96              return update_l2e(pl2e, ol2e, nl2e);
    7.97  
    7.98 -        if ( unlikely(!get_page_from_l2e(nl2e, pfn, current, 
    7.99 +        if ( unlikely(!get_page_from_l2e(nl2e, pfn, current->domain,
   7.100                                          ((unsigned long)pl2e & 
   7.101                                           ~PAGE_MASK) >> 2)) )
   7.102              return 0;
   7.103 @@ -672,7 +673,7 @@ static int mod_l1_entry(l1_pgentry_t *pl
   7.104  {
   7.105      l1_pgentry_t ol1e;
   7.106      unsigned long _ol1e;
   7.107 -    struct domain *d = current;
   7.108 +    struct domain *d = current->domain;
   7.109  
   7.110      if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
   7.111      {
   7.112 @@ -749,11 +750,11 @@ void free_page_type(struct pfn_info *pag
   7.113          BUG();
   7.114      }
   7.115  
   7.116 -    if ( unlikely(d->mm.shadow_mode) && 
   7.117 -         (get_shadow_status(&d->mm, page_to_pfn(page)) & PSH_shadowed) )
   7.118 +    if ( unlikely(d->exec_domain[0]->mm.shadow_mode) && 
   7.119 +         (get_shadow_status(&d->exec_domain[0]->mm, page_to_pfn(page)) & PSH_shadowed) )
   7.120      {
   7.121          unshadow_table(page_to_pfn(page), type);
   7.122 -        put_shadow_status(&d->mm);
   7.123 +        put_shadow_status(&d->exec_domain[0]->mm);
   7.124      }
   7.125  }
   7.126  
   7.127 @@ -835,11 +836,11 @@ int get_page_type(struct pfn_info *page,
   7.128                   * circumstances should be very rare.
   7.129                   */
   7.130                  struct domain *d = page->u.inuse.domain;
   7.131 -                if ( unlikely(NEED_FLUSH(tlbflush_time[d->processor],
   7.132 +                if ( unlikely(NEED_FLUSH(tlbflush_time[d->exec_domain[0]->processor],
   7.133                                           page->tlbflush_timestamp)) )
   7.134                  {
   7.135                      perfc_incr(need_flush_tlb_flush);
   7.136 -                    flush_tlb_cpu(d->processor);
   7.137 +                    flush_tlb_cpu(d->exec_domain[0]->processor);
   7.138                  }
   7.139  
   7.140                  /* We lose existing type, back pointer, and validity. */
   7.141 @@ -918,7 +919,8 @@ static int do_extended_command(unsigned 
   7.142      unsigned long pfn = ptr >> PAGE_SHIFT;
   7.143      unsigned long old_base_pfn;
   7.144      struct pfn_info *page = &frame_table[pfn];
   7.145 -    struct domain *d = current, *nd, *e;
   7.146 +    struct exec_domain *ed = current;
   7.147 +    struct domain *d = ed->domain, *nd, *e;
   7.148      u32 x, y;
   7.149      domid_t domid;
   7.150      grant_ref_t gntref;
   7.151 @@ -979,15 +981,15 @@ static int do_extended_command(unsigned 
   7.152          okay = get_page_and_type_from_pagenr(pfn, PGT_l2_page_table, d);
   7.153          if ( likely(okay) )
   7.154          {
   7.155 -            invalidate_shadow_ldt(d);
   7.156 +            invalidate_shadow_ldt(ed);
   7.157  
   7.158              percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
   7.159 -            old_base_pfn = pagetable_val(d->mm.pagetable) >> PAGE_SHIFT;
   7.160 -            d->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
   7.161 +            old_base_pfn = pagetable_val(ed->mm.pagetable) >> PAGE_SHIFT;
   7.162 +            ed->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
   7.163  
   7.164 -            shadow_mk_pagetable(&d->mm);
   7.165 +            shadow_mk_pagetable(&ed->mm);
   7.166  
   7.167 -            write_ptbase(&d->mm);
   7.168 +            write_ptbase(&ed->mm);
   7.169  
   7.170              put_page_and_type(&frame_table[old_base_pfn]);
   7.171          }
   7.172 @@ -1028,13 +1030,13 @@ static int do_extended_command(unsigned 
   7.173              okay = 0;
   7.174              MEM_LOG("Bad args to SET_LDT: ptr=%08lx, ents=%08lx", ptr, ents);
   7.175          }
   7.176 -        else if ( (d->mm.ldt_ents != ents) || 
   7.177 -                  (d->mm.ldt_base != ptr) )
   7.178 +        else if ( (ed->mm.ldt_ents != ents) || 
   7.179 +                  (ed->mm.ldt_base != ptr) )
   7.180          {
   7.181 -            invalidate_shadow_ldt(d);
   7.182 -            d->mm.ldt_base = ptr;
   7.183 -            d->mm.ldt_ents = ents;
   7.184 -            load_LDT(d);
   7.185 +            invalidate_shadow_ldt(ed);
   7.186 +            ed->mm.ldt_base = ptr;
   7.187 +            ed->mm.ldt_ents = ents;
   7.188 +            load_LDT(ed);
   7.189              percpu_info[cpu].deferred_ops &= ~DOP_RELOAD_LDT;
   7.190              if ( ents != 0 )
   7.191                  percpu_info[cpu].deferred_ops |= DOP_RELOAD_LDT;
   7.192 @@ -1146,13 +1148,13 @@ static int do_extended_command(unsigned 
   7.193           * Also, a domain mustn't have PGC_allocated pages when it is dying.
   7.194           */
   7.195          ASSERT(e->tot_pages <= e->max_pages);
   7.196 -        if ( unlikely(test_bit(DF_DYING, &e->flags)) ||
   7.197 +        if ( unlikely(test_bit(DF_DYING, &e->d_flags)) ||
   7.198               unlikely(e->tot_pages == e->max_pages) ||
   7.199               unlikely(!gnttab_prepare_for_transfer(e, d, gntref)) )
   7.200          {
   7.201              MEM_LOG("Transferee has no reservation headroom (%d,%d), or "
   7.202                      "provided a bad grant ref, or is dying (%08lx).\n",
   7.203 -                    e->tot_pages, e->max_pages, e->flags);
   7.204 +                    e->tot_pages, e->max_pages, e->d_flags);
   7.205              spin_unlock(&e->page_alloc_lock);
   7.206              put_domain(e);
   7.207              okay = 0;
   7.208 @@ -1206,7 +1208,7 @@ static int do_extended_command(unsigned 
   7.209          }
   7.210  
   7.211          /* A domain shouldn't have PGC_allocated pages when it is dying. */
   7.212 -        if ( unlikely(test_bit(DF_DYING, &e->flags)) ||
   7.213 +        if ( unlikely(test_bit(DF_DYING, &e->d_flags)) ||
   7.214               unlikely(IS_XEN_HEAP_FRAME(page)) )
   7.215          {
   7.216              MEM_LOG("Reassignment page is Xen heap, or dest dom is dying.");
   7.217 @@ -1287,7 +1289,8 @@ int do_mmu_update(mmu_update_t *ureqs, i
   7.218      unsigned int cmd;
   7.219      unsigned long prev_spfn = 0;
   7.220      l1_pgentry_t *prev_spl1e = 0;
   7.221 -    struct domain *d = current;
   7.222 +    struct exec_domain *ed = current;
   7.223 +    struct domain *d = ed->domain;
   7.224      u32 type_info;
   7.225  
   7.226      perfc_incrc(calls_to_mmu_update); 
   7.227 @@ -1318,7 +1321,7 @@ int do_mmu_update(mmu_update_t *ureqs, i
   7.228               * MMU_NORMAL_PT_UPDATE: Normal update to any level of page table.
   7.229               */
   7.230          case MMU_NORMAL_PT_UPDATE:
   7.231 -            if ( unlikely(!get_page_from_pagenr(pfn, current)) )
   7.232 +            if ( unlikely(!get_page_from_pagenr(pfn, current->domain)) )
   7.233              {
   7.234                  MEM_LOG("Could not get page for normal update");
   7.235                  break;
   7.236 @@ -1346,13 +1349,13 @@ int do_mmu_update(mmu_update_t *ureqs, i
   7.237                      okay = mod_l1_entry((l1_pgentry_t *)va, 
   7.238                                          mk_l1_pgentry(req.val)); 
   7.239  
   7.240 -                    if ( unlikely(d->mm.shadow_mode) && okay &&
   7.241 -                         (get_shadow_status(&d->mm, page-frame_table) &
   7.242 +                    if ( unlikely(ed->mm.shadow_mode) && okay &&
   7.243 +                         (get_shadow_status(&ed->mm, page-frame_table) &
   7.244                            PSH_shadowed) )
   7.245                      {
   7.246                          shadow_l1_normal_pt_update(
   7.247                              req.ptr, req.val, &prev_spfn, &prev_spl1e);
   7.248 -                        put_shadow_status(&d->mm);
   7.249 +                        put_shadow_status(&ed->mm);
   7.250                      }
   7.251  
   7.252                      put_page_type(page);
   7.253 @@ -1365,12 +1368,12 @@ int do_mmu_update(mmu_update_t *ureqs, i
   7.254                                          mk_l2_pgentry(req.val),
   7.255                                          pfn); 
   7.256  
   7.257 -                    if ( unlikely(d->mm.shadow_mode) && okay &&
   7.258 -                         (get_shadow_status(&d->mm, page-frame_table) & 
   7.259 +                    if ( unlikely(ed->mm.shadow_mode) && okay &&
   7.260 +                         (get_shadow_status(&ed->mm, page-frame_table) & 
   7.261                            PSH_shadowed) )
   7.262                      {
   7.263                          shadow_l2_normal_pt_update(req.ptr, req.val);
   7.264 -                        put_shadow_status(&d->mm);
   7.265 +                        put_shadow_status(&ed->mm);
   7.266                      }
   7.267  
   7.268                      put_page_type(page);
   7.269 @@ -1403,9 +1406,9 @@ int do_mmu_update(mmu_update_t *ureqs, i
   7.270               * If in log-dirty mode, mark the corresponding pseudo-physical
   7.271               * page as dirty.
   7.272               */
   7.273 -            if ( unlikely(d->mm.shadow_mode == SHM_logdirty) && 
   7.274 -                 mark_dirty(&d->mm, pfn) )
   7.275 -                d->mm.shadow_dirty_block_count++;
   7.276 +            if ( unlikely(ed->mm.shadow_mode == SHM_logdirty) && 
   7.277 +                 mark_dirty(&ed->mm, pfn) )
   7.278 +                ed->mm.shadow_dirty_block_count++;
   7.279  
   7.280              put_page(&frame_table[pfn]);
   7.281              break;
   7.282 @@ -1465,9 +1468,10 @@ int do_update_va_mapping(unsigned long p
   7.283                           unsigned long val, 
   7.284                           unsigned long flags)
   7.285  {
   7.286 -    struct domain *d = current;
   7.287 +    struct exec_domain *ed = current;
   7.288 +    struct domain *d = ed->domain;
   7.289      int err = 0;
   7.290 -    unsigned int cpu = d->processor;
   7.291 +    unsigned int cpu = ed->processor;
   7.292      unsigned long deferred_ops;
   7.293  
   7.294      perfc_incrc(calls_to_update_va);
   7.295 @@ -1486,11 +1490,11 @@ int do_update_va_mapping(unsigned long p
   7.296                                  mk_l1_pgentry(val))) )
   7.297          err = -EINVAL;
   7.298  
   7.299 -    if ( unlikely(d->mm.shadow_mode) )
   7.300 +    if ( unlikely(ed->mm.shadow_mode) )
   7.301      {
   7.302          unsigned long sval;
   7.303  
   7.304 -        l1pte_propagate_from_guest(&d->mm, &val, &sval);
   7.305 +        l1pte_propagate_from_guest(&ed->mm, &val, &sval);
   7.306  
   7.307          if ( unlikely(__put_user(sval, ((unsigned long *)(
   7.308              &shadow_linear_pg_table[page_nr])))) )
   7.309 @@ -1507,10 +1511,10 @@ int do_update_va_mapping(unsigned long p
   7.310           * the PTE in the PT-holding page. We need the machine frame number
   7.311           * for this.
   7.312           */
   7.313 -        if ( d->mm.shadow_mode == SHM_logdirty )
   7.314 +        if ( ed->mm.shadow_mode == SHM_logdirty )
   7.315              mark_dirty(&current->mm, va_to_l1mfn(page_nr << PAGE_SHIFT));  
   7.316    
   7.317 -        check_pagetable(&d->mm, d->mm.pagetable, "va"); /* debug */
   7.318 +        check_pagetable(&ed->mm, ed->mm.pagetable, "va"); /* debug */
   7.319      }
   7.320  
   7.321      deferred_ops = percpu_info[cpu].deferred_ops;
   7.322 @@ -1537,7 +1541,7 @@ int do_update_va_mapping_otherdomain(uns
   7.323      struct domain *d;
   7.324      int rc;
   7.325  
   7.326 -    if ( unlikely(!IS_PRIV(current)) )
   7.327 +    if ( unlikely(!IS_PRIV(current->domain)) )
   7.328          return -EPERM;
   7.329  
   7.330      percpu_info[cpu].foreign = d = find_domain_by_id(domid);
   7.331 @@ -1579,7 +1583,8 @@ void ptwr_flush(const int which)
   7.332      l1_pgentry_t  *sl1e = NULL, *pl1e, ol1e, nl1e;
   7.333      l2_pgentry_t  *pl2e, nl2e;
   7.334      int            i, cpu = smp_processor_id();
   7.335 -    struct domain *d = current;
   7.336 +    struct exec_domain *ed = current;
   7.337 +    struct domain *d = ed->domain;
   7.338  
   7.339      l1va = ptwr_info[cpu].ptinfo[which].l1va;
   7.340      ptep = (unsigned long *)&linear_pg_table[l1va>>PAGE_SHIFT];
   7.341 @@ -1601,15 +1606,15 @@ void ptwr_flush(const int which)
   7.342                  PTWR_PRINT_WHICH, ptep, pte);
   7.343      pte &= ~_PAGE_RW;
   7.344  
   7.345 -    if ( unlikely(d->mm.shadow_mode) )
   7.346 +    if ( unlikely(ed->mm.shadow_mode) )
   7.347      {
   7.348          /* Write-protect the p.t. page in the shadow page table. */
   7.349 -        l1pte_propagate_from_guest(&d->mm, &pte, &spte);
   7.350 +        l1pte_propagate_from_guest(&ed->mm, &pte, &spte);
   7.351          __put_user(
   7.352              spte, (unsigned long *)&shadow_linear_pg_table[l1va>>PAGE_SHIFT]);
   7.353  
   7.354          /* Is the p.t. page itself shadowed? Map it into Xen space if so. */
   7.355 -        sstat = get_shadow_status(&d->mm, pte >> PAGE_SHIFT);
   7.356 +        sstat = get_shadow_status(&ed->mm, pte >> PAGE_SHIFT);
   7.357          if ( sstat & PSH_shadowed )
   7.358              sl1e = map_domain_mem((sstat & PSH_pfn_mask) << PAGE_SHIFT);
   7.359      }
   7.360 @@ -1654,7 +1659,7 @@ void ptwr_flush(const int which)
   7.361              {
   7.362                  if ( unlikely(sl1e != NULL) )
   7.363                      l1pte_propagate_from_guest(
   7.364 -                        &d->mm, &l1_pgentry_val(nl1e), 
   7.365 +                        &ed->mm, &l1_pgentry_val(nl1e), 
   7.366                          &l1_pgentry_val(sl1e[i]));
   7.367                  put_page_type(&frame_table[l1_pgentry_to_pagenr(nl1e)]);
   7.368              }
   7.369 @@ -1677,7 +1682,7 @@ void ptwr_flush(const int which)
   7.370          
   7.371          if ( unlikely(sl1e != NULL) )
   7.372              l1pte_propagate_from_guest(
   7.373 -                &d->mm, &l1_pgentry_val(nl1e), &l1_pgentry_val(sl1e[i]));
   7.374 +                &ed->mm, &l1_pgentry_val(nl1e), &l1_pgentry_val(sl1e[i]));
   7.375  
   7.376          if ( unlikely(l1_pgentry_val(ol1e) & _PAGE_PRESENT) )
   7.377              put_page_from_l1e(ol1e, d);
   7.378 @@ -1688,7 +1693,7 @@ void ptwr_flush(const int which)
   7.379       * STEP 3. Reattach the L1 p.t. page into the current address space.
   7.380       */
   7.381  
   7.382 -    if ( (which == PTWR_PT_ACTIVE) && likely(!d->mm.shadow_mode) )
   7.383 +    if ( (which == PTWR_PT_ACTIVE) && likely(!ed->mm.shadow_mode) )
   7.384      {
   7.385          pl2e = &linear_l2_table[ptwr_info[cpu].ptinfo[which].l2_idx];
   7.386          nl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT);
   7.387 @@ -1704,7 +1709,7 @@ void ptwr_flush(const int which)
   7.388      if ( unlikely(sl1e != NULL) )
   7.389      {
   7.390          unmap_domain_mem(sl1e);
   7.391 -        put_shadow_status(&d->mm);
   7.392 +        put_shadow_status(&ed->mm);
   7.393      }
   7.394  }
   7.395  
     8.1 --- a/xen/arch/x86/setup.c	Mon Nov 08 15:29:51 2004 +0000
     8.2 +++ b/xen/arch/x86/setup.c	Tue Nov 09 10:51:49 2004 +0000
     8.3 @@ -38,7 +38,7 @@ EXPORT_SYMBOL(mmu_cr4_features);
     8.4  
     8.5  unsigned long wait_init_idle;
     8.6  
     8.7 -struct domain *idle_task[NR_CPUS] = { &idle0_task };
     8.8 +struct exec_domain *idle_task[NR_CPUS] = { &idle0_exec_domain };
     8.9  
    8.10  #ifdef	CONFIG_ACPI_INTERPRETER
    8.11  int acpi_disabled = 0;
     9.1 --- a/xen/arch/x86/shadow.c	Mon Nov 08 15:29:51 2004 +0000
     9.2 +++ b/xen/arch/x86/shadow.c	Tue Nov 09 10:51:49 2004 +0000
     9.3 @@ -171,7 +171,7 @@ void shadow_mode_init(void)
     9.4  
     9.5  int shadow_mode_enable(struct domain *p, unsigned int mode)
     9.6  {
     9.7 -    struct mm_struct *m = &p->mm;
     9.8 +    struct mm_struct *m = &p->exec_domain[0]->mm;
     9.9  
    9.10      m->shadow_ht = xmalloc(
    9.11          shadow_ht_buckets * sizeof(struct shadow_status));
    9.12 @@ -206,7 +206,7 @@ int shadow_mode_enable(struct domain *p,
    9.13  
    9.14  void __shadow_mode_disable(struct domain *d)
    9.15  {
    9.16 -    struct mm_struct *m = &d->mm;
    9.17 +    struct mm_struct *m = &d->exec_domain[0]->mm;
    9.18      struct shadow_status *x, *n;
    9.19  
    9.20      free_shadow_state(m);
    9.21 @@ -243,7 +243,7 @@ static int shadow_mode_table_op(
    9.22      struct domain *d, dom0_shadow_control_t *sc)
    9.23  {
    9.24      unsigned int      op = sc->op;
    9.25 -    struct mm_struct *m = &d->mm;
    9.26 +    struct mm_struct *m = &d->exec_domain[0]->mm;
    9.27      int               i, rc = 0;
    9.28  
    9.29      ASSERT(spin_is_locked(&m->shadow_lock));
    9.30 @@ -356,7 +356,7 @@ int shadow_mode_control(struct domain *d
    9.31      unsigned int op = sc->op;
    9.32      int          rc = 0;
    9.33  
    9.34 -    if ( unlikely(d == current) )
    9.35 +    if ( unlikely(d == current->domain) )
    9.36      {
    9.37          DPRINTK("Don't try to do a shadow op on yourself!\n");
    9.38          return -EINVAL;
    9.39 @@ -365,7 +365,7 @@ int shadow_mode_control(struct domain *d
    9.40      domain_pause(d);
    9.41      synchronise_pagetables(~0UL);
    9.42  
    9.43 -    shadow_lock(&d->mm);
    9.44 +    shadow_lock(&d->exec_domain[0]->mm);
    9.45  
    9.46      switch ( op )
    9.47      {
    9.48 @@ -384,11 +384,11 @@ int shadow_mode_control(struct domain *d
    9.49          break;
    9.50  
    9.51      default:
    9.52 -        rc = shadow_mode(d) ? shadow_mode_table_op(d, sc) : -EINVAL;
    9.53 +        rc = shadow_mode(d->exec_domain[0]) ? shadow_mode_table_op(d, sc) : -EINVAL;
    9.54          break;
    9.55      }
    9.56  
    9.57 -    shadow_unlock(&d->mm);
    9.58 +    shadow_unlock(&d->exec_domain[0]->mm);
    9.59  
    9.60      domain_unpause(d);
    9.61  
    9.62 @@ -428,9 +428,9 @@ void unshadow_table(unsigned long gpfn, 
    9.63       * guests there won't be a race here as this CPU was the one that 
    9.64       * cmpxchg'ed the page to invalid.
    9.65       */
    9.66 -    spfn = __shadow_status(&d->mm, gpfn) & PSH_pfn_mask;
    9.67 -    delete_shadow_status(&d->mm, gpfn);
    9.68 -    free_shadow_page(&d->mm, &frame_table[spfn]);
    9.69 +    spfn = __shadow_status(&d->exec_domain[0]->mm, gpfn) & PSH_pfn_mask;
    9.70 +    delete_shadow_status(&d->exec_domain[0]->mm, gpfn);
    9.71 +    free_shadow_page(&d->exec_domain[0]->mm, &frame_table[spfn]);
    9.72  }
    9.73  
    9.74  unsigned long shadow_l2_table( 
    9.75 @@ -473,7 +473,7 @@ unsigned long shadow_l2_table(
    9.76      spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
    9.77          mk_l2_pgentry((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    9.78      spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
    9.79 -        mk_l2_pgentry(__pa(frame_table[gpfn].u.inuse.domain->mm.perdomain_pt) |
    9.80 +        mk_l2_pgentry(__pa(frame_table[gpfn].u.inuse.domain->exec_domain[0]->mm.perdomain_pt) |
    9.81                        __PAGE_HYPERVISOR);
    9.82  #endif
    9.83  
    10.1 --- a/xen/arch/x86/smpboot.c	Mon Nov 08 15:29:51 2004 +0000
    10.2 +++ b/xen/arch/x86/smpboot.c	Tue Nov 09 10:51:49 2004 +0000
    10.3 @@ -647,6 +647,7 @@ static void __init do_boot_cpu (int apic
    10.4   */
    10.5  {
    10.6      struct domain *idle;
    10.7 +    struct exec_domain *ed;
    10.8      unsigned long boot_error = 0;
    10.9      int timeout, cpu;
   10.10      unsigned long start_eip, stack;
   10.11 @@ -656,17 +657,19 @@ static void __init do_boot_cpu (int apic
   10.12      if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
   10.13          panic("failed 'createdomain' for CPU %d", cpu);
   10.14  
   10.15 -    set_bit(DF_IDLETASK, &idle->flags);
   10.16 +    ed = idle->exec_domain[0];
   10.17  
   10.18 -    idle->mm.pagetable = mk_pagetable(__pa(idle_pg_table));
   10.19 +    set_bit(DF_IDLETASK, &idle->d_flags);
   10.20 +
   10.21 +    ed->mm.pagetable = mk_pagetable(__pa(idle_pg_table));
   10.22  
   10.23      map_cpu_to_boot_apicid(cpu, apicid);
   10.24  
   10.25  #if defined(__i386__)
   10.26 -    SET_DEFAULT_FAST_TRAP(&idle->thread);
   10.27 +    SET_DEFAULT_FAST_TRAP(&ed->thread);
   10.28  #endif
   10.29  
   10.30 -    idle_task[cpu] = idle;
   10.31 +    idle_task[cpu] = ed;
   10.32  
   10.33      /* start_eip had better be page-aligned! */
   10.34      start_eip = setup_trampoline();
    11.1 --- a/xen/arch/x86/traps.c	Mon Nov 08 15:29:51 2004 +0000
    11.2 +++ b/xen/arch/x86/traps.c	Tue Nov 09 10:51:49 2004 +0000
    11.3 @@ -207,7 +207,7 @@ static inline void do_trap(int trapnr, c
    11.4                             struct xen_regs *regs, 
    11.5                             long error_code, int use_error_code)
    11.6  {
    11.7 -    struct domain *p = current;
    11.8 +    struct exec_domain *ed = current;
    11.9      struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
   11.10      trap_info_t *ti;
   11.11      unsigned long fixup;
   11.12 @@ -221,7 +221,7 @@ static inline void do_trap(int trapnr, c
   11.13      gtb->cs         = ti->cs;
   11.14      gtb->eip        = ti->address;
   11.15      if ( TI_GET_IF(ti) )
   11.16 -        p->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
   11.17 +        ed->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
   11.18      return; 
   11.19  
   11.20   xen_fault:
   11.21 @@ -267,7 +267,7 @@ DO_ERROR_NOCODE(19, "simd error", simd_c
   11.22  
   11.23  asmlinkage void do_int3(struct xen_regs *regs, long error_code)
   11.24  {
   11.25 -    struct domain *p = current;
   11.26 +    struct exec_domain *ed = current;
   11.27      struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
   11.28      trap_info_t *ti;
   11.29  
   11.30 @@ -293,7 +293,7 @@ asmlinkage void do_int3(struct xen_regs 
   11.31      gtb->cs         = ti->cs;
   11.32      gtb->eip        = ti->address;
   11.33      if ( TI_GET_IF(ti) )
   11.34 -        p->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
   11.35 +        ed->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
   11.36  }
   11.37  
   11.38  asmlinkage void do_double_fault(void)
   11.39 @@ -332,9 +332,10 @@ asmlinkage void do_page_fault(struct xen
   11.40      struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
   11.41      trap_info_t *ti;
   11.42      unsigned long off, addr, fixup;
   11.43 -    struct domain *d = current;
   11.44 +    struct exec_domain *ed = current;
   11.45 +    struct domain *d = ed->domain;
   11.46      extern int map_ldt_shadow_page(unsigned int);
   11.47 -    int cpu = d->processor;
   11.48 +    int cpu = ed->processor;
   11.49  
   11.50      __asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : );
   11.51  
   11.52 @@ -356,19 +357,19 @@ asmlinkage void do_page_fault(struct xen
   11.53              return;
   11.54      }
   11.55  
   11.56 -    if ( unlikely(d->mm.shadow_mode) && 
   11.57 +    if ( unlikely(ed->mm.shadow_mode) && 
   11.58           (addr < PAGE_OFFSET) && shadow_fault(addr, error_code) )
   11.59          return; /* Returns TRUE if fault was handled. */
   11.60  
   11.61      if ( unlikely(addr >= LDT_VIRT_START) && 
   11.62 -         (addr < (LDT_VIRT_START + (d->mm.ldt_ents*LDT_ENTRY_SIZE))) )
   11.63 +         (addr < (LDT_VIRT_START + (ed->mm.ldt_ents*LDT_ENTRY_SIZE))) )
   11.64      {
   11.65          /*
   11.66           * Copy a mapping from the guest's LDT, if it is valid. Otherwise we
   11.67           * send the fault up to the guest OS to be handled.
   11.68           */
   11.69          off  = addr - LDT_VIRT_START;
   11.70 -        addr = d->mm.ldt_base + off;
   11.71 +        addr = ed->mm.ldt_base + off;
   11.72          if ( likely(map_ldt_shadow_page(off >> PAGE_SHIFT)) )
   11.73              return; /* successfully copied the mapping */
   11.74      }
   11.75 @@ -376,14 +377,14 @@ asmlinkage void do_page_fault(struct xen
   11.76      if ( unlikely(!(regs->xcs & 3)) )
   11.77          goto xen_fault;
   11.78  
   11.79 -    ti = d->thread.traps + 14;
   11.80 +    ti = ed->thread.traps + 14;
   11.81      gtb->flags = GTBF_TRAP_CR2; /* page fault pushes %cr2 */
   11.82      gtb->cr2        = addr;
   11.83      gtb->error_code = error_code;
   11.84      gtb->cs         = ti->cs;
   11.85      gtb->eip        = ti->address;
   11.86      if ( TI_GET_IF(ti) )
   11.87 -        d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
   11.88 +        ed->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
   11.89      return; 
   11.90  
   11.91   xen_fault:
   11.92 @@ -391,7 +392,7 @@ asmlinkage void do_page_fault(struct xen
   11.93      if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
   11.94      {
   11.95          perfc_incrc(copy_user_faults);
   11.96 -        if ( !d->mm.shadow_mode )
   11.97 +        if ( !ed->mm.shadow_mode )
   11.98              DPRINTK("Page fault: %08lx -> %08lx\n", regs->eip, fixup);
   11.99          regs->eip = fixup;
  11.100          regs->xds = regs->xes = regs->xfs = regs->xgs = __HYPERVISOR_DS;
  11.101 @@ -435,7 +436,8 @@ asmlinkage void do_page_fault(struct xen
  11.102  
  11.103  asmlinkage void do_general_protection(struct xen_regs *regs, long error_code)
  11.104  {
  11.105 -    struct domain *d = current;
  11.106 +    struct exec_domain *ed = current;
  11.107 +    struct domain *d = ed->domain;
  11.108      struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
  11.109      trap_info_t *ti;
  11.110      unsigned long fixup;
  11.111 @@ -500,7 +502,7 @@ asmlinkage void do_general_protection(st
  11.112      gtb->cs         = ti->cs;
  11.113      gtb->eip        = ti->address;
  11.114      if ( TI_GET_IF(ti) )
  11.115 -        d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
  11.116 +        ed->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
  11.117      return;
  11.118  
  11.119   gp_in_kernel:
  11.120 @@ -582,10 +584,10 @@ static void nmi_softirq(void)
  11.121          return;
  11.122  
  11.123      if ( test_and_clear_bit(0, &nmi_softirq_reason) )
  11.124 -        send_guest_virq(dom0, VIRQ_PARITY_ERR);
  11.125 +        send_guest_virq(dom0->exec_domain[0], VIRQ_PARITY_ERR);
  11.126  
  11.127      if ( test_and_clear_bit(1, &nmi_softirq_reason) )
  11.128 -        send_guest_virq(dom0, VIRQ_IO_ERR);
  11.129 +        send_guest_virq(dom0->exec_domain[0], VIRQ_IO_ERR);
  11.130  }
  11.131  
  11.132  asmlinkage void math_state_restore(struct xen_regs *regs, long error_code)
  11.133 @@ -593,16 +595,16 @@ asmlinkage void math_state_restore(struc
  11.134      /* Prevent recursion. */
  11.135      clts();
  11.136  
  11.137 -    if ( !test_bit(DF_USEDFPU, &current->flags) )
  11.138 +    if ( !test_bit(EDF_USEDFPU, &current->ed_flags) )
  11.139      {
  11.140 -        if ( test_bit(DF_DONEFPUINIT, &current->flags) )
  11.141 +        if ( test_bit(EDF_DONEFPUINIT, &current->ed_flags) )
  11.142              restore_fpu(current);
  11.143          else
  11.144              init_fpu();
  11.145 -        set_bit(DF_USEDFPU, &current->flags); /* so we fnsave on switch_to() */
  11.146 +        set_bit(EDF_USEDFPU, &current->ed_flags); /* so we fnsave on switch_to() */
  11.147      }
  11.148  
  11.149 -    if ( test_and_clear_bit(DF_GUEST_STTS, &current->flags) )
  11.150 +    if ( test_and_clear_bit(EDF_GUEST_STTS, &current->ed_flags) )
  11.151      {
  11.152          struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
  11.153          gtb->flags      = GTBF_TRAP_NOCODE;
  11.154 @@ -637,7 +639,7 @@ asmlinkage void do_pdb_debug(struct xen_
  11.155  asmlinkage void do_debug(struct xen_regs *regs, long error_code)
  11.156  {
  11.157      unsigned int condition;
  11.158 -    struct domain *tsk = current;
  11.159 +    struct exec_domain *tsk = current;
  11.160      struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
  11.161  
  11.162  #ifdef XEN_DEBUGGER
  11.163 @@ -831,7 +833,7 @@ long do_set_callbacks(unsigned long even
  11.164                        unsigned long failsafe_selector,
  11.165                        unsigned long failsafe_address)
  11.166  {
  11.167 -    struct domain *p = current;
  11.168 +    struct exec_domain *p = current;
  11.169  
  11.170      if ( !VALID_CODESEL(event_selector) || !VALID_CODESEL(failsafe_selector) )
  11.171          return -EPERM;
  11.172 @@ -845,7 +847,7 @@ long do_set_callbacks(unsigned long even
  11.173  }
  11.174  
  11.175  
  11.176 -long set_fast_trap(struct domain *p, int idx)
  11.177 +long set_fast_trap(struct exec_domain *p, int idx)
  11.178  {
  11.179      trap_info_t *ti;
  11.180  
  11.181 @@ -898,13 +900,13 @@ long do_set_fast_trap(int idx)
  11.182  
  11.183  long do_fpu_taskswitch(void)
  11.184  {
  11.185 -    set_bit(DF_GUEST_STTS, &current->flags);
  11.186 +    set_bit(EDF_GUEST_STTS, &current->ed_flags);
  11.187      stts();
  11.188      return 0;
  11.189  }
  11.190  
  11.191  
  11.192 -long set_debugreg(struct domain *p, int reg, unsigned long value)
  11.193 +long set_debugreg(struct exec_domain *p, int reg, unsigned long value)
  11.194  {
  11.195      int i;
  11.196  
    12.1 --- a/xen/arch/x86/x86_32/mm.c	Mon Nov 08 15:29:51 2004 +0000
    12.2 +++ b/xen/arch/x86/x86_32/mm.c	Tue Nov 09 10:51:49 2004 +0000
    12.3 @@ -135,8 +135,8 @@ void __init zap_low_mappings(void)
    12.4   */
    12.5  static void __synchronise_pagetables(void *mask)
    12.6  {
    12.7 -    struct domain *d = current;
    12.8 -    if ( ((unsigned long)mask & (1<<d->processor)) && is_idle_task(d) )
    12.9 +    struct exec_domain *d = current;
   12.10 +    if ( ((unsigned long)mask & (1<<d->processor)) && is_idle_task(d->domain) )
   12.11          write_ptbase(&d->mm);
   12.12  }
   12.13  void synchronise_pagetables(unsigned long cpu_mask)
   12.14 @@ -242,22 +242,26 @@ int check_descriptor(unsigned long *d)
   12.15  
   12.16  void destroy_gdt(struct domain *d)
   12.17  {
   12.18 +    struct exec_domain *ed;
   12.19      int i;
   12.20      unsigned long pfn;
   12.21  
   12.22 -    for ( i = 0; i < 16; i++ )
   12.23 -    {
   12.24 -        if ( (pfn = l1_pgentry_to_pagenr(d->mm.perdomain_pt[i])) != 0 )
   12.25 -            put_page_and_type(&frame_table[pfn]);
   12.26 -        d->mm.perdomain_pt[i] = mk_l1_pgentry(0);
   12.27 +    for_each_exec_domain(d, ed) {
   12.28 +        for ( i = 0; i < 16; i++ )
   12.29 +        {
   12.30 +            if ( (pfn = l1_pgentry_to_pagenr(ed->mm.perdomain_pt[i])) != 0 )
   12.31 +                put_page_and_type(&frame_table[pfn]);
   12.32 +            ed->mm.perdomain_pt[i] = mk_l1_pgentry(0);
   12.33 +        }
   12.34      }
   12.35  }
   12.36  
   12.37  
   12.38 -long set_gdt(struct domain *d, 
   12.39 +long set_gdt(struct exec_domain *ed, 
   12.40               unsigned long *frames,
   12.41               unsigned int entries)
   12.42  {
   12.43 +    struct domain *d = ed->domain;
   12.44      /* NB. There are 512 8-byte entries per GDT page. */
   12.45      int i = 0, nr_pages = (entries + 511) / 512;
   12.46      struct desc_struct *vgdt;
   12.47 @@ -302,11 +306,11 @@ long set_gdt(struct domain *d,
   12.48  
   12.49      /* Install the new GDT. */
   12.50      for ( i = 0; i < nr_pages; i++ )
   12.51 -        d->mm.perdomain_pt[i] =
   12.52 +        ed->mm.perdomain_pt[i] =
   12.53              mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   12.54  
   12.55 -    SET_GDT_ADDRESS(d, GDT_VIRT_START);
   12.56 -    SET_GDT_ENTRIES(d, entries);
   12.57 +    SET_GDT_ADDRESS(ed, GDT_VIRT_START);
   12.58 +    SET_GDT_ENTRIES(ed, entries);
   12.59  
   12.60      return 0;
   12.61  
   12.62 @@ -353,7 +357,7 @@ long do_update_descriptor(
   12.63          return -EINVAL;
   12.64  
   12.65      page = &frame_table[pfn];
   12.66 -    if ( unlikely(!get_page(page, current)) )
   12.67 +    if ( unlikely(!get_page(page, current->domain)) )
   12.68          return -EINVAL;
   12.69  
   12.70      /* Check if the given frame is in use in an unsafe context. */
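
Note: destroy_gdt() above is among the first users of for_each_exec_domain. The macro itself is not in these hunks; a loop that is plausibly equivalent, assuming d->exec_domain[] is an array of MAX_VIRT_CPUS pointers with unused slots left NULL (the real definition lives in xen/include/xen/sched.h and may differ):

    static void example_walk(struct domain *d)   /* hypothetical helper */
    {
        struct exec_domain *ed;
        int vcpu;

        for ( vcpu = 0; vcpu < MAX_VIRT_CPUS; vcpu++ )
        {
            if ( (ed = d->exec_domain[vcpu]) == NULL )
                continue;
            /* per-vcpu work, e.g. tearing down ed->mm.perdomain_pt */
        }
    }
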
    13.1 --- a/xen/arch/x86/x86_32/seg_fixup.c	Mon Nov 08 15:29:51 2004 +0000
    13.2 +++ b/xen/arch/x86/x86_32/seg_fixup.c	Tue Nov 09 10:51:49 2004 +0000
    13.3 @@ -105,7 +105,7 @@ static unsigned char insn_decode[256] = 
    13.4   */
    13.5  int get_baselimit(u16 seg, unsigned long *base, unsigned long *limit)
    13.6  {
    13.7 -    struct domain *d = current;
    13.8 +    struct exec_domain *d = current;
    13.9      unsigned long *table, a, b;
   13.10      int            ldt = !!(seg & 4);
   13.11      int            idx = (seg >> 3) & 8191;
   13.12 @@ -171,7 +171,7 @@ int linearise_address(u16 seg, unsigned 
   13.13  
   13.14  int fixup_seg(u16 seg, int positive_access)
   13.15  {
   13.16 -    struct domain *d = current;
   13.17 +    struct exec_domain *d = current;
   13.18      unsigned long *table, a, b, base, limit;
   13.19      int            ldt = !!(seg & 4);
   13.20      int            idx = (seg >> 3) & 8191;
   13.21 @@ -284,7 +284,7 @@ void *decode_reg(struct xen_regs *regs, 
   13.22   */
   13.23  int gpf_emulate_4gb(struct xen_regs *regs)
   13.24  {
   13.25 -    struct domain *d = current;
   13.26 +    struct exec_domain *d = current;
   13.27      trap_info_t   *ti;
   13.28      struct guest_trap_bounce *gtb;
   13.29      u8            modrm, mod, reg, rm, decode;
   13.30 @@ -463,7 +463,7 @@ int gpf_emulate_4gb(struct xen_regs *reg
   13.31      perfc_incrc(seg_fixups);
   13.32  
   13.33      /* If requested, give a callback on otherwise unused vector 15. */
   13.34 -    if ( VM_ASSIST(d, VMASST_TYPE_4gb_segments_notify) )
   13.35 +    if ( VM_ASSIST(d->domain, VMASST_TYPE_4gb_segments_notify) )
   13.36      {
   13.37          ti  = &d->thread.traps[15];
   13.38          gtb = &guest_trap_bounce[d->processor];
    14.1 --- a/xen/common/Makefile	Mon Nov 08 15:29:51 2004 +0000
    14.2 +++ b/xen/common/Makefile	Tue Nov 09 10:51:49 2004 +0000
    14.3 @@ -19,6 +19,9 @@ ifneq ($(trace),y)
    14.4  OBJS := $(subst trace.o,,$(OBJS))
    14.5  endif
    14.6  
    14.7 +OBJS := $(subst sched_atropos.o,,$(OBJS))
    14.8 +OBJS := $(subst sched_rrobin.o,,$(OBJS))
    14.9 +
   14.10  default: $(OBJS)
   14.11  	$(LD) $(LDFLAGS) -r -o common.o $(OBJS)
   14.12  
    15.1 --- a/xen/common/dom0_ops.c	Mon Nov 08 15:29:51 2004 +0000
    15.2 +++ b/xen/common/dom0_ops.c	Tue Nov 09 10:51:49 2004 +0000
    15.3 @@ -26,7 +26,7 @@
    15.4  extern unsigned int alloc_new_dom_mem(struct domain *, unsigned int);
    15.5  extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op);
    15.6  extern void arch_getdomaininfo_ctxt(
    15.7 -    struct domain *, full_execution_context_t *);
    15.8 +    struct exec_domain *, full_execution_context_t *);
    15.9  
   15.10  static inline int is_free_domid(domid_t dom)
   15.11  {
   15.12 @@ -96,7 +96,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   15.13      long ret = 0;
   15.14      dom0_op_t curop, *op = &curop;
   15.15  
   15.16 -    if ( !IS_PRIV(current) )
   15.17 +    if ( !IS_PRIV(current->domain) )
   15.18          return -EPERM;
   15.19  
   15.20      if ( copy_from_user(op, u_dom0_op, sizeof(*op)) )
   15.21 @@ -131,7 +131,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   15.22          if ( d != NULL )
   15.23          {
   15.24              ret = -EINVAL;
   15.25 -            if ( d != current )
   15.26 +            if ( d != current->domain )
   15.27              {
   15.28                  domain_pause_by_systemcontroller(d);
   15.29                  ret = 0;
   15.30 @@ -148,7 +148,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   15.31          if ( d != NULL )
   15.32          {
   15.33              ret = -EINVAL;
   15.34 -            if ( test_bit(DF_CONSTRUCTED, &d->flags) )
   15.35 +            if ( test_bit(DF_CONSTRUCTED, &d->d_flags) )
   15.36              {
   15.37                  domain_unpause_by_systemcontroller(d);
   15.38                  ret = 0;
   15.39 @@ -178,11 +178,14 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   15.40          {
   15.41              /* Do an initial placement. Pick the least-populated CPU. */
   15.42              struct domain *d;
   15.43 +            struct exec_domain *ed;
   15.44              unsigned int i, cnt[NR_CPUS] = { 0 };
   15.45  
   15.46              read_lock(&domlist_lock);
   15.47 -            for_each_domain ( d )
   15.48 -                cnt[d->processor]++;
   15.49 +            for_each_domain ( d ) {
   15.50 +                for_each_exec_domain ( d, ed )
   15.51 +                    cnt[ed->processor]++;
   15.52 +            }
   15.53              read_unlock(&domlist_lock);
   15.54  
   15.55              for ( i = 0; i < smp_num_cpus; i++ )
   15.56 @@ -217,7 +220,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   15.57          if ( d != NULL )
   15.58          {
   15.59              ret = -EINVAL;
   15.60 -            if ( d != current )
   15.61 +            if ( d != current->domain )
   15.62              {
   15.63                  domain_kill(d);
   15.64                  ret = 0;
   15.65 @@ -231,6 +234,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   15.66      {
   15.67          domid_t dom = op->u.pincpudomain.domain;
   15.68          struct domain *d = find_domain_by_id(dom);
   15.69 +        struct exec_domain *ed;
   15.70          int cpu = op->u.pincpudomain.cpu;
   15.71  
   15.72          if ( d == NULL )
   15.73 @@ -239,7 +243,15 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   15.74              break;
   15.75          }
   15.76          
   15.77 -        if ( d == current )
   15.78 +        ed = d->exec_domain[op->u.pincpudomain.exec_domain];
   15.79 +        if ( ed == NULL )
   15.80 +        {
   15.81 +            ret = -ESRCH;
   15.82 +            put_domain(d);
   15.83 +            break;
   15.84 +        }
   15.85 +
   15.86 +        if ( ed == current )
   15.87          {
   15.88              ret = -EINVAL;
   15.89              put_domain(d);
   15.90 @@ -248,17 +260,17 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   15.91  
   15.92          if ( cpu == -1 )
   15.93          {
   15.94 -            clear_bit(DF_CPUPINNED, &d->flags);
   15.95 +            clear_bit(EDF_CPUPINNED, &ed->ed_flags);
   15.96          }
   15.97          else
   15.98          {
   15.99 -            domain_pause(d);
  15.100 +            exec_domain_pause(ed);
  15.101              synchronise_pagetables(~0UL);
  15.102 -            if ( d->processor != (cpu % smp_num_cpus) )
  15.103 -                set_bit(DF_MIGRATED, &d->flags);
  15.104 -            set_bit(DF_CPUPINNED, &d->flags);
  15.105 -            d->processor = cpu % smp_num_cpus;
  15.106 -            domain_unpause(d);
  15.107 +            if ( ed->processor != (cpu % smp_num_cpus) )
  15.108 +                set_bit(EDF_MIGRATED, &ed->ed_flags);
  15.109 +            set_bit(EDF_CPUPINNED, &ed->ed_flags);
  15.110 +            ed->processor = cpu % smp_num_cpus;
  15.111 +            exec_domain_unpause(ed);
  15.112          }
  15.113  
  15.114          put_domain(d);
  15.115 @@ -321,6 +333,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  15.116      { 
  15.117          full_execution_context_t *c;
  15.118          struct domain            *d;
  15.119 +        struct exec_domain       *ed;
  15.120  
  15.121          read_lock(&domlist_lock);
  15.122  
  15.123 @@ -340,24 +353,26 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  15.124          read_unlock(&domlist_lock);
  15.125  
  15.126          op->u.getdomaininfo.domain = d->id;
  15.127 -        
  15.128 +
   15.129 +        ed = d->exec_domain[0]; /* TODO: select via op->u.getdomaininfo.exec_domain */
  15.130 +
  15.131          op->u.getdomaininfo.flags =
  15.132 -            (test_bit(DF_DYING,     &d->flags) ? DOMFLAGS_DYING    : 0) |
  15.133 -            (test_bit(DF_CRASHED,   &d->flags) ? DOMFLAGS_CRASHED  : 0) |
  15.134 -            (test_bit(DF_SHUTDOWN,  &d->flags) ? DOMFLAGS_SHUTDOWN : 0) |
  15.135 -            (test_bit(DF_CTRLPAUSE, &d->flags) ? DOMFLAGS_PAUSED   : 0) |
  15.136 -            (test_bit(DF_BLOCKED,   &d->flags) ? DOMFLAGS_BLOCKED  : 0) |
  15.137 -            (test_bit(DF_RUNNING,   &d->flags) ? DOMFLAGS_RUNNING  : 0);
  15.138 +            (test_bit( DF_DYING,      &d->d_flags)  ? DOMFLAGS_DYING    : 0) |
  15.139 +            (test_bit( DF_CRASHED,    &d->d_flags)  ? DOMFLAGS_CRASHED  : 0) |
  15.140 +            (test_bit( DF_SHUTDOWN,   &d->d_flags)  ? DOMFLAGS_SHUTDOWN : 0) |
  15.141 +            (test_bit(EDF_CTRLPAUSE, &ed->ed_flags) ? DOMFLAGS_PAUSED   : 0) |
  15.142 +            (test_bit(EDF_BLOCKED,   &ed->ed_flags) ? DOMFLAGS_BLOCKED  : 0) |
  15.143 +            (test_bit(EDF_RUNNING,   &ed->ed_flags) ? DOMFLAGS_RUNNING  : 0);
  15.144  
  15.145 -        op->u.getdomaininfo.flags |= d->processor << DOMFLAGS_CPUSHIFT;
  15.146 +        op->u.getdomaininfo.flags |= ed->processor << DOMFLAGS_CPUSHIFT;
  15.147          op->u.getdomaininfo.flags |= 
  15.148              d->shutdown_code << DOMFLAGS_SHUTDOWNSHIFT;
  15.149  
  15.150          op->u.getdomaininfo.tot_pages   = d->tot_pages;
  15.151          op->u.getdomaininfo.max_pages   = d->max_pages;
  15.152 -        op->u.getdomaininfo.cpu_time    = d->cpu_time;
  15.153 +        op->u.getdomaininfo.cpu_time    = ed->cpu_time;
  15.154          op->u.getdomaininfo.shared_info_frame = 
  15.155 -            __pa(d->shared_info) >> PAGE_SHIFT;
  15.156 +            __pa(ed->shared_info) >> PAGE_SHIFT;
  15.157  
  15.158          if ( op->u.getdomaininfo.ctxt != NULL )
  15.159          {
  15.160 @@ -368,13 +383,13 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  15.161                  break;
  15.162              }
  15.163  
  15.164 -            if ( d != current )
  15.165 -                domain_pause(d);
  15.166 +            if ( ed != current )
  15.167 +                exec_domain_pause(ed);
  15.168  
  15.169 -            arch_getdomaininfo_ctxt(d,c);
  15.170 +            arch_getdomaininfo_ctxt(ed,c);
  15.171  
  15.172 -            if ( d != current )
  15.173 -                domain_unpause(d);
  15.174 +            if ( ed != current )
  15.175 +                exec_domain_unpause(ed);
  15.176  
  15.177              if ( copy_to_user(op->u.getdomaininfo.ctxt, c, sizeof(*c)) )
  15.178                  ret = -EINVAL;
  15.179 @@ -524,7 +539,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  15.180          if ( d != NULL )
  15.181          { 
  15.182              /* should only be used *before* domain is built. */
  15.183 -            if ( !test_bit(DF_CONSTRUCTED, &d->flags) )
  15.184 +            if ( !test_bit(DF_CONSTRUCTED, &d->d_flags) )
  15.185                  ret = alloc_new_dom_mem( 
  15.186                      d, op->u.setdomaininitialmem.initial_memkb );
  15.187              else
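
Note: DOM0_PINCPUDOMAIN now targets a single vcpu rather than a whole domain. A hypothetical caller, assuming the exec_domain field shown in the hunk above; the op-issuing path is a sketch, not taken from this changeset:

    dom0_op_t op;

    op.cmd                        = DOM0_PINCPUDOMAIN;
    op.u.pincpudomain.domain      = 7;    /* hypothetical target domain   */
    op.u.pincpudomain.exec_domain = 0;    /* new field: which vcpu to pin */
    op.u.pincpudomain.cpu         = 1;    /* -1 clears EDF_CPUPINNED      */
    (void)do_dom0_op(&op);                /* illustrative invocation only */
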
    16.1 --- a/xen/common/dom_mem_ops.c	Mon Nov 08 15:29:51 2004 +0000
    16.2 +++ b/xen/common/dom_mem_ops.c	Tue Nov 09 10:51:49 2004 +0000
    16.3 @@ -27,7 +27,7 @@ static long alloc_dom_mem(struct domain 
    16.4                               nr_extents*sizeof(*extent_list))) )
    16.5          return 0;
    16.6  
    16.7 -    if ( (extent_order != 0) && !IS_CAPABLE_PHYSDEV(current) )
    16.8 +    if ( (extent_order != 0) && !IS_CAPABLE_PHYSDEV(current->domain) )
    16.9      {
   16.10          DPRINTK("Only I/O-capable domains may allocate > order-0 memory.\n");
   16.11          return 0;
   16.12 @@ -105,8 +105,8 @@ long do_dom_mem_op(unsigned int   op,
   16.13      long           rc;
   16.14  
   16.15      if ( likely(domid == DOMID_SELF) )
   16.16 -        d = current;
   16.17 -    else if ( unlikely(!IS_PRIV(current)) )
   16.18 +        d = current->domain;
   16.19 +    else if ( unlikely(!IS_PRIV(current->domain)) )
   16.20          return -EPERM;
   16.21      else if ( unlikely((d = find_domain_by_id(domid)) == NULL) )
   16.22  	return -ESRCH;
    17.1 --- a/xen/common/domain.c	Mon Nov 08 15:29:51 2004 +0000
    17.2 +++ b/xen/common/domain.c	Tue Nov 09 10:51:49 2004 +0000
    17.3 @@ -25,20 +25,23 @@ struct domain *domain_list;
    17.4  struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
    17.5  {
    17.6      struct domain *d, **pd;
    17.7 +    struct exec_domain *ed;
    17.8  
    17.9      if ( (d = alloc_domain_struct()) == NULL )
   17.10          return NULL;
   17.11  
   17.12 +    ed = d->exec_domain[0];
   17.13 +
   17.14      atomic_set(&d->refcnt, 1);
   17.15 -    atomic_set(&d->pausecnt, 0);
   17.16 +    atomic_set(&ed->pausecnt, 0);
   17.17  
   17.18 -    shadow_lock_init(d);
   17.19 +    shadow_lock_init(ed);
   17.20  
   17.21      d->id          = dom_id;
   17.22 -    d->processor   = cpu;
   17.23 +    ed->processor   = cpu;
   17.24      d->create_time = NOW();
   17.25   
   17.26 -    memcpy(&d->thread, &idle0_task.thread, sizeof(d->thread));
   17.27 +    memcpy(&ed->thread, &idle0_exec_domain.thread, sizeof(ed->thread));
   17.28  
   17.29      spin_lock_init(&d->page_alloc_lock);
   17.30      INIT_LIST_HEAD(&d->page_list);
   17.31 @@ -57,7 +60,7 @@ struct domain *do_createdomain(domid_t d
   17.32              return NULL;
   17.33          }
   17.34  
   17.35 -        arch_do_createdomain(d);
   17.36 +        arch_do_createdomain(ed);
   17.37  
   17.38          sched_add_domain(d);
   17.39  
   17.40 @@ -128,7 +131,7 @@ struct domain *find_last_domain(void)
   17.41  void domain_kill(struct domain *d)
   17.42  {
   17.43      domain_pause(d);
   17.44 -    if ( !test_and_set_bit(DF_DYING, &d->flags) )
   17.45 +    if ( !test_and_set_bit(DF_DYING, &d->d_flags) )
   17.46      {
   17.47          sched_rem_domain(d);
   17.48          domain_relinquish_memory(d);
   17.49 @@ -139,12 +142,14 @@ void domain_kill(struct domain *d)
   17.50  
   17.51  void domain_crash(void)
   17.52  {
   17.53 -    if ( current->id == 0 )
   17.54 +    struct domain *d = current->domain;
   17.55 +
   17.56 +    if ( d->id == 0 )
   17.57          BUG();
   17.58  
   17.59 -    set_bit(DF_CRASHED, &current->flags);
   17.60 +    set_bit(DF_CRASHED, &d->d_flags);
   17.61  
   17.62 -    send_guest_virq(dom0, VIRQ_DOM_EXC);
   17.63 +    send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
   17.64      
   17.65      __enter_scheduler();
   17.66      BUG();
   17.67 @@ -152,7 +157,9 @@ void domain_crash(void)
   17.68  
   17.69  void domain_shutdown(u8 reason)
   17.70  {
   17.71 -    if ( current->id == 0 )
   17.72 +    struct domain *d = current->domain;
   17.73 +
   17.74 +    if ( d->id == 0 )
   17.75      {
   17.76          extern void machine_restart(char *);
   17.77          extern void machine_halt(void);
   17.78 @@ -169,10 +176,10 @@ void domain_shutdown(u8 reason)
   17.79          }
   17.80      }
   17.81  
   17.82 -    current->shutdown_code = reason;
   17.83 -    set_bit(DF_SHUTDOWN, &current->flags);
   17.84 +    d->shutdown_code = reason;
   17.85 +    set_bit(DF_SHUTDOWN, &d->d_flags);
   17.86  
   17.87 -    send_guest_virq(dom0, VIRQ_DOM_EXC);
   17.88 +    send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
   17.89  
   17.90      __enter_scheduler();
   17.91  }
   17.92 @@ -206,9 +213,10 @@ unsigned int alloc_new_dom_mem(struct do
   17.93  void domain_destruct(struct domain *d)
   17.94  {
   17.95      struct domain **pd;
   17.96 +    struct exec_domain *ed;
   17.97      atomic_t      old, new;
   17.98  
   17.99 -    if ( !test_bit(DF_DYING, &d->flags) )
  17.100 +    if ( !test_bit(DF_DYING, &d->d_flags) )
  17.101          BUG();
  17.102  
  17.103      /* May be already destructed, or get_domain() can race us. */
  17.104 @@ -233,8 +241,9 @@ void domain_destruct(struct domain *d)
  17.105      destroy_event_channels(d);
  17.106      grant_table_destroy(d);
  17.107  
  17.108 -    free_perdomain_pt(d);
  17.109 -    free_xenheap_page((unsigned long)d->shared_info);
  17.110 +    for_each_exec_domain(d, ed)
  17.111 +        free_perdomain_pt(ed);
  17.112 +    free_xenheap_page((unsigned long)d->exec_domain[0]->shared_info);
  17.113  
  17.114      free_domain_struct(d);
  17.115  }
  17.116 @@ -253,7 +262,7 @@ int final_setup_guestos(struct domain *p
  17.117      if ( (c = xmalloc(sizeof(*c))) == NULL )
  17.118          return -ENOMEM;
  17.119  
  17.120 -    if ( test_bit(DF_CONSTRUCTED, &p->flags) )
  17.121 +    if ( test_bit(DF_CONSTRUCTED, &p->d_flags) )
  17.122      {
  17.123          rc = -EINVAL;
  17.124          goto out;
  17.125 @@ -265,13 +274,13 @@ int final_setup_guestos(struct domain *p
  17.126          goto out;
  17.127      }
  17.128      
  17.129 -    if ( (rc = arch_final_setup_guestos(p,c)) != 0 )
  17.130 +    if ( (rc = arch_final_setup_guestos(p->exec_domain[0],c)) != 0 )
  17.131          goto out;
  17.132  
  17.133      /* Set up the shared info structure. */
  17.134 -    update_dom_time(p->shared_info);
  17.135 +    update_dom_time(p->exec_domain[0]->shared_info);
  17.136  
  17.137 -    set_bit(DF_CONSTRUCTED, &p->flags);
  17.138 +    set_bit(DF_CONSTRUCTED, &p->d_flags);
  17.139  
  17.140   out:    
  17.141      if ( c != NULL )
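
Note: the domain.c changes imply roughly the following split. Illustrative only; the real definitions live in the sched.h/domain.h hunks of this changeset and carry more fields:

    struct exec_domain {
        int                  processor;    /* was domain->processor  */
        unsigned long        ed_flags;     /* EDF_* bits             */
        atomic_t             pausecnt;
        s_time_t             cpu_time;
        struct thread_struct thread;       /* arch execution state   */
        shared_info_t       *shared_info;
        struct domain       *domain;       /* back-pointer           */
    };

    struct domain {
        domid_t              id;
        unsigned long        d_flags;      /* DF_* bits              */
        atomic_t             refcnt;
        struct exec_domain  *exec_domain[MAX_VIRT_CPUS];
        /* page lists, event channels, grant tables, ... as before  */
    };
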
    18.1 --- a/xen/common/event_channel.c	Mon Nov 08 15:29:51 2004 +0000
    18.2 +++ b/xen/common/event_channel.c	Tue Nov 09 10:51:49 2004 +0000
    18.3 @@ -71,7 +71,7 @@ static int get_free_port(struct domain *
    18.4  
    18.5  static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
    18.6  {
    18.7 -    struct domain *d = current;
    18.8 +    struct domain *d = current->domain;
    18.9      int            port;
   18.10  
   18.11      spin_lock(&d->event_channel_lock);
   18.12 @@ -100,16 +100,16 @@ static long evtchn_bind_interdomain(evtc
   18.13      domid_t        dom1 = bind->dom1, dom2 = bind->dom2;
   18.14      long           rc = 0;
   18.15  
   18.16 -    if ( !IS_PRIV(current) && (dom1 != DOMID_SELF) )
   18.17 +    if ( !IS_PRIV(current->domain) && (dom1 != DOMID_SELF) )
   18.18          return -EPERM;
   18.19  
   18.20      if ( (port1 < 0) || (port2 < 0) )
   18.21          return -EINVAL;
   18.22  
   18.23      if ( dom1 == DOMID_SELF )
   18.24 -        dom1 = current->id;
   18.25 +        dom1 = current->domain->id;
   18.26      if ( dom2 == DOMID_SELF )
   18.27 -        dom2 = current->id;
   18.28 +        dom2 = current->domain->id;
   18.29  
   18.30      if ( ((d1 = find_domain_by_id(dom1)) == NULL) ||
   18.31           ((d2 = find_domain_by_id(dom2)) == NULL) )
   18.32 @@ -183,7 +183,7 @@ static long evtchn_bind_interdomain(evtc
   18.33      switch ( d2->event_channel[port2].state )
   18.34      {
   18.35      case ECS_FREE:
   18.36 -        if ( !IS_PRIV(current) && (dom2 != DOMID_SELF) )
   18.37 +        if ( !IS_PRIV(current->domain) && (dom2 != DOMID_SELF) )
   18.38              ERROR_EXIT(-EPERM);
   18.39          break;
   18.40  
   18.41 @@ -235,7 +235,7 @@ static long evtchn_bind_interdomain(evtc
   18.42  
   18.43  static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
   18.44  {
   18.45 -    struct domain *d = current;
   18.46 +    struct domain *d = current->domain;
   18.47      int            port, virq = bind->virq;
   18.48  
   18.49      if ( virq >= ARRAY_SIZE(d->virq_to_evtchn) )
   18.50 @@ -271,7 +271,7 @@ static long evtchn_bind_virq(evtchn_bind
   18.51  
   18.52  static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
   18.53  {
   18.54 -    struct domain *d = current;
   18.55 +    struct domain *d = current->domain;
   18.56      int            port, rc, pirq = bind->pirq;
   18.57  
   18.58      if ( pirq >= ARRAY_SIZE(d->pirq_to_evtchn) )
   18.59 @@ -417,8 +417,8 @@ static long evtchn_close(evtchn_close_t 
   18.60      domid_t        dom = close->dom;
   18.61  
   18.62      if ( dom == DOMID_SELF )
   18.63 -        dom = current->id;
   18.64 -    else if ( !IS_PRIV(current) )
   18.65 +        dom = current->domain->id;
   18.66 +    else if ( !IS_PRIV(current->domain) )
   18.67          return -EPERM;
   18.68  
   18.69      if ( (d = find_domain_by_id(dom)) == NULL )
   18.70 @@ -433,7 +433,7 @@ static long evtchn_close(evtchn_close_t 
   18.71  
   18.72  static long evtchn_send(int lport)
   18.73  {
   18.74 -    struct domain *ld = current, *rd;
   18.75 +    struct domain *ld = current->domain, *rd;
   18.76      int            rport;
   18.77  
   18.78      spin_lock(&ld->event_channel_lock);
   18.79 @@ -466,8 +466,8 @@ static long evtchn_status(evtchn_status_
   18.80      long             rc = 0;
   18.81  
   18.82      if ( dom == DOMID_SELF )
   18.83 -        dom = current->id;
   18.84 -    else if ( !IS_PRIV(current) )
   18.85 +        dom = current->domain->id;
   18.86 +    else if ( !IS_PRIV(current->domain) )
   18.87          return -EPERM;
   18.88  
   18.89      if ( (d = find_domain_by_id(dom)) == NULL )
    19.1 --- a/xen/common/grant_table.c	Mon Nov 08 15:29:51 2004 +0000
    19.2 +++ b/xen/common/grant_table.c	Tue Nov 09 10:51:49 2004 +0000
    19.3 @@ -74,7 +74,7 @@ static void
    19.4       */
    19.5      int            retries = 0;
    19.6  
    19.7 -    ld = current;
    19.8 +    ld = current->domain;
    19.9  
   19.10      /* Bitwise-OR avoids short-circuiting which screws control flow. */
   19.11      if ( unlikely(__get_user(dom, &uop->dom) |
   19.12 @@ -291,7 +291,7 @@ static void
   19.13      s16            rc = 0;
   19.14      unsigned long  frame, virt;
   19.15  
   19.16 -    ld = current;
   19.17 +    ld = current->domain;
   19.18  
   19.19      /* Bitwise-OR avoids short-circuiting which screws control flow. */
   19.20      if ( unlikely(__get_user(virt, &uop->host_virt_addr) |
   19.21 @@ -404,9 +404,9 @@ gnttab_setup_table(
   19.22  
   19.23      if ( op.dom == DOMID_SELF )
   19.24      {
   19.25 -        op.dom = current->id;
   19.26 +        op.dom = current->domain->id;
   19.27      }
   19.28 -    else if ( unlikely(!IS_PRIV(current)) )
   19.29 +    else if ( unlikely(!IS_PRIV(current->domain)) )
   19.30      {
   19.31          (void)put_user(GNTST_permission_denied, &uop->status);
   19.32          return 0;
    20.1 --- a/xen/common/kernel.c	Mon Nov 08 15:29:51 2004 +0000
    20.2 +++ b/xen/common/kernel.c	Tue Nov 09 10:51:49 2004 +0000
    20.3 @@ -29,6 +29,7 @@
    20.4  unsigned long xenheap_phys_end;
    20.5  
    20.6  xmem_cache_t *domain_struct_cachep;
    20.7 +xmem_cache_t *exec_domain_struct_cachep;
    20.8  struct domain *dom0;
    20.9  
   20.10  vm_assist_info_t vm_assist_info[MAX_VMASST_TYPE + 1];
   20.11 @@ -184,7 +185,7 @@ void cmain(multiboot_info_t *mbi)
   20.12      }
   20.13  
   20.14      /* Must do this early -- e.g., spinlocks rely on get_current(). */
   20.15 -    set_current(&idle0_task);
   20.16 +    set_current(&idle0_exec_domain);
   20.17  
   20.18      /* We initialise the serial devices very early so we can get debugging. */
   20.19      serial_init_stage1();
   20.20 @@ -304,6 +305,12 @@ void cmain(multiboot_info_t *mbi)
   20.21      if ( domain_struct_cachep == NULL )
   20.22          panic("No slab cache for task structs.");
   20.23  
   20.24 +    exec_domain_struct_cachep = xmem_cache_create(
   20.25 +        "exec_dom_cache", sizeof(struct exec_domain),
   20.26 +        0, SLAB_HWCACHE_ALIGN, NULL, NULL);
   20.27 +    if ( exec_domain_struct_cachep == NULL )
    20.28 +        panic("No slab cache for exec_domain structs.");
   20.29 +
   20.30      start_of_day();
   20.31  
   20.32      grant_table_init();
   20.33 @@ -313,7 +320,7 @@ void cmain(multiboot_info_t *mbi)
   20.34      if ( dom0 == NULL )
   20.35          panic("Error creating domain 0\n");
   20.36  
   20.37 -    set_bit(DF_PRIVILEGED, &dom0->flags);
   20.38 +    set_bit(DF_PRIVILEGED, &dom0->d_flags);
   20.39  
   20.40      shadow_mode_init();
   20.41  
   20.42 @@ -352,7 +359,7 @@ void cmain(multiboot_info_t *mbi)
   20.43      /* Give up the VGA console if DOM0 is configured to grab it. */
   20.44      console_endboot(cmdline && strstr(cmdline, "tty0"));
   20.45  
   20.46 -    domain_unpause_by_systemcontroller(current);
   20.47 +    domain_unpause_by_systemcontroller(current->domain);
   20.48      domain_unpause_by_systemcontroller(dom0);
   20.49      startup_cpu_idle_loop();
   20.50  }
   20.51 @@ -370,7 +377,7 @@ long do_xen_version(int cmd)
   20.52  
   20.53  long do_vm_assist(unsigned int cmd, unsigned int type)
   20.54  {
   20.55 -    return vm_assist(current, cmd, type);
   20.56 +    return vm_assist(current->domain, cmd, type);
   20.57  }
   20.58  
   20.59  long do_ni_hypercall(void)
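
Note: exec_domain structs get their own slab cache, mirroring the domain cache created just above; the matching allocation and wiring happen in the alloc_domain_struct() hunk of xen/common/schedule.c further down. Minimal usage sketch (illustrative):

    static struct exec_domain *example_alloc_ed(void)   /* hypothetical */
    {
        struct exec_domain *ed;

        if ( (ed = xmem_cache_alloc(exec_domain_struct_cachep)) == NULL )
            return NULL;
        memset(ed, 0, sizeof(*ed));   /* slab memory is not pre-zeroed */
        return ed;
    }
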
    21.1 --- a/xen/common/keyhandler.c	Mon Nov 08 15:29:51 2004 +0000
    21.2 +++ b/xen/common/keyhandler.c	Tue Nov 09 10:51:49 2004 +0000
    21.3 @@ -67,6 +67,7 @@ static void halt_machine(unsigned char k
    21.4  void do_task_queues(unsigned char key)
    21.5  {
    21.6      struct domain *d;
    21.7 +    struct exec_domain *ed;
    21.8      s_time_t       now = NOW();
    21.9      struct list_head *ent;
   21.10      struct pfn_info  *page;
   21.11 @@ -78,10 +79,8 @@ void do_task_queues(unsigned char key)
   21.12  
   21.13      for_each_domain ( d )
   21.14      {
   21.15 -        printk("Xen: DOM %u, CPU %d [has=%c] flags=%lx refcnt=%d nr_pages=%d "
   21.16 -               "xenheap_pages=%d\n",
   21.17 -               d->id, d->processor, 
   21.18 -               test_bit(DF_RUNNING, &d->flags) ? 'T':'F', d->flags,
   21.19 +        printk("Xen: DOM %u, flags=%lx refcnt=%d nr_pages=%d "
   21.20 +               "xenheap_pages=%d\n", d->id, d->d_flags,
   21.21                 atomic_read(&d->refcnt), d->tot_pages, d->xenheap_pages);
   21.22  
   21.23          if ( d->tot_pages < 10 )
   21.24 @@ -95,16 +94,22 @@ void do_task_queues(unsigned char key)
   21.25              }
   21.26          }
   21.27  
   21.28 -        page = virt_to_page(d->shared_info);
   21.29 +        page = virt_to_page(d->exec_domain[0]->shared_info);
   21.30          printk("Shared_info@%08x: caf=%08x, taf=%08x\n",
   21.31                 page_to_phys(page), page->count_info,
   21.32                 page->u.inuse.type_info);
   21.33                 
   21.34 -        printk("Guest: upcall_pend = %02x, upcall_mask = %02x\n", 
   21.35 -               d->shared_info->vcpu_data[0].evtchn_upcall_pending, 
   21.36 -               d->shared_info->vcpu_data[0].evtchn_upcall_mask);
   21.37 +        for_each_exec_domain ( d, ed ) {
   21.38 +            printk("Guest: CPU %d [has=%c] flags=%lx "
   21.39 +                   "upcall_pend = %02x, upcall_mask = %02x\n",
   21.40 +                   ed->processor,
   21.41 +                   test_bit(EDF_RUNNING, &ed->ed_flags) ? 'T':'F',
   21.42 +                   ed->ed_flags,
   21.43 +                   ed->shared_info->vcpu_data[0].evtchn_upcall_pending, 
   21.44 +                   ed->shared_info->vcpu_data[0].evtchn_upcall_mask);
   21.45 +        }
   21.46          printk("Notifying guest...\n"); 
   21.47 -        send_guest_virq(d, VIRQ_DEBUG);
   21.48 +        send_guest_virq(d->exec_domain[0], VIRQ_DEBUG);
   21.49      }
   21.50  
   21.51      read_unlock(&domlist_lock);
    22.1 --- a/xen/common/page_alloc.c	Mon Nov 08 15:29:51 2004 +0000
    22.2 +++ b/xen/common/page_alloc.c	Tue Nov 09 10:51:49 2004 +0000
    22.3 @@ -456,13 +456,13 @@ struct pfn_info *alloc_domheap_pages(str
    22.4  
    22.5      spin_lock(&d->page_alloc_lock);
    22.6  
    22.7 -    if ( unlikely(test_bit(DF_DYING, &d->flags)) ||
    22.8 +    if ( unlikely(test_bit(DF_DYING, &d->d_flags)) ||
    22.9           unlikely((d->tot_pages + (1 << order)) > d->max_pages) )
   22.10      {
   22.11          DPRINTK("Over-allocation for domain %u: %u > %u\n",
   22.12                  d->id, d->tot_pages + (1 << order), d->max_pages);
   22.13          DPRINTK("...or the domain is dying (%d)\n", 
   22.14 -                !!test_bit(DF_DYING, &d->flags));
   22.15 +                !!test_bit(DF_DYING, &d->d_flags));
   22.16          spin_unlock(&d->page_alloc_lock);
   22.17          free_heap_pages(MEMZONE_DOM, pg, order);
   22.18          return NULL;
   22.19 @@ -491,7 +491,9 @@ void free_domheap_pages(struct pfn_info 
   22.20  {
   22.21      int            i, drop_dom_ref;
   22.22      struct domain *d = pg->u.inuse.domain;
   22.23 +    struct exec_domain *ed;
   22.24      void          *p;
   22.25 +    int cpu_mask = 0;
   22.26  
   22.27      ASSERT(!in_irq());
   22.28  
   22.29 @@ -513,11 +515,14 @@ void free_domheap_pages(struct pfn_info 
   22.30          /* NB. May recursively lock from domain_relinquish_memory(). */
   22.31          spin_lock_recursive(&d->page_alloc_lock);
   22.32  
   22.33 +        for_each_exec_domain(d, ed)
   22.34 +            cpu_mask |= 1 << ed->processor;
   22.35 +
   22.36          for ( i = 0; i < (1 << order); i++ )
   22.37          {
   22.38              ASSERT((pg[i].u.inuse.type_info & PGT_count_mask) == 0);
   22.39              pg[i].tlbflush_timestamp  = tlbflush_current_time();
   22.40 -            pg[i].u.free.cpu_mask     = 1 << d->processor;
   22.41 +            pg[i].u.free.cpu_mask     = cpu_mask;
   22.42              list_del(&pg[i].list);
   22.43  
   22.44              /*
   22.45 @@ -525,7 +530,7 @@ void free_domheap_pages(struct pfn_info 
   22.46               * if it cares about the secrecy of their contents. However, after
   22.47               * a domain has died we assume responsibility for erasure.
   22.48               */
   22.49 -            if ( unlikely(test_bit(DF_DYING, &d->flags)) )
   22.50 +            if ( unlikely(test_bit(DF_DYING, &d->d_flags)) )
   22.51              {
   22.52                  p = map_domain_mem(page_to_phys(&pg[i]));
   22.53                  clear_page(p);
    23.1 --- a/xen/common/physdev.c	Mon Nov 08 15:29:51 2004 +0000
    23.2 +++ b/xen/common/physdev.c	Tue Nov 09 10:51:49 2004 +0000
    23.3 @@ -128,7 +128,7 @@ int physdev_pci_access_modify(
    23.4      struct pci_dev *pdev;
    23.5      int i, j, rc = 0;
    23.6   
    23.7 -    if ( !IS_PRIV(current) )
    23.8 +    if ( !IS_PRIV(current->domain) )
    23.9          BUG();
   23.10  
   23.11      if ( (bus > PCI_BUSMAX) || (dev > PCI_DEVMAX) || (func > PCI_FUNCMAX) )
   23.12 @@ -146,10 +146,10 @@ int physdev_pci_access_modify(
   23.13          return -ESRCH;
   23.14  
   23.15      /* Make the domain privileged. */
   23.16 -    set_bit(DF_PHYSDEV, &p->flags);
   23.17 +    set_bit(DF_PHYSDEV, &p->d_flags);
   23.18  	/* FIXME: MAW for now make the domain REALLY privileged so that it
   23.19  	 * can run a backend driver (hw access should work OK otherwise) */
   23.20 -	set_bit(DF_PRIVILEGED, &p->flags);
   23.21 +	set_bit(DF_PRIVILEGED, &p->d_flags);
   23.22  
   23.23      /* Grant write access to the specified device. */
   23.24      if ( (pdev = pci_find_slot(bus, PCI_DEVFN(dev, func))) == NULL )
   23.25 @@ -494,7 +494,7 @@ static long pci_cfgreg_read(int bus, int
   23.26      int ret;
   23.27      phys_dev_t *pdev;
   23.28  
   23.29 -    if ( (ret = check_dev_acc(current, bus, dev, func, &pdev)) != 0 )
   23.30 +    if ( (ret = check_dev_acc(current->domain, bus, dev, func, &pdev)) != 0 )
   23.31      {
   23.32          /* PCI spec states that reads from non-existent devices should return
   23.33           * all 1s.  In this case the domain has no read access, which should
   23.34 @@ -559,7 +559,7 @@ static long pci_cfgreg_write(int bus, in
   23.35      int ret;
   23.36      phys_dev_t *pdev;
   23.37  
   23.38 -    if ( (ret = check_dev_acc(current, bus, dev, func, &pdev)) != 0 )
   23.39 +    if ( (ret = check_dev_acc(current->domain, bus, dev, func, &pdev)) != 0 )
   23.40          return ret;
   23.41  
   23.42      /* special treatment for some registers */
   23.43 @@ -621,7 +621,7 @@ static long pci_probe_root_buses(u32 *bu
   23.44  
   23.45      memset(busmask, 0, 256/8);
   23.46  
   23.47 -    list_for_each ( tmp, &current->pcidev_list )
   23.48 +    list_for_each ( tmp, &current->domain->pcidev_list )
   23.49      {
   23.50          pdev = list_entry(tmp, phys_dev_t, node);
   23.51          set_bit(pdev->dev->bus->number, busmask);
   23.52 @@ -665,7 +665,7 @@ long do_physdev_op(physdev_op_t *uop)
   23.53          break;
   23.54  
   23.55      case PHYSDEVOP_PCI_INITIALISE_DEVICE:
   23.56 -        if ( (ret = check_dev_acc(current, 
   23.57 +        if ( (ret = check_dev_acc(current->domain, 
   23.58                                    op.u.pci_initialise_device.bus, 
   23.59                                    op.u.pci_initialise_device.dev, 
   23.60                                    op.u.pci_initialise_device.func, 
   23.61 @@ -678,7 +678,7 @@ long do_physdev_op(physdev_op_t *uop)
   23.62          break;
   23.63  
   23.64      case PHYSDEVOP_IRQ_UNMASK_NOTIFY:
   23.65 -        ret = pirq_guest_unmask(current);
   23.66 +        ret = pirq_guest_unmask(current->domain);
   23.67          break;
   23.68  
   23.69      case PHYSDEVOP_IRQ_STATUS_QUERY:
   23.70 @@ -757,6 +757,6 @@ void physdev_init_dom0(struct domain *p)
   23.71          list_add(&pdev->node, &p->pcidev_list);
   23.72      }
   23.73  
   23.74 -    set_bit(DF_PHYSDEV, &p->flags);
   23.75 +    set_bit(DF_PHYSDEV, &p->d_flags);
   23.76  }
   23.77  
    24.1 --- a/xen/common/sched_bvt.c	Mon Nov 08 15:29:51 2004 +0000
    24.2 +++ b/xen/common/sched_bvt.c	Tue Nov 09 10:51:49 2004 +0000
    24.3 @@ -28,13 +28,19 @@
    24.4  #include <xen/softirq.h>
    24.5  
    24.6  /* all per-domain BVT-specific scheduling info is stored here */
    24.7 +struct bvt_edom_info
    24.8 +{
    24.9 +    struct list_head    run_list;         /* runqueue list pointers */
   24.10 +    u32                 avt;              /* actual virtual time */
   24.11 +    u32                 evt;              /* effective virtual time */
   24.12 +    struct exec_domain  *exec_domain;
   24.13 +    struct bvt_dom_info *inf;
   24.14 +};
   24.15 +
   24.16  struct bvt_dom_info
   24.17  {
   24.18      struct domain       *domain;          /* domain this info belongs to */
   24.19 -    struct list_head    run_list;         /* runqueue list pointers */
   24.20      u32                 mcu_advance;      /* inverse of weight */
   24.21 -    u32                 avt;              /* actual virtual time */
   24.22 -    u32                 evt;              /* effective virtual time */
   24.23      int                 warpback;         /* warp?  */
   24.24      int                 warp;             /* warp set and within the warp 
   24.25                                               limits*/
   24.26 @@ -43,6 +49,8 @@ struct bvt_dom_info
   24.27      struct ac_timer     warp_timer;       /* deals with warpl */
   24.28      s_time_t            warpu;            /* unwarp time requirement */
   24.29      struct ac_timer     unwarp_timer;     /* deals with warpu */
   24.30 +
   24.31 +    struct bvt_edom_info ed_inf[MAX_VIRT_CPUS];
   24.32  };
   24.33  
   24.34  struct bvt_cpu_info
   24.35 @@ -52,8 +60,9 @@ struct bvt_cpu_info
   24.36  };
   24.37  
   24.38  #define BVT_INFO(p)   ((struct bvt_dom_info *)(p)->sched_priv)
   24.39 +#define EBVT_INFO(p)  ((struct bvt_edom_info *)(p)->ed_sched_priv)
   24.40  #define CPU_INFO(cpu) ((struct bvt_cpu_info *)(schedule_data[cpu]).sched_priv)
   24.41 -#define RUNLIST(p)    ((struct list_head *)&(BVT_INFO(p)->run_list))
   24.42 +#define RUNLIST(p)    ((struct list_head *)&(EBVT_INFO(p)->run_list))
   24.43  #define RUNQUEUE(cpu) ((struct list_head *)&(CPU_INFO(cpu)->runqueue))
   24.44  #define CPU_SVT(cpu)  (CPU_INFO(cpu)->svt)
   24.45  
   24.46 @@ -64,24 +73,24 @@ static s32 ctx_allow = (s32)MILLISECS(5)
   24.47  
   24.48  static xmem_cache_t *dom_info_cache;
   24.49  
   24.50 -static inline void __add_to_runqueue_head(struct domain *d)
   24.51 +static inline void __add_to_runqueue_head(struct exec_domain *d)
   24.52  {
   24.53      list_add(RUNLIST(d), RUNQUEUE(d->processor));
   24.54  }
   24.55  
   24.56 -static inline void __add_to_runqueue_tail(struct domain *d)
   24.57 +static inline void __add_to_runqueue_tail(struct exec_domain *d)
   24.58  {
   24.59      list_add_tail(RUNLIST(d), RUNQUEUE(d->processor));
   24.60  }
   24.61  
   24.62 -static inline void __del_from_runqueue(struct domain *d)
   24.63 +static inline void __del_from_runqueue(struct exec_domain *d)
   24.64  {
   24.65      struct list_head *runlist = RUNLIST(d);
   24.66      list_del(runlist);
   24.67      runlist->next = NULL;
   24.68  }
   24.69  
   24.70 -static inline int __task_on_runqueue(struct domain *d)
   24.71 +static inline int __task_on_runqueue(struct exec_domain *d)
   24.72  {
   24.73      return (RUNLIST(d))->next != NULL;
   24.74  }
   24.75 @@ -91,7 +100,7 @@ static inline int __task_on_runqueue(str
   24.76  static void warp_timer_fn(unsigned long pointer)
   24.77  {
   24.78      struct bvt_dom_info *inf = (struct bvt_dom_info *)pointer;
   24.79 -    unsigned int cpu = inf->domain->processor;
   24.80 +    unsigned int cpu = inf->domain->exec_domain[0]->processor;
   24.81      
   24.82      spin_lock_irq(&schedule_data[cpu].schedule_lock);
   24.83  
   24.84 @@ -114,7 +123,7 @@ static void warp_timer_fn(unsigned long 
   24.85  static void unwarp_timer_fn(unsigned long pointer)
   24.86  {
   24.87      struct bvt_dom_info *inf = (struct bvt_dom_info *)pointer;
   24.88 -    unsigned int cpu = inf->domain->processor;
   24.89 +    unsigned int cpu = inf->domain->exec_domain[0]->processor;
   24.90  
   24.91      spin_lock_irq(&schedule_data[cpu].schedule_lock);
   24.92  
   24.93 @@ -127,24 +136,25 @@ static void unwarp_timer_fn(unsigned lon
   24.94      spin_unlock_irq(&schedule_data[cpu].schedule_lock);
   24.95  }
   24.96  
   24.97 -static inline u32 calc_avt(struct domain *d, s_time_t now)
   24.98 +static inline u32 calc_avt(struct exec_domain *d, s_time_t now)
   24.99  {
  24.100      u32 ranfor, mcus;
  24.101 -    struct bvt_dom_info *inf = BVT_INFO(d);
  24.102 +    struct bvt_dom_info *inf = BVT_INFO(d->domain);
  24.103 +    struct bvt_edom_info *einf = EBVT_INFO(d);
  24.104      
  24.105      ranfor = (u32)(now - d->lastschd);
  24.106      mcus = (ranfor + MCU - 1)/MCU;
  24.107  
  24.108 -    return inf->avt + mcus * inf->mcu_advance;
  24.109 +    return einf->avt + mcus * inf->mcu_advance;
  24.110  }
  24.111  
  24.112  /*
  24.113   * Calculate the effective virtual time for a domain. Take into account 
  24.114   * warping limits
  24.115   */
  24.116 -static inline u32 calc_evt(struct domain *d, u32 avt)
  24.117 +static inline u32 calc_evt(struct exec_domain *d, u32 avt)
  24.118  {
  24.119 -    struct bvt_dom_info *inf = BVT_INFO(d);
  24.120 +    struct bvt_dom_info *inf = BVT_INFO(d->domain);
  24.121      /* TODO The warp routines need to be rewritten GM */
  24.122   
  24.123      if ( inf->warp ) 
  24.124 @@ -159,25 +169,32 @@ static inline u32 calc_evt(struct domain
  24.125   *
  24.126   * Returns non-zero on failure.
  24.127   */
  24.128 -int bvt_alloc_task(struct domain *d)
  24.129 +int bvt_alloc_task(struct exec_domain *ed)
  24.130  {
  24.131 -    if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL )
  24.132 -        return -1;
  24.133 -    memset(d->sched_priv, 0, sizeof(struct bvt_dom_info));
  24.134 +    struct domain *d = ed->domain;
   24.135 +    if ( d->sched_priv == NULL ) {
  24.136 +        if ( (d->sched_priv = xmem_cache_alloc(dom_info_cache)) == NULL )
  24.137 +            return -1;
  24.138 +        memset(d->sched_priv, 0, sizeof(struct bvt_dom_info));
  24.139 +    }
  24.140 +    ed->ed_sched_priv = &BVT_INFO(d)->ed_inf[ed->eid];
  24.141 +    BVT_INFO(d)->ed_inf[ed->eid].inf = BVT_INFO(d);
  24.142      return 0;
  24.143  }
  24.144  
  24.145  /*
  24.146   * Add and remove a domain
  24.147   */
  24.148 -void bvt_add_task(struct domain *d) 
  24.149 +void bvt_add_task(struct exec_domain *d) 
  24.150  {
  24.151 -    struct bvt_dom_info *inf = BVT_INFO(d);
  24.152 +    struct bvt_dom_info *inf = BVT_INFO(d->domain);
  24.153 +    struct bvt_edom_info *einf = EBVT_INFO(d);
  24.154      ASSERT(inf != NULL);
  24.155      ASSERT(d   != NULL);
  24.156  
  24.157      inf->mcu_advance = MCU_ADVANCE;
  24.158 -    inf->domain      = d;
  24.159 +    inf->domain      = d->domain;
  24.160 +    einf->exec_domain = d;
  24.161      inf->warpback    = 0;
  24.162      /* Set some default values here. */
  24.163      inf->warp        = 0;
  24.164 @@ -194,36 +211,36 @@ void bvt_add_task(struct domain *d)
  24.165      inf->unwarp_timer.data = (unsigned long)inf;
  24.166      inf->unwarp_timer.function = &unwarp_timer_fn;
  24.167      
  24.168 -    if ( d->id == IDLE_DOMAIN_ID )
  24.169 +    if ( d->domain->id == IDLE_DOMAIN_ID )
  24.170      {
  24.171 -        inf->avt = inf->evt = ~0U;
  24.172 +        einf->avt = einf->evt = ~0U;
  24.173      } 
  24.174      else 
  24.175      {
  24.176          /* Set avt and evt to system virtual time. */
  24.177 -        inf->avt = CPU_SVT(d->processor);
  24.178 -        inf->evt = CPU_SVT(d->processor);
  24.179 +        einf->avt = CPU_SVT(d->processor);
  24.180 +        einf->evt = CPU_SVT(d->processor);
  24.181      }
  24.182  }
  24.183  
  24.184 -int bvt_init_idle_task(struct domain *p)
  24.185 +int bvt_init_idle_task(struct exec_domain *p)
  24.186  {
  24.187      if ( bvt_alloc_task(p) < 0 )
  24.188          return -1;
  24.189  
  24.190      bvt_add_task(p);
  24.191  
  24.192 -    set_bit(DF_RUNNING, &p->flags);
  24.193 +    set_bit(EDF_RUNNING, &p->ed_flags);
  24.194      if ( !__task_on_runqueue(p) )
  24.195          __add_to_runqueue_head(p);
  24.196          
  24.197      return 0;
  24.198  }
  24.199  
  24.200 -void bvt_wake(struct domain *d)
  24.201 +void bvt_wake(struct exec_domain *d)
  24.202  {
  24.203 -    struct bvt_dom_info *inf = BVT_INFO(d);
  24.204 -    struct domain       *curr;
  24.205 +    struct bvt_edom_info *einf = EBVT_INFO(d);
  24.206 +    struct exec_domain  *curr;
  24.207      s_time_t            now, r_time;
  24.208      int                 cpu = d->processor;
  24.209      u32                 curr_evt;
  24.210 @@ -237,31 +254,31 @@ void bvt_wake(struct domain *d)
  24.211  
  24.212      /* Set the BVT parameters. AVT should always be updated 
   24.213         if CPU migration occurred. */
  24.214 -    if ( inf->avt < CPU_SVT(cpu) || 
  24.215 -         unlikely(test_bit(DF_MIGRATED, &d->flags)) )
  24.216 -        inf->avt = CPU_SVT(cpu);
  24.217 +    if ( einf->avt < CPU_SVT(cpu) || 
  24.218 +         unlikely(test_bit(EDF_MIGRATED, &d->ed_flags)) )
  24.219 +        einf->avt = CPU_SVT(cpu);
  24.220  
  24.221      /* Deal with warping here. */
  24.222 -    inf->evt = calc_evt(d, inf->avt);
  24.223 +    einf->evt = calc_evt(d, einf->avt);
  24.224      
  24.225      curr = schedule_data[cpu].curr;
  24.226      curr_evt = calc_evt(curr, calc_avt(curr, now));
  24.227      /* Calculate the time the current domain would run assuming
  24.228         the second smallest evt is of the newly woken domain */
  24.229      r_time = curr->lastschd +
  24.230 -        ((inf->evt - curr_evt) / BVT_INFO(curr)->mcu_advance) +
  24.231 +        ((einf->evt - curr_evt) / BVT_INFO(curr->domain)->mcu_advance) +
  24.232          ctx_allow;
  24.233  
  24.234 -    if ( is_idle_task(curr) || (inf->evt <= curr_evt) )
  24.235 +    if ( is_idle_task(curr->domain) || (einf->evt <= curr_evt) )
  24.236          cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
  24.237      else if ( schedule_data[cpu].s_timer.expires > r_time )
  24.238          mod_ac_timer(&schedule_data[cpu].s_timer, r_time);
  24.239  }
  24.240  
  24.241  
  24.242 -static void bvt_sleep(struct domain *d)
  24.243 +static void bvt_sleep(struct exec_domain *d)
  24.244  {
  24.245 -    if ( test_bit(DF_RUNNING, &d->flags) )
  24.246 +    if ( test_bit(EDF_RUNNING, &d->ed_flags) )
  24.247          cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
  24.248      else  if ( __task_on_runqueue(d) )
  24.249          __del_from_runqueue(d);
  24.250 @@ -347,25 +364,27 @@ int bvt_adjdom(
  24.251   */
  24.252  static task_slice_t bvt_do_schedule(s_time_t now)
  24.253  {
  24.254 -    struct domain      *prev = current, *next = NULL, *next_prime, *p; 
  24.255 +    struct domain *d;
  24.256 +    struct exec_domain      *prev = current, *next = NULL, *next_prime, *ed; 
  24.257      struct list_head   *tmp;
  24.258      int                 cpu = prev->processor;
  24.259      s32                 r_time;     /* time for new dom to run */
  24.260      u32                 next_evt, next_prime_evt, min_avt;
  24.261 -    struct bvt_dom_info *prev_inf       = BVT_INFO(prev);
  24.262 -    struct bvt_dom_info *p_inf          = NULL;
  24.263 -    struct bvt_dom_info *next_inf       = NULL;
  24.264 -    struct bvt_dom_info *next_prime_inf = NULL;
  24.265 +    struct bvt_dom_info *prev_inf       = BVT_INFO(prev->domain);
  24.266 +    struct bvt_edom_info *prev_einf       = EBVT_INFO(prev);
  24.267 +    struct bvt_edom_info *p_einf          = NULL;
  24.268 +    struct bvt_edom_info *next_einf       = NULL;
  24.269 +    struct bvt_edom_info *next_prime_einf = NULL;
  24.270      task_slice_t        ret;
  24.271  
  24.272 -    ASSERT(prev->sched_priv != NULL);
  24.273 -    ASSERT(prev_inf != NULL);
  24.274 +    ASSERT(prev->ed_sched_priv != NULL);
  24.275 +    ASSERT(prev_einf != NULL);
  24.276      ASSERT(__task_on_runqueue(prev));
  24.277  
  24.278 -    if ( likely(!is_idle_task(prev)) ) 
  24.279 +    if ( likely(!is_idle_task(prev->domain)) ) 
  24.280      {
  24.281 -        prev_inf->avt = calc_avt(prev, now);
  24.282 -        prev_inf->evt = calc_evt(prev, prev_inf->avt);
  24.283 +        prev_einf->avt = calc_avt(prev, now);
  24.284 +        prev_einf->evt = calc_evt(prev, prev_einf->avt);
  24.285         
  24.286          if(prev_inf->warpback && prev_inf->warpl > 0)
  24.287              rem_ac_timer(&prev_inf->warp_timer);
  24.288 @@ -385,8 +404,8 @@ static task_slice_t bvt_do_schedule(s_ti
  24.289       * *and* the task the second lowest evt.
  24.290       * this code is O(n) but we expect n to be small.
  24.291       */
  24.292 -    next_inf        = BVT_INFO(schedule_data[cpu].idle);
  24.293 -    next_prime_inf  = NULL;
  24.294 +    next_einf       = EBVT_INFO(schedule_data[cpu].idle);
  24.295 +    next_prime_einf  = NULL;
  24.296  
  24.297      next_evt       = ~0U;
  24.298      next_prime_evt = ~0U;
  24.299 @@ -394,42 +413,42 @@ static task_slice_t bvt_do_schedule(s_ti
  24.300  
  24.301      list_for_each ( tmp, RUNQUEUE(cpu) )
  24.302      {
  24.303 -        p_inf = list_entry(tmp, struct bvt_dom_info, run_list);
  24.304 +        p_einf = list_entry(tmp, struct bvt_edom_info, run_list);
  24.305  
  24.306 -        if ( p_inf->evt < next_evt )
  24.307 +        if ( p_einf->evt < next_evt )
  24.308          {
  24.309 -            next_prime_inf  = next_inf;
  24.310 +            next_prime_einf  = next_einf;
  24.311              next_prime_evt  = next_evt;
  24.312 -            next_inf        = p_inf;
  24.313 -            next_evt        = p_inf->evt;
  24.314 +            next_einf        = p_einf;
  24.315 +            next_evt        = p_einf->evt;
  24.316          } 
  24.317          else if ( next_prime_evt == ~0U )
  24.318          {
  24.319 -            next_prime_evt  = p_inf->evt;
  24.320 -            next_prime_inf  = p_inf;
  24.321 +            next_prime_evt  = p_einf->evt;
  24.322 +            next_prime_einf  = p_einf;
  24.323          } 
  24.324 -        else if ( p_inf->evt < next_prime_evt )
  24.325 +        else if ( p_einf->evt < next_prime_evt )
  24.326          {
  24.327 -            next_prime_evt  = p_inf->evt;
  24.328 -            next_prime_inf  = p_inf;
  24.329 +            next_prime_evt  = p_einf->evt;
  24.330 +            next_prime_einf  = p_einf;
  24.331          }
  24.332  
  24.333          /* Determine system virtual time. */
  24.334 -        if ( p_inf->avt < min_avt )
  24.335 -            min_avt = p_inf->avt;
  24.336 +        if ( p_einf->avt < min_avt )
  24.337 +            min_avt = p_einf->avt;
  24.338      }
  24.339      
  24.340 -    if(next_inf->warp && next_inf->warpl > 0)
   24.341 +    if ( next_einf->inf->warp && next_einf->inf->warpl > 0 )
  24.342      {
  24.343          /* Set the timer up */ 
  24.344 -        next_inf->warp_timer.expires = now + next_inf->warpl;
  24.345 +        next_einf->inf->warp_timer.expires = now + next_einf->inf->warpl;
  24.346          /* Add it to the heap */
  24.347 -        add_ac_timer(&next_inf->warp_timer);
  24.348 +        add_ac_timer(&next_einf->inf->warp_timer);
  24.349      }
  24.350     
  24.351      /* Extract the domain pointers from the dom infos */
  24.352 -    next        = next_inf->domain;
  24.353 -    next_prime  = next_prime_inf->domain;
  24.354 +    next        = next_einf->exec_domain;
  24.355 +    next_prime  = next_prime_einf->exec_domain;
  24.356      
  24.357      /* Update system virtual time. */
  24.358      if ( min_avt != ~0U )
  24.359 @@ -442,13 +461,15 @@ static task_slice_t bvt_do_schedule(s_ti
  24.360  
  24.361          write_lock(&domlist_lock);
  24.362          
  24.363 -        for_each_domain ( p )
  24.364 +        for_each_domain ( d )
  24.365          {
  24.366 -            if ( p->processor == cpu )
  24.367 -            {
  24.368 -                p_inf = BVT_INFO(p);
  24.369 -                p_inf->evt -= 0xe0000000;
  24.370 -                p_inf->avt -= 0xe0000000;
  24.371 +            for_each_exec_domain (d, ed) {
  24.372 +                if ( ed->processor == cpu )
  24.373 +                {
  24.374 +                    p_einf = EBVT_INFO(ed);
  24.375 +                    p_einf->evt -= 0xe0000000;
  24.376 +                    p_einf->avt -= 0xe0000000;
   24.377 +                }
  24.378              }
  24.379          } 
  24.380          
  24.381 @@ -458,13 +479,13 @@ static task_slice_t bvt_do_schedule(s_ti
  24.382      }
  24.383  
  24.384      /* work out time for next run through scheduler */
  24.385 -    if ( is_idle_task(next) ) 
  24.386 +    if ( is_idle_task(next->domain) ) 
  24.387      {
  24.388          r_time = ctx_allow;
  24.389          goto sched_done;
  24.390      }
  24.391  
  24.392 -    if ( (next_prime == NULL) || is_idle_task(next_prime) )
  24.393 +    if ( (next_prime == NULL) || is_idle_task(next_prime->domain) )
  24.394      {
  24.395          /* We have only one runnable task besides the idle task. */
  24.396          r_time = 10 * ctx_allow;     /* RN: random constant */
  24.397 @@ -478,7 +499,7 @@ static task_slice_t bvt_do_schedule(s_ti
  24.398       */
   24.399 -    ASSERT(next_prime_inf->evt >= next_inf->evt);
   24.400 +    ASSERT(next_prime_einf->evt >= next_einf->evt);
  24.400      
  24.401 -    r_time = ((next_prime_inf->evt - next_inf->evt)/next_inf->mcu_advance)
  24.402 +    r_time = ((next_prime_einf->evt - next_einf->evt)/next_einf->inf->mcu_advance)
  24.403          + ctx_allow;
  24.404  
  24.405      ASSERT(r_time >= ctx_allow);
  24.406 @@ -490,12 +511,12 @@ static task_slice_t bvt_do_schedule(s_ti
  24.407  }
  24.408  
  24.409  
  24.410 -static void bvt_dump_runq_el(struct domain *p)
  24.411 +static void bvt_dump_runq_el(struct exec_domain *p)
  24.412  {
  24.413 -    struct bvt_dom_info *inf = BVT_INFO(p);
  24.414 +    struct bvt_edom_info *inf = EBVT_INFO(p);
  24.415      
  24.416      printk("mcua=%d ev=0x%08X av=0x%08X ",
  24.417 -           inf->mcu_advance, inf->evt, inf->avt);
  24.418 +           inf->inf->mcu_advance, inf->evt, inf->avt);
  24.419  }
  24.420  
  24.421  static void bvt_dump_settings(void)
  24.422 @@ -507,8 +528,8 @@ static void bvt_dump_cpu_state(int i)
  24.423  {
  24.424      struct list_head *list, *queue;
  24.425      int loop = 0;
  24.426 -    struct bvt_dom_info *d_inf;
  24.427 -    struct domain *d;
  24.428 +    struct bvt_edom_info *d_inf;
  24.429 +    struct exec_domain *d;
  24.430      
  24.431      printk("svt=0x%08lX ", CPU_SVT(i));
  24.432  
  24.433 @@ -518,10 +539,10 @@ static void bvt_dump_cpu_state(int i)
  24.434  
  24.435      list_for_each ( list, queue )
  24.436      {
  24.437 -        d_inf = list_entry(list, struct bvt_dom_info, run_list);
  24.438 -        d = d_inf->domain;
  24.439 -        printk("%3d: %u has=%c ", loop++, d->id,
  24.440 -               test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
  24.441 +        d_inf = list_entry(list, struct bvt_edom_info, run_list);
  24.442 +        d = d_inf->exec_domain;
  24.443 +        printk("%3d: %u has=%c ", loop++, d->domain->id,
  24.444 +               test_bit(EDF_RUNNING, &d->ed_flags) ? 'T':'F');
  24.445          bvt_dump_runq_el(d);
  24.446          printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
  24.447          printk("         l: %lx n: %lx  p: %lx\n",
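
Note: sched_bvt.c now keeps state at two levels: one bvt_dom_info per domain (weight and warp parameters) embedding a bvt_edom_info per vcpu (virtual times and runqueue linkage). Sketch of the lookup path using the macros from the hunks above (illustrative):

    static void example_bvt_lookup(void)                 /* hypothetical */
    {
        struct exec_domain   *ed   = current;
        struct bvt_edom_info *einf = EBVT_INFO(ed);      /* ed->ed_sched_priv  */
        struct bvt_dom_info  *inf  = einf->inf;          /* shared, per-domain */
        u32 evt, weight;

        evt    = calc_evt(ed, calc_avt(ed, NOW()));      /* per-vcpu time      */
        weight = inf->mcu_advance;                       /* per-domain weight  */
    }
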
    25.1 --- a/xen/common/schedule.c	Mon Nov 08 15:29:51 2004 +0000
    25.2 +++ b/xen/common/schedule.c	Tue Nov 09 10:51:49 2004 +0000
    25.3 @@ -68,12 +68,12 @@ static void fallback_timer_fn(unsigned l
    25.4  schedule_data_t schedule_data[NR_CPUS];
    25.5  
    25.6  extern struct scheduler sched_bvt_def;
    25.7 -extern struct scheduler sched_rrobin_def;
    25.8 -extern struct scheduler sched_atropos_def;
    25.9 +// extern struct scheduler sched_rrobin_def;
   25.10 +// extern struct scheduler sched_atropos_def;
   25.11  static struct scheduler *schedulers[] = { 
   25.12      &sched_bvt_def,
   25.13 -    &sched_rrobin_def,
   25.14 -    &sched_atropos_def,
   25.15 +//     &sched_rrobin_def,
   25.16 +//     &sched_atropos_def,
   25.17      NULL
   25.18  };
   25.19  
   25.20 @@ -94,29 +94,46 @@ static struct ac_timer t_timer[NR_CPUS];
   25.21  static struct ac_timer fallback_timer[NR_CPUS];
   25.22  
   25.23  extern xmem_cache_t *domain_struct_cachep;
   25.24 +extern xmem_cache_t *exec_domain_struct_cachep;
   25.25  
   25.26  void free_domain_struct(struct domain *d)
   25.27  {
   25.28 +    struct exec_domain *ed;
   25.29 +
   25.30      SCHED_OP(free_task, d);
   25.31 +    for_each_exec_domain(d, ed)
   25.32 +        xmem_cache_free(exec_domain_struct_cachep, ed);
   25.33      xmem_cache_free(domain_struct_cachep, d);
   25.34  }
   25.35  
   25.36  struct domain *alloc_domain_struct(void)
   25.37  {
   25.38      struct domain *d;
   25.39 +    struct exec_domain *ed = NULL;
   25.40  
   25.41      if ( (d = xmem_cache_alloc(domain_struct_cachep)) == NULL )
   25.42          return NULL;
   25.43      
   25.44      memset(d, 0, sizeof(*d));
   25.45  
   25.46 -    if ( SCHED_OP(alloc_task, d) < 0 )
   25.47 -    {
   25.48 -        xmem_cache_free(domain_struct_cachep, d);
   25.49 -        return NULL;
   25.50 -    }
   25.51 +    if ( (ed = xmem_cache_alloc(exec_domain_struct_cachep)) == NULL )
   25.52 +        goto out;
   25.53 +
   25.54 +    memset(ed, 0, sizeof(*ed));
   25.55 +
   25.56 +    d->exec_domain[0] = ed;
   25.57 +    ed->domain = d;
   25.58 +
   25.59 +    if ( SCHED_OP(alloc_task, ed) < 0 )
   25.60 +        goto out;
   25.61  
   25.62      return d;
   25.63 +
   25.64 + out:
   25.65 +    if ( ed )
   25.66 +        xmem_cache_free(exec_domain_struct_cachep, ed);
   25.67 +    xmem_cache_free(domain_struct_cachep, d);
   25.68 +    return NULL;
   25.69  }
   25.70  
   25.71  /*
   25.72 @@ -124,23 +141,27 @@ struct domain *alloc_domain_struct(void)
   25.73   */
   25.74  void sched_add_domain(struct domain *d) 
   25.75  {
   25.76 -    /* Must be unpaused by control software to start execution. */
   25.77 -    set_bit(DF_CTRLPAUSE, &d->flags);
   25.78 +    struct exec_domain *ed;
   25.79 +
   25.80 +    for_each_exec_domain(d, ed) {
   25.81 +        /* Must be unpaused by control software to start execution. */
   25.82 +        set_bit(EDF_CTRLPAUSE, &ed->ed_flags);
   25.83 +    }
   25.84  
   25.85      if ( d->id != IDLE_DOMAIN_ID )
   25.86      {
   25.87          /* Initialise the per-domain timer. */
   25.88          init_ac_timer(&d->timer);
   25.89 -        d->timer.cpu      = d->processor;
   25.90 +        d->timer.cpu      = d->exec_domain[0]->processor;
   25.91          d->timer.data     = (unsigned long)d;
   25.92          d->timer.function = &dom_timer_fn;
   25.93      }
   25.94      else
   25.95      {
   25.96 -        schedule_data[d->processor].idle = d;
   25.97 +        schedule_data[d->exec_domain[0]->processor].idle = d->exec_domain[0];
   25.98      }
   25.99  
  25.100 -    SCHED_OP(add_task, d);
  25.101 +    SCHED_OP(add_task, d->exec_domain[0]);
  25.102  
  25.103      TRACE_2D(TRC_SCHED_DOM_ADD, d->id, d);
  25.104  }
  25.105 @@ -158,7 +179,7 @@ void init_idle_task(void)
  25.106          BUG();
  25.107  }
  25.108  
  25.109 -void domain_sleep(struct domain *d)
  25.110 +void domain_sleep(struct exec_domain *d)
  25.111  {
  25.112      unsigned long flags;
  25.113  
  25.114 @@ -170,14 +191,14 @@ void domain_sleep(struct domain *d)
  25.115      spin_unlock_irqrestore(&schedule_data[d->processor].schedule_lock, flags);
  25.116   
  25.117      /* Synchronous. */
  25.118 -    while ( test_bit(DF_RUNNING, &d->flags) && !domain_runnable(d) )
  25.119 +    while ( test_bit(EDF_RUNNING, &d->ed_flags) && !domain_runnable(d) )
  25.120      {
  25.121          smp_mb();
  25.122          cpu_relax();
  25.123      }
  25.124  }
  25.125  
  25.126 -void domain_wake(struct domain *d)
  25.127 +void domain_wake(struct exec_domain *d)
  25.128  {
  25.129      unsigned long flags;
  25.130  
  25.131 @@ -192,7 +213,7 @@ void domain_wake(struct domain *d)
  25.132  #endif
  25.133      }
  25.134      
  25.135 -    clear_bit(DF_MIGRATED, &d->flags);
  25.136 +    clear_bit(EDF_MIGRATED, &d->ed_flags);
  25.137      
  25.138      spin_unlock_irqrestore(&schedule_data[d->processor].schedule_lock, flags);
  25.139  }
  25.140 @@ -202,7 +223,7 @@ long do_block(void)
  25.141  {
  25.142      ASSERT(current->id != IDLE_DOMAIN_ID);
  25.143      current->shared_info->vcpu_data[0].evtchn_upcall_mask = 0;
  25.144 -    set_bit(DF_BLOCKED, &current->flags);
  25.145 +    set_bit(EDF_BLOCKED, &current->ed_flags);
  25.146      TRACE_2D(TRC_SCHED_BLOCK, current->id, current);
  25.147      __enter_scheduler();
  25.148      return 0;
  25.149 @@ -254,7 +275,7 @@ long do_sched_op(unsigned long op)
  25.150  /* Per-domain one-shot-timer hypercall. */
  25.151  long do_set_timer_op(unsigned long timeout_hi, unsigned long timeout_lo)
  25.152  {
  25.153 -    struct domain *p = current;
  25.154 +    struct domain *p = current->domain;
  25.155  
  25.156      rem_ac_timer(&p->timer);
  25.157      
  25.158 @@ -303,9 +324,9 @@ long sched_adjdom(struct sched_adjdom_cm
  25.159  
  25.160      TRACE_1D(TRC_SCHED_ADJDOM, d->id);
  25.161  
  25.162 -    spin_lock_irq(&schedule_data[d->processor].schedule_lock);
  25.163 +    spin_lock_irq(&schedule_data[d->exec_domain[0]->processor].schedule_lock);
  25.164      SCHED_OP(adjdom, d, cmd);
  25.165 -    spin_unlock_irq(&schedule_data[d->processor].schedule_lock);
  25.166 +    spin_unlock_irq(&schedule_data[d->exec_domain[0]->processor].schedule_lock);
  25.167  
  25.168      put_domain(d);
  25.169      return 0;
  25.170 @@ -318,14 +339,14 @@ long sched_adjdom(struct sched_adjdom_cm
  25.171   */
  25.172  void __enter_scheduler(void)
  25.173  {
  25.174 -    struct domain *prev = current, *next = NULL;
  25.175 +    struct exec_domain *prev = current, *next = NULL;
  25.176      int                 cpu = prev->processor;
  25.177      s_time_t            now;
  25.178      task_slice_t        next_slice;
  25.179      s32                 r_time;     /* time for new dom to run */
  25.180  
  25.181      cleanup_writable_pagetable(
  25.182 -        prev, PTWR_CLEANUP_ACTIVE | PTWR_CLEANUP_INACTIVE);
  25.183 +        prev->domain, PTWR_CLEANUP_ACTIVE | PTWR_CLEANUP_INACTIVE);
  25.184  
  25.185      perfc_incrc(sched_run);
  25.186      
  25.187 @@ -337,11 +358,11 @@ void __enter_scheduler(void)
  25.188      
  25.189      ASSERT(!in_irq());
  25.190  
  25.191 -    if ( test_bit(DF_BLOCKED, &prev->flags) )
  25.192 +    if ( test_bit(EDF_BLOCKED, &prev->ed_flags) )
  25.193      {
  25.194          /* This check is needed to avoid a race condition. */
  25.195          if ( event_pending(prev) )
  25.196 -            clear_bit(DF_BLOCKED, &prev->flags);
  25.197 +            clear_bit(EDF_BLOCKED, &prev->ed_flags);
  25.198          else
  25.199              SCHED_OP(do_block, prev);
  25.200      }
  25.201 @@ -363,12 +384,12 @@ void __enter_scheduler(void)
  25.202      add_ac_timer(&schedule_data[cpu].s_timer);
  25.203  
  25.204      /* Must be protected by the schedule_lock! */
  25.205 -    set_bit(DF_RUNNING, &next->flags);
  25.206 +    set_bit(EDF_RUNNING, &next->ed_flags);
  25.207  
  25.208      spin_unlock_irq(&schedule_data[cpu].schedule_lock);
  25.209  
  25.210      /* Ensure that the domain has an up-to-date time base. */
  25.211 -    if ( !is_idle_task(next) )
  25.212 +    if ( !is_idle_task(next->domain) )
  25.213          update_dom_time(next->shared_info);
  25.214  
  25.215      if ( unlikely(prev == next) )
  25.216 @@ -404,10 +425,10 @@ void __enter_scheduler(void)
  25.217       * 'prev' (after this point, a dying domain's info structure may be freed
  25.218       * without warning). 
  25.219       */
  25.220 -    clear_bit(DF_RUNNING, &prev->flags);
  25.221 +    clear_bit(EDF_RUNNING, &prev->ed_flags);
  25.222  
  25.223      /* Mark a timer event for the newly-scheduled domain. */
  25.224 -    if ( !is_idle_task(next) )
  25.225 +    if ( !is_idle_task(next->domain) )
  25.226          send_guest_virq(next, VIRQ_TIMER);
  25.227      
  25.228      schedule_tail(next);
  25.229 @@ -418,7 +439,7 @@ void __enter_scheduler(void)
  25.230  /* No locking needed -- pointer comparison is safe :-) */
  25.231  int idle_cpu(int cpu)
  25.232  {
  25.233 -    struct domain *p = schedule_data[cpu].curr;
  25.234 +    struct exec_domain *p = schedule_data[cpu].curr;
  25.235      return p == idle_task[cpu];
  25.236  }
  25.237  
  25.238 @@ -442,11 +463,11 @@ static void s_timer_fn(unsigned long unu
  25.239  /* Periodic tick timer: send timer event to current domain*/
  25.240  static void t_timer_fn(unsigned long unused)
  25.241  {
  25.242 -    struct domain *p = current;
  25.243 +    struct exec_domain *p = current;
  25.244  
  25.245      TRACE_0D(TRC_SCHED_T_TIMER_FN);
  25.246  
  25.247 -    if ( !is_idle_task(p) ) {
  25.248 +    if ( !is_idle_task(p->domain) ) {
  25.249          update_dom_time(p->shared_info);
  25.250          send_guest_virq(p, VIRQ_TIMER);
  25.251      }
  25.252 @@ -459,24 +480,26 @@ static void t_timer_fn(unsigned long unu
  25.253  static void dom_timer_fn(unsigned long data)
  25.254  {
  25.255      struct domain *p = (struct domain *)data;
  25.256 +    struct exec_domain *ed = p->exec_domain[0];
  25.257      TRACE_0D(TRC_SCHED_DOM_TIMER_FN);
  25.258 -    update_dom_time(p->shared_info);
  25.259 -    send_guest_virq(p, VIRQ_TIMER);
  25.260 +    update_dom_time(ed->shared_info);
  25.261 +    send_guest_virq(ed, VIRQ_TIMER);
  25.262  }
  25.263  
  25.264  
  25.265  /* Fallback timer to ensure guests get time updated 'often enough'. */
  25.266  static void fallback_timer_fn(unsigned long unused)
  25.267  {
  25.268 -    struct domain *p = current;
  25.269 +    struct exec_domain *ed = current;
  25.270 +    struct domain *p = ed->domain;
  25.271  
  25.272      TRACE_0D(TRC_SCHED_FALLBACK_TIMER_FN);
  25.273  
  25.274      if ( !is_idle_task(p) )
  25.275 -        update_dom_time(p->shared_info);
  25.276 +        update_dom_time(ed->shared_info);
  25.277  
  25.278 -    fallback_timer[p->processor].expires = NOW() + MILLISECS(500);
  25.279 -    add_ac_timer(&fallback_timer[p->processor]);
  25.280 +    fallback_timer[ed->processor].expires = NOW() + MILLISECS(500);
  25.281 +    add_ac_timer(&fallback_timer[ed->processor]);
  25.282  }
  25.283  
  25.284  /* Initialise the data structures. */
  25.285 @@ -489,7 +512,7 @@ void __init scheduler_init(void)
  25.286      for ( i = 0; i < NR_CPUS; i++ )
  25.287      {
  25.288          spin_lock_init(&schedule_data[i].schedule_lock);
  25.289 -        schedule_data[i].curr = &idle0_task;
  25.290 +        schedule_data[i].curr = &idle0_exec_domain;
  25.291          
  25.292          init_ac_timer(&schedule_data[i].s_timer);
  25.293          schedule_data[i].s_timer.cpu      = i;
  25.294 @@ -507,7 +530,7 @@ void __init scheduler_init(void)
  25.295          fallback_timer[i].function = &fallback_timer_fn;
  25.296      }
  25.297  
  25.298 -    schedule_data[0].idle = &idle0_task;
  25.299 +    schedule_data[0].idle = &idle0_exec_domain;
  25.300  
  25.301      extern char opt_sched[];
  25.302  
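The schedule.c hunk above turns domain allocation into a two-object affair: the container and its first exec domain must be allocated, zeroed, and cross-linked before the scheduler hook runs, with a single unwind path for failures. A minimal user-space sketch of that linkage, with malloc/free standing in for the xmem_cache allocators and only the allocation-failure leg of the 'out' path modelled:

#include <stdlib.h>
#include <string.h>

/* Minimal stand-ins for the real structures; only the linkage matters here. */
struct exec_domain;
struct domain {
    struct exec_domain *exec_domain[4];   /* MAX_VIRT_CPUS in this changeset */
};
struct exec_domain {
    struct domain *domain;                /* back-pointer to the container */
};

struct domain *alloc_domain_struct(void)
{
    struct domain *d;
    struct exec_domain *ed = NULL;

    if ( (d = malloc(sizeof(*d))) == NULL )
        return NULL;
    memset(d, 0, sizeof(*d));

    if ( (ed = malloc(sizeof(*ed))) == NULL )
        goto out;
    memset(ed, 0, sizeof(*ed));

    /* Link in both directions, exactly as the patch does. */
    d->exec_domain[0] = ed;
    ed->domain = d;

    return d;

 out:
    free(ed);   /* free(NULL) is a no-op, mirroring the 'if ( ed )' guard */
    free(d);
    return NULL;
}

int main(void)
{
    struct domain *d = alloc_domain_struct();
    free(d ? d->exec_domain[0] : NULL);
    free(d);
    return 0;
}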
    26.1 --- a/xen/drivers/char/console.c	Mon Nov 08 15:29:51 2004 +0000
    26.2 +++ b/xen/drivers/char/console.c	Tue Nov 09 10:51:49 2004 +0000
    26.3 @@ -253,7 +253,7 @@ static void __serial_rx(unsigned char c,
    26.4      {
    26.5          serial_rx_ring[SERIAL_RX_MASK(serial_rx_prod)] = c;
    26.6          if ( serial_rx_prod++ == serial_rx_cons )
    26.7 -            send_guest_virq(dom0, VIRQ_CONSOLE);
    26.8 +            send_guest_virq(dom0->exec_domain[0], VIRQ_CONSOLE);
    26.9      }
   26.10  }
   26.11  
   26.12 @@ -286,7 +286,7 @@ long do_console_io(int cmd, int count, c
   26.13  
   26.14  #ifndef VERBOSE
   26.15      /* Only domain-0 may access the emergency console. */
   26.16 -    if ( current->id != 0 )
   26.17 +    if ( current->domain->id != 0 )
   26.18          return -EPERM;
   26.19  #endif
   26.20  
    27.1 --- a/xen/include/asm-x86/domain.h	Mon Nov 08 15:29:51 2004 +0000
    27.2 +++ b/xen/include/asm-x86/domain.h	Tue Nov 09 10:51:49 2004 +0000
    27.3 @@ -2,12 +2,12 @@
    27.4  #ifndef __ASM_X86_DOMAIN_H__
    27.5  #define __ASM_X86_DOMAIN_H__
    27.6  
    27.7 -extern void arch_do_createdomain(struct domain *d);
    27.8 +extern void arch_do_createdomain(struct exec_domain *d);
    27.9  
   27.10  extern int  arch_final_setup_guestos(
   27.11 -    struct domain *d, full_execution_context_t *c);
   27.12 +    struct exec_domain *d, full_execution_context_t *c);
   27.13  
   27.14 -extern void free_perdomain_pt(struct domain *d);
   27.15 +extern void free_perdomain_pt(struct exec_domain *d);
   27.16  
   27.17  extern void domain_relinquish_memory(struct domain *d);
   27.18  
    28.1 --- a/xen/include/asm-x86/i387.h	Mon Nov 08 15:29:51 2004 +0000
    28.2 +++ b/xen/include/asm-x86/i387.h	Tue Nov 09 10:51:49 2004 +0000
    28.3 @@ -15,16 +15,16 @@
    28.4  #include <asm/processor.h>
    28.5  
    28.6  extern void init_fpu(void);
    28.7 -extern void save_init_fpu( struct domain *tsk );
    28.8 -extern void restore_fpu( struct domain *tsk );
    28.9 +extern void save_init_fpu( struct exec_domain *tsk );
   28.10 +extern void restore_fpu( struct exec_domain *tsk );
   28.11  
   28.12  #define unlazy_fpu( tsk ) do { \
   28.13 -	if ( test_bit(DF_USEDFPU, &tsk->flags) ) \
   28.14 +	if ( test_bit(EDF_USEDFPU, &tsk->ed_flags) ) \
   28.15  		save_init_fpu( tsk ); \
   28.16  } while (0)
   28.17  
   28.18  #define clear_fpu( tsk ) do { \
   28.19 -	if ( test_and_clear_bit(DF_USEDFPU, &tsk->flags) ) { \
   28.20 +	if ( test_and_clear_bit(EDF_USEDFPU, &tsk->ed_flags) ) { \
   28.21  		asm volatile("fwait"); \
   28.22  		stts(); \
   28.23  	} \
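The i387.h macros now key off a per-exec-domain flag word rather than the domain's. A small stand-alone sketch of the flag discipline, using plain bit arithmetic in place of the atomic test_bit/test_and_clear_bit and printf in place of the real FPU save and stts (EDF_USEDFPU's value is taken from the sched.h hunk further down):

#include <stdio.h>

#define EDF_USEDFPU 1   /* value from the sched.h hunk in this changeset */

struct exec_domain {
    unsigned long ed_flags;
};

/* Save FPU state only if this vcpu actually touched the FPU. */
static void unlazy_fpu(struct exec_domain *ed)
{
    if ( ed->ed_flags & (1UL << EDF_USEDFPU) )
        printf("save_init_fpu() for this vcpu\n");   /* stands in for the real save */
}

/* Discard FPU ownership: clear the flag and (in the real code) fwait+stts. */
static void clear_fpu(struct exec_domain *ed)
{
    if ( ed->ed_flags & (1UL << EDF_USEDFPU) )
    {
        ed->ed_flags &= ~(1UL << EDF_USEDFPU);
        printf("fwait; stts\n");                     /* hardware side, elided */
    }
}

int main(void)
{
    struct exec_domain ed = { .ed_flags = 1UL << EDF_USEDFPU };
    unlazy_fpu(&ed);
    clear_fpu(&ed);
    clear_fpu(&ed);   /* second call is a no-op: flag already clear */
    return 0;
}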
    29.1 --- a/xen/include/asm-x86/ldt.h	Mon Nov 08 15:29:51 2004 +0000
    29.2 +++ b/xen/include/asm-x86/ldt.h	Tue Nov 09 10:51:49 2004 +0000
    29.3 @@ -3,7 +3,7 @@
    29.4  
    29.5  #ifndef __ASSEMBLY__
    29.6  
    29.7 -static inline void load_LDT(struct domain *p)
    29.8 +static inline void load_LDT(struct exec_domain *p)
    29.9  {
   29.10      unsigned int cpu;
   29.11      struct desc_struct *desc;
    30.1 --- a/xen/include/asm-x86/processor.h	Mon Nov 08 15:29:51 2004 +0000
    30.2 +++ b/xen/include/asm-x86/processor.h	Tue Nov 09 10:51:49 2004 +0000
    30.3 @@ -18,6 +18,7 @@
    30.4  #include <public/xen.h>
    30.5  
    30.6  struct domain;
    30.7 +struct exec_domain;
    30.8  
    30.9  /*
   30.10   * Default implementation of macro that returns current
   30.11 @@ -327,7 +328,7 @@ extern struct desc_struct *idt_tables[];
   30.12              &((_p)->fast_trap_desc), 8))
   30.13  #endif
   30.14  
   30.15 -long set_fast_trap(struct domain *p, int idx);
   30.16 +long set_fast_trap(struct exec_domain *p, int idx);
   30.17  
   30.18  #define INIT_THREAD  {						\
   30.19  	0, 0,		      		       			\
   30.20 @@ -420,11 +421,11 @@ static inline void write_ptbase(struct m
   30.21  #define GET_GDT_ADDRESS(_p)     (*(unsigned long *)((_p)->mm.gdt + 2))
   30.22  
   30.23  void destroy_gdt(struct domain *d);
   30.24 -long set_gdt(struct domain *d, 
   30.25 +long set_gdt(struct exec_domain *d, 
   30.26               unsigned long *frames, 
   30.27               unsigned int entries);
   30.28  
   30.29 -long set_debugreg(struct domain *p, int reg, unsigned long value);
   30.30 +long set_debugreg(struct exec_domain *p, int reg, unsigned long value);
   30.31  
   30.32  struct microcode {
   30.33      unsigned int hdrver;
    31.1 --- a/xen/include/asm-x86/shadow.h	Mon Nov 08 15:29:51 2004 +0000
    31.2 +++ b/xen/include/asm-x86/shadow.h	Tue Nov 09 10:51:49 2004 +0000
    31.3 @@ -40,7 +40,7 @@ extern int shadow_mode_enable(struct dom
    31.4  extern void __shadow_mode_disable(struct domain *d);
    31.5  static inline void shadow_mode_disable(struct domain *d)
    31.6  {
    31.7 -    if ( shadow_mode(d) )
    31.8 +    if ( shadow_mode(d->exec_domain[0]) )
    31.9          __shadow_mode_disable(d);
   31.10  }
   31.11  
   31.12 @@ -62,7 +62,7 @@ struct shadow_status {
   31.13  #ifdef VERBOSE
   31.14  #define SH_LOG(_f, _a...)                             \
   31.15  printk("DOM%u: (file=shadow.c, line=%d) " _f "\n",    \
   31.16 -       current->id , __LINE__ , ## _a )
   31.17 +       current->domain->id , __LINE__ , ## _a )
   31.18  #else
   31.19  #define SH_LOG(_f, _a...) 
   31.20  #endif
    32.1 --- a/xen/include/asm-x86/x86_32/current.h	Mon Nov 08 15:29:51 2004 +0000
    32.2 +++ b/xen/include/asm-x86/x86_32/current.h	Tue Nov 09 10:51:49 2004 +0000
    32.3 @@ -6,9 +6,9 @@ struct domain;
    32.4  #define STACK_RESERVED \
    32.5      (sizeof(execution_context_t) + sizeof(struct domain *))
    32.6  
    32.7 -static inline struct domain * get_current(void)
    32.8 +static inline struct exec_domain * get_current(void)
    32.9  {
   32.10 -    struct domain *current;
   32.11 +    struct exec_domain *current;
   32.12      __asm__ ( "orl %%esp,%0; andl $~3,%0; movl (%0),%0" 
   32.13                : "=r" (current) : "0" (STACK_SIZE-4) );
   32.14      return current;
   32.15 @@ -16,7 +16,7 @@ static inline struct domain * get_curren
   32.16   
   32.17  #define current get_current()
   32.18  
   32.19 -static inline void set_current(struct domain *p)
   32.20 +static inline void set_current(struct exec_domain *p)
   32.21  {
   32.22      __asm__ ( "orl %%esp,%0; andl $~3,%0; movl %1,(%0)" 
   32.23                : : "r" (STACK_SIZE-4), "r" (p) );    
   32.24 @@ -43,7 +43,7 @@ static inline unsigned long get_stack_to
   32.25      __asm__ __volatile__ (                                        \
   32.26          "andl %%esp,%0; addl %2,%0; movl %0,%%esp; jmp *%1"       \
   32.27          : : "r" (~(STACK_SIZE-1)),                                \
   32.28 -            "r" (unlikely(is_idle_task((_p))) ?                   \
   32.29 +            "r" (unlikely(is_idle_task((_p)->domain)) ?           \
   32.30                                  continue_cpu_idle_loop :          \
   32.31                                  continue_nonidle_task),           \
   32.32              "i" (STACK_SIZE-STACK_RESERVED) )
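The current.h trick is unchanged in shape: the per-CPU stack is STACK_SIZE-aligned and the pointer to whatever is 'current' lives in its top word, so masking any in-stack address recovers it; only the pointee type changes to exec_domain. A user-space model of the same recovery, where an aligned_alloc buffer stands in for the stack, base-masking replaces the or/and asm sequence, and sizeof(void *) replaces the hard-coded 4 (STACK_SIZE of 8 kB is an assumption):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define STACK_SIZE 8192   /* assumed; must be a power of two for the mask trick */

struct exec_domain { int eid; };

/* Recover the pointer stored at the top of the stack containing 'sp'. */
static struct exec_domain *get_current(uintptr_t sp)
{
    /* Round sp down to the stack base, then index the top word -- the
       same effect as the orl/andl/movl sequence in the inline asm. */
    uintptr_t base = sp & ~(uintptr_t)(STACK_SIZE - 1);
    return *(struct exec_domain **)(base + STACK_SIZE - sizeof(void *));
}

int main(void)
{
    struct exec_domain ed = { .eid = 0 };
    char *stack = aligned_alloc(STACK_SIZE, STACK_SIZE);
    if ( stack == NULL )
        return 1;

    /* set_current(): plant the pointer in the top word of the stack. */
    *(struct exec_domain **)(stack + STACK_SIZE - sizeof(void *)) = &ed;

    /* Any address inside the stack now leads back to the exec_domain. */
    printf("eid = %d\n", get_current((uintptr_t)(stack + 100))->eid);
    free(stack);
    return 0;
}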
    33.1 --- a/xen/include/public/dom0_ops.h	Mon Nov 08 15:29:51 2004 +0000
    33.2 +++ b/xen/include/public/dom0_ops.h	Tue Nov 09 10:51:49 2004 +0000
    33.3 @@ -85,7 +85,7 @@ typedef struct {
    33.4  typedef struct {
    33.5      /* IN variables. */
    33.6      domid_t  domain;                  /*  0 */ /* NB. IN/OUT variable. */
    33.7 -    u16     __pad;
    33.8 +    u16      exec_domain;
    33.9      /* OUT variables. */
   33.10  #define DOMFLAGS_DYING     (1<<0) /* Domain is scheduled to die.             */
   33.11  #define DOMFLAGS_CRASHED   (1<<1) /* Crashed domain; frozen for postmortem.  */
   33.12 @@ -208,7 +208,7 @@ typedef struct {
   33.13  typedef struct {
   33.14      /* IN variables. */
   33.15      domid_t      domain;              /*  0 */
   33.16 -    u16          __pad;
   33.17 +    u16          exec_domain;
   33.18      s32          cpu;                 /*  4: -1 implies unpin */
   33.19  } PACKED dom0_pincpudomain_t; /* 8 bytes */
   33.20  
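In both dom0_ops.h structures the new u16 exec_domain field occupies bytes that were previously explicit padding, so sizes and the offsets of the surviving fields are untouched and existing control tools keep working until they care about the selector. A compile-time sketch of that claim, assuming domid_t is 16-bit as the offset comments imply, with the PACKED attribute dropped:

#include <stddef.h>
#include <stdint.h>

typedef uint16_t domid_t;   /* assumed 16-bit, matching the offset comments */

/* Old layout: explicit padding after the domain ID. */
struct pincpu_old { domid_t domain; uint16_t __pad;       int32_t cpu; };
/* New layout: the padding bytes become the exec_domain selector. */
struct pincpu_new { domid_t domain; uint16_t exec_domain; int32_t cpu; };

/* The swap is ABI-neutral: same size, same offset for every live field. */
_Static_assert(sizeof(struct pincpu_old) == sizeof(struct pincpu_new),
               "layout unchanged");
_Static_assert(offsetof(struct pincpu_old, cpu) ==
               offsetof(struct pincpu_new, cpu), "cpu offset unchanged");

int main(void) { return 0; }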
    34.1 --- a/xen/include/public/xen.h	Mon Nov 08 15:29:51 2004 +0000
    34.2 +++ b/xen/include/public/xen.h	Tue Nov 09 10:51:49 2004 +0000
    34.3 @@ -251,7 +251,7 @@ typedef struct
    34.4  #define NR_EVENT_CHANNELS 1024
    34.5  
    34.6  /* No support for multi-processor guests. */
    34.7 -#define MAX_VIRT_CPUS 1
    34.8 +#define MAX_VIRT_CPUS 4
    34.9  
   34.10  /*
   34.11   * Xen/guestos shared data -- pointer provided in start_info.
   34.12 @@ -294,6 +294,8 @@ typedef struct shared_info_st
   34.13          u8 pad0, pad1;
   34.14      } PACKED vcpu_data[MAX_VIRT_CPUS];  /*   0 */
   34.15  
   34.16 +    u32 n_vcpu;
   34.17 +
   34.18      /*
   34.19       * A domain can have up to 1024 "event channels" on which it can send
   34.20       * and receive asynchronous event notifications. There are three classes
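The xen.h change is the one guest-visible layout break in the set: growing vcpu_data from one entry to four and inserting n_vcpu after it shifts the offset of everything that follows, so guests must be rebuilt against the matching header. A toy offsetof probe illustrating the shift; the 8-byte vcpu_data entry and the trailing field are stand-ins, not the real layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the shared_info head: only enough to show the offset shift. */
struct vcpu_data { uint8_t bytes[8]; };   /* entry size assumed */

struct shared_info_old {
    struct vcpu_data vcpu_data[1];        /* MAX_VIRT_CPUS was 1 */
    uint32_t evtchn_stuff;                /* stands in for whatever follows */
};

struct shared_info_new {
    struct vcpu_data vcpu_data[4];        /* MAX_VIRT_CPUS is now 4 */
    uint32_t n_vcpu;                      /* newly inserted field */
    uint32_t evtchn_stuff;
};

int main(void)
{
    printf("old offset: %zu, new offset: %zu\n",
           offsetof(struct shared_info_old, evtchn_stuff),
           offsetof(struct shared_info_new, evtchn_stuff));
    return 0;
}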
    35.1 --- a/xen/include/xen/event.h	Mon Nov 08 15:29:51 2004 +0000
    35.2 +++ b/xen/include/xen/event.h	Tue Nov 09 10:51:49 2004 +0000
    35.3 @@ -22,7 +22,8 @@
    35.4  
    35.5  static inline void evtchn_set_pending(struct domain *d, int port)
    35.6  {
    35.7 -    shared_info_t *s = d->shared_info;
    35.8 +    struct exec_domain *ed = d->exec_domain[0];
    35.9 +    shared_info_t *s = ed->shared_info;
   35.10      int            running;
   35.11  
   35.12      /* These three operations must happen in strict order. */
   35.13 @@ -42,10 +43,10 @@ static inline void evtchn_set_pending(st
   35.14           * NB2. We save DF_RUNNING across the unblock to avoid a needless
   35.15           * IPI for domains that we IPI'd to unblock.
   35.16           */
   35.17 -        running = test_bit(DF_RUNNING, &d->flags);
   35.18 -        domain_unblock(d);
   35.19 +        running = test_bit(EDF_RUNNING, &ed->ed_flags);
   35.20 +        exec_domain_unblock(ed);
   35.21          if ( running )
   35.22 -            smp_send_event_check_cpu(d->processor);
   35.23 +            smp_send_event_check_cpu(ed->processor);
   35.24      }
   35.25  }
   35.26  
   35.27 @@ -54,8 +55,9 @@ static inline void evtchn_set_pending(st
   35.28   *  @d:        Domain to which virtual IRQ should be sent
   35.29   *  @virq:     Virtual IRQ number (VIRQ_*)
   35.30   */
   35.31 -static inline void send_guest_virq(struct domain *d, int virq)
   35.32 +static inline void send_guest_virq(struct exec_domain *ed, int virq)
   35.33  {
   35.34 +    struct domain *d = ed->domain;
   35.35      evtchn_set_pending(d, d->virq_to_evtchn[virq]);
   35.36  }
   35.37  
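The event.h hunk shows the division of labour after the split: the virq-to-event-channel binding still lives in the domain, while pending/delivery state belongs to a vcpu, and evtchn_set_pending hardwires delivery to exec_domain[0] for now. A compact model of that routing, with a plain bitmask in place of the real shared_info machinery and a small table size assumed:

#include <stdio.h>

#define NR_VIRQS 8   /* assumed small table size for the sketch */

struct exec_domain;

struct domain {
    int virq_to_evtchn[NR_VIRQS];      /* per-domain binding table */
    struct exec_domain *exec_domain[4];
};

struct exec_domain {
    struct domain *domain;
    unsigned long evtchn_pending;      /* per-vcpu pending bits, simplified */
};

static void evtchn_set_pending(struct domain *d, int port)
{
    /* The patch still hardwires delivery to the first exec domain. */
    struct exec_domain *ed = d->exec_domain[0];
    ed->evtchn_pending |= 1UL << port;
}

static void send_guest_virq(struct exec_domain *ed, int virq)
{
    /* Binding lives in the domain; pending state lands on a vcpu. */
    struct domain *d = ed->domain;
    evtchn_set_pending(d, d->virq_to_evtchn[virq]);
}

int main(void)
{
    struct domain d = { .virq_to_evtchn = { [3] = 5 } };
    struct exec_domain ed = { .domain = &d };
    d.exec_domain[0] = &ed;
    send_guest_virq(&ed, 3);
    printf("pending = %#lx\n", ed.evtchn_pending);   /* bit 5 set */
    return 0;
}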
    36.1 --- a/xen/include/xen/sched-if.h	Mon Nov 08 15:29:51 2004 +0000
    36.2 +++ b/xen/include/xen/sched-if.h	Tue Nov 09 10:51:49 2004 +0000
    36.3 @@ -13,8 +13,8 @@ typedef struct schedule_data_st
    36.4  {
    36.5      spinlock_t          schedule_lock;  /* spinlock protecting curr pointer
    36.6                                              TODO check this */
    36.7 -    struct domain       *curr;          /* current task */
    36.8 -    struct domain       *idle;          /* idle task for this cpu */
    36.9 +    struct exec_domain  *curr;          /* current task */
   36.10 +    struct exec_domain  *idle;          /* idle task for this cpu */
   36.11      void *              sched_priv;
   36.12      struct ac_timer     s_timer;        /* scheduling timer  */
   36.13  #ifdef BUCKETS
   36.14 @@ -25,7 +25,7 @@ typedef struct schedule_data_st
   36.15  
   36.16  typedef struct task_slice_st
   36.17  {
   36.18 -    struct domain *task;
   36.19 +    struct exec_domain *task;
   36.20      s_time_t            time;
   36.21  } task_slice_t;
   36.22  
   36.23 @@ -36,14 +36,14 @@ struct scheduler
   36.24      unsigned int sched_id;  /* ID for this scheduler             */
   36.25  
   36.26      int          (*init_scheduler) ();
   36.27 -    int          (*init_idle_task) (struct domain *);
   36.28 -    int          (*alloc_task)     (struct domain *);
   36.29 -    void         (*add_task)       (struct domain *);
   36.30 +    int          (*init_idle_task) (struct exec_domain *);
   36.31 +    int          (*alloc_task)     (struct exec_domain *);
   36.32 +    void         (*add_task)       (struct exec_domain *);
   36.33      void         (*free_task)      (struct domain *);
   36.34      void         (*rem_task)       (struct domain *);
   36.35 -    void         (*sleep)          (struct domain *);
   36.36 -    void         (*wake)           (struct domain *);
   36.37 -    void         (*do_block)       (struct domain *);
   36.38 +    void         (*sleep)          (struct exec_domain *);
   36.39 +    void         (*wake)           (struct exec_domain *);
   36.40 +    void         (*do_block)       (struct exec_domain *);
   36.41      task_slice_t (*do_schedule)    (s_time_t);
   36.42      int          (*control)        (struct sched_ctl_cmd *);
   36.43      int          (*adjdom)         (struct domain *,
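The retyped scheduler interface above now mixes per-domain hooks (free_task, rem_task, adjdom) with per-exec-domain hooks (alloc_task, add_task, sleep, wake, do_block), so the compiler catches a scheduler that confuses the two. A cut-down stub table showing the two pointer types side by side; the hook names follow the patch, the bodies are placeholders:

#include <stddef.h>

struct domain;
struct exec_domain;

/* Cut-down 'struct scheduler': accounting hooks take the domain,
 * run-state hooks take the exec domain. */
struct scheduler {
    const char *name;
    int  (*alloc_task)(struct exec_domain *);
    void (*add_task)  (struct exec_domain *);
    void (*free_task) (struct domain *);
    void (*sleep)     (struct exec_domain *);
    void (*wake)      (struct exec_domain *);
};

static int  stub_alloc(struct exec_domain *ed) { (void)ed; return 0; }
static void stub_add  (struct exec_domain *ed) { (void)ed; }
static void stub_free (struct domain *d)       { (void)d; }
static void stub_sleep(struct exec_domain *ed) { (void)ed; }
static void stub_wake (struct exec_domain *ed) { (void)ed; }

static struct scheduler sched_stub = {
    .name       = "stub",
    .alloc_task = stub_alloc,
    .add_task   = stub_add,
    .free_task  = stub_free,
    .sleep      = stub_sleep,
    .wake       = stub_wake,
};

int main(void) { return sched_stub.alloc_task(NULL); }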
    37.1 --- a/xen/include/xen/sched.h	Mon Nov 08 15:29:51 2004 +0000
    37.2 +++ b/xen/include/xen/sched.h	Tue Nov 09 10:51:49 2004 +0000
    37.3 @@ -53,7 +53,7 @@ typedef struct event_channel_st
    37.4  int  init_event_channels(struct domain *d);
    37.5  void destroy_event_channels(struct domain *d);
    37.6  
    37.7 -struct domain 
    37.8 +struct exec_domain 
    37.9  {
   37.10      /*
   37.11       * DO NOT CHANGE THE ORDER OF THE FOLLOWING.
   37.12 @@ -89,6 +89,27 @@ struct domain
   37.13       * From here on things can be added and shuffled without special attention
   37.14       */
   37.15  
   37.16 +    struct domain *domain;
   37.17 +    struct exec_domain *ed_next_list;
   37.18 +    int eid;
   37.19 +
   37.20 +    struct mm_struct mm;
   37.21 +
   37.22 +    struct thread_struct thread;
   37.23 +
   37.24 +    s_time_t         lastschd;      /* time this domain was last scheduled */
   37.25 +    s_time_t         lastdeschd;    /* time this domain was last descheduled */
   37.26 +    s_time_t         cpu_time;      /* total CPU time received till now */
   37.27 +    s_time_t         wokenup;       /* time domain got woken up */
   37.28 +    void            *ed_sched_priv;    /* scheduler-specific data */
   37.29 +
   37.30 +    unsigned long ed_flags;
   37.31 +
   37.32 +    atomic_t pausecnt;
   37.33 +
   37.34 +};
   37.35 +
   37.36 +struct domain {
   37.37      domid_t  id;
   37.38      s_time_t create_time;
   37.39  
   37.40 @@ -101,16 +122,9 @@ struct domain
   37.41  
   37.42      /* Scheduling. */
   37.43      int              shutdown_code; /* code value from OS (if DF_SHUTDOWN). */
   37.44 -    s_time_t         lastschd;      /* time this domain was last scheduled */
   37.45 -    s_time_t         lastdeschd;    /* time this domain was last descheduled */
   37.46 -    s_time_t         cpu_time;      /* total CPU time received till now */
   37.47 -    s_time_t         wokenup;       /* time domain got woken up */
   37.48      struct ac_timer  timer;         /* one-shot timer for timeout values */
   37.49      void            *sched_priv;    /* scheduler-specific data */
   37.50  
   37.51 -    struct mm_struct mm;
   37.52 -
   37.53 -    struct thread_struct thread;
   37.54      struct domain *next_list, *next_hash;
   37.55  
   37.56      /* Event channel information. */
   37.57 @@ -142,11 +156,12 @@ struct domain
   37.58  #define IOBMP_SELBIT_LWORDS (IO_BITMAP_SIZE / 64)
   37.59      unsigned long *io_bitmap; /* Pointer to task's IO bitmap or NULL */
   37.60  
   37.61 -    unsigned long flags;
   37.62 +    unsigned long d_flags;
   37.63      unsigned long vm_assist;
   37.64  
   37.65      atomic_t refcnt;
   37.66 -    atomic_t pausecnt;
   37.67 +
   37.68 +    struct exec_domain *exec_domain[MAX_VIRT_CPUS];
   37.69  };
   37.70  
   37.71  struct domain_setup_info
   37.72 @@ -161,11 +176,12 @@ struct domain_setup_info
   37.73  
   37.74  #include <asm/uaccess.h> /* for KERNEL_DS */
   37.75  
   37.76 -extern struct domain idle0_task;
   37.77 +extern struct domain idle0_domain;
   37.78 +extern struct exec_domain idle0_exec_domain;
   37.79  
   37.80 -extern struct domain *idle_task[NR_CPUS];
   37.81 +extern struct exec_domain *idle_task[NR_CPUS];
   37.82  #define IDLE_DOMAIN_ID   (0x7FFFU)
   37.83 -#define is_idle_task(_p) (test_bit(DF_IDLETASK, &(_p)->flags))
   37.84 +#define is_idle_task(_p) (test_bit(DF_IDLETASK, &(_p)->d_flags))
   37.85  
   37.86  void free_domain_struct(struct domain *d);
   37.87  struct domain *alloc_domain_struct();
   37.88 @@ -220,7 +236,7 @@ extern void domain_kill(struct domain *d
   37.89  extern void domain_crash(void);
   37.90  extern void domain_shutdown(u8 reason);
   37.91  
   37.92 -void new_thread(struct domain *d,
   37.93 +void new_thread(struct exec_domain *d,
   37.94                  unsigned long start_pc,
   37.95                  unsigned long start_stack,
   37.96                  unsigned long start_info);
   37.97 @@ -237,13 +253,13 @@ long sched_ctl(struct sched_ctl_cmd *);
   37.98  long sched_adjdom(struct sched_adjdom_cmd *);
   37.99  int  sched_id();
  37.100  void init_idle_task(void);
  37.101 -void domain_wake(struct domain *d);
  37.102 -void domain_sleep(struct domain *d);
  37.103 +void domain_wake(struct exec_domain *d);
  37.104 +void domain_sleep(struct exec_domain *d);
  37.105  
  37.106  void __enter_scheduler(void);
  37.107  
  37.108 -extern void switch_to(struct domain *prev, 
  37.109 -                      struct domain *next);
  37.110 +extern void switch_to(struct exec_domain *prev, 
  37.111 +                      struct exec_domain *next);
  37.112  
  37.113  void domain_init(void);
  37.114  
  37.115 @@ -263,65 +279,100 @@ extern struct domain *domain_list;
  37.116  #define for_each_domain(_p) \
  37.117   for ( (_p) = domain_list; (_p) != NULL; (_p) = (_p)->next_list )
  37.118  
  37.119 -#define DF_DONEFPUINIT  0 /* Has the FPU been initialised for this task?    */
  37.120 -#define DF_USEDFPU      1 /* Has this task used the FPU since last save?    */
  37.121 -#define DF_GUEST_STTS   2 /* Has the guest OS requested 'stts'?             */
  37.122 -#define DF_CONSTRUCTED  3 /* Has the guest OS been fully built yet?         */
  37.123 -#define DF_IDLETASK     4 /* Is this one of the per-CPU idle domains?       */
  37.124 -#define DF_PRIVILEGED   5 /* Is this domain privileged?                     */
  37.125 -#define DF_PHYSDEV      6 /* May this domain do IO to physical devices?     */
  37.126 -#define DF_BLOCKED      7 /* Domain is blocked waiting for an event.        */
  37.127 -#define DF_CTRLPAUSE    8 /* Domain is paused by controller software.       */
  37.128 -#define DF_SHUTDOWN     9 /* Guest shut itself down for some reason.        */
  37.129 -#define DF_CRASHED     10 /* Domain crashed inside Xen, cannot continue.    */
  37.130 -#define DF_DYING       11 /* Death rattle.                                  */
  37.131 -#define DF_RUNNING     12 /* Currently running on a CPU.                    */
  37.132 -#define DF_CPUPINNED   13 /* Disables auto-migration.                       */
  37.133 -#define DF_MIGRATED    14 /* Domain migrated between CPUs.                  */ 
  37.134 +#define for_each_exec_domain(_d,_ed) \
  37.135 + for ( (_ed) = _d->exec_domain[0]; (_ed) != NULL; (_ed) = (_ed)->ed_next_list )
  37.136  
  37.137 -static inline int domain_runnable(struct domain *d)
  37.138 +#define EDF_DONEFPUINIT  0 /* Has the FPU been initialised for this task?    */
  37.139 +#define EDF_USEDFPU      1 /* Has this task used the FPU since last save?    */
  37.140 +#define EDF_GUEST_STTS   2 /* Has the guest OS requested 'stts'?             */
  37.141 +#define  DF_CONSTRUCTED  3 /* Has the guest OS been fully built yet?         */
  37.142 +#define  DF_IDLETASK     4 /* Is this one of the per-CPU idle domains?       */
  37.143 +#define  DF_PRIVILEGED   5 /* Is this domain privileged?                     */
  37.144 +#define  DF_PHYSDEV      6 /* May this domain do IO to physical devices?     */
  37.145 +#define EDF_BLOCKED      7 /* Domain is blocked waiting for an event.        */
  37.146 +#define EDF_CTRLPAUSE    8 /* Domain is paused by controller software.       */
  37.147 +#define  DF_SHUTDOWN     9 /* Guest shut itself down for some reason.        */
  37.148 +#define  DF_CRASHED     10 /* Domain crashed inside Xen, cannot continue.    */
  37.149 +#define  DF_DYING       11 /* Death rattle.                                  */
  37.150 +#define EDF_RUNNING     12 /* Currently running on a CPU.                    */
  37.151 +#define EDF_CPUPINNED   13 /* Disables auto-migration.                       */
  37.152 +#define EDF_MIGRATED    14 /* Domain migrated between CPUs.                  */
  37.153 +
  37.154 +static inline int domain_runnable(struct exec_domain *d)
  37.155  {
  37.156      return ( (atomic_read(&d->pausecnt) == 0) &&
  37.157 -             !(d->flags & ((1<<DF_BLOCKED)|(1<<DF_CTRLPAUSE)|
  37.158 -                           (1<<DF_SHUTDOWN)|(1<<DF_CRASHED))) );
  37.159 +             !(d->ed_flags & ((1<<EDF_BLOCKED)|(1<<EDF_CTRLPAUSE))) &&
  37.160 +             !(d->domain->d_flags & ((1<<DF_SHUTDOWN)|(1<<DF_CRASHED))) );
  37.161 +}
  37.162 +
  37.163 +static inline void exec_domain_pause(struct exec_domain *ed)
  37.164 +{
  37.165 +    ASSERT(ed != current);
  37.166 +    atomic_inc(&ed->pausecnt);
  37.167 +    domain_sleep(ed);
  37.168  }
  37.169  
  37.170  static inline void domain_pause(struct domain *d)
  37.171  {
  37.172 -    ASSERT(d != current);
  37.173 -    atomic_inc(&d->pausecnt);
  37.174 -    domain_sleep(d);
  37.175 +    struct exec_domain *ed;
  37.176 +
  37.177 +    for_each_exec_domain(d, ed)
  37.178 +        exec_domain_pause(ed);
  37.179 +}
  37.180 +
  37.181 +static inline void exec_domain_unpause(struct exec_domain *ed)
  37.182 +{
  37.183 +    ASSERT(ed != current);
  37.184 +    if ( atomic_dec_and_test(&ed->pausecnt) )
  37.185 +        domain_wake(ed);
  37.186  }
  37.187  
  37.188  static inline void domain_unpause(struct domain *d)
  37.189  {
  37.190 -    ASSERT(d != current);
  37.191 -    if ( atomic_dec_and_test(&d->pausecnt) )
  37.192 -        domain_wake(d);
  37.193 +    struct exec_domain *ed;
  37.194 +
  37.195 +    for_each_exec_domain(d, ed)
  37.196 +        exec_domain_unpause(ed);
  37.197 +}
  37.198 +
  37.199 +static inline void exec_domain_unblock(struct exec_domain *ed)
  37.200 +{
  37.201 +    if ( test_and_clear_bit(EDF_BLOCKED, &ed->ed_flags) )
  37.202 +        domain_wake(ed);
  37.203  }
  37.204  
  37.205  static inline void domain_unblock(struct domain *d)
  37.206  {
  37.207 -    if ( test_and_clear_bit(DF_BLOCKED, &d->flags) )
  37.208 -        domain_wake(d);
  37.209 +    struct exec_domain *ed;
  37.210 +
  37.211 +    for_each_exec_domain(d, ed)
  37.212 +        exec_domain_unblock(ed);
  37.213  }
  37.214  
  37.215  static inline void domain_pause_by_systemcontroller(struct domain *d)
  37.216  {
  37.217 -    ASSERT(d != current);
  37.218 -    if ( !test_and_set_bit(DF_CTRLPAUSE, &d->flags) )
  37.219 -        domain_sleep(d);
  37.220 +    struct exec_domain *ed;
  37.221 +
  37.222 +    for_each_exec_domain(d, ed) {
  37.223 +        ASSERT(ed != current);
  37.224 +        if ( !test_and_set_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
  37.225 +            domain_sleep(ed);
  37.226 +    }
  37.227  }
  37.228  
  37.229  static inline void domain_unpause_by_systemcontroller(struct domain *d)
  37.230  {
  37.231 -    if ( test_and_clear_bit(DF_CTRLPAUSE, &d->flags) )
  37.232 -        domain_wake(d);
  37.233 +    struct exec_domain *ed;
  37.234 +
  37.235 +    for_each_exec_domain(d, ed) {
  37.236 +        if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
  37.237 +            domain_wake(ed);
  37.238 +    }
  37.239  }
  37.240  
  37.241  
  37.242 -#define IS_PRIV(_d) (test_bit(DF_PRIVILEGED, &(_d)->flags))
  37.243 -#define IS_CAPABLE_PHYSDEV(_d) (test_bit(DF_PHYSDEV, &(_d)->flags))
  37.244 +#define IS_PRIV(_d) (test_bit(DF_PRIVILEGED, &(_d)->d_flags))
  37.245 +#define IS_CAPABLE_PHYSDEV(_d) (test_bit(DF_PHYSDEV, &(_d)->d_flags))
  37.246  
  37.247  #define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
  37.248
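Taken together, the sched.h rewrites make every pause/unblock primitive fan out over the exec-domain list, while runnability becomes a predicate over two flag words: the vcpu's and its domain's. A stand-alone sketch of that split, with plain ints replacing atomic_t and the bitops, flag values taken from the hunk above, and domain_sleep elided:

#include <stdio.h>

#define EDF_BLOCKED   7
#define EDF_CTRLPAUSE 8
#define DF_SHUTDOWN   9
#define DF_CRASHED   10

struct domain;

struct exec_domain {
    struct exec_domain *ed_next_list;
    struct domain *domain;
    unsigned long ed_flags;
    int pausecnt;                       /* atomic_t in the real code */
};

struct domain {
    unsigned long d_flags;
    struct exec_domain *exec_domain[4];
};

#define for_each_exec_domain(_d, _ed) \
    for ( (_ed) = (_d)->exec_domain[0]; (_ed) != NULL; (_ed) = (_ed)->ed_next_list )

/* Runnable iff: not paused, no blocking per-vcpu flag, and the
 * containing domain is neither shut down nor crashed. */
static int domain_runnable(struct exec_domain *ed)
{
    return ( ed->pausecnt == 0 &&
             !(ed->ed_flags & ((1UL<<EDF_BLOCKED)|(1UL<<EDF_CTRLPAUSE))) &&
             !(ed->domain->d_flags & ((1UL<<DF_SHUTDOWN)|(1UL<<DF_CRASHED))) );
}

/* Pausing a domain now means pausing each of its exec domains. */
static void domain_pause(struct domain *d)
{
    struct exec_domain *ed;
    for_each_exec_domain(d, ed)
        ed->pausecnt++;                 /* domain_sleep() elided */
}

int main(void)
{
    struct exec_domain ed = { 0 };
    struct domain d = { 0 };
    ed.domain = &d; d.exec_domain[0] = &ed;
    printf("runnable: %d\n", domain_runnable(&ed));   /* 1 */
    domain_pause(&d);
    printf("runnable: %d\n", domain_runnable(&ed));   /* 0 */
    return 0;
}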