ia64/xen-unstable

changeset 4798:979aa5d4764e

bitkeeper revision 1.1389.5.35 (427e00b2juTv-JMiPdIYinvwaH2N8Q)

Field-name cleanups.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sun May 08 12:06:10 2005 +0000 (2005-05-08)
parents c0463989fca8
children dff93c0ff33e
files xen/arch/ia64/domain.c xen/arch/ia64/idle0_task.c xen/arch/ia64/xenmisc.c xen/arch/ia64/xensetup.c xen/arch/x86/dom0_ops.c xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/i387.c xen/arch/x86/idle0_task.c xen/arch/x86/mm.c xen/arch/x86/physdev.c xen/arch/x86/setup.c xen/arch/x86/shadow.c xen/arch/x86/smpboot.c xen/arch/x86/traps.c xen/common/dom0_ops.c xen/common/domain.c xen/common/event_channel.c xen/common/keyhandler.c xen/common/page_alloc.c xen/common/sched_bvt.c xen/common/schedule.c xen/include/asm-x86/config.h xen/include/asm-x86/debugger.h xen/include/asm-x86/i387.h xen/include/xen/event.h xen/include/xen/sched.h
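In summary, struct exec_domain's eid, ed_flags, ed_sched_priv and ed_next_list become id, flags, sched_priv and next_in_list, and struct domain's d_flags, next_list and next_hash become flags, next_in_list and next_in_hashbucket; every call site is updated to match. Below is a minimal, self-contained sketch of the resulting call-site pattern. The trimmed struct bodies and the set_bit_sketch()/test_bit_sketch()/main() helpers are illustrative stand-ins only, not Xen's real definitions (those live in xen/include/xen/sched.h and the architecture headers changed in the hunks that follow):

    /* Sketch of the new field names and call-site pattern; stand-in
     * helpers approximate Xen's set_bit()/test_bit() for illustration. */
    #include <stdio.h>

    #define DF_CONSTRUCTED 0            /* per-domain flag (new numbering) */
    #define EDF_RUNNING    5            /* per-vcpu flag  (new numbering)  */

    struct exec_domain {
        int           id;               /* was: eid      */
        unsigned long flags;            /* was: ed_flags */
    };

    struct domain {
        unsigned long flags;            /* was: d_flags  */
    };

    static void set_bit_sketch(int nr, unsigned long *addr)
    {
        *addr |= 1UL << nr;
    }

    static int test_bit_sketch(int nr, const unsigned long *addr)
    {
        return (*addr >> nr) & 1;
    }

    int main(void)
    {
        struct domain d = { 0 };
        struct exec_domain ed = { 0, 0 };

        /* Old call site: set_bit(DF_CONSTRUCTED, &d->d_flags);
         * New call site: set_bit(DF_CONSTRUCTED, &d->flags);   */
        set_bit_sketch(DF_CONSTRUCTED, &d.flags);
        set_bit_sketch(EDF_RUNNING, &ed.flags);

        printf("vcpu %d: constructed=%d running=%d\n", ed.id,
               test_bit_sketch(DF_CONSTRUCTED, &d.flags),
               test_bit_sketch(EDF_RUNNING, &ed.flags));
        return 0;
    }
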
line diff
     1.1 --- a/xen/arch/ia64/domain.c	Sun May 08 11:12:14 2005 +0000
     1.2 +++ b/xen/arch/ia64/domain.c	Sun May 08 12:06:10 2005 +0000
     1.3 @@ -626,7 +626,7 @@ int construct_dom0(struct domain *d,
     1.4  #ifndef CLONE_DOMAIN0
     1.5  	if ( d != dom0 ) 
     1.6  	    BUG();
     1.7 -	if ( test_bit(DF_CONSTRUCTED, &d->d_flags) ) 
     1.8 +	if ( test_bit(DF_CONSTRUCTED, &d->flags) ) 
     1.9  	    BUG();
    1.10  #endif
    1.11  
    1.12 @@ -753,7 +753,7 @@ int construct_dom0(struct domain *d,
    1.13  #endif
    1.14  	console_endboot(strstr(cmdline, "tty0") != NULL);
    1.15  
    1.16 -	set_bit(DF_CONSTRUCTED, &d->d_flags);
    1.17 +	set_bit(DF_CONSTRUCTED, &d->flags);
    1.18  
    1.19  	new_thread(ed, pkern_entry, 0, 0);
    1.20  	// FIXME: Hack for keyboard input
    1.21 @@ -783,7 +783,7 @@ int construct_domU(struct domain *d,
    1.22  	unsigned long pkern_entry;
    1.23  
    1.24  #ifndef DOMU_AUTO_RESTART
    1.25 -	if ( test_bit(DF_CONSTRUCTED, &d->d_flags) ) BUG();
    1.26 +	if ( test_bit(DF_CONSTRUCTED, &d->flags) ) BUG();
    1.27  #endif
    1.28  
    1.29  	printk("*** LOADING DOMAIN %d ***\n",d->id);
    1.30 @@ -816,7 +816,7 @@ int construct_domU(struct domain *d,
    1.31  	loaddomainelfimage(d,image_start);
    1.32  	printk("loaddomainelfimage returns\n");
    1.33  
    1.34 -	set_bit(DF_CONSTRUCTED, &d->d_flags);
    1.35 +	set_bit(DF_CONSTRUCTED, &d->flags);
    1.36  
    1.37  	printk("calling new_thread, entry=%p\n",pkern_entry);
    1.38  #ifdef DOMU_AUTO_RESTART
     2.1 --- a/xen/arch/ia64/idle0_task.c	Sun May 08 11:12:14 2005 +0000
     2.2 +++ b/xen/arch/ia64/idle0_task.c	Sun May 08 12:06:10 2005 +0000
     2.3 @@ -22,7 +22,7 @@
     2.4  #define IDLE0_DOMAIN(_t)             \
     2.5  {                                    \
     2.6      id:          IDLE_DOMAIN_ID,     \
     2.7 -    d_flags:     1<<DF_IDLETASK,     \
     2.8 +    flags:     1<<DF_IDLETASK,       \
     2.9      refcnt:      ATOMIC_INIT(1)      \
    2.10  }
    2.11  
     3.1 --- a/xen/arch/ia64/xenmisc.c	Sun May 08 11:12:14 2005 +0000
     3.2 +++ b/xen/arch/ia64/xenmisc.c	Sun May 08 12:06:10 2005 +0000
     3.3 @@ -240,7 +240,7 @@ int id = ((struct exec_domain *)current)
     3.4  if (!cnt[id]--) { printk("%x",id); cnt[id] = 50; }
     3.5  if (!i--) { printk("+",id); cnt[id] = 100; }
     3.6  }
     3.7 -	clear_bit(EDF_RUNNING, &prev->ed_flags);
     3.8 +	clear_bit(EDF_RUNNING, &prev->flags);
     3.9  	//if (!is_idle_task(next->domain) )
    3.10  		//send_guest_virq(next, VIRQ_TIMER);
    3.11  	load_region_regs(current);
    3.12 @@ -270,7 +270,7 @@ loop:
    3.13  	printf(buf);
    3.14  	if (regs) show_registers(regs);
    3.15  	domain_pause_by_systemcontroller(current->domain);
    3.16 -	set_bit(DF_CRASHED, ed->domain->d_flags);
    3.17 +	set_bit(DF_CRASHED, ed->domain->flags);
    3.18  	if (ed->domain->id == 0) {
    3.19  		int i = 1000000000L;
    3.20  		// if domain0 crashes, just periodically print out panic
     4.1 --- a/xen/arch/ia64/xensetup.c	Sun May 08 11:12:14 2005 +0000
     4.2 +++ b/xen/arch/ia64/xensetup.c	Sun May 08 12:06:10 2005 +0000
     4.3 @@ -267,7 +267,7 @@ printk("About to call init_idle_task()\n
     4.4      if ( dom0 == NULL )
     4.5          panic("Error creating domain 0\n");
     4.6  
     4.7 -    set_bit(DF_PRIVILEGED, &dom0->d_flags);
     4.8 +    set_bit(DF_PRIVILEGED, &dom0->flags);
     4.9  
    4.10      /*
    4.11       * We're going to setup domain0 using the module(s) that we stashed safely
     5.1 --- a/xen/arch/x86/dom0_ops.c	Sun May 08 11:12:14 2005 +0000
     5.2 +++ b/xen/arch/x86/dom0_ops.c	Sun May 08 12:06:10 2005 +0000
     5.3 @@ -397,7 +397,7 @@ void arch_getdomaininfo_ctxt(
     5.4  #endif
     5.5  
     5.6      c->flags = 0;
     5.7 -    if ( test_bit(EDF_DONEFPUINIT, &ed->ed_flags) )
     5.8 +    if ( test_bit(EDF_DONEFPUINIT, &ed->flags) )
     5.9          c->flags |= VGCF_I387_VALID;
    5.10      if ( KERNEL_MODE(ed, &ed->arch.guest_context.user_regs) )
    5.11          c->flags |= VGCF_IN_KERNEL;
     6.1 --- a/xen/arch/x86/domain.c	Sun May 08 11:12:14 2005 +0000
     6.2 +++ b/xen/arch/x86/domain.c	Sun May 08 12:06:10 2005 +0000
     6.3 @@ -252,7 +252,7 @@ void arch_do_createdomain(struct exec_do
     6.4  
     6.5          d->shared_info = (void *)alloc_xenheap_page();
     6.6          memset(d->shared_info, 0, PAGE_SIZE);
     6.7 -        ed->vcpu_info = &d->shared_info->vcpu_data[ed->eid];
     6.8 +        ed->vcpu_info = &d->shared_info->vcpu_data[ed->id];
     6.9          SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
    6.10          machine_to_phys_mapping[virt_to_phys(d->shared_info) >> 
    6.11                                 PAGE_SHIFT] = INVALID_M2P_ENTRY;
    6.12 @@ -294,7 +294,7 @@ void arch_do_boot_vcpu(struct exec_domai
    6.13      struct domain *d = ed->domain;
    6.14      ed->arch.schedule_tail = d->exec_domain[0]->arch.schedule_tail;
    6.15      ed->arch.perdomain_ptes = 
    6.16 -        d->arch.mm_perdomain_pt + (ed->eid << PDPT_VCPU_SHIFT);
    6.17 +        d->arch.mm_perdomain_pt + (ed->id << PDPT_VCPU_SHIFT);
    6.18      ed->arch.flags = TF_kernel_mode;
    6.19  }
    6.20  
    6.21 @@ -397,9 +397,9 @@ int arch_set_info_guest(
    6.22                  return -EINVAL;
    6.23      }
    6.24  
    6.25 -    clear_bit(EDF_DONEFPUINIT, &ed->ed_flags);
    6.26 +    clear_bit(EDF_DONEFPUINIT, &ed->flags);
    6.27      if ( c->flags & VGCF_I387_VALID )
    6.28 -        set_bit(EDF_DONEFPUINIT, &ed->ed_flags);
    6.29 +        set_bit(EDF_DONEFPUINIT, &ed->flags);
    6.30  
    6.31      ed->arch.flags &= ~TF_kernel_mode;
    6.32      if ( c->flags & VGCF_IN_KERNEL )
    6.33 @@ -415,7 +415,7 @@ int arch_set_info_guest(
    6.34      if ( !IS_PRIV(d) )
    6.35          ed->arch.guest_context.user_regs.eflags &= 0xffffcfff;
    6.36  
    6.37 -    if ( test_bit(EDF_DONEINIT, &ed->ed_flags) )
    6.38 +    if ( test_bit(EDF_DONEINIT, &ed->flags) )
    6.39          return 0;
    6.40  
    6.41      if ( (rc = (int)set_fast_trap(ed, c->fast_trap_idx)) != 0 )
    6.42 @@ -426,7 +426,7 @@ int arch_set_info_guest(
    6.43      for ( i = 0; i < 8; i++ )
    6.44          (void)set_debugreg(ed, i, c->debugreg[i]);
    6.45  
    6.46 -    if ( ed->eid == 0 )
    6.47 +    if ( ed->id == 0 )
    6.48          d->vm_assist = c->vm_assist;
    6.49  
    6.50      phys_basetab = c->pt_base;
    6.51 @@ -478,7 +478,7 @@ int arch_set_info_guest(
    6.52      update_pagetables(ed);
    6.53      
    6.54      /* Don't redo final setup */
    6.55 -    set_bit(EDF_DONEINIT, &ed->ed_flags);
    6.56 +    set_bit(EDF_DONEINIT, &ed->flags);
    6.57  
    6.58      return 0;
    6.59  }
    6.60 @@ -796,7 +796,7 @@ void context_switch(struct exec_domain *
    6.61       * 'prev' (after this point, a dying domain's info structure may be freed
    6.62       * without warning). 
    6.63       */
    6.64 -    clear_bit(EDF_RUNNING, &prev->ed_flags);
    6.65 +    clear_bit(EDF_RUNNING, &prev->flags);
    6.66  
    6.67      schedule_tail(next);
    6.68      BUG();
     7.1 --- a/xen/arch/x86/domain_build.c	Sun May 08 11:12:14 2005 +0000
     7.2 +++ b/xen/arch/x86/domain_build.c	Sun May 08 12:06:10 2005 +0000
     7.3 @@ -114,7 +114,7 @@ int construct_dom0(struct domain *d,
     7.4      /* Sanity! */
     7.5      if ( d->id != 0 ) 
     7.6          BUG();
     7.7 -    if ( test_bit(DF_CONSTRUCTED, &d->d_flags) ) 
     7.8 +    if ( test_bit(DF_CONSTRUCTED, &d->flags) ) 
     7.9          BUG();
    7.10  
    7.11      memset(&dsi, 0, sizeof(struct domain_setup_info));
    7.12 @@ -540,7 +540,7 @@ int construct_dom0(struct domain *d,
    7.13      /* DOM0 gets access to everything. */
    7.14      physdev_init_dom0(d);
    7.15  
    7.16 -    set_bit(DF_CONSTRUCTED, &d->d_flags);
    7.17 +    set_bit(DF_CONSTRUCTED, &d->flags);
    7.18  
    7.19      new_thread(ed, dsi.v_kernentry, vstack_end, vstartinfo_start);
    7.20  
     8.1 --- a/xen/arch/x86/i387.c	Sun May 08 11:12:14 2005 +0000
     8.2 +++ b/xen/arch/x86/i387.c	Sun May 08 12:06:10 2005 +0000
     8.3 @@ -18,7 +18,7 @@ void init_fpu(void)
     8.4      __asm__ __volatile__ ( "fninit" );
     8.5      if ( cpu_has_xmm )
     8.6          load_mxcsr(0x1f80);
     8.7 -    set_bit(EDF_DONEFPUINIT, &current->ed_flags);
     8.8 +    set_bit(EDF_DONEFPUINIT, &current->flags);
     8.9  }
    8.10  
    8.11  void save_init_fpu(struct exec_domain *tsk)
    8.12 @@ -28,7 +28,7 @@ void save_init_fpu(struct exec_domain *t
    8.13       * This causes us to set the real flag, so we'll need
    8.14       * to temporarily clear it while saving f-p state.
    8.15       */
    8.16 -    if ( test_bit(EDF_GUEST_STTS, &tsk->ed_flags) )
    8.17 +    if ( test_bit(EDF_GUEST_STTS, &tsk->flags) )
    8.18          clts();
    8.19  
    8.20      if ( cpu_has_fxsr )
    8.21 @@ -40,7 +40,7 @@ void save_init_fpu(struct exec_domain *t
    8.22              "fnsave %0 ; fwait"
    8.23              : "=m" (tsk->arch.guest_context.fpu_ctxt) );
    8.24  
    8.25 -    clear_bit(EDF_USEDFPU, &tsk->ed_flags);
    8.26 +    clear_bit(EDF_USEDFPU, &tsk->flags);
    8.27      stts();
    8.28  }
    8.29  
     9.1 --- a/xen/arch/x86/idle0_task.c	Sun May 08 11:12:14 2005 +0000
     9.2 +++ b/xen/arch/x86/idle0_task.c	Sun May 08 12:06:10 2005 +0000
     9.3 @@ -5,7 +5,7 @@
     9.4  
     9.5  struct domain idle0_domain = {
     9.6      id:          IDLE_DOMAIN_ID,
     9.7 -    d_flags:     1<<DF_IDLETASK,
     9.8 +    flags:       1<<DF_IDLETASK,
     9.9      refcnt:      ATOMIC_INIT(1)
    9.10  };
    9.11  
    10.1 --- a/xen/arch/x86/mm.c	Sun May 08 11:12:14 2005 +0000
    10.2 +++ b/xen/arch/x86/mm.c	Sun May 08 12:06:10 2005 +0000
    10.3 @@ -1142,7 +1142,7 @@ void put_page_type(struct pfn_info *page
    10.4           * See domain.c:relinquish_list().
    10.5           */
    10.6          ASSERT((x & PGT_validated) || 
    10.7 -               test_bit(DF_DYING, &page_get_owner(page)->d_flags));
    10.8 +               test_bit(DF_DYING, &page_get_owner(page)->flags));
    10.9  
   10.10          if ( unlikely((nx & PGT_count_mask) == 0) )
   10.11          {
   10.12 @@ -1691,13 +1691,13 @@ int do_mmuext_op(
   10.13               * it is dying. 
   10.14               */
   10.15              ASSERT(e->tot_pages <= e->max_pages);
   10.16 -            if ( unlikely(test_bit(DF_DYING, &e->d_flags)) ||
   10.17 +            if ( unlikely(test_bit(DF_DYING, &e->flags)) ||
   10.18                   unlikely(e->tot_pages == e->max_pages) ||
   10.19                   unlikely(IS_XEN_HEAP_FRAME(page)) )
   10.20              {
   10.21                  MEM_LOG("Transferee has no reservation headroom (%d,%d), or "
   10.22                          "page is in Xen heap (%lx), or dom is dying (%ld).\n",
   10.23 -                        e->tot_pages, e->max_pages, op.mfn, e->d_flags);
   10.24 +                        e->tot_pages, e->max_pages, op.mfn, e->flags);
   10.25                  okay = 0;
   10.26                  goto reassign_fail;
   10.27              }
   10.28 @@ -2776,7 +2776,7 @@ int ptwr_do_page_fault(struct domain *d,
   10.29       * If this is a multi-processor guest then ensure that the page is hooked
   10.30       * into at most one L2 table, which must be the one running on this VCPU.
   10.31       */
   10.32 -    if ( (d->exec_domain[0]->ed_next_list != NULL) &&
   10.33 +    if ( (d->exec_domain[0]->next_in_list != NULL) &&
   10.34           ((page->u.inuse.type_info & PGT_count_mask) != 
   10.35            (!!(page->u.inuse.type_info & PGT_pinned) +
   10.36             (which == PTWR_PT_ACTIVE))) )
   10.37 @@ -2945,13 +2945,13 @@ void ptwr_destroy(struct domain *d)
   10.38           * Also, a domain mustn't have PGC_allocated pages when it is dying.
   10.39           */
   10.40          ASSERT(e->tot_pages <= e->max_pages);
   10.41 -        if ( unlikely(test_bit(DF_DYING, &e->d_flags)) ||
   10.42 +        if ( unlikely(test_bit(DF_DYING, &e->flags)) ||
   10.43               unlikely(e->tot_pages == e->max_pages) ||
   10.44               unlikely(!gnttab_prepare_for_transfer(e, d, gntref)) )
   10.45          {
   10.46              MEM_LOG("Transferee has no reservation headroom (%d,%d), or "
   10.47                      "provided a bad grant ref, or is dying (%p).\n",
   10.48 -                    e->tot_pages, e->max_pages, e->d_flags);
   10.49 +                    e->tot_pages, e->max_pages, e->flags);
   10.50              spin_unlock(&e->page_alloc_lock);
   10.51              put_domain(e);
   10.52              okay = 0;
    11.1 --- a/xen/arch/x86/physdev.c	Sun May 08 11:12:14 2005 +0000
    11.2 +++ b/xen/arch/x86/physdev.c	Sun May 08 12:06:10 2005 +0000
    11.3 @@ -130,7 +130,7 @@ void physdev_init_dom0(struct domain *d)
    11.4      BUG_ON(d->arch.iobmp_mask == NULL);
    11.5      memset(d->arch.iobmp_mask, 0, IOBMP_BYTES);
    11.6  
    11.7 -    set_bit(DF_PHYSDEV, &d->d_flags);
    11.8 +    set_bit(DF_PHYSDEV, &d->flags);
    11.9  }
   11.10  
   11.11  
    12.1 --- a/xen/arch/x86/setup.c	Sun May 08 11:12:14 2005 +0000
    12.2 +++ b/xen/arch/x86/setup.c	Sun May 08 12:06:10 2005 +0000
    12.3 @@ -581,7 +581,7 @@ void __init __start_xen(multiboot_info_t
    12.4      if ( dom0 == NULL )
    12.5          panic("Error creating domain 0\n");
    12.6  
    12.7 -    set_bit(DF_PRIVILEGED, &dom0->d_flags);
    12.8 +    set_bit(DF_PRIVILEGED, &dom0->flags);
    12.9  
   12.10      /* Grab the DOM0 command line. Skip past the image name. */
   12.11      cmdline = (char *)(mod[0].string ? __va(mod[0].string) : NULL);
    13.1 --- a/xen/arch/x86/shadow.c	Sun May 08 11:12:14 2005 +0000
    13.2 +++ b/xen/arch/x86/shadow.c	Sun May 08 12:06:10 2005 +0000
    13.3 @@ -1120,7 +1120,7 @@ void __shadow_mode_disable(struct domain
    13.4       * Currently this does not fix up page ref counts, so it is valid to call
    13.5       * only when a domain is being destroyed.
    13.6       */
    13.7 -    BUG_ON(!test_bit(DF_DYING, &d->d_flags));
    13.8 +    BUG_ON(!test_bit(DF_DYING, &d->flags));
    13.9      d->arch.shadow_tainted_refcnts = 1;
   13.10  
   13.11      free_shadow_pages(d);
    14.1 --- a/xen/arch/x86/smpboot.c	Sun May 08 11:12:14 2005 +0000
    14.2 +++ b/xen/arch/x86/smpboot.c	Sun May 08 12:06:10 2005 +0000
    14.3 @@ -658,7 +658,7 @@ static void __init do_boot_cpu (int apic
    14.4  
    14.5      ed = idle->exec_domain[0];
    14.6  
    14.7 -    set_bit(DF_IDLETASK, &idle->d_flags);
    14.8 +    set_bit(DF_IDLETASK, &idle->flags);
    14.9  
   14.10      ed->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
   14.11  
    15.1 --- a/xen/arch/x86/traps.c	Sun May 08 11:12:14 2005 +0000
    15.2 +++ b/xen/arch/x86/traps.c	Sun May 08 12:06:10 2005 +0000
    15.3 @@ -361,13 +361,13 @@ long do_fpu_taskswitch(int set)
    15.4  
    15.5      if ( set )
    15.6      {
    15.7 -        set_bit(EDF_GUEST_STTS, &ed->ed_flags);
    15.8 +        set_bit(EDF_GUEST_STTS, &ed->flags);
    15.9          stts();
   15.10      }
   15.11      else
   15.12      {
   15.13 -        clear_bit(EDF_GUEST_STTS, &ed->ed_flags);
   15.14 -        if ( test_bit(EDF_USEDFPU, &ed->ed_flags) )
   15.15 +        clear_bit(EDF_GUEST_STTS, &ed->flags);
   15.16 +        if ( test_bit(EDF_USEDFPU, &ed->flags) )
   15.17              clts();
   15.18      }
   15.19  
   15.20 @@ -665,7 +665,7 @@ static int emulate_privileged_op(struct 
   15.21          case 0: /* Read CR0 */
   15.22              *reg = 
   15.23                  (read_cr0() & ~X86_CR0_TS) | 
   15.24 -                (test_bit(EDF_GUEST_STTS, &ed->ed_flags) ? X86_CR0_TS : 0);
   15.25 +                (test_bit(EDF_GUEST_STTS, &ed->flags) ? X86_CR0_TS : 0);
   15.26              break;
   15.27  
   15.28          case 2: /* Read CR2 */
   15.29 @@ -919,15 +919,15 @@ asmlinkage int math_state_restore(struct
   15.30      /* Prevent recursion. */
   15.31      clts();
   15.32  
   15.33 -    if ( !test_and_set_bit(EDF_USEDFPU, &current->ed_flags) )
   15.34 +    if ( !test_and_set_bit(EDF_USEDFPU, &current->flags) )
   15.35      {
   15.36 -        if ( test_bit(EDF_DONEFPUINIT, &current->ed_flags) )
   15.37 +        if ( test_bit(EDF_DONEFPUINIT, &current->flags) )
   15.38              restore_fpu(current);
   15.39          else
   15.40              init_fpu();
   15.41      }
   15.42  
   15.43 -    if ( test_and_clear_bit(EDF_GUEST_STTS, &current->ed_flags) )
   15.44 +    if ( test_and_clear_bit(EDF_GUEST_STTS, &current->flags) )
   15.45      {
   15.46          struct trap_bounce *tb = &current->arch.trap_bounce;
   15.47          tb->flags = TBF_EXCEPTION;
    16.1 --- a/xen/common/dom0_ops.c	Sun May 08 11:12:14 2005 +0000
    16.2 +++ b/xen/common/dom0_ops.c	Sun May 08 12:06:10 2005 +0000
    16.3 @@ -139,7 +139,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    16.4          {
    16.5              ret = -EINVAL;
    16.6              if ( (d != current->domain) && 
    16.7 -                 test_bit(DF_CONSTRUCTED, &d->d_flags) )
    16.8 +                 test_bit(DF_CONSTRUCTED, &d->flags) )
    16.9              {
   16.10                  domain_unpause_by_systemcontroller(d);
   16.11                  ret = 0;
   16.12 @@ -246,14 +246,14 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   16.13  
   16.14          if ( cpu == -1 )
   16.15          {
   16.16 -            clear_bit(EDF_CPUPINNED, &ed->ed_flags);
   16.17 +            clear_bit(EDF_CPUPINNED, &ed->flags);
   16.18          }
   16.19          else
   16.20          {
   16.21              exec_domain_pause(ed);
   16.22              if ( ed->processor != (cpu % smp_num_cpus) )
   16.23 -                set_bit(EDF_MIGRATED, &ed->ed_flags);
   16.24 -            set_bit(EDF_CPUPINNED, &ed->ed_flags);
   16.25 +                set_bit(EDF_MIGRATED, &ed->flags);
   16.26 +            set_bit(EDF_CPUPINNED, &ed->flags);
   16.27              ed->processor = cpu % smp_num_cpus;
   16.28              exec_domain_unpause(ed);
   16.29          }
   16.30 @@ -311,12 +311,12 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   16.31          ed = d->exec_domain[op->u.getdomaininfo.exec_domain];
   16.32  
   16.33          op->u.getdomaininfo.flags =
   16.34 -            (test_bit( DF_DYING,      &d->d_flags)  ? DOMFLAGS_DYING    : 0) |
   16.35 -            (test_bit( DF_CRASHED,    &d->d_flags)  ? DOMFLAGS_CRASHED  : 0) |
   16.36 -            (test_bit( DF_SHUTDOWN,   &d->d_flags)  ? DOMFLAGS_SHUTDOWN : 0) |
   16.37 -            (test_bit(EDF_CTRLPAUSE, &ed->ed_flags) ? DOMFLAGS_PAUSED   : 0) |
   16.38 -            (test_bit(EDF_BLOCKED,   &ed->ed_flags) ? DOMFLAGS_BLOCKED  : 0) |
   16.39 -            (test_bit(EDF_RUNNING,   &ed->ed_flags) ? DOMFLAGS_RUNNING  : 0);
   16.40 +            (test_bit( DF_DYING,      &d->flags)  ? DOMFLAGS_DYING    : 0) |
   16.41 +            (test_bit( DF_CRASHED,    &d->flags)  ? DOMFLAGS_CRASHED  : 0) |
   16.42 +            (test_bit( DF_SHUTDOWN,   &d->flags)  ? DOMFLAGS_SHUTDOWN : 0) |
   16.43 +            (test_bit(EDF_CTRLPAUSE, &ed->flags) ? DOMFLAGS_PAUSED   : 0) |
   16.44 +            (test_bit(EDF_BLOCKED,   &ed->flags) ? DOMFLAGS_BLOCKED  : 0) |
   16.45 +            (test_bit(EDF_RUNNING,   &ed->flags) ? DOMFLAGS_RUNNING  : 0);
   16.46  
   16.47          op->u.getdomaininfo.flags |= ed->processor << DOMFLAGS_CPUSHIFT;
   16.48          op->u.getdomaininfo.flags |= 
    17.1 --- a/xen/common/domain.c	Sun May 08 11:12:14 2005 +0000
    17.2 +++ b/xen/common/domain.c	Sun May 08 12:06:10 2005 +0000
    17.3 @@ -66,12 +66,12 @@ struct domain *do_createdomain(domid_t d
    17.4      {
    17.5          write_lock(&domlist_lock);
    17.6          pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
    17.7 -        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_list )
    17.8 +        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
    17.9              if ( (*pd)->id > d->id )
   17.10                  break;
   17.11 -        d->next_list = *pd;
   17.12 +        d->next_in_list = *pd;
   17.13          *pd = d;
   17.14 -        d->next_hash = domain_hash[DOMAIN_HASH(dom_id)];
   17.15 +        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(dom_id)];
   17.16          domain_hash[DOMAIN_HASH(dom_id)] = d;
   17.17          write_unlock(&domlist_lock);
   17.18      }
   17.19 @@ -94,7 +94,7 @@ struct domain *find_domain_by_id(domid_t
   17.20                  d = NULL;
   17.21              break;
   17.22          }
   17.23 -        d = d->next_hash;
   17.24 +        d = d->next_in_hashbucket;
   17.25      }
   17.26      read_unlock(&domlist_lock);
   17.27  
   17.28 @@ -107,7 +107,7 @@ void domain_kill(struct domain *d)
   17.29      struct exec_domain *ed;
   17.30  
   17.31      domain_pause(d);
   17.32 -    if ( !test_and_set_bit(DF_DYING, &d->d_flags) )
   17.33 +    if ( !test_and_set_bit(DF_DYING, &d->flags) )
   17.34      {
   17.35          for_each_exec_domain(d, ed)
   17.36              sched_rem_domain(ed);
   17.37 @@ -124,7 +124,7 @@ void domain_crash(void)
   17.38      if ( d->id == 0 )
   17.39          BUG();
   17.40  
   17.41 -    set_bit(DF_CRASHED, &d->d_flags);
   17.42 +    set_bit(DF_CRASHED, &d->flags);
   17.43  
   17.44      send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
   17.45  
   17.46 @@ -164,9 +164,9 @@ void domain_shutdown(u8 reason)
   17.47      }
   17.48  
   17.49      if ( (d->shutdown_code = reason) == SHUTDOWN_crash )
   17.50 -        set_bit(DF_CRASHED, &d->d_flags);
   17.51 +        set_bit(DF_CRASHED, &d->flags);
   17.52      else
   17.53 -        set_bit(DF_SHUTDOWN, &d->d_flags);
   17.54 +        set_bit(DF_SHUTDOWN, &d->flags);
   17.55  
   17.56      send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
   17.57  
   17.58 @@ -180,7 +180,7 @@ void domain_destruct(struct domain *d)
   17.59      struct domain **pd;
   17.60      atomic_t      old, new;
   17.61  
   17.62 -    if ( !test_bit(DF_DYING, &d->d_flags) )
   17.63 +    if ( !test_bit(DF_DYING, &d->flags) )
   17.64          BUG();
   17.65  
   17.66      /* May be already destructed, or get_domain() can race us. */
   17.67 @@ -194,12 +194,12 @@ void domain_destruct(struct domain *d)
   17.68      write_lock(&domlist_lock);
   17.69      pd = &domain_list;
   17.70      while ( *pd != d ) 
   17.71 -        pd = &(*pd)->next_list;
   17.72 -    *pd = d->next_list;
   17.73 +        pd = &(*pd)->next_in_list;
   17.74 +    *pd = d->next_in_list;
   17.75      pd = &domain_hash[DOMAIN_HASH(d->id)];
   17.76      while ( *pd != d ) 
   17.77 -        pd = &(*pd)->next_hash;
   17.78 -    *pd = d->next_hash;
   17.79 +        pd = &(*pd)->next_in_hashbucket;
   17.80 +    *pd = d->next_in_hashbucket;
   17.81      write_unlock(&domlist_lock);
   17.82  
   17.83      destroy_event_channels(d);
   17.84 @@ -227,8 +227,8 @@ int set_info_guest(struct domain *p, dom
   17.85      if ( (vcpu >= MAX_VIRT_CPUS) || ((ed = p->exec_domain[vcpu]) == NULL) )
   17.86          return -EINVAL;
   17.87      
   17.88 -    if (test_bit(DF_CONSTRUCTED, &p->d_flags) && 
   17.89 -        !test_bit(EDF_CTRLPAUSE, &ed->ed_flags))
   17.90 +    if (test_bit(DF_CONSTRUCTED, &p->flags) && 
   17.91 +        !test_bit(EDF_CTRLPAUSE, &ed->flags))
   17.92          return -EINVAL;
   17.93  
   17.94      if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
   17.95 @@ -243,7 +243,7 @@ int set_info_guest(struct domain *p, dom
   17.96      if ( (rc = arch_set_info_guest(ed, c)) != 0 )
   17.97          goto out;
   17.98  
   17.99 -    set_bit(DF_CONSTRUCTED, &p->d_flags);
  17.100 +    set_bit(DF_CONSTRUCTED, &p->flags);
  17.101  
  17.102   out:    
  17.103      xfree(c);
  17.104 @@ -294,7 +294,7 @@ long do_boot_vcpu(unsigned long vcpu, st
  17.105      sched_add_domain(ed);
  17.106  
  17.107      /* domain_unpause_by_systemcontroller */
  17.108 -    if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
  17.109 +    if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->flags) )
  17.110          domain_wake(ed);
  17.111  
  17.112      xfree(c);
    18.1 --- a/xen/common/event_channel.c	Sun May 08 11:12:14 2005 +0000
    18.2 +++ b/xen/common/event_channel.c	Sun May 08 12:06:10 2005 +0000
    18.3 @@ -40,7 +40,7 @@ static int get_free_port(struct exec_dom
    18.4      max = d->max_event_channel;
    18.5      chn = d->event_channel;
    18.6  
    18.7 -    for ( port = ed->eid * EVENT_CHANNELS_SPREAD; port < max; port++ )
    18.8 +    for ( port = ed->id * EVENT_CHANNELS_SPREAD; port < max; port++ )
    18.9          if ( chn[port].state == ECS_FREE )
   18.10              break;
   18.11  
    19.1 --- a/xen/common/keyhandler.c	Sun May 08 11:12:14 2005 +0000
    19.2 +++ b/xen/common/keyhandler.c	Sun May 08 12:06:10 2005 +0000
    19.3 @@ -109,7 +109,7 @@ static void do_task_queues(unsigned char
    19.4      for_each_domain ( d )
    19.5      {
    19.6          printk("Xen: DOM %u, flags=%lx refcnt=%d nr_pages=%d "
    19.7 -               "xenheap_pages=%d\n", d->id, d->d_flags,
    19.8 +               "xenheap_pages=%d\n", d->id, d->flags,
    19.9                 atomic_read(&d->refcnt), d->tot_pages, d->xenheap_pages);
   19.10  
   19.11          dump_pageframe_info(d);
   19.12 @@ -118,11 +118,11 @@ static void do_task_queues(unsigned char
   19.13              printk("Guest: %p CPU %d [has=%c] flags=%lx "
   19.14                     "upcall_pend = %02x, upcall_mask = %02x\n", ed,
   19.15                     ed->processor,
   19.16 -                   test_bit(EDF_RUNNING, &ed->ed_flags) ? 'T':'F',
   19.17 -                   ed->ed_flags,
   19.18 +                   test_bit(EDF_RUNNING, &ed->flags) ? 'T':'F',
   19.19 +                   ed->flags,
   19.20                     ed->vcpu_info->evtchn_upcall_pending, 
   19.21                     ed->vcpu_info->evtchn_upcall_mask);
   19.22 -            printk("Notifying guest... %d/%d\n", d->id, ed->eid); 
   19.23 +            printk("Notifying guest... %d/%d\n", d->id, ed->id); 
   19.24              printk("port %d/%d stat %d %d %d\n",
   19.25                     VIRQ_DEBUG, ed->virq_to_evtchn[VIRQ_DEBUG],
   19.26                     test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], 
    20.1 --- a/xen/common/page_alloc.c	Sun May 08 11:12:14 2005 +0000
    20.2 +++ b/xen/common/page_alloc.c	Sun May 08 12:06:10 2005 +0000
    20.3 @@ -503,13 +503,13 @@ struct pfn_info *alloc_domheap_pages(str
    20.4  
    20.5      spin_lock(&d->page_alloc_lock);
    20.6  
    20.7 -    if ( unlikely(test_bit(DF_DYING, &d->d_flags)) ||
    20.8 +    if ( unlikely(test_bit(DF_DYING, &d->flags)) ||
    20.9           unlikely((d->tot_pages + (1 << order)) > d->max_pages) )
   20.10      {
   20.11          DPRINTK("Over-allocation for domain %u: %u > %u\n",
   20.12                  d->id, d->tot_pages + (1 << order), d->max_pages);
   20.13          DPRINTK("...or the domain is dying (%d)\n", 
   20.14 -                !!test_bit(DF_DYING, &d->d_flags));
   20.15 +                !!test_bit(DF_DYING, &d->flags));
   20.16          spin_unlock(&d->page_alloc_lock);
   20.17          free_heap_pages(MEMZONE_DOM, pg, order);
   20.18          return NULL;
   20.19 @@ -574,7 +574,7 @@ void free_domheap_pages(struct pfn_info 
   20.20  
   20.21          spin_unlock_recursive(&d->page_alloc_lock);
   20.22  
   20.23 -        if ( likely(!test_bit(DF_DYING, &d->d_flags)) )
   20.24 +        if ( likely(!test_bit(DF_DYING, &d->flags)) )
   20.25          {
   20.26              free_heap_pages(MEMZONE_DOM, pg, order);
   20.27          }
    21.1 --- a/xen/common/sched_bvt.c	Sun May 08 11:12:14 2005 +0000
    21.2 +++ b/xen/common/sched_bvt.c	Sun May 08 12:06:10 2005 +0000
    21.3 @@ -59,7 +59,7 @@ struct bvt_cpu_info
    21.4  };
    21.5  
    21.6  #define BVT_INFO(p)   ((struct bvt_dom_info *)(p)->sched_priv)
    21.7 -#define EBVT_INFO(p)  ((struct bvt_edom_info *)(p)->ed_sched_priv)
    21.8 +#define EBVT_INFO(p)  ((struct bvt_edom_info *)(p)->sched_priv)
    21.9  #define CPU_INFO(cpu) ((struct bvt_cpu_info *)(schedule_data[cpu]).sched_priv)
   21.10  #define RUNLIST(p)    ((struct list_head *)&(EBVT_INFO(p)->run_list))
   21.11  #define RUNQUEUE(cpu) ((struct list_head *)&(CPU_INFO(cpu)->runqueue))
   21.12 @@ -174,9 +174,9 @@ static int bvt_alloc_task(struct exec_do
   21.13              return -1;
   21.14          memset(d->sched_priv, 0, sizeof(struct bvt_dom_info));
   21.15      }
   21.16 -    ed->ed_sched_priv = &BVT_INFO(d)->ed_inf[ed->eid];
   21.17 -    BVT_INFO(d)->ed_inf[ed->eid].inf = BVT_INFO(d);
   21.18 -    BVT_INFO(d)->ed_inf[ed->eid].exec_domain = ed;
   21.19 +    ed->sched_priv = &BVT_INFO(d)->ed_inf[ed->id];
   21.20 +    BVT_INFO(d)->ed_inf[ed->id].inf = BVT_INFO(d);
   21.21 +    BVT_INFO(d)->ed_inf[ed->id].exec_domain = ed;
   21.22      return 0;
   21.23  }
   21.24  
   21.25 @@ -190,7 +190,7 @@ static void bvt_add_task(struct exec_dom
   21.26      ASSERT(inf != NULL);
   21.27      ASSERT(d   != NULL);
   21.28  
   21.29 -    if (d->eid == 0) {
   21.30 +    if (d->id == 0) {
   21.31          inf->mcu_advance = MCU_ADVANCE;
   21.32          inf->domain      = d->domain;
   21.33          inf->warpback    = 0;
   21.34 @@ -224,43 +224,43 @@ static void bvt_add_task(struct exec_dom
   21.35      }
   21.36  }
   21.37  
   21.38 -static int bvt_init_idle_task(struct exec_domain *p)
   21.39 +static int bvt_init_idle_task(struct exec_domain *ed)
   21.40  {
   21.41 -    if ( bvt_alloc_task(p) < 0 )
   21.42 +    if ( bvt_alloc_task(ed) < 0 )
   21.43          return -1;
   21.44  
   21.45 -    bvt_add_task(p);
   21.46 +    bvt_add_task(ed);
   21.47  
   21.48 -    set_bit(EDF_RUNNING, &p->ed_flags);
   21.49 -    if ( !__task_on_runqueue(p) )
   21.50 -        __add_to_runqueue_head(p);
   21.51 -        
   21.52 +    set_bit(EDF_RUNNING, &ed->flags);
   21.53 +    if ( !__task_on_runqueue(ed) )
   21.54 +        __add_to_runqueue_head(ed);
   21.55 +
   21.56      return 0;
   21.57  }
   21.58  
   21.59 -static void bvt_wake(struct exec_domain *d)
   21.60 +static void bvt_wake(struct exec_domain *ed)
   21.61  {
   21.62 -    struct bvt_edom_info *einf = EBVT_INFO(d);
   21.63 +    struct bvt_edom_info *einf = EBVT_INFO(ed);
   21.64      struct exec_domain  *curr;
   21.65      s_time_t            now, r_time;
   21.66 -    int                 cpu = d->processor;
   21.67 +    int                 cpu = ed->processor;
   21.68      u32                 curr_evt;
   21.69  
   21.70 -    if ( unlikely(__task_on_runqueue(d)) )
   21.71 +    if ( unlikely(__task_on_runqueue(ed)) )
   21.72          return;
   21.73  
   21.74 -    __add_to_runqueue_head(d);
   21.75 +    __add_to_runqueue_head(ed);
   21.76  
   21.77      now = NOW();
   21.78  
   21.79      /* Set the BVT parameters. AVT should always be updated 
   21.80         if CPU migration ocurred.*/
   21.81      if ( einf->avt < CPU_SVT(cpu) || 
   21.82 -         unlikely(test_bit(EDF_MIGRATED, &d->ed_flags)) )
   21.83 +         unlikely(test_bit(EDF_MIGRATED, &ed->flags)) )
   21.84          einf->avt = CPU_SVT(cpu);
   21.85  
   21.86      /* Deal with warping here. */
   21.87 -    einf->evt = calc_evt(d, einf->avt);
   21.88 +    einf->evt = calc_evt(ed, einf->avt);
   21.89      
   21.90      curr = schedule_data[cpu].curr;
   21.91      curr_evt = calc_evt(curr, calc_avt(curr, now));
   21.92 @@ -277,12 +277,12 @@ static void bvt_wake(struct exec_domain 
   21.93  }
   21.94  
   21.95  
   21.96 -static void bvt_sleep(struct exec_domain *d)
   21.97 +static void bvt_sleep(struct exec_domain *ed)
   21.98  {
   21.99 -    if ( test_bit(EDF_RUNNING, &d->ed_flags) )
  21.100 -        cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
  21.101 -    else  if ( __task_on_runqueue(d) )
  21.102 -        __del_from_runqueue(d);
  21.103 +    if ( test_bit(EDF_RUNNING, &ed->flags) )
  21.104 +        cpu_raise_softirq(ed->processor, SCHEDULE_SOFTIRQ);
  21.105 +    else  if ( __task_on_runqueue(ed) )
  21.106 +        __del_from_runqueue(ed);
  21.107  }
  21.108  
  21.109  /**
  21.110 @@ -377,7 +377,7 @@ static struct task_slice bvt_do_schedule
  21.111      struct bvt_edom_info *next_prime_einf = NULL;
  21.112      struct task_slice     ret;
  21.113  
  21.114 -    ASSERT(prev->ed_sched_priv != NULL);
  21.115 +    ASSERT(prev->sched_priv != NULL);
  21.116      ASSERT(prev_einf != NULL);
  21.117      ASSERT(__task_on_runqueue(prev));
  21.118  
  21.119 @@ -526,8 +526,8 @@ static void bvt_dump_cpu_state(int i)
  21.120  {
  21.121      struct list_head *queue;
  21.122      int loop = 0;
  21.123 -    struct bvt_edom_info *d_inf;
  21.124 -    struct exec_domain *d;
  21.125 +    struct bvt_edom_info *ed_inf;
  21.126 +    struct exec_domain *ed;
  21.127      
  21.128      printk("svt=0x%08lX ", CPU_SVT(i));
  21.129  
  21.130 @@ -535,15 +535,15 @@ static void bvt_dump_cpu_state(int i)
  21.131      printk("QUEUE rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
  21.132             (unsigned long) queue->next, (unsigned long) queue->prev);
  21.133  
  21.134 -    list_for_each_entry ( d_inf, queue, run_list )
  21.135 +    list_for_each_entry ( ed_inf, queue, run_list )
  21.136      {
  21.137 -        d = d_inf->exec_domain;
  21.138 -        printk("%3d: %u has=%c ", loop++, d->domain->id,
  21.139 -               test_bit(EDF_RUNNING, &d->ed_flags) ? 'T':'F');
  21.140 -        bvt_dump_runq_el(d);
  21.141 -        printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
  21.142 +        ed = ed_inf->exec_domain;
  21.143 +        printk("%3d: %u has=%c ", loop++, ed->domain->id,
  21.144 +               test_bit(EDF_RUNNING, &ed->flags) ? 'T':'F');
  21.145 +        bvt_dump_runq_el(ed);
  21.146 +        printk("c=0x%X%08X\n", (u32)(ed->cpu_time>>32), (u32)ed->cpu_time);
  21.147          printk("         l: %p n: %p  p: %p\n",
  21.148 -               &d_inf->run_list, d_inf->run_list.next, d_inf->run_list.prev);
  21.149 +               &ed_inf->run_list, ed_inf->run_list.next, ed_inf->run_list.prev);
  21.150      }
  21.151  }
  21.152  
    22.1 --- a/xen/common/schedule.c	Sun May 08 11:12:14 2005 +0000
    22.2 +++ b/xen/common/schedule.c	Sun May 08 12:06:10 2005 +0000
    22.3 @@ -93,24 +93,27 @@ struct exec_domain *alloc_exec_domain_st
    22.4  
    22.5      d->exec_domain[vcpu] = ed;
    22.6      ed->domain = d;
    22.7 -    ed->eid = vcpu;
    22.8 +    ed->id = vcpu;
    22.9  
   22.10      if ( SCHED_OP(alloc_task, ed) < 0 )
   22.11          goto out;
   22.12  
   22.13 -    if (vcpu != 0) {
   22.14 -        ed->vcpu_info = &d->shared_info->vcpu_data[ed->eid];
   22.15 +    if ( vcpu != 0 )
   22.16 +    {
   22.17 +        ed->vcpu_info = &d->shared_info->vcpu_data[ed->id];
   22.18  
   22.19 -        for_each_exec_domain(d, edc) {
   22.20 -            if (edc->ed_next_list == NULL || edc->ed_next_list->eid > vcpu)
   22.21 +        for_each_exec_domain( d, edc )
   22.22 +        {
   22.23 +            if ( (edc->next_in_list == NULL) ||
   22.24 +                 (edc->next_in_list->id > vcpu) )
   22.25                  break;
   22.26          }
   22.27 -        ed->ed_next_list = edc->ed_next_list;
   22.28 -        edc->ed_next_list = ed;
   22.29 +        ed->next_in_list  = edc->next_in_list;
   22.30 +        edc->next_in_list = ed;
   22.31  
   22.32 -        if (test_bit(EDF_CPUPINNED, &edc->ed_flags)) {
   22.33 +        if (test_bit(EDF_CPUPINNED, &edc->flags)) {
   22.34              ed->processor = (edc->processor + 1) % smp_num_cpus;
   22.35 -            set_bit(EDF_CPUPINNED, &ed->ed_flags);
   22.36 +            set_bit(EDF_CPUPINNED, &ed->flags);
   22.37          } else {
   22.38              ed->processor = (edc->processor + 1) % smp_num_cpus;  /* XXX */
   22.39          }
   22.40 @@ -152,7 +155,7 @@ void sched_add_domain(struct exec_domain
   22.41      struct domain *d = ed->domain;
   22.42  
   22.43      /* Must be unpaused by control software to start execution. */
   22.44 -    set_bit(EDF_CTRLPAUSE, &ed->ed_flags);
   22.45 +    set_bit(EDF_CTRLPAUSE, &ed->flags);
   22.46  
   22.47      if ( d->id != IDLE_DOMAIN_ID )
   22.48      {
   22.49 @@ -168,14 +171,14 @@ void sched_add_domain(struct exec_domain
   22.50      }
   22.51  
   22.52      SCHED_OP(add_task, ed);
   22.53 -    TRACE_2D(TRC_SCHED_DOM_ADD, d->id, ed->eid);
   22.54 +    TRACE_2D(TRC_SCHED_DOM_ADD, d->id, ed->id);
   22.55  }
   22.56  
   22.57  void sched_rem_domain(struct exec_domain *ed) 
   22.58  {
   22.59      rem_ac_timer(&ed->timer);
   22.60      SCHED_OP(rem_task, ed);
   22.61 -    TRACE_2D(TRC_SCHED_DOM_REM, ed->domain->id, ed->eid);
   22.62 +    TRACE_2D(TRC_SCHED_DOM_REM, ed->domain->id, ed->id);
   22.63  }
   22.64  
   22.65  void init_idle_task(void)
   22.66 @@ -193,10 +196,10 @@ void domain_sleep(struct exec_domain *ed
   22.67          SCHED_OP(sleep, ed);
   22.68      spin_unlock_irqrestore(&schedule_data[ed->processor].schedule_lock, flags);
   22.69  
   22.70 -    TRACE_2D(TRC_SCHED_SLEEP, ed->domain->id, ed->eid);
   22.71 +    TRACE_2D(TRC_SCHED_SLEEP, ed->domain->id, ed->id);
   22.72   
   22.73      /* Synchronous. */
   22.74 -    while ( test_bit(EDF_RUNNING, &ed->ed_flags) && !domain_runnable(ed) )
   22.75 +    while ( test_bit(EDF_RUNNING, &ed->flags) && !domain_runnable(ed) )
   22.76          cpu_relax();
   22.77  }
   22.78  
   22.79 @@ -212,10 +215,10 @@ void domain_wake(struct exec_domain *ed)
   22.80          ed->wokenup = NOW();
   22.81  #endif
   22.82      }
   22.83 -    clear_bit(EDF_MIGRATED, &ed->ed_flags);
   22.84 +    clear_bit(EDF_MIGRATED, &ed->flags);
   22.85      spin_unlock_irqrestore(&schedule_data[ed->processor].schedule_lock, flags);
   22.86  
   22.87 -    TRACE_2D(TRC_SCHED_WAKE, ed->domain->id, ed->eid);
   22.88 +    TRACE_2D(TRC_SCHED_WAKE, ed->domain->id, ed->id);
   22.89  }
   22.90  
   22.91  /* Block the currently-executing domain until a pertinent event occurs. */
   22.92 @@ -224,16 +227,16 @@ long do_block(void)
   22.93      struct exec_domain *ed = current;
   22.94  
   22.95      ed->vcpu_info->evtchn_upcall_mask = 0;
   22.96 -    set_bit(EDF_BLOCKED, &ed->ed_flags);
   22.97 +    set_bit(EDF_BLOCKED, &ed->flags);
   22.98  
   22.99      /* Check for events /after/ blocking: avoids wakeup waiting race. */
  22.100      if ( event_pending(ed) )
  22.101      {
  22.102 -        clear_bit(EDF_BLOCKED, &ed->ed_flags);
  22.103 +        clear_bit(EDF_BLOCKED, &ed->flags);
  22.104      }
  22.105      else
  22.106      {
  22.107 -        TRACE_2D(TRC_SCHED_BLOCK, ed->domain->id, ed->eid);
  22.108 +        TRACE_2D(TRC_SCHED_BLOCK, ed->domain->id, ed->id);
  22.109          __enter_scheduler();
  22.110      }
  22.111  
  22.112 @@ -243,7 +246,7 @@ long do_block(void)
  22.113  /* Voluntarily yield the processor for this allocation. */
  22.114  static long do_yield(void)
  22.115  {
  22.116 -    TRACE_2D(TRC_SCHED_YIELD, current->domain->id, current->eid);
  22.117 +    TRACE_2D(TRC_SCHED_YIELD, current->domain->id, current->id);
  22.118      __enter_scheduler();
  22.119      return 0;
  22.120  }
  22.121 @@ -272,7 +275,7 @@ long do_sched_op(unsigned long op)
  22.122  
  22.123      case SCHEDOP_shutdown:
  22.124      {
  22.125 -        TRACE_3D(TRC_SCHED_SHUTDOWN, current->domain->id, current->eid,
  22.126 +        TRACE_3D(TRC_SCHED_SHUTDOWN, current->domain->id, current->id,
  22.127                   (op >> SCHEDOP_reasonshift));
  22.128          domain_shutdown((u8)(op >> SCHEDOP_reasonshift));
  22.129          break;
  22.130 @@ -379,7 +382,7 @@ static void __enter_scheduler(void)
  22.131      add_ac_timer(&schedule_data[cpu].s_timer);
  22.132  
  22.133      /* Must be protected by the schedule_lock! */
  22.134 -    set_bit(EDF_RUNNING, &next->ed_flags);
  22.135 +    set_bit(EDF_RUNNING, &next->flags);
  22.136  
  22.137      spin_unlock_irq(&schedule_data[cpu].schedule_lock);
  22.138  
  22.139 @@ -417,8 +420,8 @@ static void __enter_scheduler(void)
  22.140      }
  22.141  
  22.142      TRACE_4D(TRC_SCHED_SWITCH,
  22.143 -             prev->domain->id, prev->eid,
  22.144 -             next->domain->id, next->eid);
  22.145 +             prev->domain->id, prev->id,
  22.146 +             next->domain->id, next->id);
  22.147  
  22.148      context_switch(prev, next);
  22.149  }
    23.1 --- a/xen/include/asm-x86/config.h	Sun May 08 11:12:14 2005 +0000
    23.2 +++ b/xen/include/asm-x86/config.h	Sun May 08 12:06:10 2005 +0000
    23.3 @@ -271,9 +271,9 @@ extern unsigned long _end; /* standard E
    23.4  extern unsigned long xenheap_phys_end; /* user-configurable */
    23.5  #endif
    23.6  
    23.7 -#define GDT_VIRT_START(ed)    (PERDOMAIN_VIRT_START + ((ed)->eid << PDPT_VCPU_VA_SHIFT))
    23.8 +#define GDT_VIRT_START(ed)    (PERDOMAIN_VIRT_START + ((ed)->id << PDPT_VCPU_VA_SHIFT))
    23.9  #define GDT_VIRT_END(ed)      (GDT_VIRT_START(ed) + (64*1024))
   23.10 -#define LDT_VIRT_START(ed)    (PERDOMAIN_VIRT_START + (64*1024) + ((ed)->eid << PDPT_VCPU_VA_SHIFT))
   23.11 +#define LDT_VIRT_START(ed)    (PERDOMAIN_VIRT_START + (64*1024) + ((ed)->id << PDPT_VCPU_VA_SHIFT))
   23.12  #define LDT_VIRT_END(ed)      (LDT_VIRT_START(ed) + (64*1024))
   23.13  
   23.14  #define PDPT_VCPU_SHIFT       5
    24.1 --- a/xen/include/asm-x86/debugger.h	Sun May 08 11:12:14 2005 +0000
    24.2 +++ b/xen/include/asm-x86/debugger.h	Sun May 08 12:06:10 2005 +0000
    24.3 @@ -69,7 +69,7 @@ static inline int debugger_trap_entry(
    24.4      {
    24.5      case TRAP_int3:
    24.6      case TRAP_debug:
    24.7 -        set_bit(EDF_CTRLPAUSE, &ed->ed_flags);
    24.8 +        set_bit(EDF_CTRLPAUSE, &ed->flags);
    24.9          raise_softirq(SCHEDULE_SOFTIRQ);
   24.10          return 1;
   24.11      }
    25.1 --- a/xen/include/asm-x86/i387.h	Sun May 08 11:12:14 2005 +0000
    25.2 +++ b/xen/include/asm-x86/i387.h	Sun May 08 12:06:10 2005 +0000
    25.3 @@ -19,7 +19,7 @@ extern void save_init_fpu(struct exec_do
    25.4  extern void restore_fpu(struct exec_domain *tsk);
    25.5  
    25.6  #define unlazy_fpu(_tsk) do { \
    25.7 -    if ( test_bit(EDF_USEDFPU, &(_tsk)->ed_flags) ) \
    25.8 +    if ( test_bit(EDF_USEDFPU, &(_tsk)->flags) ) \
    25.9          save_init_fpu(_tsk); \
   25.10  } while ( 0 )
   25.11  
    26.1 --- a/xen/include/xen/event.h	Sun May 08 11:12:14 2005 +0000
    26.2 +++ b/xen/include/xen/event.h	Sun May 08 12:06:10 2005 +0000
    26.3 @@ -43,7 +43,7 @@ static inline void evtchn_set_pending(st
    26.4           * NB2. We save DF_RUNNING across the unblock to avoid a needless
    26.5           * IPI for domains that we IPI'd to unblock.
    26.6           */
    26.7 -        running = test_bit(EDF_RUNNING, &ed->ed_flags);
    26.8 +        running = test_bit(EDF_RUNNING, &ed->flags);
    26.9          exec_domain_unblock(ed);
   26.10          if ( running )
   26.11              smp_send_event_check_cpu(ed->processor);
    27.1 --- a/xen/include/xen/sched.h	Sun May 08 11:12:14 2005 +0000
    27.2 +++ b/xen/include/xen/sched.h	Sun May 08 12:06:10 2005 +0000
    27.3 @@ -60,13 +60,14 @@ int  init_exec_domain_event_channels(str
    27.4  
    27.5  struct exec_domain 
    27.6  {
    27.7 -    u32 processor;
    27.8 +    int              id;
    27.9  
   27.10 -    vcpu_info_t *vcpu_info;
   27.11 +    int              processor;
   27.12  
   27.13 -    struct domain *domain;
   27.14 -    struct exec_domain *ed_next_list;
   27.15 -    int eid;
   27.16 +    vcpu_info_t     *vcpu_info;
   27.17 +
   27.18 +    struct domain   *domain;
   27.19 +    struct exec_domain *next_in_list;
   27.20  
   27.21      struct ac_timer  timer;         /* one-shot timer for timeout values */
   27.22      unsigned long    sleep_tick;    /* tick at which this vcpu started sleep */
   27.23 @@ -75,13 +76,13 @@ struct exec_domain
   27.24      s_time_t         lastdeschd;    /* time this domain was last descheduled */
   27.25      s_time_t         cpu_time;      /* total CPU time received till now */
   27.26      s_time_t         wokenup;       /* time domain got woken up */
   27.27 -    void            *ed_sched_priv;    /* scheduler-specific data */
   27.28 +    void            *sched_priv;    /* scheduler-specific data */
   27.29  
   27.30 -    unsigned long ed_flags;
   27.31 +    unsigned long    flags;
   27.32  
   27.33 -    u16 virq_to_evtchn[NR_VIRQS];
   27.34 +    u16              virq_to_evtchn[NR_VIRQS];
   27.35  
   27.36 -    atomic_t pausecnt;
   27.37 +    atomic_t         pausecnt;
   27.38  
   27.39      struct arch_exec_domain arch;
   27.40  };
   27.41 @@ -111,14 +112,15 @@ struct domain
   27.42      int              shutdown_code; /* code value from OS (if DF_SHUTDOWN). */
   27.43      void            *sched_priv;    /* scheduler-specific data */
   27.44  
   27.45 -    struct domain *next_list, *next_hash;
   27.46 +    struct domain   *next_in_list;
   27.47 +    struct domain   *next_in_hashbucket;
   27.48  
   27.49      /* Event channel information. */
   27.50      event_channel_t *event_channel;
   27.51      unsigned int     max_event_channel;
   27.52      spinlock_t       event_channel_lock;
   27.53  
   27.54 -    grant_table_t *grant_table;
   27.55 +    grant_table_t   *grant_table;
   27.56  
   27.57      /*
   27.58       * Interrupt to event-channel mappings. Updates should be protected by the 
   27.59 @@ -126,13 +128,13 @@ struct domain
   27.60       * the lock, but races don't usually matter.
   27.61       */
   27.62  #define NR_PIRQS 128 /* Put this somewhere sane! */
   27.63 -    u16 pirq_to_evtchn[NR_PIRQS];
   27.64 -    u32 pirq_mask[NR_PIRQS/32];
   27.65 +    u16              pirq_to_evtchn[NR_PIRQS];
   27.66 +    u32              pirq_mask[NR_PIRQS/32];
   27.67  
   27.68 -    unsigned long d_flags;
   27.69 -    unsigned long vm_assist;
   27.70 +    unsigned long    flags;
   27.71 +    unsigned long    vm_assist;
   27.72  
   27.73 -    atomic_t refcnt;
   27.74 +    atomic_t         refcnt;
   27.75  
   27.76      struct exec_domain *exec_domain[MAX_VIRT_CPUS];
   27.77  
   27.78 @@ -166,7 +168,7 @@ extern struct exec_domain idle0_exec_dom
   27.79  
   27.80  extern struct exec_domain *idle_task[NR_CPUS];
   27.81  #define IDLE_DOMAIN_ID   (0x7FFFU)
   27.82 -#define is_idle_task(_p) (test_bit(DF_IDLETASK, &(_p)->d_flags))
   27.83 +#define is_idle_task(_p) (test_bit(DF_IDLETASK, &(_p)->flags))
   27.84  
   27.85  struct exec_domain *alloc_exec_domain_struct(struct domain *d,
   27.86                                               unsigned long vcpu);
   27.87 @@ -315,35 +317,36 @@ extern struct domain *domain_hash[DOMAIN
   27.88  extern struct domain *domain_list;
   27.89  
   27.90  #define for_each_domain(_d) \
   27.91 - for ( (_d) = domain_list; (_d) != NULL; (_d) = (_d)->next_list )
   27.92 + for ( (_d) = domain_list; (_d) != NULL; (_d) = (_d)->next_in_list )
   27.93  
   27.94  #define for_each_exec_domain(_d,_ed) \
   27.95   for ( (_ed) = (_d)->exec_domain[0]; \
   27.96         (_ed) != NULL;                \
   27.97 -       (_ed) = (_ed)->ed_next_list )
   27.98 +       (_ed) = (_ed)->next_in_list )
   27.99  
  27.100  #define EDF_DONEFPUINIT  0 /* Has the FPU been initialised for this task?    */
  27.101  #define EDF_USEDFPU      1 /* Has this task used the FPU since last save?    */
  27.102  #define EDF_GUEST_STTS   2 /* Has the guest OS requested 'stts'?             */
  27.103 -#define  DF_CONSTRUCTED  3 /* Has the guest OS been fully built yet?         */
  27.104 -#define  DF_IDLETASK     4 /* Is this one of the per-CPU idle domains?       */
  27.105 -#define  DF_PRIVILEGED   5 /* Is this domain privileged?                     */
  27.106 -#define  DF_PHYSDEV      6 /* May this domain do IO to physical devices?     */
  27.107 -#define EDF_BLOCKED      7 /* Domain is blocked waiting for an event.        */
  27.108 -#define EDF_CTRLPAUSE    8 /* Domain is paused by controller software.       */
  27.109 -#define  DF_SHUTDOWN     9 /* Guest shut itself down for some reason.        */
  27.110 -#define  DF_CRASHED     10 /* Domain crashed inside Xen, cannot continue.    */
  27.111 -#define  DF_DYING       11 /* Death rattle.                                  */
  27.112 -#define EDF_RUNNING     12 /* Currently running on a CPU.                    */
  27.113 -#define EDF_CPUPINNED   13 /* Disables auto-migration.                       */
  27.114 -#define EDF_MIGRATED    14 /* Domain migrated between CPUs.                  */
  27.115 -#define EDF_DONEINIT    15 /* Initialization completed    .                  */
  27.116 +#define EDF_BLOCKED      3 /* Domain is blocked waiting for an event.        */
  27.117 +#define EDF_CTRLPAUSE    4 /* Domain is paused by controller software.       */
  27.118 +#define EDF_RUNNING      5 /* Currently running on a CPU.                    */
  27.119 +#define EDF_CPUPINNED    6 /* Disables auto-migration.                       */
  27.120 +#define EDF_MIGRATED     7 /* Domain migrated between CPUs.                  */
  27.121 +#define EDF_DONEINIT     8 /* Initialization completed    .                  */
  27.122  
  27.123 -static inline int domain_runnable(struct exec_domain *d)
  27.124 +#define DF_CONSTRUCTED   0 /* Has the guest OS been fully built yet?         */
  27.125 +#define DF_IDLETASK      1 /* Is this one of the per-CPU idle domains?       */
  27.126 +#define DF_PRIVILEGED    2 /* Is this domain privileged?                     */
  27.127 +#define DF_PHYSDEV       3 /* May this domain do IO to physical devices?     */
  27.128 +#define DF_SHUTDOWN      4 /* Guest shut itself down for some reason.        */
  27.129 +#define DF_CRASHED       5 /* Domain crashed inside Xen, cannot continue.    */
  27.130 +#define DF_DYING         6 /* Death rattle.                                  */
  27.131 +
  27.132 +static inline int domain_runnable(struct exec_domain *ed)
  27.133  {
  27.134 -    return ( (atomic_read(&d->pausecnt) == 0) &&
  27.135 -             !(d->ed_flags & ((1<<EDF_BLOCKED)|(1<<EDF_CTRLPAUSE))) &&
  27.136 -             !(d->domain->d_flags & ((1<<DF_SHUTDOWN)|(1<<DF_CRASHED))) );
  27.137 +    return ( (atomic_read(&ed->pausecnt) == 0) &&
  27.138 +             !(ed->flags & ((1<<EDF_BLOCKED)|(1<<EDF_CTRLPAUSE))) &&
  27.139 +             !(ed->domain->flags & ((1<<DF_SHUTDOWN)|(1<<DF_CRASHED))) );
  27.140  }
  27.141  
  27.142  static inline void exec_domain_pause(struct exec_domain *ed)
  27.143 @@ -385,7 +388,7 @@ static inline void domain_unpause(struct
  27.144  
  27.145  static inline void exec_domain_unblock(struct exec_domain *ed)
  27.146  {
  27.147 -    if ( test_and_clear_bit(EDF_BLOCKED, &ed->ed_flags) )
  27.148 +    if ( test_and_clear_bit(EDF_BLOCKED, &ed->flags) )
  27.149          domain_wake(ed);
  27.150  }
  27.151  
  27.152 @@ -396,7 +399,7 @@ static inline void domain_pause_by_syste
  27.153      for_each_exec_domain ( d, ed )
  27.154      {
  27.155          ASSERT(ed != current);
  27.156 -        if ( !test_and_set_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
  27.157 +        if ( !test_and_set_bit(EDF_CTRLPAUSE, &ed->flags) )
  27.158              domain_sleep(ed);
  27.159      }
  27.160  
  27.161 @@ -409,14 +412,14 @@ static inline void domain_unpause_by_sys
  27.162  
  27.163      for_each_exec_domain ( d, ed )
  27.164      {
  27.165 -        if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
  27.166 +        if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->flags) )
  27.167              domain_wake(ed);
  27.168      }
  27.169  }
  27.170  
  27.171  
  27.172 -#define IS_PRIV(_d) (test_bit(DF_PRIVILEGED, &(_d)->d_flags))
  27.173 -#define IS_CAPABLE_PHYSDEV(_d) (test_bit(DF_PHYSDEV, &(_d)->d_flags))
  27.174 +#define IS_PRIV(_d) (test_bit(DF_PRIVILEGED, &(_d)->flags))
  27.175 +#define IS_CAPABLE_PHYSDEV(_d) (test_bit(DF_PHYSDEV, &(_d)->flags))
  27.176  
  27.177  #define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
  27.178
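
Beyond the renames, the xen/include/xen/sched.h hunk above splits the EDF_* (per-vcpu) and DF_* (per-domain) flag bits into two independently numbered sequences. Because EDF_* bits are only ever tested against ed->flags and DF_* bits against d->flags throughout this diff, the overlapping numeric values cannot clash. A short comment-style summary, with values taken from that hunk:

    /* Bit numbering before -> after (per the sched.h hunk):
     *   EDF_BLOCKED     7 -> 3      DF_CONSTRUCTED   3 -> 0
     *   EDF_RUNNING    12 -> 5      DF_DYING        11 -> 6
     * EDF_* indexes ed->flags, DF_* indexes d->flags, as in:
     *   test_bit(EDF_RUNNING, &ed->flags);
     *   test_bit(DF_DYING, &d->flags);
     */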