ia64/xen-unstable

changeset 5289:cd10a0139388

bitkeeper revision 1.1644.1.1 (429f749dKFzVUg9NXDMVu4apHJvpNQ)

The last annoying rename:
struct exec_domain *ed -> struct vcpu *v
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Jun 02 21:05:33 2005 +0000 (2005-06-02)
parents ac04979dce3a
children e9a3f213817d
files xen/arch/ia64/asm-offsets.c xen/arch/ia64/domain.c xen/arch/ia64/hypercall.c xen/arch/ia64/idle0_task.c xen/arch/ia64/irq.c xen/arch/ia64/mmio.c xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c xen/arch/ia64/patch/linux-2.6.7/current.h xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c xen/arch/ia64/patch/linux-2.6.7/time.c xen/arch/ia64/privop.c xen/arch/ia64/process.c xen/arch/ia64/regionreg.c xen/arch/ia64/vmmu.c xen/arch/ia64/vmx_init.c xen/arch/ia64/vmx_process.c xen/arch/ia64/xenirq.c xen/arch/ia64/xenmisc.c xen/arch/ia64/xensetup.c xen/arch/ia64/xentime.c xen/arch/x86/audit.c xen/arch/x86/dom0_ops.c xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/i387.c xen/arch/x86/idle0_task.c xen/arch/x86/irq.c xen/arch/x86/mm.c xen/arch/x86/setup.c xen/arch/x86/shadow.c xen/arch/x86/smpboot.c xen/arch/x86/time.c xen/arch/x86/traps.c xen/arch/x86/vmx.c xen/arch/x86/vmx_intercept.c xen/arch/x86/vmx_io.c xen/arch/x86/vmx_platform.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_32/asm-offsets.c xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_32/seg_fixup.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/asm-offsets.c xen/arch/x86/x86_64/entry.S xen/arch/x86/x86_64/mm.c xen/arch/x86/x86_64/traps.c xen/common/dom0_ops.c xen/common/domain.c xen/common/event_channel.c xen/common/grant_table.c xen/common/keyhandler.c xen/common/sched_bvt.c xen/common/sched_sedf.c xen/common/schedule.c xen/drivers/char/console.c xen/include/asm-ia64/config.h xen/include/asm-ia64/domain.h xen/include/asm-ia64/vcpu.h xen/include/asm-ia64/vmmu.h xen/include/asm-ia64/vmx.h xen/include/asm-ia64/vmx_vpd.h xen/include/asm-x86/current.h xen/include/asm-x86/debugger.h xen/include/asm-x86/domain.h xen/include/asm-x86/i387.h xen/include/asm-x86/ldt.h xen/include/asm-x86/mm.h xen/include/asm-x86/processor.h xen/include/asm-x86/shadow.h xen/include/asm-x86/vmx.h xen/include/asm-x86/vmx_platform.h xen/include/asm-x86/vmx_virpit.h xen/include/asm-x86/vmx_vmcs.h xen/include/public/event_channel.h xen/include/xen/domain.h xen/include/xen/event.h xen/include/xen/irq.h xen/include/xen/sched-if.h xen/include/xen/sched.h xen/include/xen/time.h xen/include/xen/types.h
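The rename is mechanical but pervasive: every struct exec_domain *ed becomes struct vcpu *v in declarations, casts, asm-offsets, and field accesses, and the per-domain array d->exec_domain[i] becomes d->vcpu[i]. Distilled from the arch_free_exec_domain_struct() hunk in domain.c below, a typical before/after (illustrative only, not itself part of the changeset):

    /* Before: exec_domain naming */
    void arch_free_exec_domain_struct(struct exec_domain *ed)
    {
            free_xenheap_pages(ed, KERNEL_STACK_SIZE_ORDER);
    }

    /* After: vcpu naming, behaviour unchanged */
    void arch_free_vcpu_struct(struct vcpu *v)
    {
            free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
    }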
line diff
     1.1 --- a/xen/arch/ia64/asm-offsets.c	Thu Jun 02 19:19:24 2005 +0000
     1.2 +++ b/xen/arch/ia64/asm-offsets.c	Thu Jun 02 21:05:33 2005 +0000
     1.3 @@ -13,7 +13,7 @@
     1.4  #include <asm/tlb.h>
     1.5  #endif // CONFIG_VTI
     1.6  
     1.7 -#define task_struct exec_domain
     1.8 +#define task_struct vcpu
     1.9  
    1.10  #define DEFINE(sym, val) \
    1.11          asm volatile("\n->" #sym " %0 " #val : : "i" (val))
    1.12 @@ -60,8 +60,8 @@ void foo(void)
    1.13  	//DEFINE(IA64_TASK_SIGHAND_OFFSET,offsetof (struct task_struct, sighand));
    1.14  	//DEFINE(IA64_TASK_SIGNAL_OFFSET,offsetof (struct task_struct, signal));
    1.15  	//DEFINE(IA64_TASK_TGID_OFFSET, offsetof (struct task_struct, tgid));
    1.16 -	DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct exec_domain, arch._thread.ksp));
    1.17 -	DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct exec_domain, arch._thread.on_ustack));
    1.18 +	DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct vcpu, arch._thread.ksp));
    1.19 +	DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct vcpu, arch._thread.on_ustack));
    1.20  
    1.21  	BLANK();
    1.22  
    1.23 @@ -112,14 +112,14 @@ void foo(void)
    1.24  	DEFINE(IA64_PT_REGS_CR_ISR_OFFSET, offsetof (struct xen_regs, cr_isr));
    1.25  	DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct xen_regs, eml_unat));
    1.26  	DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct xen_regs, rfi_pfs));
    1.27 -	DEFINE(RFI_IIP_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.rfi_iip));
    1.28 -	DEFINE(RFI_IPSR_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.rfi_ipsr));
    1.29 -	DEFINE(RFI_IFS_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.rfi_ifs));
    1.30 -	DEFINE(RFI_PFS_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.rfi_pfs));
    1.31 -	DEFINE(SWITCH_MRR5_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mrr5));
    1.32 -	DEFINE(SWITCH_MRR6_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mrr6));
    1.33 -	DEFINE(SWITCH_MRR7_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mrr7));
    1.34 -	DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mpta));
    1.35 +	DEFINE(RFI_IIP_OFFSET, offsetof(struct vcpu, arch.arch_vmx.rfi_iip));
    1.36 +	DEFINE(RFI_IPSR_OFFSET, offsetof(struct vcpu, arch.arch_vmx.rfi_ipsr));
    1.37 +	DEFINE(RFI_IFS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.rfi_ifs));
    1.38 +	DEFINE(RFI_PFS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.rfi_pfs));
    1.39 +	DEFINE(SWITCH_MRR5_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mrr5));
    1.40 +	DEFINE(SWITCH_MRR6_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mrr6));
    1.41 +	DEFINE(SWITCH_MRR7_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mrr7));
    1.42 +	DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mpta));
    1.43  #endif  //CONFIG_VTI
    1.44  	DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
    1.45  	DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
    1.46 @@ -193,7 +193,7 @@ void foo(void)
    1.47  	BLANK();
    1.48  
    1.49  #ifdef  CONFIG_VTI
    1.50 -	DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct exec_domain, arch.arch_vmx.vpd));
    1.51 +	DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.vpd));
    1.52  	DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
    1.53  	DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
    1.54  
     2.1 --- a/xen/arch/ia64/domain.c	Thu Jun 02 19:19:24 2005 +0000
     2.2 +++ b/xen/arch/ia64/domain.c	Thu Jun 02 21:05:33 2005 +0000
     2.3 @@ -154,23 +154,23 @@ void startup_cpu_idle_loop(void)
     2.4  	continue_cpu_idle_loop();
     2.5  }
     2.6  
     2.7 -struct exec_domain *arch_alloc_exec_domain_struct(void)
     2.8 +struct vcpu *arch_alloc_vcpu_struct(void)
     2.9  {
    2.10 -	/* Per-vp stack is used here. So we need keep exec_domain
    2.11 +	/* Per-vp stack is used here. So we need keep vcpu
    2.12  	 * same page as per-vp stack */
    2.13  	return alloc_xenheap_pages(KERNEL_STACK_SIZE_ORDER);
    2.14  }
    2.15  
    2.16 -void arch_free_exec_domain_struct(struct exec_domain *ed)
    2.17 +void arch_free_vcpu_struct(struct vcpu *v)
    2.18  {
    2.19 -	free_xenheap_pages(ed, KERNEL_STACK_SIZE_ORDER);
    2.20 +	free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
    2.21  }
    2.22  
    2.23  #ifdef CONFIG_VTI
    2.24 -void arch_do_createdomain(struct exec_domain *ed)
    2.25 +void arch_do_createdomain(struct vcpu *v)
    2.26  {
    2.27 -	struct domain *d = ed->domain;
    2.28 -	struct thread_info *ti = alloc_thread_info(ed);
    2.29 +	struct domain *d = v->domain;
    2.30 +	struct thread_info *ti = alloc_thread_info(v);
    2.31  
    2.32  	/* If domain is VMX domain, shared info area is created
    2.33  	 * by domain and then domain notifies HV by specific hypercall.
    2.34 @@ -187,18 +187,18 @@ void arch_do_createdomain(struct exec_do
    2.35  	 * normal xen convention.
    2.36  	 */
    2.37  	d->shared_info = NULL;
    2.38 -	ed->vcpu_info = (void *)alloc_xenheap_page();
    2.39 -	if (!ed->vcpu_info) {
    2.40 +	v->vcpu_info = (void *)alloc_xenheap_page();
    2.41 +	if (!v->vcpu_info) {
    2.42     		printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
    2.43     		while (1);
    2.44  	}
    2.45 -	memset(ed->vcpu_info, 0, PAGE_SIZE);
    2.46 +	memset(v->vcpu_info, 0, PAGE_SIZE);
    2.47  
    2.48  	/* Clear thread_info to clear some important fields, like preempt_count */
    2.49  	memset(ti, 0, sizeof(struct thread_info));
    2.50  
    2.51  	/* Allocate per-domain vTLB and vhpt */
    2.52 -	ed->arch.vtlb = init_domain_tlb(ed);
    2.53 +	v->arch.vtlb = init_domain_tlb(v);
    2.54  
    2.55  	/* Physical->machine page table will be allocated when 
    2.56  	 * final setup, since we have no the maximum pfn number in 
    2.57 @@ -215,20 +215,20 @@ void arch_do_createdomain(struct exec_do
    2.58  	// stay on kernel stack because may get interrupts!
    2.59  	// ia64_ret_from_clone (which b0 gets in new_thread) switches
    2.60  	// to user stack
    2.61 -	ed->arch._thread.on_ustack = 0;
    2.62 +	v->arch._thread.on_ustack = 0;
    2.63  }
    2.64  #else // CONFIG_VTI
    2.65 -void arch_do_createdomain(struct exec_domain *ed)
    2.66 +void arch_do_createdomain(struct vcpu *v)
    2.67  {
    2.68 -	struct domain *d = ed->domain;
    2.69 +	struct domain *d = v->domain;
    2.70  
    2.71  	d->shared_info = (void *)alloc_xenheap_page();
    2.72 -	ed->vcpu_info = (void *)alloc_xenheap_page();
    2.73 -	if (!ed->vcpu_info) {
    2.74 +	v->vcpu_info = (void *)alloc_xenheap_page();
    2.75 +	if (!v->vcpu_info) {
    2.76     		printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
    2.77     		while (1);
    2.78  	}
    2.79 -	memset(ed->vcpu_info, 0, PAGE_SIZE);
    2.80 +	memset(v->vcpu_info, 0, PAGE_SIZE);
    2.81  	/* pin mapping */
    2.82  	// FIXME: Does this belong here?  Or do only at domain switch time?
    2.83  #if 0
    2.84 @@ -246,7 +246,7 @@ void arch_do_createdomain(struct exec_do
    2.85  	d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
    2.86  	if ((d->metaphysical_rid = allocate_metaphysical_rid()) == -1UL)
    2.87  		BUG();
    2.88 -	ed->vcpu_info->arch.metaphysical_mode = 1;
    2.89 +	v->vcpu_info->arch.metaphysical_mode = 1;
    2.90  #define DOMAIN_RID_BITS_DEFAULT 18
    2.91  	if (!allocate_rid_range(d,DOMAIN_RID_BITS_DEFAULT)) // FIXME
    2.92  		BUG();
    2.93 @@ -258,22 +258,22 @@ void arch_do_createdomain(struct exec_do
    2.94  	// stay on kernel stack because may get interrupts!
    2.95  	// ia64_ret_from_clone (which b0 gets in new_thread) switches
    2.96  	// to user stack
    2.97 -	ed->arch._thread.on_ustack = 0;
    2.98 +	v->arch._thread.on_ustack = 0;
    2.99  }
   2.100  #endif // CONFIG_VTI
   2.101  
   2.102 -void arch_do_boot_vcpu(struct exec_domain *p)
   2.103 +void arch_do_boot_vcpu(struct vcpu *v)
   2.104  {
   2.105  	return;
   2.106  }
   2.107  
   2.108 -int arch_set_info_guest(struct exec_domain *p, struct vcpu_guest_context *c)
   2.109 +int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c)
   2.110  {
   2.111  	dummy();
   2.112  	return 1;
   2.113  }
   2.114  
   2.115 -int arch_final_setup_guest(struct exec_domain *p, struct vcpu_guest_context *c)
   2.116 +int arch_final_setup_guest(struct vcpu *v, struct vcpu_guest_context *c)
   2.117  {
   2.118  	dummy();
   2.119  	return 1;
   2.120 @@ -285,12 +285,12 @@ void domain_relinquish_resources(struct 
   2.121  }
   2.122  
   2.123  #ifdef CONFIG_VTI
   2.124 -void new_thread(struct exec_domain *ed,
   2.125 +void new_thread(struct vcpu *v,
   2.126                  unsigned long start_pc,
   2.127                  unsigned long start_stack,
   2.128                  unsigned long start_info)
   2.129  {
   2.130 -	struct domain *d = ed->domain;
   2.131 +	struct domain *d = v->domain;
   2.132  	struct switch_stack *sw;
   2.133  	struct xen_regs *regs;
   2.134  	struct ia64_boot_param *bp;
   2.135 @@ -302,12 +302,12 @@ void new_thread(struct exec_domain *ed,
   2.136  #ifdef CONFIG_DOMAIN0_CONTIGUOUS
   2.137  	if (d == dom0) start_pc += dom0_start;
   2.138  #endif
   2.139 -	regs = (struct xen_regs *) ((unsigned long) ed + IA64_STK_OFFSET) - 1;
   2.140 +	regs = (struct xen_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
   2.141  	sw = (struct switch_stack *) regs - 1;
   2.142  	/* Sanity Clear */
   2.143  	memset(sw, 0, sizeof(struct xen_regs) + sizeof(struct switch_stack));
   2.144  
   2.145 -	if (VMX_DOMAIN(ed)) {
   2.146 +	if (VMX_DOMAIN(v)) {
   2.147  		/* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */
   2.148  		regs->cr_ipsr = 0x501008826008; /* Need to be expanded as macro */
   2.149  	} else {
   2.150 @@ -320,42 +320,42 @@ void new_thread(struct exec_domain *ed,
   2.151  	regs->ar_rsc = 0x0;
   2.152  	regs->cr_ifs = 0x0;
   2.153  	regs->ar_fpsr = sw->ar_fpsr = FPSR_DEFAULT;
   2.154 -	sw->ar_bspstore = (unsigned long)ed + IA64_RBS_OFFSET;
   2.155 -	printf("new_thread: ed=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
   2.156 -		ed,regs,sw,sw->ar_bspstore,IA64_STK_OFFSET,&regs->r8);
   2.157 +	sw->ar_bspstore = (unsigned long)v + IA64_RBS_OFFSET;
   2.158 +	printf("new_thread: v=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
   2.159 +		v,regs,sw,sw->ar_bspstore,IA64_STK_OFFSET,&regs->r8);
   2.160  	printf("iip:0x%lx,ipsr:0x%lx\n", regs->cr_iip, regs->cr_ipsr);
   2.161  
   2.162  	sw->b0 = (unsigned long) &ia64_ret_from_clone;
   2.163 -	ed->arch._thread.ksp = (unsigned long) sw - 16;
   2.164 +	v->arch._thread.ksp = (unsigned long) sw - 16;
   2.165  	printk("new_thread, about to call init_all_rr\n");
   2.166 -	if (VMX_DOMAIN(ed)) {
   2.167 -		vmx_init_all_rr(ed);
   2.168 +	if (VMX_DOMAIN(v)) {
   2.169 +		vmx_init_all_rr(v);
   2.170  	} else
   2.171 -		init_all_rr(ed);
   2.172 +		init_all_rr(v);
   2.173  	// set up boot parameters (and fake firmware)
   2.174  	printk("new_thread, about to call dom_fw_setup\n");
   2.175 -	VMX_VPD(ed,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);  //FIXME
   2.176 +	VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);  //FIXME
   2.177  	printk("new_thread, done with dom_fw_setup\n");
   2.178  
   2.179 -	if (VMX_DOMAIN(ed)) {
   2.180 +	if (VMX_DOMAIN(v)) {
   2.181  		/* Virtual processor context setup */
   2.182 -		VMX_VPD(ed, vpsr) = IA64_PSR_BN;
   2.183 -		VPD_CR(ed, dcr) = 0;
   2.184 +		VMX_VPD(v, vpsr) = IA64_PSR_BN;
   2.185 +		VPD_CR(v, dcr) = 0;
   2.186  	} else {
   2.187  		// don't forget to set this!
   2.188 -		ed->vcpu_info->arch.banknum = 1;
   2.189 +		v->vcpu_info->arch.banknum = 1;
   2.190  	}
   2.191  }
   2.192  #else // CONFIG_VTI
   2.193  
   2.194  // heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
   2.195  // and linux/arch/ia64/kernel/process.c:kernel_thread()
   2.196 -void new_thread(struct exec_domain *ed,
   2.197 +void new_thread(struct vcpu *v,
   2.198  	            unsigned long start_pc,
   2.199  	            unsigned long start_stack,
   2.200  	            unsigned long start_info)
   2.201  {
   2.202 -	struct domain *d = ed->domain;
   2.203 +	struct domain *d = v->domain;
   2.204  	struct switch_stack *sw;
   2.205  	struct pt_regs *regs;
   2.206  	unsigned long new_rbs;
   2.207 @@ -366,10 +366,10 @@ void new_thread(struct exec_domain *ed,
   2.208  #ifdef CONFIG_DOMAIN0_CONTIGUOUS
   2.209  	if (d == dom0) start_pc += dom0_start;
   2.210  #endif
   2.211 -	regs = (struct pt_regs *) ((unsigned long) ed + IA64_STK_OFFSET) - 1;
   2.212 +	regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
   2.213  	sw = (struct switch_stack *) regs - 1;
   2.214  	memset(sw,0,sizeof(struct switch_stack)+sizeof(struct pt_regs));
   2.215 -	new_rbs = (unsigned long) ed + IA64_RBS_OFFSET;
   2.216 +	new_rbs = (unsigned long) v + IA64_RBS_OFFSET;
   2.217  	regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
   2.218  		| IA64_PSR_BITS_TO_SET | IA64_PSR_BN
   2.219  		& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS);
   2.220 @@ -389,20 +389,20 @@ void new_thread(struct exec_domain *ed,
   2.221  	sw->caller_unat = 0;
   2.222  	sw->ar_pfs = 0;
   2.223  	sw->ar_bspstore = new_rbs;
   2.224 -	//regs->r13 = (unsigned long) ed;
   2.225 -printf("new_thread: ed=%p, start_pc=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
   2.226 -ed,start_pc,regs,sw,new_rbs,IA64_STK_OFFSET,&regs->r8);
   2.227 +	//regs->r13 = (unsigned long) v;
   2.228 +printf("new_thread: v=%p, start_pc=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
   2.229 +v,start_pc,regs,sw,new_rbs,IA64_STK_OFFSET,&regs->r8);
   2.230  	sw->b0 = (unsigned long) &ia64_ret_from_clone;
   2.231 -	ed->arch._thread.ksp = (unsigned long) sw - 16;
   2.232 -	//ed->thread_info->flags = 0;
   2.233 +	v->arch._thread.ksp = (unsigned long) sw - 16;
   2.234 +	//v->thread_info->flags = 0;
   2.235  printk("new_thread, about to call init_all_rr\n");
   2.236 -	init_all_rr(ed);
   2.237 +	init_all_rr(v);
   2.238  	// set up boot parameters (and fake firmware)
   2.239  printk("new_thread, about to call dom_fw_setup\n");
   2.240  	regs->r28 = dom_fw_setup(d,saved_command_line,256L);  //FIXME
   2.241  printk("new_thread, done with dom_fw_setup\n");
   2.242  	// don't forget to set this!
   2.243 -	ed->vcpu_info->arch.banknum = 1;
   2.244 +	v->vcpu_info->arch.banknum = 1;
   2.245  }
   2.246  #endif // CONFIG_VTI
   2.247  
   2.248 @@ -737,9 +737,9 @@ domU_staging_write_32(unsigned long at, 
   2.249   * here.
   2.250   */
   2.251  void
   2.252 -post_arch_do_create_domain(struct exec_domain *ed, int vmx_domain)
   2.253 +post_arch_do_create_domain(struct vcpu *v, int vmx_domain)
   2.254  {
   2.255 -    struct domain *d = ed->domain;
   2.256 +    struct domain *d = v->domain;
   2.257  
   2.258      if (!vmx_domain) {
   2.259  	d->shared_info = (void*)alloc_xenheap_page();
   2.260 @@ -786,7 +786,7 @@ int construct_dom0(struct domain *d,
   2.261      unsigned long alloc_start, alloc_end;
   2.262      struct pfn_info *page = NULL;
   2.263      start_info_t *si;
   2.264 -    struct exec_domain *ed = d->exec_domain[0];
   2.265 +    struct vcpu *v = d->vcpu[0];
   2.266      struct domain_setup_info dsi;
   2.267      unsigned long p_start;
   2.268      unsigned long pkern_start;
   2.269 @@ -882,7 +882,7 @@ int construct_dom0(struct domain *d,
   2.270  	machine_to_phys_mapping[mfn] = mfn;
   2.271      }
   2.272  
   2.273 -    post_arch_do_create_domain(ed, vmx_dom0);
   2.274 +    post_arch_do_create_domain(v, vmx_dom0);
   2.275  
   2.276      /* Load Dom0 image to its own memory */
   2.277      loaddomainelfimage(d,image_start);
   2.278 @@ -898,7 +898,7 @@ int construct_dom0(struct domain *d,
   2.279      /* Physical mode emulation initialization, including
   2.280       * emulation ID allcation and related memory request
   2.281       */
   2.282 -    physical_mode_init(ed);
   2.283 +    physical_mode_init(v);
   2.284      /* Dom0's pfn is equal to mfn, so there's no need to allocate pmt
   2.285       * for dom0
   2.286       */
   2.287 @@ -916,11 +916,11 @@ int construct_dom0(struct domain *d,
   2.288  	vmx_final_setup_domain(dom0);
   2.289      
   2.290      /* vpd is ready now */
   2.291 -    vlsapic_reset(ed);
   2.292 -    vtm_init(ed);
   2.293 +    vlsapic_reset(v);
   2.294 +    vtm_init(v);
   2.295  
   2.296      set_bit(_DOMF_constructed, &d->domain_flags);
   2.297 -    new_thread(ed, pkern_entry, 0, 0);
   2.298 +    new_thread(v, pkern_entry, 0, 0);
   2.299  
   2.300      // FIXME: Hack for keyboard input
   2.301  #ifdef CLONE_DOMAIN0
   2.302 @@ -928,12 +928,12 @@ if (d == dom0)
   2.303  #endif
   2.304      serial_input_init();
   2.305      if (d == dom0) {
   2.306 -    	ed->vcpu_info->arch.delivery_mask[0] = -1L;
   2.307 -    	ed->vcpu_info->arch.delivery_mask[1] = -1L;
   2.308 -    	ed->vcpu_info->arch.delivery_mask[2] = -1L;
   2.309 -    	ed->vcpu_info->arch.delivery_mask[3] = -1L;
   2.310 +    	v->vcpu_info->arch.delivery_mask[0] = -1L;
   2.311 +    	v->vcpu_info->arch.delivery_mask[1] = -1L;
   2.312 +    	v->vcpu_info->arch.delivery_mask[2] = -1L;
   2.313 +    	v->vcpu_info->arch.delivery_mask[3] = -1L;
   2.314      }
   2.315 -    else __set_bit(0x30,ed->vcpu_info->arch.delivery_mask);
   2.316 +    else __set_bit(0x30,v->vcpu_info->arch.delivery_mask);
   2.317  
   2.318      return 0;
   2.319  }
   2.320 @@ -953,7 +953,7 @@ int construct_dom0(struct domain *d,
   2.321  	//l1_pgentry_t *l1tab = NULL, *l1start = NULL;
   2.322  	struct pfn_info *page = NULL;
   2.323  	start_info_t *si;
   2.324 -	struct exec_domain *ed = d->exec_domain[0];
   2.325 +	struct vcpu *v = d->vcpu[0];
   2.326  
   2.327  	struct domain_setup_info dsi;
   2.328  	unsigned long p_start;
   2.329 @@ -1095,19 +1095,19 @@ int construct_dom0(struct domain *d,
   2.330  
   2.331  	set_bit(_DOMF_constructed, &d->domain_flags);
   2.332  
   2.333 -	new_thread(ed, pkern_entry, 0, 0);
   2.334 +	new_thread(v, pkern_entry, 0, 0);
   2.335  	// FIXME: Hack for keyboard input
   2.336  #ifdef CLONE_DOMAIN0
   2.337  if (d == dom0)
   2.338  #endif
   2.339  	serial_input_init();
   2.340  	if (d == dom0) {
   2.341 -		ed->vcpu_info->arch.delivery_mask[0] = -1L;
   2.342 -		ed->vcpu_info->arch.delivery_mask[1] = -1L;
   2.343 -		ed->vcpu_info->arch.delivery_mask[2] = -1L;
   2.344 -		ed->vcpu_info->arch.delivery_mask[3] = -1L;
   2.345 +		v->vcpu_info->arch.delivery_mask[0] = -1L;
   2.346 +		v->vcpu_info->arch.delivery_mask[1] = -1L;
   2.347 +		v->vcpu_info->arch.delivery_mask[2] = -1L;
   2.348 +		v->vcpu_info->arch.delivery_mask[3] = -1L;
   2.349  	}
   2.350 -	else __set_bit(0x30,ed->vcpu_info->arch.delivery_mask);
   2.351 +	else __set_bit(0x30,v->vcpu_info->arch.delivery_mask);
   2.352  
   2.353  	return 0;
   2.354  }
   2.355 @@ -1120,7 +1120,7 @@ int construct_domU(struct domain *d,
   2.356  	           char *cmdline)
   2.357  {
   2.358  	int i, rc;
   2.359 -	struct exec_domain *ed = d->exec_domain[0];
   2.360 +	struct vcpu *v = d->vcpu[0];
   2.361  	unsigned long pkern_entry;
   2.362  
   2.363  #ifndef DOMU_AUTO_RESTART
   2.364 @@ -1161,25 +1161,25 @@ int construct_domU(struct domain *d,
   2.365  
   2.366  	printk("calling new_thread, entry=%p\n",pkern_entry);
   2.367  #ifdef DOMU_AUTO_RESTART
   2.368 -	ed->domain->arch.image_start = image_start;
   2.369 -	ed->domain->arch.image_len = image_len;
   2.370 -	ed->domain->arch.entry = pkern_entry;
   2.371 +	v->domain->arch.image_start = image_start;
   2.372 +	v->domain->arch.image_len = image_len;
   2.373 +	v->domain->arch.entry = pkern_entry;
   2.374  #endif
   2.375 -	new_thread(ed, pkern_entry, 0, 0);
   2.376 +	new_thread(v, pkern_entry, 0, 0);
   2.377  	printk("new_thread returns\n");
   2.378 -	__set_bit(0x30,ed->vcpu_info->arch.delivery_mask);
   2.379 +	__set_bit(0x30,v->vcpu_info->arch.delivery_mask);
   2.380  
   2.381  	return 0;
   2.382  }
   2.383  
   2.384  #ifdef DOMU_AUTO_RESTART
   2.385 -void reconstruct_domU(struct exec_domain *ed)
   2.386 +void reconstruct_domU(struct vcpu *v)
   2.387  {
   2.388  	/* re-copy the OS image to reset data values to original */
   2.389  	printk("reconstruct_domU: restarting domain %d...\n",
   2.390 -		ed->domain->domain_id);
   2.391 -	loaddomainelfimage(ed->domain,ed->domain->arch.image_start);
   2.392 -	new_thread(ed, ed->domain->arch.entry, 0, 0);
   2.393 +		v->domain->domain_id);
   2.394 +	loaddomainelfimage(v->domain,v->domain->arch.image_start);
   2.395 +	new_thread(v, v->domain->arch.entry, 0, 0);
   2.396  }
   2.397  #endif
   2.398  
   2.399 @@ -1229,9 +1229,9 @@ void dummy(void)
   2.400  
   2.401  
   2.402  #if 0
   2.403 -void switch_to(struct exec_domain *prev, struct exec_domain *next)
   2.404 +void switch_to(struct vcpu *prev, struct vcpu *next)
   2.405  {
   2.406 - 	struct exec_domain *last;
   2.407 + 	struct vcpu *last;
   2.408  
   2.409  	__switch_to(prev,next,last);
   2.410  	//set_current(next);
   2.411 @@ -1240,7 +1240,7 @@ void switch_to(struct exec_domain *prev,
   2.412  
   2.413  void domain_pend_keyboard_interrupt(int irq)
   2.414  {
   2.415 -	vcpu_pend_interrupt(dom0->exec_domain[0],irq);
   2.416 +	vcpu_pend_interrupt(dom0->vcpu[0],irq);
   2.417  }
   2.418  
   2.419  /////////////////////////////////
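A note on the domain.c hunks above: the comment in arch_alloc_vcpu_struct() explains that the vcpu structure is deliberately co-allocated with the per-vp kernel stack, which is why it uses alloc_xenheap_pages(KERNEL_STACK_SIZE_ORDER) rather than a single-page allocation. That co-location is also what lets new_thread() locate the saved register frame by pointer arithmetic alone. A minimal sketch of the idiom, assuming IA64_STK_OFFSET is the byte offset of the stack top within the vcpu/stack allocation (the helper name is hypothetical):

    /* Hypothetical helper: the register frame sits one pt_regs below
     * the top of the combined vcpu/stack allocation. */
    static inline struct pt_regs *vcpu_regs(struct vcpu *v)
    {
            return (struct pt_regs *)((unsigned long)v + IA64_STK_OFFSET) - 1;
    }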
     3.1 --- a/xen/arch/ia64/hypercall.c	Thu Jun 02 19:19:24 2005 +0000
     3.2 +++ b/xen/arch/ia64/hypercall.c	Thu Jun 02 21:05:33 2005 +0000
     3.3 @@ -24,7 +24,7 @@ void fooefi(void) {}
     3.4  int
     3.5  ia64_hypercall (struct pt_regs *regs)
     3.6  {
     3.7 -	struct exec_domain *ed = (struct domain *) current;
     3.8 +	struct vcpu *v = (struct domain *) current;
     3.9  	struct ia64_sal_retval x;
    3.10  	unsigned long *tv, *tc;
    3.11  
    3.12 @@ -38,7 +38,7 @@ ia64_hypercall (struct pt_regs *regs)
    3.13  		// to a yet-to-be-found bug where pending_interruption
    3.14  		// is zero when it shouldn't be. Since PAL is called
    3.15  		// in the idle loop, this should resolve it
    3.16 -		ed->vcpu_info->arch.pending_interruption = 1;
    3.17 +		v->vcpu_info->arch.pending_interruption = 1;
    3.18  #endif
    3.19  		x = pal_emulator_static(regs->r28);
    3.20  		if (regs->r28 == PAL_HALT_LIGHT) {
    3.21 @@ -49,10 +49,10 @@ ia64_hypercall (struct pt_regs *regs)
    3.22  		regs->r10 = x.v1; regs->r11 = x.v2;
    3.23  		break;
    3.24  	    case FW_HYPERCALL_SAL_CALL:
    3.25 -		x = sal_emulator(vcpu_get_gr(ed,32),vcpu_get_gr(ed,33),
    3.26 -			vcpu_get_gr(ed,34),vcpu_get_gr(ed,35),
    3.27 -			vcpu_get_gr(ed,36),vcpu_get_gr(ed,37),
    3.28 -			vcpu_get_gr(ed,38),vcpu_get_gr(ed,39));
    3.29 +		x = sal_emulator(vcpu_get_gr(v,32),vcpu_get_gr(v,33),
    3.30 +			vcpu_get_gr(v,34),vcpu_get_gr(v,35),
    3.31 +			vcpu_get_gr(v,36),vcpu_get_gr(v,37),
    3.32 +			vcpu_get_gr(v,38),vcpu_get_gr(v,39));
    3.33  		regs->r8 = x.status; regs->r9 = x.v0;
    3.34  		regs->r10 = x.v1; regs->r11 = x.v2;
    3.35  		break;
    3.36 @@ -73,8 +73,8 @@ ia64_hypercall (struct pt_regs *regs)
    3.37  #endif
    3.38  		break;
    3.39  	    case FW_HYPERCALL_EFI_GET_TIME:
    3.40 -		tv = vcpu_get_gr(ed,32);
    3.41 -		tc = vcpu_get_gr(ed,33);
    3.42 +		tv = vcpu_get_gr(v,32);
    3.43 +		tc = vcpu_get_gr(v,33);
    3.44  		//printf("efi_get_time(%p,%p) called...",tv,tc);
    3.45  		tv = __va(translate_domain_mpaddr(tv));
    3.46  		if (tc) tc = __va(translate_domain_mpaddr(tc));
    3.47 @@ -99,28 +99,28 @@ ia64_hypercall (struct pt_regs *regs)
    3.48  		break;
    3.49  	    case 0xffff: // test dummy hypercall
    3.50  		regs->r8 = dump_privop_counts_to_user(
    3.51 -			vcpu_get_gr(ed,32),
    3.52 -			vcpu_get_gr(ed,33));
    3.53 +			vcpu_get_gr(v,32),
    3.54 +			vcpu_get_gr(v,33));
    3.55  		break;
    3.56  	    case 0xfffe: // test dummy hypercall
    3.57  		regs->r8 = zero_privop_counts_to_user(
    3.58 -			vcpu_get_gr(ed,32),
    3.59 -			vcpu_get_gr(ed,33));
    3.60 +			vcpu_get_gr(v,32),
    3.61 +			vcpu_get_gr(v,33));
    3.62  		break;
    3.63  	    case 0xfffd: // test dummy hypercall
    3.64  		regs->r8 = launch_domainU(
    3.65 -			vcpu_get_gr(ed,32));
    3.66 +			vcpu_get_gr(v,32));
    3.67  		break;
    3.68  	    case 0xfffc: // test dummy hypercall
    3.69  		regs->r8 = domU_staging_write_32(
    3.70 -			vcpu_get_gr(ed,32),
    3.71 -			vcpu_get_gr(ed,33),
    3.72 -			vcpu_get_gr(ed,34),
    3.73 -			vcpu_get_gr(ed,35),
    3.74 -			vcpu_get_gr(ed,36));
    3.75 +			vcpu_get_gr(v,32),
    3.76 +			vcpu_get_gr(v,33),
    3.77 +			vcpu_get_gr(v,34),
    3.78 +			vcpu_get_gr(v,35),
    3.79 +			vcpu_get_gr(v,36));
    3.80  		break;
    3.81  	    case 0xfffb: // test dummy hypercall
    3.82 -		regs->r8 = domU_staging_read_8(vcpu_get_gr(ed,32));
    3.83 +		regs->r8 = domU_staging_read_8(vcpu_get_gr(v,32));
    3.84  		break;
    3.85  	}
    3.86  	return 1;
     4.1 --- a/xen/arch/ia64/idle0_task.c	Thu Jun 02 19:19:24 2005 +0000
     4.2 +++ b/xen/arch/ia64/idle0_task.c	Thu Jun 02 21:05:33 2005 +0000
     4.3 @@ -31,7 +31,7 @@ EXPORT_SYMBOL(init_mm);
     4.4  
     4.5  struct domain idle0_domain = IDLE0_DOMAIN(idle0_domain);
     4.6  #if 0
     4.7 -struct exec_domain idle0_exec_domain = IDLE0_EXEC_DOMAIN(idle0_exec_domain,
     4.8 +struct vcpu idle0_vcpu = IDLE0_EXEC_DOMAIN(idle0_vcpu,
     4.9                                                           &idle0_domain);
    4.10  #endif
    4.11  
     5.1 --- a/xen/arch/ia64/irq.c	Thu Jun 02 19:19:24 2005 +0000
     5.2 +++ b/xen/arch/ia64/irq.c	Thu Jun 02 21:05:33 2005 +0000
     5.3 @@ -1364,7 +1364,7 @@ int pirq_guest_unmask(struct domain *d)
     5.4      return 0;
     5.5  }
     5.6  
     5.7 -int pirq_guest_bind(struct exec_domain *d, int irq, int will_share)
     5.8 +int pirq_guest_bind(struct vcpu *d, int irq, int will_share)
     5.9  {
    5.10      irq_desc_t         *desc = &irq_desc[irq];
    5.11      irq_guest_action_t *action;
     6.1 --- a/xen/arch/ia64/mmio.c	Thu Jun 02 19:19:24 2005 +0000
     6.2 +++ b/xen/arch/ia64/mmio.c	Thu Jun 02 21:05:33 2005 +0000
     6.3 @@ -211,7 +211,7 @@ static inline VCPU *lid_2_vcpu (struct d
     6.4  	LID	  lid;
     6.5  	
     6.6  	for (i=0; i<MAX_VIRT_CPUS; i++) {
     6.7 -		vcpu = d->exec_domain[i];
     6.8 +		vcpu = d->vcpu[i];
     6.9  		lid.val = VPD_CR(vcpu, lid);
    6.10  		if ( lid.id == id && lid.eid == eid ) {
    6.11  		    return vcpu;
     7.1 --- a/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c	Thu Jun 02 19:19:24 2005 +0000
     7.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c	Thu Jun 02 21:05:33 2005 +0000
     7.3 @@ -82,7 +82,7 @@
     7.4  +
     7.5  +		if (vector != IA64_TIMER_VECTOR) {
     7.6  +			/* FIXME: Leave IRQ re-route later */
     7.7 -+			vmx_vcpu_pend_interrupt(dom0->exec_domain[0],vector);
     7.8 ++			vmx_vcpu_pend_interrupt(dom0->vcpu[0],vector);
     7.9  +			wake_dom0 = 1;
    7.10  +		}
    7.11  +		else {	// FIXME: Handle Timer only now
    7.12 @@ -108,7 +108,7 @@
    7.13  +	 */
    7.14  +	irq_exit();
    7.15  +	if ( wake_dom0 && current != dom0 ) 
    7.16 -+		domain_wake(dom0->exec_domain[0]);
    7.17 ++		domain_wake(dom0->vcpu[0]);
    7.18  +}
    7.19  +#endif
    7.20  +
     8.1 --- a/xen/arch/ia64/patch/linux-2.6.7/current.h	Thu Jun 02 19:19:24 2005 +0000
     8.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/current.h	Thu Jun 02 21:05:33 2005 +0000
     8.3 @@ -6,7 +6,7 @@
     8.4    */
     8.5  +#ifdef XEN
     8.6  +struct domain;
     8.7 -+#define get_current()	((struct exec_domain *) ia64_getreg(_IA64_REG_TP))
     8.8 ++#define get_current()	((struct vcpu *) ia64_getreg(_IA64_REG_TP))
     8.9  +#define current get_current()
    8.10  +//#define set_current(d)	ia64_setreg(_IA64_REG_TP,(void *)d);
    8.11  +#define set_current(d)		(ia64_r13 = (void *)d)
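This current.h hunk is the flip side of the stack co-location noted after the domain.c section: on ia64 the thread-pointer register (r13) holds the vcpu pointer directly, so current is just that register reinterpreted as struct vcpu *, with no per-cpu lookup table. A hypothetical caller, for illustration:

    struct vcpu   *v = current;      /* (struct vcpu *)ia64_r13 */
    struct domain *d = v->domain;    /* two register-relative loads */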
     9.1 --- a/xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c	Thu Jun 02 19:19:24 2005 +0000
     9.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c	Thu Jun 02 21:05:33 2005 +0000
     9.3 @@ -72,8 +72,8 @@
     9.4  +		}
     9.5  +#endif
     9.6  +		//FIXME: TEMPORARY HACK!!!!
     9.7 -+		vcpu_pend_interrupt(dom0->exec_domain[0],vector);
     9.8 -+		domain_wake(dom0->exec_domain[0]);
     9.9 ++		vcpu_pend_interrupt(dom0->vcpu[0],vector);
    9.10 ++		domain_wake(dom0->vcpu[0]);
    9.11  +	}
    9.12  +	else
    9.13  +#endif
    10.1 --- a/xen/arch/ia64/patch/linux-2.6.7/time.c	Thu Jun 02 19:19:24 2005 +0000
    10.2 +++ b/xen/arch/ia64/patch/linux-2.6.7/time.c	Thu Jun 02 21:05:33 2005 +0000
    10.3 @@ -70,7 +70,7 @@
    10.4  +    return now; 
    10.5  +}
    10.6  +
    10.7 -+void update_dom_time(struct exec_domain *ed)
    10.8 ++void update_dom_time(struct vcpu *v)
    10.9  +{
   10.10  +// FIXME: implement this?
   10.11  +//	printf("update_dom_time: called, not implemented, skipping\n");
   10.12 @@ -206,10 +206,10 @@
   10.13  +		// call vcpu_timer_expired on it
   10.14  +		//domain0_ready = 1; // moved to xensetup.c
   10.15  +	}
   10.16 -+	if (domain0_ready && vcpu_timer_expired(dom0->exec_domain[0])) {
   10.17 -+		vcpu_pend_timer(dom0->exec_domain[0]);
   10.18 -+		//vcpu_set_next_timer(dom0->exec_domain[0]);
   10.19 -+		domain_wake(dom0->exec_domain[0]);
   10.20 ++	if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) {
   10.21 ++		vcpu_pend_timer(dom0->vcpu[0]);
   10.22 ++		//vcpu_set_next_timer(dom0->vcpu[0]);
   10.23 ++		domain_wake(dom0->vcpu[0]);
   10.24  +	}
   10.25  +	if (!is_idle_task(current->domain) && current->domain != dom0) {
   10.26  +		if (vcpu_timer_expired(current)) {
    11.1 --- a/xen/arch/ia64/privop.c	Thu Jun 02 19:19:24 2005 +0000
    11.2 +++ b/xen/arch/ia64/privop.c	Thu Jun 02 21:05:33 2005 +0000
    11.3 @@ -761,7 +761,7 @@ unsigned long hyperpriv_cnt[HYPERPRIVOP_
    11.4  int
    11.5  ia64_hyperprivop(unsigned long iim, REGS *regs)
    11.6  {
    11.7 -	struct exec_domain *ed = (struct domain *) current;
    11.8 +	struct vcpu *v = (struct domain *) current;
    11.9  	INST64 inst;
   11.10  	UINT64 val;
   11.11  
   11.12 @@ -774,24 +774,24 @@ ia64_hyperprivop(unsigned long iim, REGS
   11.13  	hyperpriv_cnt[iim]++;
   11.14  	switch(iim) {
   11.15  	    case HYPERPRIVOP_RFI:
   11.16 -		(void)vcpu_rfi(ed);
   11.17 +		(void)vcpu_rfi(v);
   11.18  		return 0;	// don't update iip
   11.19  	    case HYPERPRIVOP_RSM_DT:
   11.20 -		(void)vcpu_reset_psr_dt(ed);
   11.21 +		(void)vcpu_reset_psr_dt(v);
   11.22  		return 1;
   11.23  	    case HYPERPRIVOP_SSM_DT:
   11.24 -		(void)vcpu_set_psr_dt(ed);
   11.25 +		(void)vcpu_set_psr_dt(v);
   11.26  		return 1;
   11.27  	    case HYPERPRIVOP_COVER:
   11.28 -		(void)vcpu_cover(ed);
   11.29 +		(void)vcpu_cover(v);
   11.30  		return 1;
   11.31  	    case HYPERPRIVOP_ITC_D:
   11.32  		inst.inst = 0;
   11.33 -		(void)priv_itc_d(ed,inst);
   11.34 +		(void)priv_itc_d(v,inst);
   11.35  		return 1;
   11.36  	    case HYPERPRIVOP_ITC_I:
   11.37  		inst.inst = 0;
   11.38 -		(void)priv_itc_i(ed,inst);
   11.39 +		(void)priv_itc_i(v,inst);
   11.40  		return 1;
   11.41  	}
   11.42  	return 0;
    12.1 --- a/xen/arch/ia64/process.c	Thu Jun 02 19:19:24 2005 +0000
    12.2 +++ b/xen/arch/ia64/process.c	Thu Jun 02 21:05:33 2005 +0000
    12.3 @@ -31,7 +31,7 @@
    12.4  #include <asm/hpsim_ssc.h>
    12.5  #include <asm/dom_fw.h>
    12.6  
    12.7 -extern unsigned long vcpu_get_itir_on_fault(struct exec_domain *, UINT64);
    12.8 +extern unsigned long vcpu_get_itir_on_fault(struct vcpu *, UINT64);
    12.9  extern struct ia64_sal_retval pal_emulator_static(UINT64);
   12.10  extern struct ia64_sal_retval sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64);
   12.11  
   12.12 @@ -61,7 +61,7 @@ long do_iopl(domid_t domain, unsigned in
   12.13  	return 0;
   12.14  }
   12.15  
   12.16 -void schedule_tail(struct exec_domain *next)
   12.17 +void schedule_tail(struct vcpu *next)
   12.18  {
   12.19  	unsigned long rr7;
   12.20  	//printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
   12.21 @@ -76,7 +76,7 @@ void schedule_tail(struct exec_domain *n
   12.22  #endif // CONFIG_VTI
   12.23  }
   12.24  
   12.25 -extern TR_ENTRY *match_tr(struct exec_domain *ed, unsigned long ifa);
   12.26 +extern TR_ENTRY *match_tr(struct vcpu *v, unsigned long ifa);
   12.27  
   12.28  void tdpfoo(void) { }
   12.29  
   12.30 @@ -132,10 +132,10 @@ unsigned long translate_domain_mpaddr(un
   12.31  
   12.32  void reflect_interruption(unsigned long ifa, unsigned long isr, unsigned long itiriim, struct pt_regs *regs, unsigned long vector)
   12.33  {
   12.34 -	unsigned long vcpu_get_ipsr_int_state(struct exec_domain *,unsigned long);
   12.35 -	unsigned long vcpu_get_rr_ve(struct exec_domain *,unsigned long);
   12.36 +	unsigned long vcpu_get_ipsr_int_state(struct vcpu *,unsigned long);
   12.37 +	unsigned long vcpu_get_rr_ve(struct vcpu *,unsigned long);
   12.38  	struct domain *d = current->domain;
   12.39 -	struct exec_domain *ed = current;
   12.40 +	struct vcpu *v = current;
   12.41  
   12.42  	if (vector == IA64_EXTINT_VECTOR) {
   12.43  		
   12.44 @@ -147,8 +147,8 @@ void reflect_interruption(unsigned long 
   12.45  			first_extint = 0;
   12.46  		}
   12.47  	}
   12.48 -	if (!PSCB(ed,interrupt_collection_enabled)) {
   12.49 -		if (!(PSCB(ed,ipsr) & IA64_PSR_DT)) {
   12.50 +	if (!PSCB(v,interrupt_collection_enabled)) {
   12.51 +		if (!(PSCB(v,ipsr) & IA64_PSR_DT)) {
   12.52  			panic_domain(regs,"psr.dt off, trying to deliver nested dtlb!\n");
   12.53  		}
   12.54  		vector &= ~0xf;
   12.55 @@ -156,45 +156,45 @@ void reflect_interruption(unsigned long 
   12.56  		    vector != IA64_ALT_DATA_TLB_VECTOR &&
   12.57  		    vector != IA64_VHPT_TRANS_VECTOR) {
   12.58  panic_domain(regs,"psr.ic off, delivering fault=%lx,iip=%p,ifa=%p,isr=%p,PSCB.iip=%p\n",
   12.59 -	vector,regs->cr_iip,ifa,isr,PSCB(ed,iip));
   12.60 +	vector,regs->cr_iip,ifa,isr,PSCB(v,iip));
   12.61  			
   12.62  		}
   12.63  //printf("Delivering NESTED DATA TLB fault\n");
   12.64  		vector = IA64_DATA_NESTED_TLB_VECTOR;
   12.65 -		regs->cr_iip = ((unsigned long) PSCBX(ed,iva) + vector) & ~0xffUL;
   12.66 +		regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
   12.67  		regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
   12.68  // NOTE: nested trap must NOT pass PSCB address
   12.69 -		//regs->r31 = (unsigned long) &PSCB(ed);
   12.70 +		//regs->r31 = (unsigned long) &PSCB(v);
   12.71  		return;
   12.72  
   12.73  	}
   12.74  	if ((vector & 0xf) == IA64_FORCED_IFA)
   12.75 -		ifa = PSCB(ed,tmp[0]);
   12.76 +		ifa = PSCB(v,tmp[0]);
   12.77  	vector &= ~0xf;
   12.78 -	PSCB(ed,ifa) = ifa;
   12.79 +	PSCB(v,ifa) = ifa;
   12.80  	if (vector < IA64_DATA_NESTED_TLB_VECTOR) /* VHPT miss, TLB miss, Alt TLB miss */
   12.81 -		vcpu_thash(ed,ifa,&PSCB(current,iha));
   12.82 -	PSCB(ed,unat) = regs->ar_unat;  // not sure if this is really needed?
   12.83 -	PSCB(ed,precover_ifs) = regs->cr_ifs;
   12.84 -	vcpu_bsw0(ed);
   12.85 -	PSCB(ed,ipsr) = vcpu_get_ipsr_int_state(ed,regs->cr_ipsr);
   12.86 +		vcpu_thash(v,ifa,&PSCB(current,iha));
   12.87 +	PSCB(v,unat) = regs->ar_unat;  // not sure if this is really needed?
   12.88 +	PSCB(v,precover_ifs) = regs->cr_ifs;
   12.89 +	vcpu_bsw0(v);
   12.90 +	PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
   12.91  	if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
   12.92 -		PSCB(ed,iim) = itiriim;
   12.93 -	else PSCB(ed,itir) = vcpu_get_itir_on_fault(ed,ifa);
   12.94 -	PSCB(ed,isr) = isr; // this is unnecessary except for interrupts!
   12.95 -	PSCB(ed,iip) = regs->cr_iip;
   12.96 -	PSCB(ed,ifs) = 0;
   12.97 -	PSCB(ed,incomplete_regframe) = 0;
   12.98 +		PSCB(v,iim) = itiriim;
   12.99 +	else PSCB(v,itir) = vcpu_get_itir_on_fault(v,ifa);
  12.100 +	PSCB(v,isr) = isr; // this is unnecessary except for interrupts!
  12.101 +	PSCB(v,iip) = regs->cr_iip;
  12.102 +	PSCB(v,ifs) = 0;
  12.103 +	PSCB(v,incomplete_regframe) = 0;
  12.104  
  12.105 -	regs->cr_iip = ((unsigned long) PSCBX(ed,iva) + vector) & ~0xffUL;
  12.106 +	regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
  12.107  	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
  12.108  #ifdef CONFIG_SMP
  12.109  #error "sharedinfo doesn't handle smp yet"
  12.110  #endif
  12.111  	regs->r31 = &((shared_info_t *)SHAREDINFO_ADDR)->vcpu_data[0].arch;
  12.112  
  12.113 -	PSCB(ed,interrupt_delivery_enabled) = 0;
  12.114 -	PSCB(ed,interrupt_collection_enabled) = 0;
  12.115 +	PSCB(v,interrupt_delivery_enabled) = 0;
  12.116 +	PSCB(v,interrupt_collection_enabled) = 0;
  12.117  }
  12.118  
  12.119  void foodpi(void) {}
  12.120 @@ -205,26 +205,26 @@ void foodpi(void) {}
  12.121  void deliver_pending_interrupt(struct pt_regs *regs)
  12.122  {
  12.123  	struct domain *d = current->domain;
  12.124 -	struct exec_domain *ed = current;
  12.125 +	struct vcpu *v = current;
  12.126  	// FIXME: Will this work properly if doing an RFI???
  12.127  	if (!is_idle_task(d) && user_mode(regs)) {
  12.128 -		//vcpu_poke_timer(ed);
  12.129 -		if (vcpu_deliverable_interrupts(ed)) {
  12.130 +		//vcpu_poke_timer(v);
  12.131 +		if (vcpu_deliverable_interrupts(v)) {
  12.132  			unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
  12.133 -			if (vcpu_timer_pending_early(ed))
  12.134 -printf("*#*#*#* about to deliver early timer to domain %d!!!\n",ed->domain->domain_id);
  12.135 +			if (vcpu_timer_pending_early(v))
  12.136 +printf("*#*#*#* about to deliver early timer to domain %d!!!\n",v->domain->domain_id);
  12.137  			reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
  12.138  		}
  12.139  	}
  12.140  }
  12.141  
  12.142 -int handle_lazy_cover(struct exec_domain *ed, unsigned long isr, struct pt_regs *regs)
  12.143 +int handle_lazy_cover(struct vcpu *v, unsigned long isr, struct pt_regs *regs)
  12.144  {
  12.145 -	if (!PSCB(ed,interrupt_collection_enabled)) {
  12.146 +	if (!PSCB(v,interrupt_collection_enabled)) {
  12.147  		if (isr & IA64_ISR_IR) {
  12.148  //			printf("Handling lazy cover\n");
  12.149 -			PSCB(ed,ifs) = regs->cr_ifs;
  12.150 -			PSCB(ed,incomplete_regframe) = 1;
  12.151 +			PSCB(v,ifs) = regs->cr_ifs;
  12.152 +			PSCB(v,incomplete_regframe) = 1;
  12.153  			regs->cr_ifs = 0;
  12.154  			return(1); // retry same instruction with cr.ifs off
  12.155  		}
  12.156 @@ -237,14 +237,14 @@ int handle_lazy_cover(struct exec_domain
  12.157  void xen_handle_domain_access(unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
  12.158  {
  12.159  	struct domain *d = (struct domain *) current->domain;
  12.160 -	struct domain *ed = (struct exec_domain *) current;
  12.161 +	struct domain *ed = (struct vcpu *) current;
  12.162  	TR_ENTRY *trp;
  12.163  	unsigned long psr = regs->cr_ipsr, mask, flags;
  12.164  	unsigned long iip = regs->cr_iip;
  12.165  	// FIXME should validate address here
  12.166  	unsigned long pteval, mpaddr, ps;
  12.167  	unsigned long lookup_domain_mpa(struct domain *,unsigned long);
  12.168 -	unsigned long match_dtlb(struct exec_domain *,unsigned long, unsigned long *, unsigned long *);
  12.169 +	unsigned long match_dtlb(struct vcpu *,unsigned long, unsigned long *, unsigned long *);
  12.170  	IA64FAULT fault;
  12.171  
  12.172  // NEED TO HANDLE THREE CASES:
  12.173 @@ -736,7 +736,7 @@ ia64_handle_break (unsigned long ifa, st
  12.174  {
  12.175  	static int first_time = 1;
  12.176  	struct domain *d = (struct domain *) current->domain;
  12.177 -	struct exec_domain *ed = (struct domain *) current;
  12.178 +	struct vcpu *v = (struct domain *) current;
  12.179  	extern unsigned long running_on_sim;
  12.180  
  12.181  	if (first_time) {
  12.182 @@ -752,7 +752,7 @@ ia64_handle_break (unsigned long ifa, st
  12.183  		if (ia64_hypercall(regs))
  12.184  			vcpu_increment_iip(current);
  12.185  	}
  12.186 -	else if (!PSCB(ed,interrupt_collection_enabled)) {
  12.187 +	else if (!PSCB(v,interrupt_collection_enabled)) {
  12.188  		if (ia64_hyperprivop(iim,regs))
  12.189  			vcpu_increment_iip(current);
  12.190  	}
  12.191 @@ -764,11 +764,11 @@ ia64_handle_privop (unsigned long ifa, s
  12.192  {
  12.193  	IA64FAULT vector;
  12.194  	struct domain *d = current->domain;
  12.195 -	struct exec_domain *ed = current;
  12.196 +	struct vcpu *v = current;
  12.197  	// FIXME: no need to pass itir in to this routine as we need to
  12.198  	// compute the virtual itir anyway (based on domain's RR.ps)
  12.199  	// AND ACTUALLY reflect_interruption doesn't use it anyway!
  12.200 -	itir = vcpu_get_itir_on_fault(ed,ifa);
  12.201 +	itir = vcpu_get_itir_on_fault(v,ifa);
  12.202  	vector = priv_emulate(current,regs,isr);
  12.203  	if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
  12.204  		reflect_interruption(ifa,isr,itir,regs,vector);
  12.205 @@ -782,10 +782,10 @@ void
  12.206  ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
  12.207  {
  12.208  	struct domain *d = (struct domain *) current->domain;
  12.209 -	struct exec_domain *ed = (struct domain *) current;
  12.210 +	struct vcpu *v = (struct domain *) current;
  12.211  	unsigned long check_lazy_cover = 0;
  12.212  	unsigned long psr = regs->cr_ipsr;
  12.213 -	unsigned long itir = vcpu_get_itir_on_fault(ed,ifa);
  12.214 +	unsigned long itir = vcpu_get_itir_on_fault(v,ifa);
  12.215  
  12.216  	if (!(psr & IA64_PSR_CPL)) {
  12.217  		printk("ia64_handle_reflection: reflecting with priv=0!!\n");
  12.218 @@ -793,7 +793,7 @@ ia64_handle_reflection (unsigned long if
  12.219  	// FIXME: no need to pass itir in to this routine as we need to
  12.220  	// compute the virtual itir anyway (based on domain's RR.ps)
  12.221  	// AND ACTUALLY reflect_interruption doesn't use it anyway!
  12.222 -	itir = vcpu_get_itir_on_fault(ed,ifa);
  12.223 +	itir = vcpu_get_itir_on_fault(v,ifa);
  12.224  	switch(vector) {
  12.225  	    case 8:
  12.226  		vector = IA64_DIRTY_BIT_VECTOR; break;
  12.227 @@ -814,7 +814,7 @@ ia64_handle_reflection (unsigned long if
  12.228  		vector = IA64_DISABLED_FPREG_VECTOR; break;
  12.229  	    case 26:
  12.230  printf("*** NaT fault... attempting to handle as privop\n");
  12.231 -		vector = priv_emulate(ed,regs,isr);
  12.232 +		vector = priv_emulate(v,regs,isr);
  12.233  		if (vector == IA64_NO_FAULT) {
  12.234  printf("*** Handled privop masquerading as NaT fault\n");
  12.235  			return;
  12.236 @@ -832,6 +832,6 @@ printf("*** Handled privop masquerading 
  12.237  		while(vector);
  12.238  		return;
  12.239  	}
  12.240 -	if (check_lazy_cover && handle_lazy_cover(ed, isr, regs)) return;
  12.241 +	if (check_lazy_cover && handle_lazy_cover(v, isr, regs)) return;
  12.242  	reflect_interruption(ifa,isr,itir,regs,vector);
  12.243  }
    13.1 --- a/xen/arch/ia64/regionreg.c	Thu Jun 02 19:19:24 2005 +0000
    13.2 +++ b/xen/arch/ia64/regionreg.c	Thu Jun 02 21:05:33 2005 +0000
    13.3 @@ -251,7 +251,7 @@ static inline int validate_page_size(uns
    13.4  // NOTE: DOES NOT SET VCPU's rrs[x] value!!
    13.5  int set_one_rr(unsigned long rr, unsigned long val)
    13.6  {
    13.7 -	struct exec_domain *ed = current;
    13.8 +	struct vcpu *v = current;
    13.9  	unsigned long rreg = REGION_NUMBER(rr);
   13.10  	ia64_rr rrv, newrrv, memrrv;
   13.11  	unsigned long newrid;
   13.12 @@ -260,12 +260,12 @@ int set_one_rr(unsigned long rr, unsigne
   13.13  
   13.14  	rrv.rrval = val;
   13.15  	newrrv.rrval = 0;
   13.16 -	newrid = ed->domain->starting_rid + rrv.rid;
   13.17 +	newrid = v->domain->starting_rid + rrv.rid;
   13.18  
   13.19 -	if (newrid > ed->domain->ending_rid) {
   13.20 +	if (newrid > v->domain->ending_rid) {
   13.21  		printk("can't set rr%d to %lx, starting_rid=%lx,"
   13.22  			"ending_rid=%lx, val=%lx\n", rreg, newrid,
   13.23 -			ed->domain->starting_rid,ed->domain->ending_rid,val);
   13.24 +			v->domain->starting_rid,v->domain->ending_rid,val);
   13.25  		return 0;
   13.26  	}
   13.27  
   13.28 @@ -274,7 +274,7 @@ int set_one_rr(unsigned long rr, unsigne
   13.29  		newrrv.rid = newrid;
   13.30  		newrrv.ve = VHPT_ENABLED_REGION_7;
   13.31  		newrrv.ps = IA64_GRANULE_SHIFT;
   13.32 -		ia64_new_rr7(vmMangleRID(newrrv.rrval),ed->vcpu_info);
   13.33 +		ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
   13.34  	}
   13.35  	else {
   13.36  		newrrv.rid = newrid;
   13.37 @@ -317,45 +317,45 @@ int set_all_rr( u64 rr0, u64 rr1, u64 rr
   13.38  	return 1;
   13.39  }
   13.40  
   13.41 -void init_all_rr(struct exec_domain *ed)
   13.42 +void init_all_rr(struct vcpu *v)
   13.43  {
   13.44  	ia64_rr rrv;
   13.45  
   13.46  	rrv.rrval = 0;
   13.47 -	rrv.rid = ed->domain->metaphysical_rid;
   13.48 +	rrv.rid = v->domain->metaphysical_rid;
   13.49  	rrv.ps = PAGE_SHIFT;
   13.50  	rrv.ve = 1;
   13.51 -if (!ed->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
   13.52 -	ed->vcpu_info->arch.rrs[0] = -1;
   13.53 -	ed->vcpu_info->arch.rrs[1] = rrv.rrval;
   13.54 -	ed->vcpu_info->arch.rrs[2] = rrv.rrval;
   13.55 -	ed->vcpu_info->arch.rrs[3] = rrv.rrval;
   13.56 -	ed->vcpu_info->arch.rrs[4] = rrv.rrval;
   13.57 -	ed->vcpu_info->arch.rrs[5] = rrv.rrval;
   13.58 +if (!v->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
   13.59 +	v->vcpu_info->arch.rrs[0] = -1;
   13.60 +	v->vcpu_info->arch.rrs[1] = rrv.rrval;
   13.61 +	v->vcpu_info->arch.rrs[2] = rrv.rrval;
   13.62 +	v->vcpu_info->arch.rrs[3] = rrv.rrval;
   13.63 +	v->vcpu_info->arch.rrs[4] = rrv.rrval;
   13.64 +	v->vcpu_info->arch.rrs[5] = rrv.rrval;
   13.65  	rrv.ve = 0; 
   13.66 -	ed->vcpu_info->arch.rrs[6] = rrv.rrval;
   13.67 -//	ed->shared_info->arch.rrs[7] = rrv.rrval;
   13.68 +	v->vcpu_info->arch.rrs[6] = rrv.rrval;
   13.69 +//	v->shared_info->arch.rrs[7] = rrv.rrval;
   13.70  }
   13.71  
   13.72  
   13.73  /* XEN/ia64 INTERNAL ROUTINES */
   13.74  
   13.75 -unsigned long physicalize_rid(struct exec_domain *ed, unsigned long rrval)
   13.76 +unsigned long physicalize_rid(struct vcpu *v, unsigned long rrval)
   13.77  {
   13.78  	ia64_rr rrv;
   13.79  	    
   13.80  	rrv.rrval = rrval;
   13.81 -	rrv.rid += ed->domain->starting_rid;
   13.82 +	rrv.rid += v->domain->starting_rid;
   13.83  	return rrv.rrval;
   13.84  }
   13.85  
   13.86  unsigned long
   13.87 -virtualize_rid(struct exec_domain *ed, unsigned long rrval)
   13.88 +virtualize_rid(struct vcpu *v, unsigned long rrval)
   13.89  {
   13.90  	ia64_rr rrv;
   13.91  	    
   13.92  	rrv.rrval = rrval;
   13.93 -	rrv.rid -= ed->domain->starting_rid;
   13.94 +	rrv.rid -= v->domain->starting_rid;
   13.95  	return rrv.rrval;
   13.96  }
   13.97  
   13.98 @@ -366,17 +366,17 @@ virtualize_rid(struct exec_domain *ed, u
   13.99  // rr7 (because we have to to assembly and physical mode
  13.100  // to change rr7).  If no change to rr7 is required, returns 0.
  13.101  //
  13.102 -unsigned long load_region_regs(struct exec_domain *ed)
  13.103 +unsigned long load_region_regs(struct vcpu *v)
  13.104  {
  13.105  	unsigned long rr0, rr1,rr2, rr3, rr4, rr5, rr6, rr7;
  13.106  	// TODO: These probably should be validated
  13.107  	unsigned long bad = 0;
  13.108  
  13.109 -	if (ed->vcpu_info->arch.metaphysical_mode) {
  13.110 +	if (v->vcpu_info->arch.metaphysical_mode) {
  13.111  		ia64_rr rrv;
  13.112  
  13.113  		rrv.rrval = 0;
  13.114 -		rrv.rid = ed->domain->metaphysical_rid;
  13.115 +		rrv.rid = v->domain->metaphysical_rid;
  13.116  		rrv.ps = PAGE_SHIFT;
  13.117  		rrv.ve = 1;
  13.118  		rr0 = rrv.rrval;
  13.119 @@ -384,16 +384,16 @@ unsigned long load_region_regs(struct ex
  13.120  		ia64_srlz_d();
  13.121  	}
  13.122  	else {
  13.123 -		rr0 =  ed->vcpu_info->arch.rrs[0];
  13.124 +		rr0 =  v->vcpu_info->arch.rrs[0];
  13.125  		if (!set_one_rr(0x0000000000000000L, rr0)) bad |= 1;
  13.126  	}
  13.127 -	rr1 =  ed->vcpu_info->arch.rrs[1];
  13.128 -	rr2 =  ed->vcpu_info->arch.rrs[2];
  13.129 -	rr3 =  ed->vcpu_info->arch.rrs[3];
  13.130 -	rr4 =  ed->vcpu_info->arch.rrs[4];
  13.131 -	rr5 =  ed->vcpu_info->arch.rrs[5];
  13.132 -	rr6 =  ed->vcpu_info->arch.rrs[6];
  13.133 -	rr7 =  ed->vcpu_info->arch.rrs[7];
  13.134 +	rr1 =  v->vcpu_info->arch.rrs[1];
  13.135 +	rr2 =  v->vcpu_info->arch.rrs[2];
  13.136 +	rr3 =  v->vcpu_info->arch.rrs[3];
  13.137 +	rr4 =  v->vcpu_info->arch.rrs[4];
  13.138 +	rr5 =  v->vcpu_info->arch.rrs[5];
  13.139 +	rr6 =  v->vcpu_info->arch.rrs[6];
  13.140 +	rr7 =  v->vcpu_info->arch.rrs[7];
  13.141  	if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
  13.142  	if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
  13.143  	if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
    14.1 --- a/xen/arch/ia64/vmmu.c	Thu Jun 02 19:19:24 2005 +0000
    14.2 +++ b/xen/arch/ia64/vmmu.c	Thu Jun 02 21:05:33 2005 +0000
    14.3 @@ -136,7 +136,7 @@ purge_machine_tc_by_domid(domid_t domid)
    14.4  #endif
    14.5  }
    14.6  
    14.7 -static thash_cb_t *init_domain_vhpt(struct exec_domain *d)
    14.8 +static thash_cb_t *init_domain_vhpt(struct vcpu *d)
    14.9  {
   14.10      struct pfn_info *page;
   14.11      void   *vbase,*vcur;
   14.12 @@ -179,7 +179,7 @@ static thash_cb_t *init_domain_vhpt(stru
   14.13  }
   14.14  
   14.15  
   14.16 -thash_cb_t *init_domain_tlb(struct exec_domain *d)
   14.17 +thash_cb_t *init_domain_tlb(struct vcpu *d)
   14.18  {
   14.19      struct pfn_info *page;
   14.20      void    *vbase,*vcur;
   14.21 @@ -234,7 +234,7 @@ alloc_pmt(struct domain *d)
   14.22   * Insert guest TLB to machine TLB.
   14.23   *  data:   In TLB format
   14.24   */
   14.25 -void machine_tlb_insert(struct exec_domain *d, thash_data_t *tlb)
   14.26 +void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb)
   14.27  {
   14.28      u64     saved_itir, saved_ifa, saved_rr;
   14.29      u64     pages;
   14.30 @@ -285,7 +285,7 @@ u64 machine_thash(PTA pta, u64 va, u64 r
   14.31      u64     saved_pta, saved_rr0;
   14.32      u64     hash_addr, tag;
   14.33      unsigned long psr;
   14.34 -    struct exec_domain *ed = current;
   14.35 +    struct vcpu *v = current;
   14.36      rr_t    vrr;
   14.37  
   14.38      
   14.39 @@ -299,7 +299,7 @@ u64 machine_thash(PTA pta, u64 va, u64 r
   14.40      // TODO: Set to enforce lazy mode
   14.41      local_irq_save(psr);
   14.42      ia64_setreg(_IA64_REG_CR_PTA, pta.val);
   14.43 -    ia64_set_rr(0, vmx_vrrtomrr(ed, vrr.value));
   14.44 +    ia64_set_rr(0, vmx_vrrtomrr(v, vrr.value));
   14.45      ia64_srlz_d();
   14.46  
   14.47      hash_addr = ia64_thash(va);
   14.48 @@ -316,7 +316,7 @@ u64 machine_ttag(PTA pta, u64 va, u64 ri
   14.49      u64     saved_pta, saved_rr0;
   14.50      u64     hash_addr, tag;
   14.51      u64     psr;
   14.52 -    struct exec_domain *ed = current;
   14.53 +    struct vcpu *v = current;
   14.54      rr_t    vrr;
   14.55  
   14.56      // TODO: Set to enforce lazy mode    
   14.57 @@ -329,7 +329,7 @@ u64 machine_ttag(PTA pta, u64 va, u64 ri
   14.58      va = (va << 3) >> 3;    // set VRN to 0.
   14.59      local_irq_save(psr);
   14.60      ia64_setreg(_IA64_REG_CR_PTA, pta.val);
   14.61 -    ia64_set_rr(0, vmx_vrrtomrr(ed, vrr.value));
   14.62 +    ia64_set_rr(0, vmx_vrrtomrr(v, vrr.value));
   14.63      ia64_srlz_d();
   14.64  
   14.65      tag = ia64_ttag(va);
    15.1 --- a/xen/arch/ia64/vmx_init.c	Thu Jun 02 19:19:24 2005 +0000
    15.2 +++ b/xen/arch/ia64/vmx_init.c	Thu Jun 02 21:05:33 2005 +0000
    15.3 @@ -174,10 +174,10 @@ static vpd_t *alloc_vpd(void)
    15.4   * Create a VP on intialized VMX environment.
    15.5   */
    15.6  static void
    15.7 -vmx_create_vp(struct exec_domain *ed)
    15.8 +vmx_create_vp(struct vcpu *v)
    15.9  {
   15.10  	u64 ret;
   15.11 -	vpd_t *vpd = ed->arch.arch_vmx.vpd;
   15.12 +	vpd_t *vpd = v->arch.arch_vmx.vpd;
   15.13  	u64 ivt_base;
   15.14      extern char vmx_ia64_ivt;
   15.15  	/* ia64_ivt is function pointer, so need this tranlation */
   15.16 @@ -207,55 +207,55 @@ void vmx_init_double_mapping_stub(void)
   15.17  
   15.18  /* Other non-context related tasks can be done in context switch */
   15.19  void
   15.20 -vmx_save_state(struct exec_domain *ed)
   15.21 +vmx_save_state(struct vcpu *v)
   15.22  {
   15.23  	u64 status, psr;
   15.24  	u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
   15.25  
   15.26  	/* FIXME: about setting of pal_proc_vector... time consuming */
   15.27 -	status = ia64_pal_vp_save(ed->arch.arch_vmx.vpd, 0);
   15.28 +	status = ia64_pal_vp_save(v->arch.arch_vmx.vpd, 0);
   15.29  	if (status != PAL_STATUS_SUCCESS)
   15.30  		panic("Save vp status failed\n");
   15.31  
   15.32 -	/* FIXME: Do we really need purge double mapping for old ed?
   15.33 +	/* FIXME: Do we really need purge double mapping for old vcpu?
   15.34  	 * Since rid is completely different between prev and next,
   15.35  	 * it's not overlap and thus no MCA possible... */
   15.36 -	dom_rr7 = vmx_vrrtomrr(ed, VMX(ed, vrr[7]));
   15.37 +	dom_rr7 = vmx_vrrtomrr(v, VMX(v, vrr[7]));
   15.38          vmx_purge_double_mapping(dom_rr7, KERNEL_START,
   15.39 -				 (u64)ed->arch.vtlb->ts->vhpt->hash);
   15.40 +				 (u64)v->arch.vtlb->ts->vhpt->hash);
   15.41  
   15.42  }
   15.43  
   15.44  /* Even guest is in physical mode, we still need such double mapping */
   15.45  void
   15.46 -vmx_load_state(struct exec_domain *ed)
   15.47 +vmx_load_state(struct vcpu *v)
   15.48  {
   15.49  	u64 status, psr;
   15.50  	u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
   15.51  	u64 pte_xen, pte_vhpt;
   15.52  
   15.53 -	status = ia64_pal_vp_restore(ed->arch.arch_vmx.vpd, 0);
   15.54 +	status = ia64_pal_vp_restore(v->arch.arch_vmx.vpd, 0);
   15.55  	if (status != PAL_STATUS_SUCCESS)
   15.56  		panic("Restore vp status failed\n");
   15.57  
   15.58 -	dom_rr7 = vmx_vrrtomrr(ed, VMX(ed, vrr[7]));
   15.59 +	dom_rr7 = vmx_vrrtomrr(v, VMX(v, vrr[7]));
   15.60  	pte_xen = pte_val(pfn_pte((xen_pstart >> PAGE_SHIFT), PAGE_KERNEL));
   15.61 -	pte_vhpt = pte_val(pfn_pte((__pa(ed->arch.vtlb->ts->vhpt->hash) >> PAGE_SHIFT), PAGE_KERNEL));
   15.62 +	pte_vhpt = pte_val(pfn_pte((__pa(v->arch.vtlb->ts->vhpt->hash) >> PAGE_SHIFT), PAGE_KERNEL));
   15.63  	vmx_insert_double_mapping(dom_rr7, KERNEL_START,
   15.64 -				  (u64)ed->arch.vtlb->ts->vhpt->hash,
   15.65 +				  (u64)v->arch.vtlb->ts->vhpt->hash,
   15.66  				  pte_xen, pte_vhpt);
   15.67  
   15.68  	/* Guest vTLB is not required to be switched explicitly, since
   15.69 -	 * anchored in exec_domain */
   15.70 +	 * it is anchored in the vcpu */
   15.71  }
   15.72  
   15.73  /* Purge old double mapping and insert new one, due to rr7 change */
   15.74  void
   15.75 -vmx_change_double_mapping(struct exec_domain *ed, u64 oldrr7, u64 newrr7)
   15.76 +vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7)
   15.77  {
   15.78  	u64 pte_xen, pte_vhpt, vhpt_base;
   15.79  
   15.80 -    vhpt_base = (u64)ed->arch.vtlb->ts->vhpt->hash;
   15.81 +    vhpt_base = (u64)v->arch.vtlb->ts->vhpt->hash;
   15.82      vmx_purge_double_mapping(oldrr7, KERNEL_START,
   15.83  				 vhpt_base);
   15.84  
   15.85 @@ -267,29 +267,29 @@ vmx_change_double_mapping(struct exec_do
   15.86  }
   15.87  
   15.88  /*
   15.89 - * Initialize VMX envirenment for guest. Only the 1st vp/exec_domain
   15.90 + * Initialize the VMX environment for the guest. Only the 1st vp/vcpu
   15.91   * is registered here.
   15.92   */
   15.93  void
   15.94  vmx_final_setup_domain(struct domain *d)
   15.95  {
   15.96 -	struct exec_domain *ed = d->exec_domain[0];
   15.97 +	struct vcpu *v = d->vcpu[0];
   15.98  	vpd_t *vpd;
   15.99  
  15.100 -	/* Allocate resources for exec_domain 0 */
  15.101 -	//memset(&ed->arch.arch_vmx, 0, sizeof(struct arch_vmx_struct));
  15.102 +	/* Allocate resources for vcpu 0 */
  15.103 +	//memset(&v->arch.arch_vmx, 0, sizeof(struct arch_vmx_struct));
  15.104  
  15.105  	vpd = alloc_vpd();
  15.106  	ASSERT(vpd);
  15.107  
  15.108 -	ed->arch.arch_vmx.vpd = vpd;
  15.109 +	v->arch.arch_vmx.vpd = vpd;
  15.110  	vpd->virt_env_vaddr = vm_buffer;
  15.111  
  15.112 -	/* ed->arch.schedule_tail = arch_vmx_do_launch; */
  15.113 -	vmx_create_vp(ed);
  15.114 +	/* v->arch.schedule_tail = arch_vmx_do_launch; */
  15.115 +	vmx_create_vp(v);
  15.116  
  15.117  	/* Set this vcpu to be vmx */
  15.118 -	ed->arch.arch_vmx.flags = 1;
  15.119 +	v->arch.arch_vmx.flags = 1;
  15.120  
  15.121  	/* Other vmx specific initialization work */
  15.122  }
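
vmx_final_setup_domain() shows the substitution at the heart of this changeset: per-vcpu state now hangs off d->vcpu[N] rather than d->exec_domain[N]. Minimal illustration (boot_vcpu is a hypothetical helper, not part of the tree):

    /* Hypothetical helper: reach a domain's boot vcpu after the rename. */
    static inline struct vcpu *boot_vcpu(struct domain *d)
    {
        return d->vcpu[0];    /* was d->exec_domain[0] */
    }
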
    16.1 --- a/xen/arch/ia64/vmx_process.c	Thu Jun 02 19:19:24 2005 +0000
    16.2 +++ b/xen/arch/ia64/vmx_process.c	Thu Jun 02 21:05:33 2005 +0000
    16.3 @@ -59,7 +59,7 @@ vmx_ia64_handle_break (unsigned long ifa
    16.4  {
    16.5  	static int first_time = 1;
    16.6  	struct domain *d = (struct domain *) current->domain;
    16.7 -	struct exec_domain *ed = (struct domain *) current;
    16.8 +	struct vcpu *v = (struct vcpu *) current;
    16.9  	extern unsigned long running_on_sim;
   16.10  	unsigned long i, sal_param[8];
   16.11  
   16.12 @@ -80,18 +80,18 @@ vmx_ia64_handle_break (unsigned long ifa
   16.13  		    case FW_HYPERCALL_PAL_CALL:
   16.14  			//printf("*** PAL hypercall: index=%d\n",regs->r28);
   16.15  			//FIXME: This should call a C routine
   16.16 -			x = pal_emulator_static(VMX_VPD(ed, vgr[12]));
   16.17 +			x = pal_emulator_static(VMX_VPD(v, vgr[12]));
   16.18  			regs->r8 = x.status; regs->r9 = x.v0;
   16.19  			regs->r10 = x.v1; regs->r11 = x.v2;
   16.20  #if 0
   16.21  			if (regs->r8)
   16.22  				printk("Failed vpal emulation, with index:0x%lx\n",
   16.23 -					VMX_VPD(ed, vgr[12]));
   16.24 +					VMX_VPD(v, vgr[12]));
   16.25  #endif
   16.26  			break;
   16.27  		    case FW_HYPERCALL_SAL_CALL:
   16.28  			for (i = 0; i < 8; i++)
   16.29 -				vmx_vcpu_get_gr(ed, 32+i, &sal_param[i]);
   16.30 +				vmx_vcpu_get_gr(v, 32+i, &sal_param[i]);
   16.31  			x = sal_emulator(sal_param[0], sal_param[1],
   16.32  					 sal_param[2], sal_param[3],
   16.33  					 sal_param[4], sal_param[5],
   16.34 @@ -117,8 +117,8 @@ vmx_ia64_handle_break (unsigned long ifa
   16.35  			{
   16.36  			unsigned long *tv, *tc;
   16.37  			fooefi();
   16.38 -			vmx_vcpu_get_gr(ed, 32, &tv);
   16.39 -			vmx_vcpu_get_gr(ed, 33, &tc);
   16.40 +			vmx_vcpu_get_gr(v, 32, &tv);
   16.41 +			vmx_vcpu_get_gr(v, 33, &tc);
   16.42  			printf("efi_get_time(%p,%p) called...",tv,tc);
   16.43  			tv = __va(translate_domain_mpaddr(tv));
   16.44  			if (tc) tc = __va(translate_domain_mpaddr(tc));
   16.45 @@ -191,11 +191,11 @@ void vmx_reflect_interruption(UINT64 ifa
   16.46  void vmx_deliver_pending_interrupt(struct pt_regs *regs)
   16.47  {
   16.48  	struct domain *d = current->domain;
   16.49 -	struct exec_domain *ed = current;
   16.50 +	struct vcpu *v = current;
   16.51  	// FIXME: Will this work properly if doing an RFI???
   16.52  	if (!is_idle_task(d) ) {	// always comes from guest
   16.53 -		//vcpu_poke_timer(ed);
   16.54 -		//if (vcpu_deliverable_interrupts(ed)) {
   16.55 +		//vcpu_poke_timer(v);
   16.56 +		//if (vcpu_deliverable_interrupts(v)) {
   16.57  		//	unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
   16.58  		//	foodpi();
   16.59  		//	reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
   16.60 @@ -207,7 +207,7 @@ void vmx_deliver_pending_interrupt(struc
   16.61  			printk("WARNING: checking pending interrupt in nested interrupt!!!\n");
   16.62  		if (regs->cr_iip == *(unsigned long *)vmx_dorfirfi)
   16.63  			return;
   16.64 -		vmx_check_pending_irq(ed);
   16.65 +		vmx_check_pending_irq(v);
   16.66  	}
   16.67  }
   16.68  
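
vmx_ia64_handle_break() reads the PAL call index via VMX_VPD(v, vgr[12]). The real accessor lives in xen/include/asm-ia64/vmx_vpd.h (also touched by this changeset); a plausible shape, offered purely as an assumption, dereferences the vpd pointer that the vmx_init.c hunks attach to each vcpu:

    /* Assumed shape only -- see vmx_vpd.h for the real definition. */
    #define VMX_VPD(v, field)    ((v)->arch.arch_vmx.vpd->field)
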
    17.1 --- a/xen/arch/ia64/xenirq.c	Thu Jun 02 19:19:24 2005 +0000
    17.2 +++ b/xen/arch/ia64/xenirq.c	Thu Jun 02 21:05:33 2005 +0000
    17.3 @@ -49,8 +49,8 @@ xen_do_IRQ(ia64_vector vector)
    17.4  		}
    17.5  #endif
    17.6  		//FIXME: TEMPORARY HACK!!!!
    17.7 -		vcpu_pend_interrupt(dom0->exec_domain[0],vector);
    17.8 -		domain_wake(dom0->exec_domain[0]);
    17.9 +		vcpu_pend_interrupt(dom0->vcpu[0],vector);
   17.10 +		domain_wake(dom0->vcpu[0]);
   17.11  		return(1);
   17.12  	}
   17.13  	return(0);
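
The "TEMPORARY HACK" in xen_do_IRQ() routes every external vector to dom0's boot vcpu. Isolated into a hypothetical helper, the pattern is:

    /* Hypothetical helper: pend the vector to dom0's boot vcpu, then wake it
     * so the interrupt is noticed promptly. */
    static inline int forward_vector_to_dom0(ia64_vector vector)
    {
        vcpu_pend_interrupt(dom0->vcpu[0], vector);
        domain_wake(dom0->vcpu[0]);
        return 1;
    }
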
    18.1 --- a/xen/arch/ia64/xenmisc.c	Thu Jun 02 19:19:24 2005 +0000
    18.2 +++ b/xen/arch/ia64/xenmisc.c	Thu Jun 02 21:05:33 2005 +0000
    18.3 @@ -91,14 +91,14 @@ unsigned long __hypercall_create_continu
    18.4  
    18.5  int reprogram_ac_timer(s_time_t timeout)
    18.6  {
    18.7 -	struct exec_domain *ed = current;
    18.8 +	struct vcpu *v = current;
    18.9  
   18.10  #ifdef CONFIG_VTI
   18.11 -	if(VMX_DOMAIN(ed))
   18.12 +	if(VMX_DOMAIN(v))
   18.13  		return 1;
   18.14  #endif // CONFIG_VTI
   18.15  	local_cpu_data->itm_next = timeout;
   18.16 -	if (is_idle_task(ed->domain)) vcpu_safe_set_itm(timeout);
   18.17 +	if (is_idle_task(v->domain)) vcpu_safe_set_itm(timeout);
   18.18  	else vcpu_set_next_timer(current);
   18.19  	return 1;
   18.20  }
   18.21 @@ -232,7 +232,7 @@ void cs10foo(void) {}
   18.22  void cs01foo(void) {}
   18.23  
   18.24  // context_switch
   18.25 -void context_switch(struct exec_domain *prev, struct exec_domain *next)
   18.26 +void context_switch(struct vcpu *prev, struct vcpu *next)
   18.27  {
   18.28  //printk("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
   18.29  //printk("@@@@@@ context switch from domain %d (%x) to domain %d (%x)\n",
   18.30 @@ -261,7 +261,7 @@ void context_switch(struct exec_domain *
   18.31  {
   18.32  static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50};
   18.33  static int i = 100;
   18.34 -int id = ((struct exec_domain *)current)->domain->domain_id & 0xf;
   18.35 +int id = ((struct vcpu *)current)->domain->domain_id & 0xf;
   18.36  if (!cnt[id]--) { printk("%x",id); cnt[id] = 500; }
   18.37  if (!i--) { printk("+",id); cnt[id] = 1000; }
   18.38  }
   18.39 @@ -281,7 +281,7 @@ if (!i--) { printk("+",id); cnt[id] = 10
   18.40  #endif // CONFIG_VTI
   18.41  }
   18.42  
   18.43 -void continue_running(struct exec_domain *same)
   18.44 +void continue_running(struct vcpu *same)
   18.45  {
   18.46      /* nothing to do */
   18.47  }
   18.48 @@ -290,23 +290,23 @@ void panic_domain(struct pt_regs *regs, 
   18.49  {
   18.50  	va_list args;
   18.51  	char buf[128];
   18.52 -	struct exec_domain *ed = current;
   18.53 +	struct vcpu *v = current;
   18.54  	static volatile int test = 1;	// so can continue easily in debug
   18.55  	extern spinlock_t console_lock;
   18.56  	unsigned long flags;
   18.57      
   18.58  loop:
   18.59  	printf("$$$$$ PANIC in domain %d (k6=%p): ",
   18.60 -		ed->domain->domain_id, ia64_get_kr(IA64_KR_CURRENT));
   18.61 +		v->domain->domain_id, ia64_get_kr(IA64_KR_CURRENT));
   18.62  	va_start(args, fmt);
   18.63  	(void)vsnprintf(buf, sizeof(buf), fmt, args);
   18.64  	va_end(args);
   18.65  	printf(buf);
   18.66  	if (regs) show_registers(regs);
   18.67  	domain_pause_by_systemcontroller(current->domain);
   18.68 -	ed->domain->shutdown_code = SHUTDOWN_crash;
   18.69 -	set_bit(_DOMF_shutdown, ed->domain->domain_flags);
   18.70 -	if (ed->domain->domain_id == 0) {
   18.71 +	v->domain->shutdown_code = SHUTDOWN_crash;
   18.72 +	set_bit(_DOMF_shutdown, &v->domain->domain_flags);
   18.73 +	if (v->domain->domain_id == 0) {
   18.74  		int i = 1000000000L;
   18.75  		// if domain0 crashes, just periodically print out panic
   18.76  		// message to make post-mortem easier
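
panic_domain() pauses the offending domain and then marks it as crashed; note that set_bit() takes the address of the flags word (the missing & is corrected above). The marking step, condensed into a hypothetical helper:

    /* Hypothetical helper: record a crash so the tools can inspect it
     * post-mortem. */
    static inline void mark_domain_crashed(struct vcpu *v)
    {
        v->domain->shutdown_code = SHUTDOWN_crash;
        set_bit(_DOMF_shutdown, &v->domain->domain_flags);
    }
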
    19.1 --- a/xen/arch/ia64/xensetup.c	Thu Jun 02 19:19:24 2005 +0000
    19.2 +++ b/xen/arch/ia64/xensetup.c	Thu Jun 02 21:05:33 2005 +0000
    19.3 @@ -24,7 +24,7 @@ unsigned long xenheap_phys_end;
    19.4  
    19.5  char saved_command_line[COMMAND_LINE_SIZE];
    19.6  
    19.7 -struct exec_domain *idle_task[NR_CPUS] = { &idle0_exec_domain };
    19.8 +struct vcpu *idle_task[NR_CPUS] = { &idle0_vcpu };
    19.9  
   19.10  #ifdef CLONE_DOMAIN0
   19.11  struct domain *clones[CLONE_DOMAIN0];
   19.12 @@ -147,9 +147,9 @@ void start_kernel(void)
   19.13      xen_pstart = ia64_tpa(KERNEL_START);
   19.14  
   19.15      /* Must do this early -- e.g., spinlocks rely on get_current(). */
   19.16 -    //set_current(&idle0_exec_domain);
   19.17 -    ia64_r13 = (void *)&idle0_exec_domain;
   19.18 -    idle0_exec_domain.domain = &idle0_domain;
   19.19 +    //set_current(&idle0_vcpu);
   19.20 +    ia64_r13 = (void *)&idle0_vcpu;
   19.21 +    idle0_vcpu.domain = &idle0_domain;
   19.22  
   19.23      early_setup_arch(&cmdline);
   19.24  
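
start_kernel() seeds ia64_r13 with &idle0_vcpu because, as on Linux/ia64, the current pointer is kept in general register r13 (the thread pointer); the patched linux-2.6.7/current.h in this changeset's file list builds get_current() on top of it. The likely definition, stated as an assumption:

    /* Assumption -- the real macro is in the patched current.h: read the
     * thread-pointer register (r13) back as the current vcpu. */
    #define current    ((struct vcpu *) ia64_getreg(_IA64_REG_TP))
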
    20.1 --- a/xen/arch/ia64/xentime.c	Thu Jun 02 19:19:24 2005 +0000
    20.2 +++ b/xen/arch/ia64/xentime.c	Thu Jun 02 21:05:33 2005 +0000
    20.3 @@ -94,7 +94,7 @@ s_time_t get_s_time(void)
    20.4      return now; 
    20.5  }
    20.6  
    20.7 -void update_dom_time(struct exec_domain *ed)
    20.8 +void update_dom_time(struct vcpu *v)
    20.9  {
   20.10  // FIXME: implement this?
   20.11  //	printf("update_dom_time: called, not implemented, skipping\n");
   20.12 @@ -161,10 +161,10 @@ xen_timer_interrupt (int irq, void *dev_
   20.13  		//domain0_ready = 1; // moved to xensetup.c
   20.14  		current->vcpu_info->arch.pending_interruption = 1;
   20.15  	}
   20.16 -	if (domain0_ready && vcpu_timer_expired(dom0->exec_domain[0])) {
   20.17 -		vcpu_pend_timer(dom0->exec_domain[0]);
   20.18 -		//vcpu_set_next_timer(dom0->exec_domain[0]);
   20.19 -		domain_wake(dom0->exec_domain[0]);
   20.20 +	if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) {
   20.21 +		vcpu_pend_timer(dom0->vcpu[0]);
   20.22 +		//vcpu_set_next_timer(dom0->vcpu[0]);
   20.23 +		domain_wake(dom0->vcpu[0]);
   20.24  	}
   20.25  	if (!is_idle_task(current->domain) && current->domain != dom0) {
   20.26  		if (vcpu_timer_expired(current)) {
   20.27 @@ -304,7 +304,7 @@ static irqreturn_t
   20.28  vmx_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
   20.29  {
   20.30      unsigned long new_itm;
   20.31 -    struct exec_domain *ed = current;
   20.32 +    struct vcpu *v = current;
   20.33  
   20.34  
   20.35      new_itm = local_cpu_data->itm_next;
   20.36 @@ -319,7 +319,7 @@ vmx_timer_interrupt (int irq, void *dev_
   20.37           * fixing that would require updates to all
   20.38           * platforms.
   20.39           */
   20.40 -        update_process_times(user_mode(ed, regs));
   20.41 +        update_process_times(user_mode(v, regs));
   20.42  #endif
   20.43          new_itm += local_cpu_data->itm_delta;
   20.44  
    21.1 --- a/xen/arch/x86/audit.c	Thu Jun 02 19:19:24 2005 +0000
    21.2 +++ b/xen/arch/x86/audit.c	Thu Jun 02 21:05:33 2005 +0000
    21.3 @@ -404,16 +404,16 @@ int audit_adjust_pgtables(struct domain 
    21.4  
    21.5      void adjust_for_pgtbase()
    21.6      {
    21.7 -        struct exec_domain *ed;
    21.8 +        struct vcpu *v;
    21.9  
   21.10 -        for_each_exec_domain(d, ed)
   21.11 +        for_each_vcpu(d, v)
   21.12          {
   21.13 -            if ( pagetable_get_paddr(ed->arch.guest_table) )
   21.14 -                adjust(&frame_table[pagetable_get_pfn(ed->arch.guest_table)], 1);
   21.15 -            if ( pagetable_get_paddr(ed->arch.shadow_table) )
   21.16 -                adjust(&frame_table[pagetable_get_pfn(ed->arch.shadow_table)], 0);
   21.17 -            if ( ed->arch.monitor_shadow_ref )
   21.18 -                adjust(&frame_table[ed->arch.monitor_shadow_ref], 0);
   21.19 +            if ( pagetable_get_paddr(v->arch.guest_table) )
   21.20 +                adjust(&frame_table[pagetable_get_pfn(v->arch.guest_table)], 1);
   21.21 +            if ( pagetable_get_paddr(v->arch.shadow_table) )
   21.22 +                adjust(&frame_table[pagetable_get_pfn(v->arch.shadow_table)], 0);
   21.23 +            if ( v->arch.monitor_shadow_ref )
   21.24 +                adjust(&frame_table[v->arch.monitor_shadow_ref], 0);
   21.25          }
   21.26      }
   21.27  
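
audit_adjust_pgtables() now iterates with for_each_vcpu(d, v), the renamed for_each_exec_domain. The macro lives in xen/include/xen/sched.h (also in this changeset); a plausible shape, with the list-link field name being a guess:

    /* Assumed shape only -- see xen/include/xen/sched.h for the real macro. */
    #define for_each_vcpu(_d, _v) \
        for ( (_v) = (_d)->vcpu[0]; (_v) != NULL; (_v) = (_v)->next_in_list )
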
    22.1 --- a/xen/arch/x86/dom0_ops.c	Thu Jun 02 19:19:24 2005 +0000
    22.2 +++ b/xen/arch/x86/dom0_ops.c	Thu Jun 02 21:05:33 2005 +0000
    22.3 @@ -374,7 +374,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
    22.4  }
    22.5  
    22.6  void arch_getdomaininfo_ctxt(
    22.7 -    struct exec_domain *ed, struct vcpu_guest_context *c)
    22.8 +    struct vcpu *v, struct vcpu_guest_context *c)
    22.9  { 
   22.10  #ifdef __i386__  /* Remove when x86_64 VMX is implemented */
   22.11  #ifdef CONFIG_VMX
   22.12 @@ -382,30 +382,30 @@ void arch_getdomaininfo_ctxt(
   22.13  #endif
   22.14  #endif
   22.15  
   22.16 -    memcpy(c, &ed->arch.guest_context, sizeof(*c));
   22.17 +    memcpy(c, &v->arch.guest_context, sizeof(*c));
   22.18  
   22.19      /* IOPL privileges are virtualised -- merge back into returned eflags. */
   22.20      BUG_ON((c->user_regs.eflags & EF_IOPL) != 0);
   22.21 -    c->user_regs.eflags |= ed->arch.iopl << 12;
   22.22 +    c->user_regs.eflags |= v->arch.iopl << 12;
   22.23  
   22.24  #ifdef __i386__
   22.25  #ifdef CONFIG_VMX
   22.26 -    if ( VMX_DOMAIN(ed) )
   22.27 +    if ( VMX_DOMAIN(v) )
   22.28          save_vmx_cpu_user_regs(&c->user_regs);
   22.29  #endif
   22.30  #endif
   22.31  
   22.32      c->flags = 0;
   22.33 -    if ( test_bit(_VCPUF_fpu_initialised, &ed->vcpu_flags) )
   22.34 +    if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
   22.35          c->flags |= VGCF_I387_VALID;
   22.36 -    if ( KERNEL_MODE(ed, &ed->arch.guest_context.user_regs) )
   22.37 +    if ( KERNEL_MODE(v, &v->arch.guest_context.user_regs) )
   22.38          c->flags |= VGCF_IN_KERNEL;
   22.39  #ifdef CONFIG_VMX
   22.40 -    if (VMX_DOMAIN(ed))
   22.41 +    if (VMX_DOMAIN(v))
   22.42          c->flags |= VGCF_VMX_GUEST;
   22.43  #endif
   22.44  
   22.45 -    c->pt_base = pagetable_get_paddr(ed->arch.guest_table);
   22.46 +    c->pt_base = pagetable_get_paddr(v->arch.guest_table);
   22.47  
   22.48 -    c->vm_assist = ed->domain->vm_assist;
   22.49 +    c->vm_assist = v->domain->vm_assist;
   22.50  }
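
arch_getdomaininfo_ctxt() re-merges the virtualised IOPL into the eflags it returns; arch_set_info_guest() in the next file strips it again on the way in, so bits 12-13 of the real flags register never carry a guest IOPL. The round trip, condensed into hypothetical helpers:

    /* Inbound: capture the guest's requested IOPL, keep it out of real eflags. */
    static inline void iopl_strip(struct vcpu *v, struct vcpu_guest_context *c)
    {
        v->arch.iopl = (c->user_regs.eflags >> 12) & 3;
        c->user_regs.eflags &= ~EF_IOPL;
    }

    /* Outbound: reconstitute the virtualised IOPL for the control tools. */
    static inline void iopl_merge(struct vcpu *v, struct vcpu_guest_context *c)
    {
        c->user_regs.eflags |= v->arch.iopl << 12;
    }
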
    23.1 --- a/xen/arch/x86/domain.c	Thu Jun 02 19:19:24 2005 +0000
    23.2 +++ b/xen/arch/x86/domain.c	Thu Jun 02 21:05:33 2005 +0000
    23.3 @@ -47,16 +47,16 @@ static int opt_noreboot = 0;
    23.4  boolean_param("noreboot", opt_noreboot);
    23.5  
    23.6  struct percpu_ctxt {
    23.7 -    struct exec_domain *curr_ed;
    23.8 +    struct vcpu *curr_vcpu;
    23.9  } __cacheline_aligned;
   23.10  static struct percpu_ctxt percpu_ctxt[NR_CPUS];
   23.11  
   23.12 -static void continue_idle_task(struct exec_domain *ed)
   23.13 +static void continue_idle_task(struct vcpu *v)
   23.14  {
   23.15      reset_stack_and_jump(idle_loop);
   23.16  }
   23.17  
   23.18 -static void continue_nonidle_task(struct exec_domain *ed)
   23.19 +static void continue_nonidle_task(struct vcpu *v)
   23.20  {
   23.21      reset_stack_and_jump(ret_from_intr);
   23.22  }
   23.23 @@ -90,12 +90,12 @@ void idle_loop(void)
   23.24  
   23.25  void startup_cpu_idle_loop(void)
   23.26  {
   23.27 -    struct exec_domain *ed = current;
   23.28 +    struct vcpu *v = current;
   23.29  
   23.30 -    ASSERT(is_idle_task(ed->domain));
   23.31 -    percpu_ctxt[smp_processor_id()].curr_ed = ed;
   23.32 -    set_bit(smp_processor_id(), &ed->domain->cpuset);
   23.33 -    ed->arch.schedule_tail = continue_idle_task;
   23.34 +    ASSERT(is_idle_task(v->domain));
   23.35 +    percpu_ctxt[smp_processor_id()].curr_vcpu = v;
   23.36 +    set_bit(smp_processor_id(), &v->domain->cpuset);
   23.37 +    v->arch.schedule_tail = continue_idle_task;
   23.38  
   23.39      idle_loop();
   23.40  }
   23.41 @@ -206,14 +206,14 @@ void dump_pageframe_info(struct domain *
   23.42             page->u.inuse.type_info);
   23.43  }
   23.44  
   23.45 -struct exec_domain *arch_alloc_exec_domain_struct(void)
   23.46 +struct vcpu *arch_alloc_vcpu_struct(void)
   23.47  {
   23.48 -    return xmalloc(struct exec_domain);
   23.49 +    return xmalloc(struct vcpu);
   23.50  }
   23.51  
   23.52 -void arch_free_exec_domain_struct(struct exec_domain *ed)
   23.53 +void arch_free_vcpu_struct(struct vcpu *v)
   23.54  {
   23.55 -    xfree(ed);
   23.56 +    xfree(v);
   23.57  }
   23.58  
   23.59  void free_perdomain_pt(struct domain *d)
   23.60 @@ -225,21 +225,21 @@ void free_perdomain_pt(struct domain *d)
   23.61  #endif
   23.62  }
   23.63  
   23.64 -void arch_do_createdomain(struct exec_domain *ed)
   23.65 +void arch_do_createdomain(struct vcpu *v)
   23.66  {
   23.67 -    struct domain *d = ed->domain;
   23.68 +    struct domain *d = v->domain;
   23.69  
   23.70 -    ed->arch.flags = TF_kernel_mode;
   23.71 +    v->arch.flags = TF_kernel_mode;
   23.72  
   23.73      if ( is_idle_task(d) )
   23.74          return;
   23.75  
   23.76 -    ed->arch.schedule_tail = continue_nonidle_task;
   23.77 +    v->arch.schedule_tail = continue_nonidle_task;
   23.78      
   23.79      d->shared_info = (void *)alloc_xenheap_page();
   23.80      memset(d->shared_info, 0, PAGE_SIZE);
   23.81 -    ed->vcpu_info = &d->shared_info->vcpu_data[ed->vcpu_id];
   23.82 -    ed->cpumap = CPUMAP_RUNANYWHERE;
   23.83 +    v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
   23.84 +    v->cpumap = CPUMAP_RUNANYWHERE;
   23.85      SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
   23.86      machine_to_phys_mapping[virt_to_phys(d->shared_info) >> 
   23.87                             PAGE_SHIFT] = INVALID_M2P_ENTRY;
   23.88 @@ -248,16 +248,16 @@ void arch_do_createdomain(struct exec_do
   23.89      memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE);
   23.90      machine_to_phys_mapping[virt_to_phys(d->arch.mm_perdomain_pt) >> 
   23.91                             PAGE_SHIFT] = INVALID_M2P_ENTRY;
   23.92 -    ed->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
   23.93 -    ed->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
   23.94 +    v->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
   23.95 +    v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
   23.96          l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
   23.97  
   23.98 -    ed->arch.guest_vtable  = __linear_l2_table;
   23.99 -    ed->arch.shadow_vtable = __shadow_linear_l2_table;
  23.100 +    v->arch.guest_vtable  = __linear_l2_table;
  23.101 +    v->arch.shadow_vtable = __shadow_linear_l2_table;
  23.102  
  23.103  #ifdef __x86_64__
  23.104 -    ed->arch.guest_vl3table = __linear_l3_table;
  23.105 -    ed->arch.guest_vl4table = __linear_l4_table;
  23.106 +    v->arch.guest_vl3table = __linear_l3_table;
  23.107 +    v->arch.guest_vl4table = __linear_l4_table;
  23.108      
  23.109      d->arch.mm_perdomain_l2 = (l2_pgentry_t *)alloc_xenheap_page();
  23.110      memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
  23.111 @@ -277,41 +277,41 @@ void arch_do_createdomain(struct exec_do
  23.112      INIT_LIST_HEAD(&d->arch.free_shadow_frames);
  23.113  }
  23.114  
  23.115 -void arch_do_boot_vcpu(struct exec_domain *ed)
  23.116 +void arch_do_boot_vcpu(struct vcpu *v)
  23.117  {
  23.118 -    struct domain *d = ed->domain;
  23.119 +    struct domain *d = v->domain;
  23.120  
  23.121 -    ed->arch.flags = TF_kernel_mode;
  23.122 +    v->arch.flags = TF_kernel_mode;
  23.123  
  23.124 -    ed->arch.schedule_tail = d->exec_domain[0]->arch.schedule_tail;
  23.125 +    v->arch.schedule_tail = d->vcpu[0]->arch.schedule_tail;
  23.126  
  23.127 -    ed->arch.perdomain_ptes =
  23.128 -        d->arch.mm_perdomain_pt + (ed->vcpu_id << PDPT_VCPU_SHIFT);
  23.129 -    ed->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
  23.130 +    v->arch.perdomain_ptes =
  23.131 +        d->arch.mm_perdomain_pt + (v->vcpu_id << PDPT_VCPU_SHIFT);
  23.132 +    v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
  23.133          l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
  23.134  }
  23.135  
  23.136  #ifdef CONFIG_VMX
  23.137 -void arch_vmx_do_resume(struct exec_domain *ed) 
  23.138 +void arch_vmx_do_resume(struct vcpu *v) 
  23.139  {
  23.140 -    u64 vmcs_phys_ptr = (u64) virt_to_phys(ed->arch.arch_vmx.vmcs);
  23.141 +    u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
  23.142  
  23.143 -    load_vmcs(&ed->arch.arch_vmx, vmcs_phys_ptr);
  23.144 -    vmx_do_resume(ed);
  23.145 +    load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
  23.146 +    vmx_do_resume(v);
  23.147      reset_stack_and_jump(vmx_asm_do_resume);
  23.148  }
  23.149  
  23.150 -void arch_vmx_do_launch(struct exec_domain *ed) 
  23.151 +void arch_vmx_do_launch(struct vcpu *v) 
  23.152  {
  23.153 -    u64 vmcs_phys_ptr = (u64) virt_to_phys(ed->arch.arch_vmx.vmcs);
  23.154 +    u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
  23.155  
  23.156 -    load_vmcs(&ed->arch.arch_vmx, vmcs_phys_ptr);
  23.157 -    vmx_do_launch(ed);
  23.158 +    load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
  23.159 +    vmx_do_launch(v);
  23.160      reset_stack_and_jump(vmx_asm_do_launch);
  23.161  }
  23.162  
  23.163  static int vmx_final_setup_guest(
  23.164 -    struct exec_domain *ed, struct vcpu_guest_context *ctxt)
  23.165 +    struct vcpu *v, struct vcpu_guest_context *ctxt)
  23.166  {
  23.167      int error;
  23.168      struct cpu_user_regs *regs;
  23.169 @@ -327,36 +327,36 @@ static int vmx_final_setup_guest(
  23.170          return -ENOMEM;
  23.171      }
  23.172  
  23.173 -    memset(&ed->arch.arch_vmx, 0, sizeof (struct arch_vmx_struct));
  23.174 +    memset(&v->arch.arch_vmx, 0, sizeof (struct arch_vmx_struct));
  23.175  
  23.176 -    ed->arch.arch_vmx.vmcs = vmcs;
  23.177 +    v->arch.arch_vmx.vmcs = vmcs;
  23.178      error = construct_vmcs(
  23.179 -        &ed->arch.arch_vmx, regs, ctxt, VMCS_USE_HOST_ENV);
  23.180 +        &v->arch.arch_vmx, regs, ctxt, VMCS_USE_HOST_ENV);
  23.181      if ( error < 0 )
  23.182      {
  23.183          printk("Failed to construct a new VMCS\n");
  23.184          goto out;
  23.185      }
  23.186  
  23.187 -    ed->arch.schedule_tail = arch_vmx_do_launch;
  23.188 +    v->arch.schedule_tail = arch_vmx_do_launch;
  23.189  
  23.190  #if defined (__i386)
  23.191 -    ed->arch.arch_vmx.vmx_platform.real_mode_data = 
  23.192 +    v->arch.arch_vmx.vmx_platform.real_mode_data = 
  23.193          (unsigned long *) regs->esi;
  23.194  #endif
  23.195  
  23.196 -    if (ed == ed->domain->exec_domain[0]) {
  23.197 +    if (v == v->domain->vcpu[0]) {
  23.198          /* 
  23.199           * Required to do this once per domain
  23.200           * XXX todo: add a separate function to do these.
  23.201           */
  23.202 -        memset(&ed->domain->shared_info->evtchn_mask[0], 0xff, 
  23.203 -               sizeof(ed->domain->shared_info->evtchn_mask));
  23.204 -        clear_bit(IOPACKET_PORT, &ed->domain->shared_info->evtchn_mask[0]);
  23.205 +        memset(&v->domain->shared_info->evtchn_mask[0], 0xff, 
  23.206 +               sizeof(v->domain->shared_info->evtchn_mask));
  23.207 +        clear_bit(IOPACKET_PORT, &v->domain->shared_info->evtchn_mask[0]);
  23.208  
  23.209          /* Put the domain in shadow mode even though we're going to be using
  23.210           * the shared 1:1 page table initially. It shouldn't hurt */
  23.211 -        shadow_mode_enable(ed->domain,
  23.212 +        shadow_mode_enable(v->domain,
  23.213                             SHM_enable|SHM_refcounts|
  23.214                             SHM_translate|SHM_external);
  23.215      }
  23.216 @@ -365,7 +365,7 @@ static int vmx_final_setup_guest(
  23.217  
  23.218  out:
  23.219      free_vmcs(vmcs);
  23.220 -    ed->arch.arch_vmx.vmcs = 0;
  23.221 +    v->arch.arch_vmx.vmcs = 0;
  23.222      return error;
  23.223  }
  23.224  #endif
  23.225 @@ -373,9 +373,9 @@ out:
  23.226  
  23.227  /* This is called by arch_final_setup_guest and do_boot_vcpu */
  23.228  int arch_set_info_guest(
  23.229 -    struct exec_domain *ed, struct vcpu_guest_context *c)
  23.230 +    struct vcpu *v, struct vcpu_guest_context *c)
  23.231  {
  23.232 -    struct domain *d = ed->domain;
  23.233 +    struct domain *d = v->domain;
  23.234      unsigned long phys_basetab;
  23.235      int i, rc;
  23.236  
  23.237 @@ -391,45 +391,45 @@ int arch_set_info_guest(
  23.238                  return -EINVAL;
  23.239      }
  23.240  
  23.241 -    clear_bit(_VCPUF_fpu_initialised, &ed->vcpu_flags);
  23.242 +    clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
  23.243      if ( c->flags & VGCF_I387_VALID )
  23.244 -        set_bit(_VCPUF_fpu_initialised, &ed->vcpu_flags);
  23.245 +        set_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
  23.246  
  23.247 -    ed->arch.flags &= ~TF_kernel_mode;
  23.248 +    v->arch.flags &= ~TF_kernel_mode;
  23.249      if ( c->flags & VGCF_IN_KERNEL )
  23.250 -        ed->arch.flags |= TF_kernel_mode;
  23.251 +        v->arch.flags |= TF_kernel_mode;
  23.252  
  23.253 -    memcpy(&ed->arch.guest_context, c, sizeof(*c));
  23.254 +    memcpy(&v->arch.guest_context, c, sizeof(*c));
  23.255  
  23.256      if ( !(c->flags & VGCF_VMX_GUEST) )
  23.257      {
  23.258          /* IOPL privileges are virtualised. */
  23.259 -        ed->arch.iopl = (ed->arch.guest_context.user_regs.eflags >> 12) & 3;
  23.260 -        ed->arch.guest_context.user_regs.eflags &= ~EF_IOPL;
  23.261 +        v->arch.iopl = (v->arch.guest_context.user_regs.eflags >> 12) & 3;
  23.262 +        v->arch.guest_context.user_regs.eflags &= ~EF_IOPL;
  23.263  
  23.264          /* Ensure real hardware interrupts are enabled. */
  23.265 -        ed->arch.guest_context.user_regs.eflags |= EF_IE;
  23.266 +        v->arch.guest_context.user_regs.eflags |= EF_IE;
  23.267      } else {
  23.268 -        __vmwrite(GUEST_EFLAGS, ed->arch.guest_context.user_regs.eflags);
  23.269 -        if (ed->arch.guest_context.user_regs.eflags & EF_TF)
  23.270 +        __vmwrite(GUEST_EFLAGS, v->arch.guest_context.user_regs.eflags);
  23.271 +        if (v->arch.guest_context.user_regs.eflags & EF_TF)
  23.272                  __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
  23.273          else 
  23.274                  __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
  23.275      }
  23.276  
  23.277 -    if ( test_bit(_VCPUF_initialised, &ed->vcpu_flags) )
  23.278 +    if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
  23.279          return 0;
  23.280  
  23.281 -    memset(ed->arch.guest_context.debugreg, 0,
  23.282 -           sizeof(ed->arch.guest_context.debugreg));
  23.283 +    memset(v->arch.guest_context.debugreg, 0,
  23.284 +           sizeof(v->arch.guest_context.debugreg));
  23.285      for ( i = 0; i < 8; i++ )
  23.286 -        (void)set_debugreg(ed, i, c->debugreg[i]);
  23.287 +        (void)set_debugreg(v, i, c->debugreg[i]);
  23.288  
  23.289 -    if ( ed->vcpu_id == 0 )
  23.290 +    if ( v->vcpu_id == 0 )
  23.291          d->vm_assist = c->vm_assist;
  23.292  
  23.293      phys_basetab = c->pt_base;
  23.294 -    ed->arch.guest_table = mk_pagetable(phys_basetab);
  23.295 +    v->arch.guest_table = mk_pagetable(phys_basetab);
  23.296  
  23.297      if ( shadow_mode_refcounts(d) )
  23.298      {
  23.299 @@ -443,7 +443,7 @@ int arch_set_info_guest(
  23.300              return -EINVAL;
  23.301      }
  23.302  
  23.303 -    if ( (rc = (int)set_gdt(ed, c->gdt_frames, c->gdt_ents)) != 0 )
  23.304 +    if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
  23.305      {
  23.306          put_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT]);
  23.307          return rc;
  23.308 @@ -461,23 +461,23 @@ int arch_set_info_guest(
  23.309          //      page table, and/or build the table itself, or ???
  23.310          //
  23.311          if ( !pagetable_get_paddr(d->arch.phys_table) )
  23.312 -            d->arch.phys_table = ed->arch.guest_table;
  23.313 +            d->arch.phys_table = v->arch.guest_table;
  23.314  
  23.315 -        if ( (error = vmx_final_setup_guest(ed, c)) )
  23.316 +        if ( (error = vmx_final_setup_guest(v, c)) )
  23.317              return error;
  23.318      }
  23.319  #endif
  23.320  
  23.321 -    update_pagetables(ed);
  23.322 +    update_pagetables(v);
  23.323      
  23.324      /* Don't redo final setup */
  23.325 -    set_bit(_VCPUF_initialised, &ed->vcpu_flags);
  23.326 +    set_bit(_VCPUF_initialised, &v->vcpu_flags);
  23.327  
  23.328      return 0;
  23.329  }
  23.330  
  23.331  
  23.332 -void new_thread(struct exec_domain *d,
  23.333 +void new_thread(struct vcpu *d,
  23.334                  unsigned long start_pc,
  23.335                  unsigned long start_stack,
  23.336                  unsigned long start_info)
  23.337 @@ -506,12 +506,12 @@ void new_thread(struct exec_domain *d,
  23.338  
  23.339  #ifdef __x86_64__
  23.340  
  23.341 -void toggle_guest_mode(struct exec_domain *ed)
  23.342 +void toggle_guest_mode(struct vcpu *v)
  23.343  {
  23.344 -    ed->arch.flags ^= TF_kernel_mode;
  23.345 +    v->arch.flags ^= TF_kernel_mode;
  23.346      __asm__ __volatile__ ( "swapgs" );
  23.347 -    update_pagetables(ed);
  23.348 -    write_ptbase(ed);
  23.349 +    update_pagetables(v);
  23.350 +    write_ptbase(v);
  23.351  }
  23.352  
  23.353  #define loadsegment(seg,value) ({               \
  23.354 @@ -530,7 +530,7 @@ void toggle_guest_mode(struct exec_domai
  23.355          : "=r" (__r) : "r" (value), "0" (__r) );\
  23.356      __r; })
  23.357  
  23.358 -static void load_segments(struct exec_domain *p, struct exec_domain *n)
  23.359 +static void load_segments(struct vcpu *p, struct vcpu *n)
  23.360  {
  23.361      struct vcpu_guest_context *pctxt = &p->arch.guest_context;
  23.362      struct vcpu_guest_context *nctxt = &n->arch.guest_context;
  23.363 @@ -632,9 +632,9 @@ static void load_segments(struct exec_do
  23.364      }
  23.365  }
  23.366  
  23.367 -static void save_segments(struct exec_domain *ed)
  23.368 +static void save_segments(struct vcpu *v)
  23.369  {
  23.370 -    struct cpu_user_regs *regs = &ed->arch.guest_context.user_regs;
  23.371 +    struct cpu_user_regs *regs = &v->arch.guest_context.user_regs;
  23.372      __asm__ __volatile__ ( "movl %%ds,%0" : "=m" (regs->ds) );
  23.373      __asm__ __volatile__ ( "movl %%es,%0" : "=m" (regs->es) );
  23.374      __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (regs->fs) );
  23.375 @@ -657,13 +657,13 @@ long do_switch_to_user(void)
  23.376  {
  23.377      struct cpu_user_regs  *regs = guest_cpu_user_regs();
  23.378      struct switch_to_user  stu;
  23.379 -    struct exec_domain    *ed = current;
  23.380 +    struct vcpu    *v = current;
  23.381  
  23.382      if ( unlikely(copy_from_user(&stu, (void *)regs->rsp, sizeof(stu))) ||
  23.383 -         unlikely(pagetable_get_paddr(ed->arch.guest_table_user) == 0) )
  23.384 +         unlikely(pagetable_get_paddr(v->arch.guest_table_user) == 0) )
  23.385          return -EFAULT;
  23.386  
  23.387 -    toggle_guest_mode(ed);
  23.388 +    toggle_guest_mode(v);
  23.389  
  23.390      regs->rip    = stu.rip;
  23.391      regs->cs     = stu.cs | 3; /* force guest privilege */
  23.392 @@ -690,7 +690,7 @@ long do_switch_to_user(void)
  23.393  #define save_segments(_p)     ((void)0)
  23.394  #define clear_segments()      ((void)0)
  23.395  
  23.396 -static inline void switch_kernel_stack(struct exec_domain *n, unsigned int cpu)
  23.397 +static inline void switch_kernel_stack(struct vcpu *n, unsigned int cpu)
  23.398  {
  23.399      struct tss_struct *tss = &init_tss[cpu];
  23.400      tss->esp1 = n->arch.guest_context.kernel_sp;
  23.401 @@ -699,15 +699,15 @@ static inline void switch_kernel_stack(s
  23.402  
  23.403  #endif
  23.404  
  23.405 -#define loaddebug(_ed,_reg) \
  23.406 -	__asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_ed)->debugreg[_reg]))
  23.407 +#define loaddebug(_v,_reg) \
  23.408 +	__asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
  23.409  
  23.410  static void __context_switch(void)
  23.411  {
  23.412      struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
  23.413      unsigned int         cpu = smp_processor_id();
  23.414 -    struct exec_domain  *p = percpu_ctxt[cpu].curr_ed;
  23.415 -    struct exec_domain  *n = current;
  23.416 +    struct vcpu  *p = percpu_ctxt[cpu].curr_vcpu;
  23.417 +    struct vcpu  *n = current;
  23.418  
  23.419      if ( !is_idle_task(p->domain) )
  23.420      {
  23.421 @@ -759,19 +759,19 @@ static void __context_switch(void)
  23.422      if ( p->domain != n->domain )
  23.423          clear_bit(cpu, &p->domain->cpuset);
  23.424  
  23.425 -    percpu_ctxt[cpu].curr_ed = n;
  23.426 +    percpu_ctxt[cpu].curr_vcpu = n;
  23.427  }
  23.428  
  23.429  
  23.430 -void context_switch(struct exec_domain *prev, struct exec_domain *next)
  23.431 +void context_switch(struct vcpu *prev, struct vcpu *next)
  23.432  {
  23.433 -    struct exec_domain *realprev;
  23.434 +    struct vcpu *realprev;
  23.435  
  23.436      local_irq_disable();
  23.437  
  23.438      set_current(next);
  23.439  
  23.440 -    if ( ((realprev = percpu_ctxt[smp_processor_id()].curr_ed) == next) || 
  23.441 +    if ( ((realprev = percpu_ctxt[smp_processor_id()].curr_vcpu) == next) || 
  23.442           is_idle_task(next->domain) )
  23.443      {
  23.444          local_irq_enable();
  23.445 @@ -801,7 +801,7 @@ void context_switch(struct exec_domain *
  23.446      BUG();
  23.447  }
  23.448  
  23.449 -void continue_running(struct exec_domain *same)
  23.450 +void continue_running(struct vcpu *same)
  23.451  {
  23.452      schedule_tail(same);
  23.453      BUG();
  23.454 @@ -809,7 +809,7 @@ void continue_running(struct exec_domain
  23.455  
  23.456  int __sync_lazy_execstate(void)
  23.457  {
  23.458 -    if ( percpu_ctxt[smp_processor_id()].curr_ed == current )
  23.459 +    if ( percpu_ctxt[smp_processor_id()].curr_vcpu == current )
  23.460          return 0;
  23.461      __context_switch();
  23.462      load_LDT(current);
  23.463 @@ -893,20 +893,20 @@ unsigned long __hypercall_create_continu
  23.464  }
  23.465  
  23.466  #ifdef CONFIG_VMX
  23.467 -static void vmx_relinquish_resources(struct exec_domain *ed)
  23.468 +static void vmx_relinquish_resources(struct vcpu *v)
  23.469  {
  23.470 -    if ( !VMX_DOMAIN(ed) )
  23.471 +    if ( !VMX_DOMAIN(v) )
  23.472          return;
  23.473  
  23.474 -    BUG_ON(ed->arch.arch_vmx.vmcs == NULL);
  23.475 -    free_vmcs(ed->arch.arch_vmx.vmcs);
  23.476 -    ed->arch.arch_vmx.vmcs = 0;
  23.477 +    BUG_ON(v->arch.arch_vmx.vmcs == NULL);
  23.478 +    free_vmcs(v->arch.arch_vmx.vmcs);
  23.479 +    v->arch.arch_vmx.vmcs = 0;
  23.480      
  23.481 -    free_monitor_pagetable(ed);
  23.482 -    rem_ac_timer(&ed->arch.arch_vmx.vmx_platform.vmx_pit.pit_timer);
  23.483 +    free_monitor_pagetable(v);
  23.484 +    rem_ac_timer(&v->arch.arch_vmx.vmx_platform.vmx_pit.pit_timer);
  23.485  }
  23.486  #else
  23.487 -#define vmx_relinquish_resources(_ed) ((void)0)
  23.488 +#define vmx_relinquish_resources(_v) ((void)0)
  23.489  #endif
  23.490  
  23.491  static void relinquish_memory(struct domain *d, struct list_head *list)
  23.492 @@ -969,7 +969,7 @@ static void relinquish_memory(struct dom
  23.493  
  23.494  void domain_relinquish_resources(struct domain *d)
  23.495  {
  23.496 -    struct exec_domain *ed;
  23.497 +    struct vcpu *v;
  23.498  
  23.499      BUG_ON(d->cpuset != 0);
  23.500  
  23.501 @@ -981,29 +981,29 @@ void domain_relinquish_resources(struct 
  23.502      gnttab_release_dev_mappings(d->grant_table);
  23.503  
  23.504      /* Drop the in-use references to page-table bases. */
  23.505 -    for_each_exec_domain ( d, ed )
  23.506 +    for_each_vcpu ( d, v )
  23.507      {
  23.508 -        if ( pagetable_get_paddr(ed->arch.guest_table) != 0 )
  23.509 +        if ( pagetable_get_paddr(v->arch.guest_table) != 0 )
  23.510          {
  23.511              if ( shadow_mode_refcounts(d) )
  23.512 -                put_page(&frame_table[pagetable_get_pfn(ed->arch.guest_table)]);
  23.513 +                put_page(&frame_table[pagetable_get_pfn(v->arch.guest_table)]);
  23.514              else
  23.515 -                put_page_and_type(&frame_table[pagetable_get_pfn(ed->arch.guest_table)]);
  23.516 +                put_page_and_type(&frame_table[pagetable_get_pfn(v->arch.guest_table)]);
  23.517  
  23.518 -            ed->arch.guest_table = mk_pagetable(0);
  23.519 +            v->arch.guest_table = mk_pagetable(0);
  23.520          }
  23.521  
  23.522 -        if ( pagetable_get_paddr(ed->arch.guest_table_user) != 0 )
  23.523 +        if ( pagetable_get_paddr(v->arch.guest_table_user) != 0 )
  23.524          {
  23.525              if ( shadow_mode_refcounts(d) )
  23.526 -                put_page(&frame_table[pagetable_get_pfn(ed->arch.guest_table_user)]);
  23.527 +                put_page(&frame_table[pagetable_get_pfn(v->arch.guest_table_user)]);
  23.528              else
  23.529 -                put_page_and_type(&frame_table[pagetable_get_pfn(ed->arch.guest_table_user)]);
  23.530 +                put_page_and_type(&frame_table[pagetable_get_pfn(v->arch.guest_table_user)]);
  23.531  
  23.532 -            ed->arch.guest_table_user = mk_pagetable(0);
  23.533 +            v->arch.guest_table_user = mk_pagetable(0);
  23.534          }
  23.535  
  23.536 -        vmx_relinquish_resources(ed);
  23.537 +        vmx_relinquish_resources(v);
  23.538      }
  23.539  
  23.540      shadow_mode_disable(d);
  23.541 @@ -1012,8 +1012,8 @@ void domain_relinquish_resources(struct 
  23.542       * Relinquish GDT mappings. No need for explicit unmapping of the LDT as 
  23.543       * it automatically gets squashed when the guest's mappings go away.
  23.544       */
  23.545 -    for_each_exec_domain(d, ed)
  23.546 -        destroy_gdt(ed);
  23.547 +    for_each_vcpu(d, v)
  23.548 +        destroy_gdt(v);
  23.549  
  23.550      /* Relinquish every page of memory. */
  23.551      relinquish_memory(d, &d->xenpage_list);
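
percpu_ctxt[cpu].curr_vcpu (renamed from curr_ed) is what makes the x86 context switch lazy: context_switch() leaves the outgoing vcpu's register state loaded whenever the incoming vcpu already owns it or is the idle task, and __sync_lazy_execstate() flushes it on demand. The test, condensed from the hunks above into a hypothetical predicate:

    /* Hypothetical condensation of the lazy-switch test in context_switch(). */
    static inline int needs_full_switch(struct vcpu *next, unsigned int cpu)
    {
        return !( (percpu_ctxt[cpu].curr_vcpu == next) ||
                  is_idle_task(next->domain) );
    }
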
    24.1 --- a/xen/arch/x86/domain_build.c	Thu Jun 02 19:19:24 2005 +0000
    24.2 +++ b/xen/arch/x86/domain_build.c	Thu Jun 02 21:05:33 2005 +0000
    24.3 @@ -84,7 +84,7 @@ int construct_dom0(struct domain *d,
    24.4      unsigned long count;
    24.5      struct pfn_info *page = NULL;
    24.6      start_info_t *si;
    24.7 -    struct exec_domain *ed = d->exec_domain[0];
    24.8 +    struct vcpu *v = d->vcpu[0];
    24.9  #if defined(__i386__)
   24.10      char *image_start  = (char *)_image_start;  /* use lowmem mappings */
   24.11      char *initrd_start = (char *)_initrd_start; /* use lowmem mappings */
   24.12 @@ -238,14 +238,14 @@ int construct_dom0(struct domain *d,
   24.13       * We're basically forcing default RPLs to 1, so that our "what privilege
   24.14       * level are we returning to?" logic works.
   24.15       */
   24.16 -    ed->arch.guest_context.kernel_ss = FLAT_KERNEL_SS;
   24.17 +    v->arch.guest_context.kernel_ss = FLAT_KERNEL_SS;
   24.18      for ( i = 0; i < 256; i++ ) 
   24.19 -        ed->arch.guest_context.trap_ctxt[i].cs = FLAT_KERNEL_CS;
   24.20 +        v->arch.guest_context.trap_ctxt[i].cs = FLAT_KERNEL_CS;
   24.21  
   24.22  #if defined(__i386__)
   24.23  
   24.24 -    ed->arch.guest_context.failsafe_callback_cs = FLAT_KERNEL_CS;
   24.25 -    ed->arch.guest_context.event_callback_cs    = FLAT_KERNEL_CS;
   24.26 +    v->arch.guest_context.failsafe_callback_cs = FLAT_KERNEL_CS;
   24.27 +    v->arch.guest_context.event_callback_cs    = FLAT_KERNEL_CS;
   24.28  
   24.29      /*
   24.30       * Protect the lowest 1GB of memory. We use a temporary mapping there
   24.31 @@ -267,14 +267,17 @@ int construct_dom0(struct domain *d,
   24.32          l2tab[(LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT)+i] =
   24.33              l2e_from_paddr((u32)l2tab + i*PAGE_SIZE, __PAGE_HYPERVISOR);
   24.34      }
   24.35 -    unsigned long v;
   24.36 -    for (v = PERDOMAIN_VIRT_START; v < PERDOMAIN_VIRT_END;
   24.37 -         v += (1 << L2_PAGETABLE_SHIFT)) {
   24.38 -        l2tab[v >> L2_PAGETABLE_SHIFT] =
   24.39 -            l2e_from_paddr(__pa(d->arch.mm_perdomain_pt) + (v-PERDOMAIN_VIRT_START),
   24.40 -                            __PAGE_HYPERVISOR);
   24.41 +    {
   24.42 +        unsigned long va;
   24.43 +        for (va = PERDOMAIN_VIRT_START; va < PERDOMAIN_VIRT_END;
   24.44 +             va += (1 << L2_PAGETABLE_SHIFT)) {
   24.45 +            l2tab[va >> L2_PAGETABLE_SHIFT] =
   24.46 +                l2e_from_paddr(__pa(d->arch.mm_perdomain_pt) +
   24.47 +                               (va-PERDOMAIN_VIRT_START),
   24.48 +                               __PAGE_HYPERVISOR);
   24.49 +        }
   24.50      }
   24.51 -    ed->arch.guest_table = mk_pagetable((unsigned long)l3start);
   24.52 +    v->arch.guest_table = mk_pagetable((unsigned long)l3start);
   24.53  #else
   24.54      l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
   24.55      memcpy(l2tab, &idle_pg_table[0], PAGE_SIZE);
   24.56 @@ -282,7 +285,7 @@ int construct_dom0(struct domain *d,
   24.57          l2e_from_paddr((unsigned long)l2start, __PAGE_HYPERVISOR);
   24.58      l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
   24.59          l2e_from_paddr(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
   24.60 -    ed->arch.guest_table = mk_pagetable((unsigned long)l2start);
   24.61 +    v->arch.guest_table = mk_pagetable((unsigned long)l2start);
   24.62  #endif
   24.63  
   24.64      l2tab += l2_linear_offset(dsi.v_start);
   24.65 @@ -405,7 +408,7 @@ int construct_dom0(struct domain *d,
   24.66          l4e_from_paddr(__pa(l4start), __PAGE_HYPERVISOR);
   24.67      l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
   24.68          l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
   24.69 -    ed->arch.guest_table = mk_pagetable(__pa(l4start));
   24.70 +    v->arch.guest_table = mk_pagetable(__pa(l4start));
   24.71  
   24.72      l4tab += l4_table_offset(dsi.v_start);
   24.73      mfn = alloc_start >> PAGE_SHIFT;
   24.74 @@ -498,11 +501,11 @@ int construct_dom0(struct domain *d,
   24.75      d->shared_info->n_vcpu = num_online_cpus();
   24.76  
   24.77      /* Set up monitor table */
   24.78 -    update_pagetables(ed);
   24.79 +    update_pagetables(v);
   24.80  
   24.81      /* Install the new page tables. */
   24.82      local_irq_disable();
   24.83 -    write_ptbase(ed);
   24.84 +    write_ptbase(v);
   24.85  
   24.86      /* Copy the OS image and free temporary buffer. */
   24.87      (void)loadelfimage(&dsi);
   24.88 @@ -604,7 +607,7 @@ int construct_dom0(struct domain *d,
   24.89  
   24.90      set_bit(_DOMF_constructed, &d->domain_flags);
   24.91  
   24.92 -    new_thread(ed, dsi.v_kernentry, vstack_end, vstartinfo_start);
   24.93 +    new_thread(v, dsi.v_kernentry, vstack_end, vstartinfo_start);
   24.94  
   24.95      if ( opt_dom0_shadow || opt_dom0_translate )
   24.96      {
   24.97 @@ -638,13 +641,13 @@ int construct_dom0(struct domain *d,
   24.98              idle_pg_table[1] = root_from_paddr(
   24.99                  pagetable_get_paddr(d->arch.phys_table), __PAGE_HYPERVISOR);
  24.100              translate_l2pgtable(d, (l1_pgentry_t *)(1u << L2_PAGETABLE_SHIFT),
  24.101 -                                pagetable_get_pfn(ed->arch.guest_table));
  24.102 +                                pagetable_get_pfn(v->arch.guest_table));
  24.103              idle_pg_table[1] = root_empty();
  24.104              local_flush_tlb();
  24.105  #endif
  24.106          }
  24.107  
  24.108 -        update_pagetables(ed); /* XXX SMP */
  24.109 +        update_pagetables(v); /* XXX SMP */
  24.110      }
  24.111  
  24.112      return 0;
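
One construct_dom0() hunk does more than rename: the function now holds struct vcpu *v for its whole body, so the old unsigned long v loop counter would be a redeclaration error; it becomes va inside its own block. The fixed loop, standalone:

    {
        unsigned long va;    /* renamed from 'v' to avoid the vcpu pointer */
        for ( va = PERDOMAIN_VIRT_START; va < PERDOMAIN_VIRT_END;
              va += (1 << L2_PAGETABLE_SHIFT) )
            l2tab[va >> L2_PAGETABLE_SHIFT] =
                l2e_from_paddr(__pa(d->arch.mm_perdomain_pt) +
                               (va - PERDOMAIN_VIRT_START),
                               __PAGE_HYPERVISOR);
    }
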
    25.1 --- a/xen/arch/x86/i387.c	Thu Jun 02 19:19:24 2005 +0000
    25.2 +++ b/xen/arch/x86/i387.c	Thu Jun 02 21:05:33 2005 +0000
    25.3 @@ -21,7 +21,7 @@ void init_fpu(void)
    25.4      set_bit(_VCPUF_fpu_initialised, &current->vcpu_flags);
    25.5  }
    25.6  
    25.7 -void save_init_fpu(struct exec_domain *tsk)
    25.8 +void save_init_fpu(struct vcpu *tsk)
    25.9  {
   25.10      /*
   25.11       * The guest OS may have set the 'virtual STTS' flag.
   25.12 @@ -44,7 +44,7 @@ void save_init_fpu(struct exec_domain *t
   25.13      stts();
   25.14  }
   25.15  
   25.16 -void restore_fpu(struct exec_domain *tsk)
   25.17 +void restore_fpu(struct vcpu *tsk)
   25.18  {
   25.19      /*
   25.20       * FXRSTOR can fault if passed a corrupted data block. We handle this
    26.1 --- a/xen/arch/x86/idle0_task.c	Thu Jun 02 19:19:24 2005 +0000
    26.2 +++ b/xen/arch/x86/idle0_task.c	Thu Jun 02 21:05:33 2005 +0000
    26.3 @@ -9,7 +9,7 @@ struct domain idle0_domain = {
    26.4      refcnt:      ATOMIC_INIT(1)
    26.5  };
    26.6  
    26.7 -struct exec_domain idle0_exec_domain = {
    26.8 +struct vcpu idle0_vcpu = {
    26.9      processor:   0,
   26.10      domain:      &idle0_domain
   26.11  };
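
idle0_vcpu is initialised with the old GNU 'member: value' extension; the standard C99 spelling of the same initialiser would be:

    struct vcpu idle0_vcpu = {
        .processor = 0,
        .domain    = &idle0_domain,
    };
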
    27.1 --- a/xen/arch/x86/irq.c	Thu Jun 02 19:19:24 2005 +0000
    27.2 +++ b/xen/arch/x86/irq.c	Thu Jun 02 21:05:33 2005 +0000
    27.3 @@ -183,22 +183,22 @@ typedef struct {
    27.4      u8 nr_guests;
    27.5      u8 in_flight;
    27.6      u8 shareable;
    27.7 -    struct exec_domain *guest[IRQ_MAX_GUESTS];
    27.8 +    struct vcpu *guest[IRQ_MAX_GUESTS];
    27.9  } irq_guest_action_t;
   27.10  
   27.11  static void __do_IRQ_guest(int irq)
   27.12  {
   27.13      irq_desc_t         *desc = &irq_desc[irq];
   27.14      irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
   27.15 -    struct exec_domain *ed;
   27.16 +    struct vcpu        *v;
   27.17      int                 i;
   27.18  
   27.19      for ( i = 0; i < action->nr_guests; i++ )
   27.20      {
   27.21 -        ed = action->guest[i];
   27.22 -        if ( !test_and_set_bit(irq, &ed->domain->pirq_mask) )
   27.23 +        v = action->guest[i];
   27.24 +        if ( !test_and_set_bit(irq, &v->domain->pirq_mask) )
   27.25              action->in_flight++;
   27.26 -        send_guest_pirq(ed, irq);
   27.27 +        send_guest_pirq(v, irq);
   27.28      }
   27.29  }
   27.30  
   27.31 @@ -230,9 +230,9 @@ int pirq_guest_unmask(struct domain *d)
   27.32      return 0;
   27.33  }
   27.34  
   27.35 -int pirq_guest_bind(struct exec_domain *ed, int irq, int will_share)
   27.36 +int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
   27.37  {
   27.38 -    struct domain      *d = ed->domain;
   27.39 +    struct domain      *d = v->domain;
   27.40      irq_desc_t         *desc = &irq_desc[irq];
   27.41      irq_guest_action_t *action;
   27.42      unsigned long       flags;
   27.43 @@ -274,7 +274,7 @@ int pirq_guest_bind(struct exec_domain *
   27.44          desc->handler->startup(irq);
   27.45  
   27.46          /* Attempt to bind the interrupt target to the correct CPU. */
   27.47 -        cpu_set(ed->processor, cpumask);
   27.48 +        cpu_set(v->processor, cpumask);
   27.49          if ( desc->handler->set_affinity != NULL )
   27.50              desc->handler->set_affinity(irq, cpumask);
   27.51      }
   27.52 @@ -293,7 +293,7 @@ int pirq_guest_bind(struct exec_domain *
   27.53          goto out;
   27.54      }
   27.55  
   27.56 -    action->guest[action->nr_guests++] = ed;
   27.57 +    action->guest[action->nr_guests++] = v;
   27.58  
   27.59   out:
   27.60      spin_unlock_irqrestore(&desc->lock, flags);
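
pirq_guest_bind() now takes the binding vcpu directly and uses v->processor to steer the interrupt's affinity. A hypothetical caller, for illustration:

    /* Hypothetical caller: bind a physical IRQ to dom0's boot vcpu and
     * allow other guests to share the line. */
    static int bind_irq_to_dom0(int irq)
    {
        return pirq_guest_bind(dom0->vcpu[0], irq, 1 /* will_share */);
    }
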
    28.1 --- a/xen/arch/x86/mm.c	Thu Jun 02 19:19:24 2005 +0000
    28.2 +++ b/xen/arch/x86/mm.c	Thu Jun 02 21:05:33 2005 +0000
    28.3 @@ -232,35 +232,35 @@ void arch_init_memory(void)
    28.4      subarch_init_memory(dom_xen);
    28.5  }
    28.6  
    28.7 -void write_ptbase(struct exec_domain *ed)
    28.8 +void write_ptbase(struct vcpu *v)
    28.9  {
   28.10 -    write_cr3(pagetable_get_paddr(ed->arch.monitor_table));
   28.11 +    write_cr3(pagetable_get_paddr(v->arch.monitor_table));
   28.12  }
   28.13  
   28.14 -void invalidate_shadow_ldt(struct exec_domain *d)
   28.15 +void invalidate_shadow_ldt(struct vcpu *v)
   28.16  {
   28.17      int i;
   28.18      unsigned long pfn;
   28.19      struct pfn_info *page;
   28.20      
   28.21 -    if ( d->arch.shadow_ldt_mapcnt == 0 )
   28.22 +    if ( v->arch.shadow_ldt_mapcnt == 0 )
   28.23          return;
   28.24  
   28.25 -    d->arch.shadow_ldt_mapcnt = 0;
   28.26 +    v->arch.shadow_ldt_mapcnt = 0;
   28.27  
   28.28      for ( i = 16; i < 32; i++ )
   28.29      {
   28.30 -        pfn = l1e_get_pfn(d->arch.perdomain_ptes[i]);
   28.31 +        pfn = l1e_get_pfn(v->arch.perdomain_ptes[i]);
   28.32          if ( pfn == 0 ) continue;
   28.33 -        d->arch.perdomain_ptes[i] = l1e_empty();
   28.34 +        v->arch.perdomain_ptes[i] = l1e_empty();
   28.35          page = &frame_table[pfn];
   28.36          ASSERT_PAGE_IS_TYPE(page, PGT_ldt_page);
   28.37 -        ASSERT_PAGE_IS_DOMAIN(page, d->domain);
   28.38 +        ASSERT_PAGE_IS_DOMAIN(page, v->domain);
   28.39          put_page_and_type(page);
   28.40      }
   28.41  
   28.42      /* Dispose of the (now possibly invalid) mappings from the TLB.  */
   28.43 -    percpu_info[d->processor].deferred_ops |= DOP_FLUSH_TLB | DOP_RELOAD_LDT;
   28.44 +    percpu_info[v->processor].deferred_ops |= DOP_FLUSH_TLB | DOP_RELOAD_LDT;
   28.45  }
   28.46  
   28.47  
   28.48 @@ -287,25 +287,25 @@ static int alloc_segdesc_page(struct pfn
   28.49  /* Map shadow page at offset @off. */
   28.50  int map_ldt_shadow_page(unsigned int off)
   28.51  {
   28.52 -    struct exec_domain *ed = current;
   28.53 -    struct domain *d = ed->domain;
   28.54 +    struct vcpu *v = current;
   28.55 +    struct domain *d = v->domain;
   28.56      unsigned long gpfn, gmfn;
   28.57      l1_pgentry_t l1e, nl1e;
   28.58 -    unsigned gva = ed->arch.guest_context.ldt_base + (off << PAGE_SHIFT);
   28.59 +    unsigned long gva = v->arch.guest_context.ldt_base + (off << PAGE_SHIFT);
   28.60      int res;
   28.61  
   28.62  #if defined(__x86_64__)
   28.63      /* If in user mode, switch to kernel mode just to read LDT mapping. */
   28.64 -    extern void toggle_guest_mode(struct exec_domain *);
   28.65 -    int user_mode = !(ed->arch.flags & TF_kernel_mode);
   28.66 -#define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(ed)
   28.67 +    extern void toggle_guest_mode(struct vcpu *);
   28.68 +    int user_mode = !(v->arch.flags & TF_kernel_mode);
   28.69 +#define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(v)
   28.70  #elif defined(__i386__)
   28.71  #define TOGGLE_MODE() ((void)0)
   28.72  #endif
   28.73  
   28.74      BUG_ON(unlikely(in_irq()));
   28.75  
   28.76 -    shadow_sync_va(ed, gva);
   28.77 +    shadow_sync_va(v, gva);
   28.78  
   28.79      TOGGLE_MODE();
   28.80      __copy_from_user(&l1e, &linear_pg_table[l1_linear_offset(gva)],
   28.81 @@ -335,8 +335,8 @@ int map_ldt_shadow_page(unsigned int off
   28.82  
   28.83      nl1e = l1e_from_pfn(gmfn, l1e_get_flags(l1e) | _PAGE_RW);
   28.84  
   28.85 -    ed->arch.perdomain_ptes[off + 16] = nl1e;
   28.86 -    ed->arch.shadow_ldt_mapcnt++;
   28.87 +    v->arch.perdomain_ptes[off + 16] = nl1e;
   28.88 +    v->arch.shadow_ldt_mapcnt++;
   28.89  
   28.90      return 1;
   28.91  }
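
On x86_64, map_ldt_shadow_page() must read the guest's LDT mapping through the kernel page tables, so it brackets the access with toggle_guest_mode() (whose body appears in the domain.c hunks above). The bracket, spelled out:

    /* From the hunk above: hop into kernel mode to read the LDT mapping,
     * then hop back if we started in user mode. */
    int user_mode = !(v->arch.flags & TF_kernel_mode);
    if ( user_mode ) toggle_guest_mode(v);      /* TOGGLE_MODE() */
    __copy_from_user(&l1e, &linear_pg_table[l1_linear_offset(gva)],
                     sizeof(l1e));
    if ( user_mode ) toggle_guest_mode(v);      /* and back */
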
   28.92 @@ -615,7 +615,7 @@ void put_page_from_l1e(l1_pgentry_t l1e,
   28.93               unlikely(((page->u.inuse.type_info & PGT_count_mask) != 0)) )
   28.94  
   28.95              // XXX SMP BUG?
   28.96 -            invalidate_shadow_ldt(e->exec_domain[0]);
   28.97 +            invalidate_shadow_ldt(e->vcpu[0]);
   28.98          put_page(page);
   28.99      }
  28.100  }
  28.101 @@ -1433,8 +1433,8 @@ int get_page_type(struct pfn_info *page,
  28.102  
  28.103  int new_guest_cr3(unsigned long mfn)
  28.104  {
  28.105 -    struct exec_domain *ed = current;
  28.106 -    struct domain *d = ed->domain;
  28.107 +    struct vcpu *v = current;
  28.108 +    struct domain *d = v->domain;
  28.109      int okay;
  28.110      unsigned long old_base_mfn;
  28.111  
  28.112 @@ -1445,13 +1445,13 @@ int new_guest_cr3(unsigned long mfn)
  28.113  
  28.114      if ( likely(okay) )
  28.115      {
  28.116 -        invalidate_shadow_ldt(ed);
  28.117 -
  28.118 -        old_base_mfn = pagetable_get_pfn(ed->arch.guest_table);
  28.119 -        ed->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
  28.120 -        update_pagetables(ed); /* update shadow_table and monitor_table */
  28.121 -
  28.122 -        write_ptbase(ed);
  28.123 +        invalidate_shadow_ldt(v);
  28.124 +
  28.125 +        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
  28.126 +        v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
  28.127 +        update_pagetables(v); /* update shadow_table and monitor_table */
  28.128 +
  28.129 +        write_ptbase(v);
  28.130  
  28.131          if ( shadow_mode_refcounts(d) )
  28.132              put_page(&frame_table[old_base_mfn]);
  28.133 @@ -1461,12 +1461,12 @@ int new_guest_cr3(unsigned long mfn)
  28.134          /* CR3 also holds a ref to its shadow... */
  28.135          if ( shadow_mode_enabled(d) )
  28.136          {
  28.137 -            if ( ed->arch.monitor_shadow_ref )
  28.138 -                put_shadow_ref(ed->arch.monitor_shadow_ref);
  28.139 -            ed->arch.monitor_shadow_ref =
  28.140 -                pagetable_get_pfn(ed->arch.monitor_table);
  28.141 -            ASSERT(!page_get_owner(&frame_table[ed->arch.monitor_shadow_ref]));
  28.142 -            get_shadow_ref(ed->arch.monitor_shadow_ref);
  28.143 +            if ( v->arch.monitor_shadow_ref )
  28.144 +                put_shadow_ref(v->arch.monitor_shadow_ref);
  28.145 +            v->arch.monitor_shadow_ref =
  28.146 +                pagetable_get_pfn(v->arch.monitor_table);
  28.147 +            ASSERT(!page_get_owner(&frame_table[v->arch.monitor_shadow_ref]));
  28.148 +            get_shadow_ref(v->arch.monitor_shadow_ref);
  28.149          }
  28.150      }
  28.151      else
  28.152 @@ -1560,15 +1560,15 @@ static inline unsigned long vcpuset_to_p
  28.153  {
  28.154      unsigned int  vcpu;
  28.155      unsigned long pset = 0;
  28.156 -    struct exec_domain *ed;
  28.157 +    struct vcpu *v;
  28.158  
  28.159      while ( vset != 0 )
  28.160      {
  28.161          vcpu = find_first_set_bit(vset);
  28.162          vset &= ~(1UL << vcpu);
  28.163          if ( (vcpu < MAX_VIRT_CPUS) &&
  28.164 -             ((ed = d->exec_domain[vcpu]) != NULL) )
  28.165 -            pset |= 1UL << ed->processor;
  28.166 +             ((v = d->vcpu[vcpu]) != NULL) )
  28.167 +            pset |= 1UL << v->processor;
  28.168      }
  28.169  
  28.170      return pset;
  28.171 @@ -1584,8 +1584,8 @@ int do_mmuext_op(
  28.172      int rc = 0, i = 0, okay, cpu = smp_processor_id();
  28.173      unsigned int type, done = 0;
  28.174      struct pfn_info *page;
  28.175 -    struct exec_domain *ed = current;
  28.176 -    struct domain *d = ed->domain, *e;
  28.177 +    struct vcpu *v = current;
  28.178 +    struct domain *d = v->domain, *e;
  28.179      u32 x, y, _d, _nd;
  28.180  
  28.181      LOCK_BIGLOCK(d);
  28.182 @@ -1710,8 +1710,8 @@ int do_mmuext_op(
  28.183              else
  28.184              {
  28.185                  unsigned long old_mfn =
  28.186 -                    pagetable_get_pfn(ed->arch.guest_table_user);
  28.187 -                ed->arch.guest_table_user = mk_pagetable(op.mfn << PAGE_SHIFT);
  28.188 +                    pagetable_get_pfn(v->arch.guest_table_user);
  28.189 +                v->arch.guest_table_user = mk_pagetable(op.mfn << PAGE_SHIFT);
  28.190                  if ( old_mfn != 0 )
  28.191                      put_page_and_type(&frame_table[old_mfn]);
  28.192              }
  28.193 @@ -1724,7 +1724,7 @@ int do_mmuext_op(
  28.194      
  28.195          case MMUEXT_INVLPG_LOCAL:
  28.196              if ( shadow_mode_enabled(d) )
  28.197 -                shadow_invlpg(ed, op.linear_addr);
  28.198 +                shadow_invlpg(v, op.linear_addr);
  28.199              local_flush_tlb_one(op.linear_addr);
  28.200              break;
  28.201  
  28.202 @@ -1792,13 +1792,13 @@ int do_mmuext_op(
  28.203                  okay = 0;
  28.204                  MEM_LOG("Bad args to SET_LDT: ptr=%lx, ents=%lx", ptr, ents);
  28.205              }
  28.206 -            else if ( (ed->arch.guest_context.ldt_ents != ents) || 
  28.207 -                      (ed->arch.guest_context.ldt_base != ptr) )
  28.208 +            else if ( (v->arch.guest_context.ldt_ents != ents) || 
  28.209 +                      (v->arch.guest_context.ldt_base != ptr) )
  28.210              {
  28.211 -                invalidate_shadow_ldt(ed);
  28.212 -                ed->arch.guest_context.ldt_base = ptr;
  28.213 -                ed->arch.guest_context.ldt_ents = ents;
  28.214 -                load_LDT(ed);
  28.215 +                invalidate_shadow_ldt(v);
  28.216 +                v->arch.guest_context.ldt_base = ptr;
  28.217 +                v->arch.guest_context.ldt_ents = ents;
  28.218 +                load_LDT(v);
  28.219                  percpu_info[cpu].deferred_ops &= ~DOP_RELOAD_LDT;
  28.220                  if ( ents != 0 )
  28.221                      percpu_info[cpu].deferred_ops |= DOP_RELOAD_LDT;
  28.222 @@ -1943,8 +1943,8 @@ int do_mmu_update(
  28.223      struct pfn_info *page;
  28.224      int rc = 0, okay = 1, i = 0, cpu = smp_processor_id();
  28.225      unsigned int cmd, done = 0;
  28.226 -    struct exec_domain *ed = current;
  28.227 -    struct domain *d = ed->domain;
  28.228 +    struct vcpu *v = current;
  28.229 +    struct domain *d = v->domain;
  28.230      u32 type_info;
  28.231      struct map_dom_mem_cache mapcache, sh_mapcache;
  28.232  
  28.233 @@ -1953,7 +1953,7 @@ int do_mmu_update(
  28.234      cleanup_writable_pagetable(d);
  28.235  
  28.236      if ( unlikely(shadow_mode_enabled(d)) )
  28.237 -        check_pagetable(ed, "pre-mmu"); /* debug */
  28.238 +        check_pagetable(v, "pre-mmu"); /* debug */
  28.239  
  28.240      if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
  28.241      {
  28.242 @@ -2096,7 +2096,7 @@ int do_mmu_update(
  28.243                          if ( page_is_page_table(page) &&
  28.244                               !page_out_of_sync(page) )
  28.245                          {
  28.246 -                            shadow_mark_mfn_out_of_sync(ed, gpfn, mfn);
  28.247 +                            shadow_mark_mfn_out_of_sync(v, gpfn, mfn);
  28.248                          }
  28.249                      }
  28.250  
  28.251 @@ -2185,7 +2185,7 @@ int do_mmu_update(
  28.252          __put_user(done + i, pdone);
  28.253  
  28.254      if ( unlikely(shadow_mode_enabled(d)) )
  28.255 -        check_pagetable(ed, "post-mmu"); /* debug */
  28.256 +        check_pagetable(v, "post-mmu"); /* debug */
  28.257  
  28.258      UNLOCK_BIGLOCK(d);
  28.259      return rc;
  28.260 @@ -2197,7 +2197,7 @@ int do_mmu_update(
  28.261  int update_grant_va_mapping(unsigned long va,
  28.262                              l1_pgentry_t _nl1e, 
  28.263                              struct domain *d,
  28.264 -                            struct exec_domain *ed)
  28.265 +                            struct vcpu *v)
  28.266  {
  28.267      /* Caller must:
  28.268       * . own d's BIGLOCK 
  28.269 @@ -2216,7 +2216,7 @@ int update_grant_va_mapping(unsigned lon
  28.270      // just everything involved in getting to this L1 (i.e. we need
  28.271      // linear_pg_table[l1_linear_offset(va)] to be in sync)...
  28.272      //
  28.273 -    __shadow_sync_va(ed, va);
  28.274 +    __shadow_sync_va(v, va);
  28.275  
  28.276      pl1e = &linear_pg_table[l1_linear_offset(va)];
  28.277  
  28.278 @@ -2242,7 +2242,7 @@ int update_grant_va_mapping(unsigned lon
  28.279      }
  28.280  
  28.281      if ( unlikely(shadow_mode_enabled(d)) )
  28.282 -        shadow_do_update_va_mapping(va, _nl1e, ed);
  28.283 +        shadow_do_update_va_mapping(va, _nl1e, v);
  28.284  
  28.285      return rc;
  28.286  }
  28.287 @@ -2252,12 +2252,12 @@ int do_update_va_mapping(unsigned long v
  28.288                           unsigned long val32,
  28.289                           unsigned long flags)
  28.290  {
  28.291 -    l1_pgentry_t       val  = l1e_from_intpte(val32);
  28.292 -    struct exec_domain *ed  = current;
  28.293 -    struct domain      *d   = ed->domain;
  28.294 -    unsigned int        cpu = ed->processor;
  28.295 -    unsigned long       vset, pset, bmap_ptr;
  28.296 -    int                 rc = 0;
  28.297 +    l1_pgentry_t   val = l1e_from_intpte(val32);
  28.298 +    struct vcpu   *v   = current;
  28.299 +    struct domain *d   = v->domain;
  28.300 +    unsigned int   cpu = v->processor;
  28.301 +    unsigned long  vset, pset, bmap_ptr;
  28.302 +    int            rc  = 0;
  28.303  
  28.304      perfc_incrc(calls_to_update_va);
  28.305  
  28.306 @@ -2269,7 +2269,7 @@ int do_update_va_mapping(unsigned long v
  28.307      cleanup_writable_pagetable(d);
  28.308  
  28.309      if ( unlikely(shadow_mode_enabled(d)) )
  28.310 -        check_pagetable(ed, "pre-va"); /* debug */
  28.311 +        check_pagetable(v, "pre-va"); /* debug */
  28.312  
  28.313      if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
  28.314                                  val)) )
  28.315 @@ -2288,9 +2288,9 @@ int do_update_va_mapping(unsigned long v
  28.316              domain_crash();
  28.317          }
  28.318      
  28.319 -        rc = shadow_do_update_va_mapping(va, val, ed);
  28.320 -
  28.321 -        check_pagetable(ed, "post-va"); /* debug */
  28.322 +        rc = shadow_do_update_va_mapping(va, val, v);
  28.323 +
  28.324 +        check_pagetable(v, "post-va"); /* debug */
  28.325      }
  28.326  
  28.327      switch ( flags & UVMF_FLUSHTYPE_MASK )
  28.328 @@ -2376,27 +2376,27 @@ int do_update_va_mapping_otherdomain(uns
  28.329   * Descriptor Tables
  28.330   */
  28.331  
  28.332 -void destroy_gdt(struct exec_domain *ed)
  28.333 +void destroy_gdt(struct vcpu *v)
  28.334  {
  28.335      int i;
  28.336      unsigned long pfn;
  28.337  
  28.338 -    ed->arch.guest_context.gdt_ents = 0;
  28.339 +    v->arch.guest_context.gdt_ents = 0;
  28.340      for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ )
  28.341      {
  28.342 -        if ( (pfn = l1e_get_pfn(ed->arch.perdomain_ptes[i])) != 0 )
  28.343 +        if ( (pfn = l1e_get_pfn(v->arch.perdomain_ptes[i])) != 0 )
  28.344              put_page_and_type(&frame_table[pfn]);
  28.345 -        ed->arch.perdomain_ptes[i] = l1e_empty();
  28.346 -        ed->arch.guest_context.gdt_frames[i] = 0;
  28.347 +        v->arch.perdomain_ptes[i] = l1e_empty();
  28.348 +        v->arch.guest_context.gdt_frames[i] = 0;
  28.349      }
  28.350  }
  28.351  
  28.352  
  28.353 -long set_gdt(struct exec_domain *ed, 
  28.354 +long set_gdt(struct vcpu *v, 
  28.355               unsigned long *frames,
  28.356               unsigned int entries)
  28.357  {
  28.358 -    struct domain *d = ed->domain;
  28.359 +    struct domain *d = v->domain;
  28.360      /* NB. There are 512 8-byte entries per GDT page. */
  28.361      int i, nr_pages = (entries + 511) / 512;
  28.362      unsigned long pfn;
  28.363 @@ -2413,14 +2413,14 @@ long set_gdt(struct exec_domain *ed,
  28.364              goto fail;
  28.365  
  28.366      /* Tear down the old GDT. */
  28.367 -    destroy_gdt(ed);
  28.368 +    destroy_gdt(v);
  28.369  
  28.370      /* Install the new GDT. */
  28.371 -    ed->arch.guest_context.gdt_ents = entries;
  28.372 +    v->arch.guest_context.gdt_ents = entries;
  28.373      for ( i = 0; i < nr_pages; i++ )
  28.374      {
  28.375 -        ed->arch.guest_context.gdt_frames[i] = frames[i];
  28.376 -        ed->arch.perdomain_ptes[i] =
  28.377 +        v->arch.guest_context.gdt_frames[i] = frames[i];
  28.378 +        v->arch.perdomain_ptes[i] =
  28.379              l1e_from_pfn(frames[i], __PAGE_HYPERVISOR);
  28.380      }
  28.381  
  28.382 @@ -2610,8 +2610,8 @@ void ptwr_flush(struct domain *d, const 
  28.383  
  28.384      ASSERT(!shadow_mode_enabled(d));
  28.385  
  28.386 -    if ( unlikely(d->arch.ptwr[which].ed != current) )
  28.387 -        write_ptbase(d->arch.ptwr[which].ed);
  28.388 +    if ( unlikely(d->arch.ptwr[which].vcpu != current) )
  28.389 +        write_ptbase(d->arch.ptwr[which].vcpu);
  28.390  
  28.391      l1va = d->arch.ptwr[which].l1va;
  28.392      ptep = (unsigned long *)&linear_pg_table[l1_linear_offset(l1va)];
  28.393 @@ -2676,7 +2676,7 @@ void ptwr_flush(struct domain *d, const 
  28.394  
  28.395      d->arch.ptwr[which].l1va = 0;
  28.396  
  28.397 -    if ( unlikely(d->arch.ptwr[which].ed != current) )
  28.398 +    if ( unlikely(d->arch.ptwr[which].vcpu != current) )
  28.399          write_ptbase(current);
  28.400  }
  28.401  
  28.402 @@ -2871,7 +2871,7 @@ int ptwr_do_page_fault(struct domain *d,
  28.403       * If this is a multi-processor guest then ensure that the page is hooked
  28.404       * into at most one L2 table, which must be the one running on this VCPU.
  28.405       */
  28.406 -    if ( (d->exec_domain[0]->next_in_list != NULL) &&
  28.407 +    if ( (d->vcpu[0]->next_in_list != NULL) &&
  28.408           ((page->u.inuse.type_info & PGT_count_mask) != 
  28.409            (!!(page->u.inuse.type_info & PGT_pinned) +
  28.410             (which == PTWR_PT_ACTIVE))) )
  28.411 @@ -2905,7 +2905,7 @@ int ptwr_do_page_fault(struct domain *d,
  28.412  
  28.413      d->arch.ptwr[which].l1va   = addr | 1;
  28.414      d->arch.ptwr[which].l2_idx = l2_idx;
  28.415 -    d->arch.ptwr[which].ed     = current;
  28.416 +    d->arch.ptwr[which].vcpu   = current;
  28.417      
  28.418      /* For safety, disconnect the L1 p.t. page from current space. */
  28.419      if ( which == PTWR_PT_ACTIVE )
    29.1 --- a/xen/arch/x86/setup.c	Thu Jun 02 19:19:24 2005 +0000
    29.2 +++ b/xen/arch/x86/setup.c	Thu Jun 02 21:05:33 2005 +0000
    29.3 @@ -95,7 +95,7 @@ unsigned long mmu_cr4_features = X86_CR4
    29.4  #endif
    29.5  EXPORT_SYMBOL(mmu_cr4_features);
    29.6  
    29.7 -struct exec_domain *idle_task[NR_CPUS] = { &idle0_exec_domain };
    29.8 +struct vcpu *idle_task[NR_CPUS] = { &idle0_vcpu };
    29.9  
   29.10  int acpi_disabled;
   29.11  
   29.12 @@ -252,7 +252,7 @@ void __init __start_xen(multiboot_info_t
   29.13          cmdline_parse(__va(mbi->cmdline));
   29.14  
   29.15      /* Must do this early -- e.g., spinlocks rely on get_current(). */
   29.16 -    set_current(&idle0_exec_domain);
   29.17 +    set_current(&idle0_vcpu);
   29.18      set_processor_id(0);
   29.19  
   29.20      smp_prepare_boot_cpu();
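
The early set_current(&idle0_vcpu) above matters because, as the comment notes, spinlocks rely on get_current(): a valid struct vcpu must exist before the first lock is taken. The sketch below models the plumbing with a per-CPU array purely for illustration; the real x86 implementation (asm-x86/current.h, listed in this changeset's file list) instead keeps the pointer in a cpu_info block reachable from the stack pointer.

    /* Simplified, illustrative model of the current-vcpu plumbing. */
    static struct vcpu *current_vcpu[NR_CPUS];

    #define set_current(_v) (current_vcpu[smp_processor_id()] = (_v))
    #define current         (current_vcpu[smp_processor_id()])
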
    30.1 --- a/xen/arch/x86/shadow.c	Thu Jun 02 19:19:24 2005 +0000
    30.2 +++ b/xen/arch/x86/shadow.c	Thu Jun 02 21:05:33 2005 +0000
    30.3 @@ -553,7 +553,7 @@ static void free_shadow_pages(struct dom
    30.4  {
    30.5      int                   i;
    30.6      struct shadow_status *x;
    30.7 -    struct exec_domain   *ed;
    30.8 +    struct vcpu          *v;
    30.9   
   30.10      /*
   30.11       * WARNING! The shadow page table must not currently be in use!
   30.12 @@ -568,21 +568,21 @@ static void free_shadow_pages(struct dom
   30.13      //
   30.14      free_out_of_sync_state(d);
   30.15  
   30.16 -    // second, remove any outstanding refs from ed->arch.shadow_table
   30.17 +    // second, remove any outstanding refs from v->arch.shadow_table
   30.18      // and CR3.
   30.19      //
   30.20 -    for_each_exec_domain(d, ed)
   30.21 +    for_each_vcpu(d, v)
   30.22      {
   30.23 -        if ( pagetable_get_paddr(ed->arch.shadow_table) )
   30.24 +        if ( pagetable_get_paddr(v->arch.shadow_table) )
   30.25          {
   30.26 -            put_shadow_ref(pagetable_get_pfn(ed->arch.shadow_table));
   30.27 -            ed->arch.shadow_table = mk_pagetable(0);
   30.28 +            put_shadow_ref(pagetable_get_pfn(v->arch.shadow_table));
   30.29 +            v->arch.shadow_table = mk_pagetable(0);
   30.30          }
   30.31  
   30.32 -        if ( ed->arch.monitor_shadow_ref )
   30.33 +        if ( v->arch.monitor_shadow_ref )
   30.34          {
   30.35 -            put_shadow_ref(ed->arch.monitor_shadow_ref);
   30.36 -            ed->arch.monitor_shadow_ref = 0;
   30.37 +            put_shadow_ref(v->arch.monitor_shadow_ref);
   30.38 +            v->arch.monitor_shadow_ref = 0;
   30.39          }
   30.40      }
   30.41  
   30.42 @@ -590,9 +590,9 @@ static void free_shadow_pages(struct dom
   30.43      //
   30.44      if ( shadow_mode_external(d) )
   30.45      {
   30.46 -        for_each_exec_domain(d, ed)
   30.47 +        for_each_vcpu(d, v)
   30.48          {
   30.49 -            l2_pgentry_t *mpl2e = ed->arch.monitor_vtable;
   30.50 +            l2_pgentry_t *mpl2e = v->arch.monitor_vtable;
   30.51  
   30.52              if ( mpl2e )
   30.53              {
   30.54 @@ -677,14 +677,14 @@ int _shadow_mode_refcounts(struct domain
   30.55      return shadow_mode_refcounts(d);
   30.56  }
   30.57  
   30.58 -static void alloc_monitor_pagetable(struct exec_domain *ed)
   30.59 +static void alloc_monitor_pagetable(struct vcpu *v)
   30.60  {
   30.61      unsigned long mmfn;
   30.62      l2_pgentry_t *mpl2e;
   30.63      struct pfn_info *mmfn_info;
   30.64 -    struct domain *d = ed->domain;
   30.65 -
   30.66 -    ASSERT(pagetable_get_paddr(ed->arch.monitor_table) == 0);
   30.67 +    struct domain *d = v->domain;
   30.68 +
   30.69 +    ASSERT(pagetable_get_paddr(v->arch.monitor_table) == 0);
   30.70  
   30.71      mmfn_info = alloc_domheap_page(NULL);
   30.72      ASSERT(mmfn_info != NULL);
   30.73 @@ -714,21 +714,21 @@ static void alloc_monitor_pagetable(stru
   30.74      mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = l2e_empty();
   30.75      mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty();
   30.76  
   30.77 -    ed->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
   30.78 -    ed->arch.monitor_vtable = mpl2e;
   30.79 +    v->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
   30.80 +    v->arch.monitor_vtable = mpl2e;
   30.81  }
   30.82  
   30.83  /*
   30.84   * Free the pages for monitor_table and hl2_table
   30.85   */
   30.86 -void free_monitor_pagetable(struct exec_domain *ed)
   30.87 +void free_monitor_pagetable(struct vcpu *v)
   30.88  {
   30.89      l2_pgentry_t *mpl2e, hl2e, sl2e;
   30.90      unsigned long mfn;
   30.91  
   30.92 -    ASSERT( pagetable_get_paddr(ed->arch.monitor_table) );
   30.93 +    ASSERT( pagetable_get_paddr(v->arch.monitor_table) );
   30.94      
   30.95 -    mpl2e = ed->arch.monitor_vtable;
   30.96 +    mpl2e = v->arch.monitor_vtable;
   30.97  
   30.98      /*
   30.99       * First get the mfn for hl2_table by looking at monitor_table
  30.100 @@ -754,11 +754,11 @@ void free_monitor_pagetable(struct exec_
  30.101      /*
  30.102       * Then free monitor_table.
  30.103       */
  30.104 -    mfn = pagetable_get_pfn(ed->arch.monitor_table);
  30.105 +    mfn = pagetable_get_pfn(v->arch.monitor_table);
  30.106      free_domheap_page(&frame_table[mfn]);
  30.107  
  30.108 -    ed->arch.monitor_table = mk_pagetable(0);
  30.109 -    ed->arch.monitor_vtable = 0;
  30.110 +    v->arch.monitor_table = mk_pagetable(0);
  30.111 +    v->arch.monitor_vtable = 0;
  30.112  }
  30.113  
  30.114  int
  30.115 @@ -866,7 +866,7 @@ free_p2m_table(struct domain *d)
  30.116  
  30.117  int __shadow_mode_enable(struct domain *d, unsigned int mode)
  30.118  {
  30.119 -    struct exec_domain *ed;
  30.120 +    struct vcpu *v;
  30.121      int new_modes = (mode & ~d->arch.shadow_mode);
  30.122  
  30.123      // Gotta be adding something to call this function.
  30.124 @@ -875,9 +875,9 @@ int __shadow_mode_enable(struct domain *
  30.125      // can't take anything away by calling this function.
  30.126      ASSERT(!(d->arch.shadow_mode & ~mode));
  30.127  
  30.128 -    for_each_exec_domain(d, ed)
  30.129 +    for_each_vcpu(d, v)
  30.130      {
  30.131 -        invalidate_shadow_ldt(ed);
  30.132 +        invalidate_shadow_ldt(v);
  30.133  
  30.134          // We need to set these up for __update_pagetables().
  30.135          // See the comment there.
  30.136 @@ -885,52 +885,52 @@ int __shadow_mode_enable(struct domain *
  30.137          /*
  30.138           * arch.guest_vtable
  30.139           */
  30.140 -        if ( ed->arch.guest_vtable &&
  30.141 -             (ed->arch.guest_vtable != __linear_l2_table) )
  30.142 +        if ( v->arch.guest_vtable &&
  30.143 +             (v->arch.guest_vtable != __linear_l2_table) )
  30.144          {
  30.145 -            unmap_domain_mem(ed->arch.guest_vtable);
  30.146 +            unmap_domain_mem(v->arch.guest_vtable);
  30.147          }
  30.148          if ( (mode & (SHM_translate | SHM_external)) == SHM_translate )
  30.149 -            ed->arch.guest_vtable = __linear_l2_table;
  30.150 +            v->arch.guest_vtable = __linear_l2_table;
  30.151          else
  30.152 -            ed->arch.guest_vtable = NULL;
  30.153 +            v->arch.guest_vtable = NULL;
  30.154  
  30.155          /*
  30.156           * arch.shadow_vtable
  30.157           */
  30.158 -        if ( ed->arch.shadow_vtable &&
  30.159 -             (ed->arch.shadow_vtable != __shadow_linear_l2_table) )
  30.160 +        if ( v->arch.shadow_vtable &&
  30.161 +             (v->arch.shadow_vtable != __shadow_linear_l2_table) )
  30.162          {
  30.163 -            unmap_domain_mem(ed->arch.shadow_vtable);
  30.164 +            unmap_domain_mem(v->arch.shadow_vtable);
  30.165          }
  30.166          if ( !(mode & SHM_external) )
  30.167 -            ed->arch.shadow_vtable = __shadow_linear_l2_table;
  30.168 +            v->arch.shadow_vtable = __shadow_linear_l2_table;
  30.169          else
  30.170 -            ed->arch.shadow_vtable = NULL;
  30.171 +            v->arch.shadow_vtable = NULL;
  30.172  
  30.173          /*
  30.174           * arch.hl2_vtable
  30.175           */
  30.176 -        if ( ed->arch.hl2_vtable &&
  30.177 -             (ed->arch.hl2_vtable != __linear_hl2_table) )
  30.178 +        if ( v->arch.hl2_vtable &&
  30.179 +             (v->arch.hl2_vtable != __linear_hl2_table) )
  30.180          {
  30.181 -            unmap_domain_mem(ed->arch.hl2_vtable);
  30.182 +            unmap_domain_mem(v->arch.hl2_vtable);
  30.183          }
  30.184          if ( (mode & (SHM_translate | SHM_external)) == SHM_translate )
  30.185 -            ed->arch.hl2_vtable = __linear_hl2_table;
  30.186 +            v->arch.hl2_vtable = __linear_hl2_table;
  30.187          else
  30.188 -            ed->arch.hl2_vtable = NULL;
  30.189 +            v->arch.hl2_vtable = NULL;
  30.190  
  30.191          /*
  30.192           * arch.monitor_table & arch.monitor_vtable
  30.193           */
  30.194 -        if ( ed->arch.monitor_vtable )
  30.195 +        if ( v->arch.monitor_vtable )
  30.196          {
  30.197 -            free_monitor_pagetable(ed);
  30.198 +            free_monitor_pagetable(v);
  30.199          }
  30.200          if ( mode & SHM_external )
  30.201          {
  30.202 -            alloc_monitor_pagetable(ed);
  30.203 +            alloc_monitor_pagetable(v);
  30.204          }
  30.205      }
  30.206  
  30.207 @@ -1205,10 +1205,10 @@ void __shadow_mode_disable(struct domain
  30.208      free_shadow_ht_entries(d);
  30.209      free_out_of_sync_entries(d);
  30.210  
  30.211 -    struct exec_domain *ed;
  30.212 -    for_each_exec_domain(d, ed)
  30.213 +    struct vcpu *v;
  30.214 +    for_each_vcpu(d, v)
  30.215      {
  30.216 -        update_pagetables(ed);
  30.217 +        update_pagetables(v);
  30.218      }
  30.219  }
  30.220  
  30.221 @@ -1217,13 +1217,13 @@ static int shadow_mode_table_op(
  30.222  {
  30.223      unsigned int      op = sc->op;
  30.224      int               i, rc = 0;
  30.225 -    struct exec_domain *ed;
  30.226 +    struct vcpu *v;
  30.227  
  30.228      ASSERT(shadow_lock_is_acquired(d));
  30.229  
  30.230      SH_VLOG("shadow mode table op %lx %lx count %d",
  30.231 -            (unsigned long)pagetable_get_pfn(d->exec_domain[0]->arch.guest_table),  /* XXX SMP */
  30.232 -            (unsigned long)pagetable_get_pfn(d->exec_domain[0]->arch.shadow_table), /* XXX SMP */
  30.233 +            (unsigned long)pagetable_get_pfn(d->vcpu[0]->arch.guest_table),  /* XXX SMP */
  30.234 +            (unsigned long)pagetable_get_pfn(d->vcpu[0]->arch.shadow_table), /* XXX SMP */
  30.235              d->arch.shadow_page_count);
  30.236  
  30.237      shadow_audit(d, 1);
  30.238 @@ -1323,8 +1323,8 @@ static int shadow_mode_table_op(
  30.239      SH_VLOG("shadow mode table op : page count %d", d->arch.shadow_page_count);
  30.240      shadow_audit(d, 1);
  30.241  
  30.242 -    for_each_exec_domain(d,ed)
  30.243 -        __update_pagetables(ed);
  30.244 +    for_each_vcpu(d,v)
  30.245 +        __update_pagetables(v);
  30.246  
  30.247      return rc;
  30.248  }
  30.249 @@ -1333,7 +1333,7 @@ int shadow_mode_control(struct domain *d
  30.250  {
  30.251      unsigned int op = sc->op;
  30.252      int          rc = 0;
  30.253 -    struct exec_domain *ed;
  30.254 +    struct vcpu *v;
  30.255  
  30.256      if ( unlikely(d == current->domain) )
  30.257      {
  30.258 @@ -1376,8 +1376,8 @@ int shadow_mode_control(struct domain *d
  30.259  
  30.260      shadow_unlock(d);
  30.261  
  30.262 -    for_each_exec_domain(d,ed)
  30.263 -        update_pagetables(ed);
  30.264 +    for_each_vcpu(d,v)
  30.265 +        update_pagetables(v);
  30.266  
  30.267      domain_unpause(d);
  30.268  
  30.269 @@ -1393,7 +1393,7 @@ void vmx_shadow_clear_state(struct domai
  30.270      shadow_lock(d);
  30.271      free_shadow_pages(d);
  30.272      shadow_unlock(d);
  30.273 -    update_pagetables(d->exec_domain[0]);
  30.274 +    update_pagetables(d->vcpu[0]);
  30.275  }
  30.276  
  30.277  unsigned long
  30.278 @@ -1573,14 +1573,14 @@ static unsigned long shadow_l2_table(
  30.279  
  30.280  void shadow_map_l1_into_current_l2(unsigned long va)
  30.281  { 
  30.282 -    struct exec_domain *ed = current;
  30.283 -    struct domain *d = ed->domain;
  30.284 +    struct vcpu *v = current;
  30.285 +    struct domain *d = v->domain;
  30.286      l1_pgentry_t *gpl1e, *spl1e;
  30.287      l2_pgentry_t gl2e, sl2e;
  30.288      unsigned long gl1pfn, gl1mfn, sl1mfn;
  30.289      int i, init_table = 0;
  30.290  
  30.291 -    __guest_get_l2e(ed, va, &gl2e);
  30.292 +    __guest_get_l2e(v, va, &gl2e);
  30.293      ASSERT(l2e_get_flags(gl2e) & _PAGE_PRESENT);
  30.294      gl1pfn = l2e_get_pfn(gl2e);
  30.295  
  30.296 @@ -1616,15 +1616,15 @@ void shadow_map_l1_into_current_l2(unsig
  30.297  
  30.298  #ifndef NDEBUG
  30.299      l2_pgentry_t old_sl2e;
  30.300 -    __shadow_get_l2e(ed, va, &old_sl2e);
  30.301 +    __shadow_get_l2e(v, va, &old_sl2e);
  30.302      ASSERT( !(l2e_get_flags(old_sl2e) & _PAGE_PRESENT) );
  30.303  #endif
  30.304  
  30.305      if ( !get_shadow_ref(sl1mfn) )
  30.306          BUG();
  30.307      l2pde_general(d, &gl2e, &sl2e, sl1mfn);
  30.308 -    __guest_set_l2e(ed, va, gl2e);
  30.309 -    __shadow_set_l2e(ed, va, sl2e);
  30.310 +    __guest_set_l2e(v, va, gl2e);
  30.311 +    __shadow_set_l2e(v, va, sl2e);
  30.312  
  30.313      if ( init_table )
  30.314      {
  30.315 @@ -1667,16 +1667,16 @@ void shadow_map_l1_into_current_l2(unsig
  30.316      }
  30.317  }
  30.318  
  30.319 -void shadow_invlpg(struct exec_domain *ed, unsigned long va)
  30.320 +void shadow_invlpg(struct vcpu *v, unsigned long va)
  30.321  {
  30.322 -    struct domain *d = ed->domain;
  30.323 +    struct domain *d = v->domain;
  30.324      l1_pgentry_t gpte, spte;
  30.325  
  30.326      ASSERT(shadow_mode_enabled(d));
  30.327  
  30.328      shadow_lock(d);
  30.329  
  30.330 -    __shadow_sync_va(ed, va);
  30.331 +    __shadow_sync_va(v, va);
  30.332  
  30.333      // XXX mafetter: will need to think about 4MB pages...
  30.334  
  30.335 @@ -1808,10 +1808,10 @@ shadow_free_snapshot(struct domain *d, s
  30.336  }
  30.337  
  30.338  struct out_of_sync_entry *
  30.339 -shadow_mark_mfn_out_of_sync(struct exec_domain *ed, unsigned long gpfn,
  30.340 +shadow_mark_mfn_out_of_sync(struct vcpu *v, unsigned long gpfn,
  30.341                               unsigned long mfn)
  30.342  {
  30.343 -    struct domain *d = ed->domain;
  30.344 +    struct domain *d = v->domain;
  30.345      struct pfn_info *page = &frame_table[mfn];
  30.346      struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
  30.347  
  30.348 @@ -1864,22 +1864,22 @@ shadow_mark_mfn_out_of_sync(struct exec_
  30.349  }
  30.350  
  30.351  void shadow_mark_va_out_of_sync(
  30.352 -    struct exec_domain *ed, unsigned long gpfn, unsigned long mfn, unsigned long va)
  30.353 +    struct vcpu *v, unsigned long gpfn, unsigned long mfn, unsigned long va)
  30.354  {
  30.355      struct out_of_sync_entry *entry =
  30.356 -        shadow_mark_mfn_out_of_sync(ed, gpfn, mfn);
  30.357 +        shadow_mark_mfn_out_of_sync(v, gpfn, mfn);
  30.358      l2_pgentry_t sl2e;
  30.359  
  30.360      // We need the address of shadow PTE that maps @va.
  30.361      // It might not exist yet.  Make sure it's there.
  30.362      //
  30.363 -    __shadow_get_l2e(ed, va, &sl2e);
  30.364 +    __shadow_get_l2e(v, va, &sl2e);
  30.365      if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
  30.366      {
  30.367          // either this L1 isn't shadowed yet, or the shadow isn't linked into
  30.368          // the current L2.
  30.369          shadow_map_l1_into_current_l2(va);
  30.370 -        __shadow_get_l2e(ed, va, &sl2e);
  30.371 +        __shadow_get_l2e(v, va, &sl2e);
  30.372      }
  30.373      ASSERT(l2e_get_flags(sl2e) & _PAGE_PRESENT);
  30.374  
  30.375 @@ -1937,10 +1937,10 @@ static int snapshot_entry_matches(
  30.376   * Returns 1 if va's shadow mapping is out-of-sync.
  30.377   * Returns 0 otherwise.
  30.378   */
  30.379 -int __shadow_out_of_sync(struct exec_domain *ed, unsigned long va)
  30.380 +int __shadow_out_of_sync(struct vcpu *v, unsigned long va)
  30.381  {
  30.382 -    struct domain *d = ed->domain;
  30.383 -    unsigned long l2mfn = pagetable_get_pfn(ed->arch.guest_table);
  30.384 +    struct domain *d = v->domain;
  30.385 +    unsigned long l2mfn = pagetable_get_pfn(v->arch.guest_table);
  30.386      unsigned long l2pfn = __mfn_to_gpfn(d, l2mfn);
  30.387      l2_pgentry_t l2e;
  30.388      unsigned long l1pfn, l1mfn;
  30.389 @@ -1951,11 +1951,11 @@ int __shadow_out_of_sync(struct exec_dom
  30.390      perfc_incrc(shadow_out_of_sync_calls);
  30.391  
  30.392      if ( page_out_of_sync(&frame_table[l2mfn]) &&
  30.393 -         !snapshot_entry_matches(d, (l1_pgentry_t *)ed->arch.guest_vtable,
  30.394 +         !snapshot_entry_matches(d, (l1_pgentry_t *)v->arch.guest_vtable,
  30.395                                   l2pfn, l2_table_offset(va)) )
  30.396          return 1;
  30.397  
  30.398 -    __guest_get_l2e(ed, va, &l2e);
  30.399 +    __guest_get_l2e(v, va, &l2e);
  30.400      if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
  30.401          return 0;
  30.402  
  30.403 @@ -2552,8 +2552,8 @@ void __shadow_sync_all(struct domain *d)
  30.404  int shadow_fault(unsigned long va, struct cpu_user_regs *regs)
  30.405  {
  30.406      l1_pgentry_t gpte, spte, orig_gpte;
  30.407 -    struct exec_domain *ed = current;
  30.408 -    struct domain *d = ed->domain;
  30.409 +    struct vcpu *v = current;
  30.410 +    struct domain *d = v->domain;
  30.411      l2_pgentry_t gpde;
  30.412  
  30.413      spte = l1e_empty();
  30.414 @@ -2562,7 +2562,7 @@ int shadow_fault(unsigned long va, struc
  30.415               va, (unsigned long)regs->error_code);
  30.416      perfc_incrc(shadow_fault_calls);
  30.417      
  30.418 -    check_pagetable(ed, "pre-sf");
  30.419 +    check_pagetable(v, "pre-sf");
  30.420  
  30.421      /*
  30.422       * Don't let someone else take the guest's table pages out-of-sync.
  30.423 @@ -2574,12 +2574,12 @@ int shadow_fault(unsigned long va, struc
  30.424       *         out-of-sync table page entry, or if we should pass this
  30.425       *         fault onto the guest.
  30.426       */
  30.427 -    __shadow_sync_va(ed, va);
  30.428 +    __shadow_sync_va(v, va);
  30.429  
  30.430      /*
  30.431       * STEP 2. Check the guest PTE.
  30.432       */
  30.433 -    __guest_get_l2e(ed, va, &gpde);
  30.434 +    __guest_get_l2e(v, va, &gpde);
  30.435      if ( unlikely(!(l2e_get_flags(gpde) & _PAGE_PRESENT)) )
  30.436      {
  30.437          SH_VVLOG("shadow_fault - EXIT: L1 not present");
  30.438 @@ -2622,7 +2622,7 @@ int shadow_fault(unsigned long va, struc
  30.439              }
  30.440          }
  30.441  
  30.442 -        if ( !l1pte_write_fault(ed, &gpte, &spte, va) )
  30.443 +        if ( !l1pte_write_fault(v, &gpte, &spte, va) )
  30.444          {
  30.445              SH_VVLOG("shadow_fault - EXIT: l1pte_write_fault failed");
  30.446              perfc_incrc(write_fault_bail);
  30.447 @@ -2671,7 +2671,7 @@ int shadow_fault(unsigned long va, struc
  30.448  
  30.449      shadow_unlock(d);
  30.450  
  30.451 -    check_pagetable(ed, "post-sf");
  30.452 +    check_pagetable(v, "post-sf");
  30.453      return EXCRET_fault_fixed;
  30.454  
  30.455   fail:
  30.456 @@ -2750,9 +2750,9 @@ void shadow_l4_normal_pt_update(
  30.457  
  30.458  int shadow_do_update_va_mapping(unsigned long va,
  30.459                                  l1_pgentry_t val,
  30.460 -                                struct exec_domain *ed)
  30.461 +                                struct vcpu *v)
  30.462  {
  30.463 -    struct domain *d = ed->domain;
  30.464 +    struct domain *d = v->domain;
  30.465      l1_pgentry_t spte;
  30.466      int rc = 0;
  30.467  
  30.468 @@ -2764,7 +2764,7 @@ int shadow_do_update_va_mapping(unsigned
  30.469      // just everything involved in getting to this L1 (i.e. we need
  30.470      // linear_pg_table[l1_linear_offset(va)] to be in sync)...
  30.471      //
  30.472 -    __shadow_sync_va(ed, va);
  30.473 +    __shadow_sync_va(v, va);
  30.474  
  30.475      l1pte_propagate_from_guest(d, val, &spte);
  30.476      shadow_set_l1e(va, spte, 0);
  30.477 @@ -2775,7 +2775,7 @@ int shadow_do_update_va_mapping(unsigned
  30.478       * for this.
  30.479       */
  30.480      if ( shadow_mode_log_dirty(d) )
  30.481 -        __mark_dirty(d, va_to_l1mfn(ed, va));
  30.482 +        __mark_dirty(d, va_to_l1mfn(v, va));
  30.483  
  30.484  // out:
  30.485      shadow_unlock(d);
  30.486 @@ -2810,10 +2810,10 @@ int shadow_do_update_va_mapping(unsigned
  30.487   * shadow_l2_table(), shadow_hl2_table(), and alloc_monitor_pagetable()
  30.488   * all play a part in maintaining these mappings.
  30.489   */
  30.490 -void __update_pagetables(struct exec_domain *ed)
  30.491 +void __update_pagetables(struct vcpu *v)
  30.492  {
  30.493 -    struct domain *d = ed->domain;
  30.494 -    unsigned long gmfn = pagetable_get_pfn(ed->arch.guest_table);
  30.495 +    struct domain *d = v->domain;
  30.496 +    unsigned long gmfn = pagetable_get_pfn(v->arch.guest_table);
  30.497      unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
  30.498      unsigned long smfn, hl2mfn, old_smfn;
  30.499  
  30.500 @@ -2830,9 +2830,9 @@ void __update_pagetables(struct exec_dom
  30.501       */
  30.502      if ( max_mode & (SHM_enable | SHM_external) )
  30.503      {
  30.504 -        if ( likely(ed->arch.guest_vtable != NULL) )
  30.505 -            unmap_domain_mem(ed->arch.guest_vtable);
  30.506 -        ed->arch.guest_vtable = map_domain_mem(gmfn << PAGE_SHIFT);
  30.507 +        if ( likely(v->arch.guest_vtable != NULL) )
  30.508 +            unmap_domain_mem(v->arch.guest_vtable);
  30.509 +        v->arch.guest_vtable = map_domain_mem(gmfn << PAGE_SHIFT);
  30.510      }
  30.511  
  30.512      /*
  30.513 @@ -2842,8 +2842,8 @@ void __update_pagetables(struct exec_dom
  30.514          smfn = shadow_l2_table(d, gpfn, gmfn);
  30.515      if ( !get_shadow_ref(smfn) )
  30.516          BUG();
  30.517 -    old_smfn = pagetable_get_pfn(ed->arch.shadow_table);
  30.518 -    ed->arch.shadow_table = mk_pagetable(smfn << PAGE_SHIFT);
  30.519 +    old_smfn = pagetable_get_pfn(v->arch.shadow_table);
  30.520 +    v->arch.shadow_table = mk_pagetable(smfn << PAGE_SHIFT);
  30.521      if ( old_smfn )
  30.522          put_shadow_ref(old_smfn);
  30.523  
  30.524 @@ -2854,9 +2854,9 @@ void __update_pagetables(struct exec_dom
  30.525       */
  30.526      if ( max_mode == SHM_external )
  30.527      {
  30.528 -        if ( ed->arch.shadow_vtable )
  30.529 -            unmap_domain_mem(ed->arch.shadow_vtable);
  30.530 -        ed->arch.shadow_vtable = map_domain_mem(smfn << PAGE_SHIFT);
  30.531 +        if ( v->arch.shadow_vtable )
  30.532 +            unmap_domain_mem(v->arch.shadow_vtable);
  30.533 +        v->arch.shadow_vtable = map_domain_mem(smfn << PAGE_SHIFT);
  30.534      }
  30.535  
  30.536      /*
  30.537 @@ -2870,9 +2870,9 @@ void __update_pagetables(struct exec_dom
  30.538      {
  30.539          if ( unlikely(!(hl2mfn = __shadow_status(d, gpfn, PGT_hl2_shadow))) )
  30.540              hl2mfn = shadow_hl2_table(d, gpfn, gmfn, smfn);
  30.541 -        if ( ed->arch.hl2_vtable )
  30.542 -            unmap_domain_mem(ed->arch.hl2_vtable);
  30.543 -        ed->arch.hl2_vtable = map_domain_mem(hl2mfn << PAGE_SHIFT);
  30.544 +        if ( v->arch.hl2_vtable )
  30.545 +            unmap_domain_mem(v->arch.hl2_vtable);
  30.546 +        v->arch.hl2_vtable = map_domain_mem(hl2mfn << PAGE_SHIFT);
  30.547      }
  30.548  
  30.549      /*
  30.550 @@ -2880,7 +2880,7 @@ void __update_pagetables(struct exec_dom
  30.551       */
  30.552      if ( max_mode == SHM_external )
  30.553      {
  30.554 -        l2_pgentry_t *mpl2e = ed->arch.monitor_vtable;
  30.555 +        l2_pgentry_t *mpl2e = v->arch.monitor_vtable;
  30.556          l2_pgentry_t old_hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)];
  30.557          l2_pgentry_t old_sl2e = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)];
  30.558  
  30.559 @@ -2959,9 +2959,9 @@ static int sh_l1_present;
  30.560  char * sh_check_name;
  30.561  int shadow_status_noswap;
  30.562  
  30.563 -#define v2m(_ed, _adr) ({                                                    \
  30.564 +#define v2m(_v, _adr) ({                                                     \
  30.565      unsigned long _a  = (unsigned long)(_adr);                               \
  30.566 -    l2_pgentry_t _pde = shadow_linear_l2_table(_ed)[l2_table_offset(_a)];    \
  30.567 +    l2_pgentry_t _pde = shadow_linear_l2_table(_v)[l2_table_offset(_a)];     \
  30.568      unsigned long _pa = -1;                                                  \
  30.569      if ( l2e_get_flags(_pde) & _PAGE_PRESENT )                               \
  30.570      {                                                                        \
  30.571 @@ -2985,21 +2985,21 @@ int shadow_status_noswap;
  30.572                 l1e_get_intpte(guest_pte), l1e_get_intpte(eff_guest_pte),     \
  30.573                 l1e_get_intpte(shadow_pte), l1e_get_intpte(snapshot_pte),     \
  30.574                 p_guest_pte, p_shadow_pte, p_snapshot_pte,                    \
  30.575 -               (void *)v2m(ed, p_guest_pte), (void *)v2m(ed, p_shadow_pte),  \
  30.576 -               (void *)v2m(ed, p_snapshot_pte),                              \
  30.577 +               (void *)v2m(v, p_guest_pte), (void *)v2m(v, p_shadow_pte),    \
  30.578 +               (void *)v2m(v, p_snapshot_pte),                               \
  30.579                 (l2_idx << L2_PAGETABLE_SHIFT) |                              \
  30.580                 (l1_idx << L1_PAGETABLE_SHIFT));                              \
  30.581          errors++;                                                            \
  30.582      } while ( 0 )
  30.583  
  30.584  static int check_pte(
  30.585 -    struct exec_domain *ed,
  30.586 +    struct vcpu *v,
  30.587      l1_pgentry_t *p_guest_pte,
  30.588      l1_pgentry_t *p_shadow_pte,
  30.589      l1_pgentry_t *p_snapshot_pte,
  30.590      int level, int l2_idx, int l1_idx)
  30.591  {
  30.592 -    struct domain *d = ed->domain;
  30.593 +    struct domain *d = v->domain;
  30.594      l1_pgentry_t guest_pte = *p_guest_pte;
  30.595      l1_pgentry_t shadow_pte = *p_shadow_pte;
  30.596      l1_pgentry_t snapshot_pte = p_snapshot_pte ? *p_snapshot_pte : l1e_empty();
  30.597 @@ -3104,10 +3104,10 @@ static int check_pte(
  30.598  #undef v2m
  30.599  
  30.600  static int check_l1_table(
  30.601 -    struct exec_domain *ed, unsigned long gpfn,
  30.602 +    struct vcpu *v, unsigned long gpfn,
  30.603      unsigned long gmfn, unsigned long smfn, unsigned l2_idx)
  30.604  {
  30.605 -    struct domain *d = ed->domain;
  30.606 +    struct domain *d = v->domain;
  30.607      int i;
  30.608      unsigned long snapshot_mfn;
  30.609      l1_pgentry_t *p_guest, *p_shadow, *p_snapshot = NULL;
  30.610 @@ -3124,7 +3124,7 @@ static int check_l1_table(
  30.611      p_shadow = map_domain_mem(smfn << PAGE_SHIFT);
  30.612  
  30.613      for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
  30.614 -        errors += check_pte(ed, p_guest+i, p_shadow+i,
  30.615 +        errors += check_pte(v, p_guest+i, p_shadow+i,
  30.616                              p_snapshot ? p_snapshot+i : NULL,
  30.617                              1, l2_idx, i);
  30.618   
  30.619 @@ -3143,9 +3143,9 @@ static int check_l1_table(
  30.620      } while ( 0 )
  30.621  
  30.622  int check_l2_table(
  30.623 -    struct exec_domain *ed, unsigned long gmfn, unsigned long smfn, int oos_pdes)
  30.624 +    struct vcpu *v, unsigned long gmfn, unsigned long smfn, int oos_pdes)
  30.625  {
  30.626 -    struct domain *d = ed->domain;
  30.627 +    struct domain *d = v->domain;
  30.628      l2_pgentry_t *gpl2e = (l2_pgentry_t *)map_domain_mem(gmfn << PAGE_SHIFT);
  30.629      l2_pgentry_t *spl2e = (l2_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT);
  30.630      l2_pgentry_t match;
  30.631 @@ -3213,7 +3213,7 @@ int check_l2_table(
  30.632  
  30.633      /* Check the whole L2. */
  30.634      for ( i = 0; i < limit; i++ )
  30.635 -        errors += check_pte(ed,
  30.636 +        errors += check_pte(v,
  30.637                              (l1_pgentry_t*)(&gpl2e[i]), /* Hmm, dirty ... */
  30.638                              (l1_pgentry_t*)(&spl2e[i]),
  30.639                              NULL,
  30.640 @@ -3231,10 +3231,10 @@ int check_l2_table(
  30.641  }
  30.642  #undef FAILPT
  30.643  
  30.644 -int _check_pagetable(struct exec_domain *ed, char *s)
  30.645 +int _check_pagetable(struct vcpu *v, char *s)
  30.646  {
  30.647 -    struct domain *d = ed->domain;
  30.648 -    pagetable_t pt = ed->arch.guest_table;
  30.649 +    struct domain *d = v->domain;
  30.650 +    pagetable_t pt = v->arch.guest_table;
  30.651      unsigned long gptbase = pagetable_get_paddr(pt);
  30.652      unsigned long ptbase_pfn, smfn;
  30.653      unsigned long i;
  30.654 @@ -3265,7 +3265,7 @@ int _check_pagetable(struct exec_domain 
  30.655          ASSERT(ptbase_mfn);
  30.656      }
  30.657   
  30.658 -    errors += check_l2_table(ed, ptbase_mfn, smfn, oos_pdes);
  30.659 +    errors += check_l2_table(v, ptbase_mfn, smfn, oos_pdes);
  30.660  
  30.661      gpl2e = (l2_pgentry_t *) map_domain_mem( ptbase_mfn << PAGE_SHIFT );
  30.662      spl2e = (l2_pgentry_t *) map_domain_mem( smfn << PAGE_SHIFT );
  30.663 @@ -3288,7 +3288,7 @@ int _check_pagetable(struct exec_domain 
  30.664  
  30.665          if ( l2e_get_intpte(spl2e[i]) != 0 )  /* FIXME: check flags? */
  30.666          {
  30.667 -            errors += check_l1_table(ed, gl1pfn, gl1mfn, sl1mfn, i);
  30.668 +            errors += check_l1_table(v, gl1pfn, gl1mfn, sl1mfn, i);
  30.669          }
  30.670      }
  30.671  
  30.672 @@ -3309,9 +3309,9 @@ int _check_pagetable(struct exec_domain 
  30.673      return errors;
  30.674  }
  30.675  
  30.676 -int _check_all_pagetables(struct exec_domain *ed, char *s)
  30.677 +int _check_all_pagetables(struct vcpu *v, char *s)
  30.678  {
  30.679 -    struct domain *d = ed->domain;
  30.680 +    struct domain *d = v->domain;
  30.681      int i;
  30.682      struct shadow_status *a;
  30.683      unsigned long gmfn;
  30.684 @@ -3334,11 +3334,11 @@ int _check_all_pagetables(struct exec_do
  30.685              switch ( a->gpfn_and_flags & PGT_type_mask )
  30.686              {
  30.687              case PGT_l1_shadow:
  30.688 -                errors += check_l1_table(ed, a->gpfn_and_flags & PGT_mfn_mask,
  30.689 +                errors += check_l1_table(v, a->gpfn_and_flags & PGT_mfn_mask,
  30.690                                           gmfn, a->smfn, 0);
  30.691                  break;
  30.692              case PGT_l2_shadow:
  30.693 -                errors += check_l2_table(ed, gmfn, a->smfn,
  30.694 +                errors += check_l2_table(v, gmfn, a->smfn,
  30.695                                           page_out_of_sync(pfn_to_page(gmfn)));
  30.696                  break;
  30.697              case PGT_l3_shadow:
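
Most of the shadow.c churn is the iterator rename: for_each_exec_domain(d, ed) becomes for_each_vcpu(d, v). A domain's vcpus hang off d->vcpu[0] through the next_in_list link (the same link ptwr_do_page_fault() tests in mm.c above to detect a multi-VCPU guest), so the macro plausibly expands as sketched here; treat this as an approximation for illustration, not the definition from xen/include/xen/sched.h:

    /* Illustrative expansion of the renamed iterator. */
    #define for_each_vcpu(_d, _v)                \
        for ( (_v) = (_d)->vcpu[0];              \
              (_v) != NULL;                      \
              (_v) = (_v)->next_in_list )

    /* Typical use, as in __shadow_mode_enable() above:
     *     struct vcpu *v;
     *     for_each_vcpu(d, v)
     *         invalidate_shadow_ldt(v);
     */
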
    31.1 --- a/xen/arch/x86/smpboot.c	Thu Jun 02 19:19:24 2005 +0000
    31.2 +++ b/xen/arch/x86/smpboot.c	Thu Jun 02 21:05:33 2005 +0000
    31.3 @@ -756,7 +756,7 @@ static int __init do_boot_cpu(int apicid
    31.4   */
    31.5  {
    31.6  	struct domain *idle;
    31.7 -	struct exec_domain *ed;
    31.8 +	struct vcpu *v;
    31.9  	void *stack;
   31.10  	unsigned long boot_error;
   31.11  	int timeout, cpu;
   31.12 @@ -768,11 +768,11 @@ static int __init do_boot_cpu(int apicid
   31.13  	if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
   31.14  		panic("failed 'createdomain' for CPU %d", cpu);
   31.15  
   31.16 -	ed = idle_task[cpu] = idle->exec_domain[0];
   31.17 +	v = idle_task[cpu] = idle->vcpu[0];
   31.18  
   31.19  	set_bit(_DOMF_idle_domain, &idle->domain_flags);
   31.20  
   31.21 -	ed->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
   31.22 +	v->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
   31.23  
   31.24  	/* start_eip had better be page-aligned! */
   31.25  	start_eip = setup_trampoline();
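
Together with the idle_task[] initializer in setup.c above, this do_boot_cpu() hunk completes the per-CPU idle-vcpu wiring: slot 0 is bound statically to idle0_vcpu, and each application processor gets vcpu[0] of a freshly created idle domain. A condensed sketch of those steps, with error handling and the surrounding boot logic elided (all names as in the hunks):

    /* Boot CPU: bound before the scheduler exists. */
    struct vcpu *idle_task[NR_CPUS] = { &idle0_vcpu };

    /* Each secondary CPU, inside do_boot_cpu() (condensed): */
    struct domain *idle = do_createdomain(IDLE_DOMAIN_ID, cpu);
    struct vcpu   *v    = idle->vcpu[0];

    idle_task[cpu] = v;
    set_bit(_DOMF_idle_domain, &idle->domain_flags);
    v->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
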
    32.1 --- a/xen/arch/x86/time.c	Thu Jun 02 19:19:24 2005 +0000
    32.2 +++ b/xen/arch/x86/time.c	Thu Jun 02 21:05:33 2005 +0000
    32.3 @@ -273,9 +273,9 @@ s_time_t get_s_time(void)
    32.4      return now; 
    32.5  }
    32.6  
    32.7 -static inline void __update_dom_time(struct exec_domain *ed)
    32.8 +static inline void __update_dom_time(struct vcpu *v)
    32.9  {
   32.10 -    struct domain *d  = ed->domain;
   32.11 +    struct domain *d  = v->domain;
   32.12      shared_info_t *si = d->shared_info;
   32.13  
   32.14      spin_lock(&d->time_lock);
   32.15 @@ -295,14 +295,14 @@ static inline void __update_dom_time(str
   32.16      spin_unlock(&d->time_lock);
   32.17  }
   32.18  
   32.19 -void update_dom_time(struct exec_domain *ed)
   32.20 +void update_dom_time(struct vcpu *v)
   32.21  {
   32.22      unsigned long flags;
   32.23  
   32.24 -    if ( ed->domain->shared_info->tsc_timestamp != full_tsc_irq )
   32.25 +    if ( v->domain->shared_info->tsc_timestamp != full_tsc_irq )
   32.26      {
   32.27          read_lock_irqsave(&time_lock, flags);
   32.28 -        __update_dom_time(ed);
   32.29 +        __update_dom_time(v);
   32.30          read_unlock_irqrestore(&time_lock, flags);
   32.31      }
   32.32  }
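
The time.c hunks rename only the parameter, but the locking shape around them is worth spelling out: update_dom_time() first compares the domain's published tsc_timestamp against full_tsc_irq with no lock held, and only on a mismatch takes the global time_lock for reading while __update_dom_time() republishes under the per-domain d->time_lock. A condensed sketch of that nesting (fields as in the hunks; the timestamp copying itself is elided):

    void update_dom_time(struct vcpu *v)          /* shape only */
    {
        unsigned long flags;
        struct domain *d = v->domain;

        if ( d->shared_info->tsc_timestamp == full_tsc_irq )
            return;                               /* already current */

        read_lock_irqsave(&time_lock, flags);     /* global clock state */
        spin_lock(&d->time_lock);                 /* per-domain publication */
        /* ... copy tsc/system-time stamps into d->shared_info ... */
        spin_unlock(&d->time_lock);
        read_unlock_irqrestore(&time_lock, flags);
    }
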
    33.1 --- a/xen/arch/x86/traps.c	Thu Jun 02 19:19:24 2005 +0000
    33.2 +++ b/xen/arch/x86/traps.c	Thu Jun 02 21:05:33 2005 +0000
    33.3 @@ -236,8 +236,8 @@ static inline int do_trap(int trapnr, ch
    33.4                            struct cpu_user_regs *regs, 
    33.5                            int use_error_code)
    33.6  {
    33.7 -    struct exec_domain *ed = current;
    33.8 -    struct trap_bounce *tb = &ed->arch.trap_bounce;
    33.9 +    struct vcpu *v = current;
   33.10 +    struct trap_bounce *tb = &v->arch.trap_bounce;
   33.11      trap_info_t *ti;
   33.12      unsigned long fixup;
   33.13  
   33.14 @@ -303,8 +303,8 @@ DO_ERROR_NOCODE(19, "simd error", simd_c
   33.15  
   33.16  asmlinkage int do_int3(struct cpu_user_regs *regs)
   33.17  {
   33.18 -    struct exec_domain *ed = current;
   33.19 -    struct trap_bounce *tb = &ed->arch.trap_bounce;
   33.20 +    struct vcpu *v = current;
   33.21 +    struct trap_bounce *tb = &v->arch.trap_bounce;
   33.22      trap_info_t *ti;
   33.23  
   33.24      DEBUGGER_trap_entry(TRAP_int3, regs);
   33.25 @@ -335,10 +335,10 @@ asmlinkage int do_machine_check(struct c
   33.26  void propagate_page_fault(unsigned long addr, u16 error_code)
   33.27  {
   33.28      trap_info_t *ti;
   33.29 -    struct exec_domain *ed = current;
   33.30 -    struct trap_bounce *tb = &ed->arch.trap_bounce;
   33.31 +    struct vcpu *v = current;
   33.32 +    struct trap_bounce *tb = &v->arch.trap_bounce;
   33.33  
   33.34 -    ti = &ed->arch.guest_context.trap_ctxt[TRAP_page_fault];
   33.35 +    ti = &v->arch.guest_context.trap_ctxt[TRAP_page_fault];
   33.36      tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE | TBF_EXCEPTION_CR2;
   33.37      tb->cr2        = addr;
   33.38      tb->error_code = error_code;
   33.39 @@ -347,7 +347,7 @@ void propagate_page_fault(unsigned long 
   33.40      if ( TI_GET_IF(ti) )
   33.41          tb->flags |= TBF_INTERRUPT;
   33.42  
   33.43 -    ed->arch.guest_cr2 = addr;
   33.44 +    v->arch.guest_cr2 = addr;
   33.45  }
   33.46  
   33.47  static int handle_perdomain_mapping_fault(
   33.48 @@ -355,8 +355,8 @@ static int handle_perdomain_mapping_faul
   33.49  {
   33.50      extern int map_ldt_shadow_page(unsigned int);
   33.51  
   33.52 -    struct exec_domain *ed = current;
   33.53 -    struct domain      *d  = ed->domain;
   33.54 +    struct vcpu *v = current;
   33.55 +    struct domain *d  = v->domain;
   33.56      int ret;
   33.57  
   33.58      /* Which vcpu's area did we fault in, and is it in the ldt sub-area? */
   33.59 @@ -383,7 +383,7 @@ static int handle_perdomain_mapping_faul
   33.60                  return 0;
   33.61              /* In guest mode? Propagate #PF to guest, with adjusted %cr2. */
   33.62              propagate_page_fault(
   33.63 -                ed->arch.guest_context.ldt_base + offset, regs->error_code);
   33.64 +                v->arch.guest_context.ldt_base + offset, regs->error_code);
   33.65          }
   33.66      }
   33.67      else
   33.68 @@ -399,8 +399,8 @@ static int handle_perdomain_mapping_faul
   33.69  asmlinkage int do_page_fault(struct cpu_user_regs *regs)
   33.70  {
   33.71      unsigned long addr, fixup;
   33.72 -    struct exec_domain *ed = current;
   33.73 -    struct domain *d = ed->domain;
   33.74 +    struct vcpu *v = current;
   33.75 +    struct domain *d = v->domain;
   33.76  
   33.77      __asm__ __volatile__ ("mov %%cr2,%0" : "=r" (addr) : );
   33.78  
   33.79 @@ -433,7 +433,7 @@ asmlinkage int do_page_fault(struct cpu_
   33.80  
   33.81      if ( unlikely(shadow_mode_enabled(d)) &&
   33.82           ((addr < HYPERVISOR_VIRT_START) ||
   33.83 -          (shadow_mode_external(d) && GUEST_CONTEXT(ed, regs))) &&
   33.84 +          (shadow_mode_external(d) && GUEST_CONTEXT(v, regs))) &&
   33.85           shadow_fault(addr, regs) )
   33.86          return EXCRET_fault_fixed;
   33.87  
   33.88 @@ -472,17 +472,17 @@ asmlinkage int do_page_fault(struct cpu_
   33.89  
   33.90  long do_fpu_taskswitch(int set)
   33.91  {
   33.92 -    struct exec_domain *ed = current;
   33.93 +    struct vcpu *v = current;
   33.94  
   33.95      if ( set )
   33.96      {
   33.97 -        set_bit(_VCPUF_guest_stts, &ed->vcpu_flags);
   33.98 +        set_bit(_VCPUF_guest_stts, &v->vcpu_flags);
   33.99          stts();
  33.100      }
  33.101      else
  33.102      {
  33.103 -        clear_bit(_VCPUF_guest_stts, &ed->vcpu_flags);
  33.104 -        if ( test_bit(_VCPUF_fpu_dirtied, &ed->vcpu_flags) )
  33.105 +        clear_bit(_VCPUF_guest_stts, &v->vcpu_flags);
  33.106 +        if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
  33.107              clts();
  33.108      }
  33.109  
  33.110 @@ -492,25 +492,25 @@ long do_fpu_taskswitch(int set)
  33.111  /* Has the guest requested sufficient permission for this I/O access? */
  33.112  static inline int guest_io_okay(
  33.113      unsigned int port, unsigned int bytes,
  33.114 -    struct exec_domain *ed, struct cpu_user_regs *regs)
  33.115 +    struct vcpu *v, struct cpu_user_regs *regs)
  33.116  {
  33.117      u16 x;
  33.118  #if defined(__x86_64__)
  33.119      /* If in user mode, switch to kernel mode just to read I/O bitmap. */
  33.120 -    extern void toggle_guest_mode(struct exec_domain *);
  33.121 -    int user_mode = !(ed->arch.flags & TF_kernel_mode);
  33.122 -#define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(ed)
  33.123 +    extern void toggle_guest_mode(struct vcpu *);
  33.124 +    int user_mode = !(v->arch.flags & TF_kernel_mode);
  33.125 +#define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(v)
  33.126  #elif defined(__i386__)
  33.127  #define TOGGLE_MODE() ((void)0)
  33.128  #endif
  33.129  
  33.130 -    if ( ed->arch.iopl >= (KERNEL_MODE(ed, regs) ? 1 : 3) )
  33.131 +    if ( v->arch.iopl >= (KERNEL_MODE(v, regs) ? 1 : 3) )
  33.132          return 1;
  33.133  
  33.134 -    if ( ed->arch.iobmp_limit > (port + bytes) )
  33.135 +    if ( v->arch.iobmp_limit > (port + bytes) )
  33.136      {
  33.137          TOGGLE_MODE();
  33.138 -        __get_user(x, (u16 *)(ed->arch.iobmp+(port>>3)));
  33.139 +        __get_user(x, (u16 *)(v->arch.iobmp+(port>>3)));
  33.140          TOGGLE_MODE();
  33.141          if ( (x & (((1<<bytes)-1) << (port&7))) == 0 )
  33.142              return 1;
  33.143 @@ -522,9 +522,9 @@ static inline int guest_io_okay(
  33.144  /* Has the administrator granted sufficient permission for this I/O access? */
  33.145  static inline int admin_io_okay(
  33.146      unsigned int port, unsigned int bytes,
  33.147 -    struct exec_domain *ed, struct cpu_user_regs *regs)
  33.148 +    struct vcpu *v, struct cpu_user_regs *regs)
  33.149  {
  33.150 -    struct domain *d = ed->domain;
  33.151 +    struct domain *d = v->domain;
  33.152      u16 x;
  33.153  
  33.154      if ( d->arch.iobmp_mask != NULL )
  33.155 @@ -565,7 +565,7 @@ static inline int admin_io_okay(
  33.156  
  33.157  static int emulate_privileged_op(struct cpu_user_regs *regs)
  33.158  {
  33.159 -    struct exec_domain *ed = current;
  33.160 +    struct vcpu *v = current;
  33.161      unsigned long *reg, eip = regs->eip;
  33.162      u8 opcode, modrm_reg = 0, rep_prefix = 0;
  33.163      unsigned int port, i, op_bytes = 4, data;
  33.164 @@ -619,22 +619,22 @@ static int emulate_privileged_op(struct 
  33.165          case 0x6c: /* INSB */
  33.166              op_bytes = 1;
  33.167          case 0x6d: /* INSW/INSL */
  33.168 -            if ( !guest_io_okay((u16)regs->edx, op_bytes, ed, regs) )
  33.169 +            if ( !guest_io_okay((u16)regs->edx, op_bytes, v, regs) )
  33.170                  goto fail;
  33.171              switch ( op_bytes )
  33.172              {
  33.173              case 1:
  33.174 -                data = (u8)inb_user((u16)regs->edx, ed, regs);
  33.175 +                data = (u8)inb_user((u16)regs->edx, v, regs);
  33.176                  if ( put_user((u8)data, (u8 *)regs->edi) )
  33.177                      PAGE_FAULT(regs->edi, USER_WRITE_FAULT);
  33.178                  break;
  33.179              case 2:
  33.180 -                data = (u16)inw_user((u16)regs->edx, ed, regs);
  33.181 +                data = (u16)inw_user((u16)regs->edx, v, regs);
  33.182                  if ( put_user((u16)data, (u16 *)regs->edi) )
  33.183                      PAGE_FAULT(regs->edi, USER_WRITE_FAULT);
  33.184                  break;
  33.185              case 4:
  33.186 -                data = (u32)inl_user((u16)regs->edx, ed, regs);
  33.187 +                data = (u32)inl_user((u16)regs->edx, v, regs);
  33.188                  if ( put_user((u32)data, (u32 *)regs->edi) )
  33.189                      PAGE_FAULT(regs->edi, USER_WRITE_FAULT);
  33.190                  break;
  33.191 @@ -645,24 +645,24 @@ static int emulate_privileged_op(struct 
  33.192          case 0x6e: /* OUTSB */
  33.193              op_bytes = 1;
  33.194          case 0x6f: /* OUTSW/OUTSL */
  33.195 -            if ( !guest_io_okay((u16)regs->edx, op_bytes, ed, regs) )
  33.196 +            if ( !guest_io_okay((u16)regs->edx, op_bytes, v, regs) )
  33.197                  goto fail;
  33.198              switch ( op_bytes )
  33.199              {
  33.200              case 1:
  33.201                  if ( get_user(data, (u8 *)regs->esi) )
  33.202                      PAGE_FAULT(regs->esi, USER_READ_FAULT);
  33.203 -                outb_user((u8)data, (u16)regs->edx, ed, regs);
  33.204 +                outb_user((u8)data, (u16)regs->edx, v, regs);
  33.205                  break;
  33.206              case 2:
  33.207                  if ( get_user(data, (u16 *)regs->esi) )
  33.208                      PAGE_FAULT(regs->esi, USER_READ_FAULT);
  33.209 -                outw_user((u16)data, (u16)regs->edx, ed, regs);
  33.210 +                outw_user((u16)data, (u16)regs->edx, v, regs);
  33.211                  break;
  33.212              case 4:
  33.213                  if ( get_user(data, (u32 *)regs->esi) )
  33.214                      PAGE_FAULT(regs->esi, USER_READ_FAULT);
  33.215 -                outl_user((u32)data, (u16)regs->edx, ed, regs);
  33.216 +                outl_user((u32)data, (u16)regs->edx, v, regs);
  33.217                  break;
  33.218              }
  33.219              regs->esi += (regs->eflags & EF_DF) ? -op_bytes : op_bytes;
  33.220 @@ -687,20 +687,20 @@ static int emulate_privileged_op(struct 
  33.221      case 0xe5: /* IN imm8,%eax */
  33.222          port = insn_fetch(u8, 1, eip);
  33.223      exec_in:
  33.224 -        if ( !guest_io_okay(port, op_bytes, ed, regs) )
  33.225 +        if ( !guest_io_okay(port, op_bytes, v, regs) )
  33.226              goto fail;
  33.227          switch ( op_bytes )
  33.228          {
  33.229          case 1:
  33.230              regs->eax &= ~0xffUL;
  33.231 -            regs->eax |= (u8)inb_user(port, ed, regs);
  33.232 +            regs->eax |= (u8)inb_user(port, v, regs);
  33.233              break;
  33.234          case 2:
  33.235              regs->eax &= ~0xffffUL;
  33.236 -            regs->eax |= (u16)inw_user(port, ed, regs);
  33.237 +            regs->eax |= (u16)inw_user(port, v, regs);
  33.238              break;
  33.239          case 4:
  33.240 -            regs->eax = (u32)inl_user(port, ed, regs);
  33.241 +            regs->eax = (u32)inl_user(port, v, regs);
  33.242              break;
  33.243          }
  33.244          goto done;
  33.245 @@ -716,18 +716,18 @@ static int emulate_privileged_op(struct 
  33.246      case 0xe7: /* OUT %eax,imm8 */
  33.247          port = insn_fetch(u8, 1, eip);
  33.248      exec_out:
  33.249 -        if ( !guest_io_okay(port, op_bytes, ed, regs) )
  33.250 +        if ( !guest_io_okay(port, op_bytes, v, regs) )
  33.251              goto fail;
  33.252          switch ( op_bytes )
  33.253          {
  33.254          case 1:
  33.255 -            outb_user((u8)regs->eax, port, ed, regs);
  33.256 +            outb_user((u8)regs->eax, port, v, regs);
  33.257              break;
  33.258          case 2:
  33.259 -            outw_user((u16)regs->eax, port, ed, regs);
  33.260 +            outw_user((u16)regs->eax, port, v, regs);
  33.261              break;
  33.262          case 4:
  33.263 -            outl_user((u32)regs->eax, port, ed, regs);
  33.264 +            outl_user((u32)regs->eax, port, v, regs);
  33.265              break;
  33.266          }
  33.267          goto done;
  33.268 @@ -740,7 +740,7 @@ static int emulate_privileged_op(struct 
  33.269  
  33.270      case 0xfa: /* CLI */
  33.271      case 0xfb: /* STI */
  33.272 -        if ( ed->arch.iopl < (KERNEL_MODE(ed, regs) ? 1 : 3) )
  33.273 +        if ( v->arch.iopl < (KERNEL_MODE(v, regs) ? 1 : 3) )
  33.274              goto fail;
  33.275          /*
  33.276           * This is just too dangerous to allow, in my opinion. Consider if the
  33.277 @@ -748,7 +748,7 @@ static int emulate_privileged_op(struct 
  33.278           * that and we'll end up with hard-to-debug lockups. Fast & loose will
  33.279           * do for us. :-)
  33.280           */
  33.281 -        /*ed->vcpu_info->evtchn_upcall_mask = (opcode == 0xfa);*/
  33.282 +        /*v->vcpu_info->evtchn_upcall_mask = (opcode == 0xfa);*/
  33.283          goto done;
  33.284  
  33.285      case 0x0f: /* Two-byte opcode */
  33.286 @@ -759,7 +759,7 @@ static int emulate_privileged_op(struct 
  33.287      }
  33.288  
  33.289      /* Remaining instructions only emulated from guest kernel. */
  33.290 -    if ( !KERNEL_MODE(ed, regs) )
  33.291 +    if ( !KERNEL_MODE(v, regs) )
  33.292          goto fail;
  33.293  
  33.294      /* Privileged (ring 0) instructions. */
  33.295 @@ -772,7 +772,7 @@ static int emulate_privileged_op(struct 
  33.296  
  33.297      case 0x09: /* WBINVD */
  33.298          /* Ignore the instruction if unprivileged. */
  33.299 -        if ( !IS_CAPABLE_PHYSDEV(ed->domain) )
  33.300 +        if ( !IS_CAPABLE_PHYSDEV(v->domain) )
  33.301              DPRINTK("Non-physdev domain attempted WBINVD.\n");
  33.302          else
  33.303              wbinvd();
  33.304 @@ -789,15 +789,15 @@ static int emulate_privileged_op(struct 
  33.305          case 0: /* Read CR0 */
  33.306              *reg = 
  33.307                  (read_cr0() & ~X86_CR0_TS) | 
  33.308 -                (test_bit(_VCPUF_guest_stts, &ed->vcpu_flags) ? X86_CR0_TS:0);
  33.309 +                (test_bit(_VCPUF_guest_stts, &v->vcpu_flags) ? X86_CR0_TS:0);
  33.310              break;
  33.311  
  33.312          case 2: /* Read CR2 */
  33.313 -            *reg = ed->arch.guest_cr2;
  33.314 +            *reg = v->arch.guest_cr2;
  33.315              break;
  33.316              
  33.317          case 3: /* Read CR3 */
  33.318 -            *reg = pagetable_get_paddr(ed->arch.guest_table);
  33.319 +            *reg = pagetable_get_paddr(v->arch.guest_table);
  33.320              break;
  33.321  
  33.322          default:
  33.323 @@ -818,13 +818,13 @@ static int emulate_privileged_op(struct 
  33.324              break;
  33.325  
  33.326          case 2: /* Write CR2 */
  33.327 -            ed->arch.guest_cr2 = *reg;
  33.328 +            v->arch.guest_cr2 = *reg;
  33.329              break;
  33.330              
  33.331          case 3: /* Write CR3 */
  33.332 -            LOCK_BIGLOCK(ed->domain);
  33.333 +            LOCK_BIGLOCK(v->domain);
  33.334              (void)new_guest_cr3(*reg);
  33.335 -            UNLOCK_BIGLOCK(ed->domain);
  33.336 +            UNLOCK_BIGLOCK(v->domain);
  33.337              break;
  33.338  
  33.339          default:
  33.340 @@ -834,7 +834,7 @@ static int emulate_privileged_op(struct 
  33.341  
  33.342      case 0x30: /* WRMSR */
  33.343          /* Ignore the instruction if unprivileged. */
  33.344 -        if ( !IS_PRIV(ed->domain) )
  33.345 +        if ( !IS_PRIV(v->domain) )
  33.346              DPRINTK("Non-priv domain attempted WRMSR(%p,%08lx,%08lx).\n",
  33.347                      _p(regs->ecx), (long)regs->eax, (long)regs->edx);
  33.348          else if ( wrmsr_user(regs->ecx, regs->eax, regs->edx) )
  33.349 @@ -842,7 +842,7 @@ static int emulate_privileged_op(struct 
  33.350          break;
  33.351  
  33.352      case 0x32: /* RDMSR */
  33.353 -        if ( !IS_PRIV(ed->domain) )
  33.354 +        if ( !IS_PRIV(v->domain) )
  33.355              DPRINTK("Non-priv domain attempted RDMSR(%p,%08lx,%08lx).\n",
  33.356                      _p(regs->ecx), (long)regs->eax, (long)regs->edx);
  33.357          /* Everyone can read the MSR space. */
  33.358 @@ -864,8 +864,8 @@ static int emulate_privileged_op(struct 
  33.359  
  33.360  asmlinkage int do_general_protection(struct cpu_user_regs *regs)
  33.361  {
  33.362 -    struct exec_domain *ed = current;
  33.363 -    struct trap_bounce *tb = &ed->arch.trap_bounce;
  33.364 +    struct vcpu *v = current;
  33.365 +    struct trap_bounce *tb = &v->arch.trap_bounce;
  33.366      trap_info_t *ti;
  33.367      unsigned long fixup;
  33.368  
  33.369 @@ -901,7 +901,7 @@ asmlinkage int do_general_protection(str
  33.370      {
  33.371          /* This fault must be due to <INT n> instruction. */
  33.372          ti = &current->arch.guest_context.trap_ctxt[regs->error_code>>3];
  33.373 -        if ( PERMIT_SOFTINT(TI_GET_DPL(ti), ed, regs) )
  33.374 +        if ( PERMIT_SOFTINT(TI_GET_DPL(ti), v, regs) )
  33.375          {
  33.376              tb->flags = TBF_EXCEPTION;
  33.377              regs->eip += 2;
  33.378 @@ -915,7 +915,7 @@ asmlinkage int do_general_protection(str
  33.379          return 0;
  33.380  
  33.381  #if defined(__i386__)
  33.382 -    if ( VM_ASSIST(ed->domain, VMASST_TYPE_4gb_segments) && 
  33.383 +    if ( VM_ASSIST(v->domain, VMASST_TYPE_4gb_segments) && 
  33.384           (regs->error_code == 0) && 
  33.385           gpf_emulate_4gb(regs) )
  33.386          return 0;
  33.387 @@ -958,10 +958,10 @@ static void nmi_softirq(void)
  33.388          return;
  33.389  
  33.390      if ( test_and_clear_bit(0, &nmi_softirq_reason) )
  33.391 -        send_guest_virq(dom0->exec_domain[0], VIRQ_PARITY_ERR);
  33.392 +        send_guest_virq(dom0->vcpu[0], VIRQ_PARITY_ERR);
  33.393  
  33.394      if ( test_and_clear_bit(1, &nmi_softirq_reason) )
  33.395 -        send_guest_virq(dom0->exec_domain[0], VIRQ_IO_ERR);
  33.396 +        send_guest_virq(dom0->vcpu[0], VIRQ_IO_ERR);
  33.397  }
  33.398  
  33.399  asmlinkage void mem_parity_error(struct cpu_user_regs *regs)
  33.400 @@ -1045,14 +1045,14 @@ asmlinkage int math_state_restore(struct
  33.401  asmlinkage int do_debug(struct cpu_user_regs *regs)
  33.402  {
  33.403      unsigned long condition;
  33.404 -    struct exec_domain *ed = current;
  33.405 -    struct trap_bounce *tb = &ed->arch.trap_bounce;
  33.406 +    struct vcpu *v = current;
  33.407 +    struct trap_bounce *tb = &v->arch.trap_bounce;
  33.408  
  33.409      __asm__ __volatile__("mov %%db6,%0" : "=r" (condition));
  33.410  
  33.411      /* Mask out spurious debug traps due to lazy DR7 setting */
  33.412      if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
  33.413 -         (ed->arch.guest_context.debugreg[7] == 0) )
  33.414 +         (v->arch.guest_context.debugreg[7] == 0) )
  33.415      {
  33.416          __asm__("mov %0,%%db7" : : "r" (0UL));
  33.417          goto out;
  33.418 @@ -1074,11 +1074,11 @@ asmlinkage int do_debug(struct cpu_user_
  33.419      } 
  33.420  
  33.421      /* Save debug status register where guest OS can peek at it */
  33.422 -    ed->arch.guest_context.debugreg[6] = condition;
  33.423 +    v->arch.guest_context.debugreg[6] = condition;
  33.424  
  33.425      tb->flags = TBF_EXCEPTION;
  33.426 -    tb->cs    = ed->arch.guest_context.trap_ctxt[TRAP_debug].cs;
  33.427 -    tb->eip   = ed->arch.guest_context.trap_ctxt[TRAP_debug].address;
  33.428 +    tb->cs    = v->arch.guest_context.trap_ctxt[TRAP_debug].cs;
  33.429 +    tb->eip   = v->arch.guest_context.trap_ctxt[TRAP_debug].address;
  33.430  
  33.431   out:
  33.432      return EXCRET_not_a_fault;
  33.433 @@ -1208,7 +1208,7 @@ long do_set_trap_table(trap_info_t *trap
  33.434  }
  33.435  
  33.436  
  33.437 -long set_debugreg(struct exec_domain *p, int reg, unsigned long value)
  33.438 +long set_debugreg(struct vcpu *p, int reg, unsigned long value)
  33.439  {
  33.440      int i;
  33.441  
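
The traps.c hunks above are typical of the whole sweep: only the type and
variable names change, while the port-I/O emulation logic is untouched. As a
condensed sketch of that logic (guest_io_okay() and the in*_user() helpers
are the real Xen routines named in the hunks; the wrapper itself is
illustrative, not the actual function layout):

    static int emulate_in(struct vcpu *v, struct cpu_user_regs *regs,
                          unsigned int port, unsigned int bytes)
    {
        if ( !guest_io_okay(port, bytes, v, regs) )
            return 0;               /* caller raises #GP to the guest */
        switch ( bytes )
        {
        case 1:
            regs->eax = (regs->eax & ~0xffUL)   | (u8)inb_user(port, v, regs);
            break;
        case 2:
            regs->eax = (regs->eax & ~0xffffUL) | (u16)inw_user(port, v, regs);
            break;
        case 4:
            regs->eax = (u32)inl_user(port, v, regs);
            break;
        }
        return 1;
    }
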
    34.1 --- a/xen/arch/x86/vmx.c	Thu Jun 02 19:19:24 2005 +0000
    34.2 +++ b/xen/arch/x86/vmx.c	Thu Jun 02 21:05:33 2005 +0000
    34.3 @@ -243,7 +243,7 @@ static void vmx_dr_access (unsigned long
    34.4  {
    34.5      unsigned int reg;
    34.6      unsigned long *reg_p = 0;
    34.7 -    struct exec_domain *ed = current;
    34.8 +    struct vcpu *v = current;
    34.9      unsigned long eip;
   34.10  
   34.11      __vmread(GUEST_EIP, &eip);
   34.12 @@ -272,18 +272,18 @@ static void vmx_dr_access (unsigned long
   34.13      case TYPE_MOV_TO_DR: 
   34.14          /* don't need to check the range */
   34.15          if (reg != REG_ESP)
   34.16 -            ed->arch.guest_context.debugreg[reg] = *reg_p; 
   34.17 +            v->arch.guest_context.debugreg[reg] = *reg_p; 
   34.18          else {
   34.19              unsigned long value;
   34.20              __vmread(GUEST_ESP, &value);
   34.21 -            ed->arch.guest_context.debugreg[reg] = value;
   34.22 +            v->arch.guest_context.debugreg[reg] = value;
   34.23          }
   34.24          break;
   34.25      case TYPE_MOV_FROM_DR:
   34.26          if (reg != REG_ESP)
   34.27 -            *reg_p = ed->arch.guest_context.debugreg[reg];
   34.28 +            *reg_p = v->arch.guest_context.debugreg[reg];
   34.29          else {
   34.30 -            __vmwrite(GUEST_ESP, ed->arch.guest_context.debugreg[reg]);
   34.31 +            __vmwrite(GUEST_ESP, v->arch.guest_context.debugreg[reg]);
   34.32          }
   34.33          break;
   34.34      }
   34.35 @@ -296,7 +296,7 @@ static void vmx_dr_access (unsigned long
   34.36  static void vmx_vmexit_do_invlpg(unsigned long va) 
   34.37  {
   34.38      unsigned long eip;
   34.39 -    struct exec_domain *ed = current;
   34.40 +    struct vcpu *v = current;
   34.41  
   34.42      __vmread(GUEST_EIP, &eip);
   34.43  
   34.44 @@ -307,7 +307,7 @@ static void vmx_vmexit_do_invlpg(unsigne
   34.45       * We do the safest things first, then try to update the shadow
   34.46       * copying from guest
   34.47       */
   34.48 -    shadow_invlpg(ed, va);
   34.49 +    shadow_invlpg(v, va);
   34.50  }
   34.51  
   34.52  static int check_for_null_selector(unsigned long eip)
   34.53 @@ -362,7 +362,7 @@ static int check_for_null_selector(unsig
   34.54  static void vmx_io_instruction(struct cpu_user_regs *regs, 
   34.55                     unsigned long exit_qualification, unsigned long inst_len) 
   34.56  {
   34.57 -    struct exec_domain *d = current;
   34.58 +    struct vcpu *d = current;
   34.59      vcpu_iodata_t *vio;
   34.60      ioreq_t *p;
   34.61      unsigned long addr;
   34.62 @@ -489,7 +489,7 @@ vmx_copy(void *buf, unsigned long laddr,
   34.63  }
   34.64  
   34.65  int
   34.66 -vmx_world_save(struct exec_domain *d, struct vmx_assist_context *c)
   34.67 +vmx_world_save(struct vcpu *d, struct vmx_assist_context *c)
   34.68  {
   34.69      unsigned long inst_len;
   34.70      int error = 0;
   34.71 @@ -554,7 +554,7 @@ vmx_world_save(struct exec_domain *d, st
   34.72  }
   34.73  
   34.74  int
   34.75 -vmx_world_restore(struct exec_domain *d, struct vmx_assist_context *c)
   34.76 +vmx_world_restore(struct vcpu *d, struct vmx_assist_context *c)
   34.77  {
   34.78      unsigned long mfn, old_cr4;
   34.79      int error = 0;
   34.80 @@ -664,7 +664,7 @@ skip_cr3:
   34.81  enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
   34.82  
   34.83  int
   34.84 -vmx_assist(struct exec_domain *d, int mode)
   34.85 +vmx_assist(struct vcpu *d, int mode)
   34.86  {
   34.87      struct vmx_assist_context c;
   34.88      unsigned long magic, cp;
   34.89 @@ -731,7 +731,7 @@ error:
   34.90  
   34.91  static int vmx_set_cr0(unsigned long value)
   34.92  {
   34.93 -    struct exec_domain *d = current;
   34.94 +    struct vcpu *d = current;
   34.95      unsigned long old_base_mfn, mfn;
   34.96      unsigned long eip;
   34.97      int paging_enabled;
   34.98 @@ -821,7 +821,7 @@ static int mov_to_cr(int gp, int cr, str
   34.99  {
  34.100      unsigned long value;
  34.101      unsigned long old_cr;
  34.102 -    struct exec_domain *d = current;
  34.103 +    struct vcpu *d = current;
  34.104  
  34.105      switch (gp) {
  34.106          CASE_GET_REG(EAX, eax);
  34.107 @@ -937,7 +937,7 @@ static int mov_to_cr(int gp, int cr, str
  34.108  static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
  34.109  {
  34.110      unsigned long value;
  34.111 -    struct exec_domain *d = current;
  34.112 +    struct vcpu *d = current;
  34.113  
  34.114      if (cr != 3)
  34.115          __vmx_bug(regs);
  34.116 @@ -1046,7 +1046,7 @@ static inline void vmx_vmexit_do_mwait(v
  34.117  char print_buf[BUF_SIZ];
  34.118  static int index;
  34.119  
  34.120 -static void vmx_print_line(const char c, struct exec_domain *d) 
  34.121 +static void vmx_print_line(const char c, struct vcpu *d) 
  34.122  {
  34.123  
  34.124      if (index == MAX_LINE || c == '\n') {
  34.125 @@ -1109,7 +1109,7 @@ asmlinkage void vmx_vmexit_handler(struc
  34.126  {
  34.127      unsigned int exit_reason, idtv_info_field;
  34.128      unsigned long exit_qualification, eip, inst_len = 0;
  34.129 -    struct exec_domain *ed = current;
  34.130 +    struct vcpu *v = current;
  34.131      int error;
  34.132  
  34.133      if ((error = __vmread(VM_EXIT_REASON, &exit_reason)))
  34.134 @@ -1143,7 +1143,7 @@ asmlinkage void vmx_vmexit_handler(struc
  34.135      }
  34.136  
  34.137      __vmread(GUEST_EIP, &eip);
  34.138 -    TRACE_3D(TRC_VMX_VMEXIT, ed->domain->domain_id, eip, exit_reason);
  34.139 +    TRACE_3D(TRC_VMX_VMEXIT, v->domain->domain_id, eip, exit_reason);
  34.140  
  34.141      switch (exit_reason) {
  34.142      case EXIT_REASON_EXCEPTION_NMI:
  34.143 @@ -1164,7 +1164,7 @@ asmlinkage void vmx_vmexit_handler(struc
  34.144  
  34.145          perfc_incra(cause_vector, vector);
  34.146  
  34.147 -        TRACE_3D(TRC_VMX_VECTOR, ed->domain->domain_id, eip, vector);
  34.148 +        TRACE_3D(TRC_VMX_VECTOR, v->domain->domain_id, eip, vector);
  34.149          switch (vector) {
  34.150  #ifdef XEN_DEBUGGER
  34.151          case TRAP_debug:
  34.152 @@ -1216,7 +1216,7 @@ asmlinkage void vmx_vmexit_handler(struc
  34.153                          (unsigned long)regs.eax, (unsigned long)regs.ebx,
  34.154                          (unsigned long)regs.ecx, (unsigned long)regs.edx,
  34.155                          (unsigned long)regs.esi, (unsigned long)regs.edi);
  34.156 -            ed->arch.arch_vmx.vmx_platform.mpci.inst_decoder_regs = &regs;
  34.157 +            v->arch.arch_vmx.vmx_platform.mpci.inst_decoder_regs = &regs;
  34.158  
  34.159              if (!(error = vmx_do_page_fault(va, &regs))) {
  34.160                  /*
  34.161 @@ -1230,8 +1230,8 @@ asmlinkage void vmx_vmexit_handler(struc
  34.162                             TRAP_page_fault);
  34.163                  __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
  34.164                  __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, regs.error_code);
  34.165 -                ed->arch.arch_vmx.cpu_cr2 = va;
  34.166 -                TRACE_3D(TRC_VMX_INT, ed->domain->domain_id, TRAP_page_fault, va);
  34.167 +                v->arch.arch_vmx.cpu_cr2 = va;
  34.168 +                TRACE_3D(TRC_VMX_INT, v->domain->domain_id, TRAP_page_fault, va);
  34.169              }
  34.170              break;
  34.171          }
  34.172 @@ -1300,7 +1300,7 @@ asmlinkage void vmx_vmexit_handler(struc
  34.173          __vmread(GUEST_EIP, &eip);
  34.174          __vmread(EXIT_QUALIFICATION, &exit_qualification);
  34.175  
  34.176 -        vmx_print_line(regs.eax, ed); /* provides the current domain */
  34.177 +        vmx_print_line(regs.eax, v); /* provides the current domain */
  34.178          __update_guest_eip(inst_len);
  34.179          break;
  34.180      case EXIT_REASON_CR_ACCESS:
  34.181 @@ -1348,13 +1348,13 @@ asmlinkage void vmx_vmexit_handler(struc
  34.182          __vmx_bug(&regs);       /* should not happen */
  34.183      }
  34.184  
  34.185 -    vmx_intr_assist(ed);
  34.186 +    vmx_intr_assist(v);
  34.187      return;
  34.188  }
  34.189  
  34.190  asmlinkage void load_cr2(void)
  34.191  {
  34.192 -    struct exec_domain *d = current;
  34.193 +    struct vcpu *d = current;
  34.194  
  34.195      local_irq_disable();        
  34.196  #ifdef __i386__
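
The mov_to_cr()/mov_from_cr() hunks above lean on CASE_GET_REG to map the
general-register index decoded from the exit qualification onto the saved
cpu_user_regs slot. The macro's conventional shape (spelling assumed, as the
definition is not part of these hunks):

    #define CASE_GET_REG(REG, reg)      \
        case REG_ ## REG:               \
            value = regs->reg;          \
            break
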
    35.1 --- a/xen/arch/x86/vmx_intercept.c	Thu Jun 02 19:19:24 2005 +0000
    35.2 +++ b/xen/arch/x86/vmx_intercept.c	Thu Jun 02 21:05:33 2005 +0000
    35.3 @@ -34,7 +34,7 @@
    35.4  /* for intercepting io request after vm_exit, return value: 0--not handle; 1--handled */
    35.5  int vmx_io_intercept(ioreq_t *p)
    35.6  {
    35.7 -    struct exec_domain *d = current;
    35.8 +    struct vcpu *d = current;
    35.9      struct vmx_handler_t *handler = &(d->arch.arch_vmx.vmx_platform.vmx_handler);
   35.10      int i;
   35.11      unsigned long addr, offset;
   35.12 @@ -50,7 +50,7 @@ int vmx_io_intercept(ioreq_t *p)
   35.13  
   35.14  int register_io_handler(unsigned long addr, unsigned long offset, intercept_action_t action)
   35.15  {
   35.16 -    struct exec_domain *d = current;
   35.17 +    struct vcpu *d = current;
   35.18      struct vmx_handler_t *handler = &(d->arch.arch_vmx.vmx_platform.vmx_handler);
   35.19      int num = handler->num_slot;
   35.20  
   35.21 @@ -162,7 +162,7 @@ static void resume_pit_io(ioreq_t *p)
   35.22  /* the intercept action for PIT DM retval:0--not handled; 1--handled */
   35.23  int intercept_pit_io(ioreq_t *p)
   35.24  {
   35.25 -    struct exec_domain *d = current;
   35.26 +    struct vcpu *d = current;
   35.27      struct vmx_virpit_t *vpit = &(d->arch.arch_vmx.vmx_platform.vmx_pit);
   35.28  
   35.29      if (p->size != 1 ||
   35.30 @@ -204,7 +204,7 @@ static void pit_timer_fn(void *data)
   35.31  /* Only some PIT operations such as load init counter need a hypervisor hook.
   35.32   * leave all other operations in user space DM
   35.33   */
   35.34 -void vmx_hooks_assist(struct exec_domain *d)
   35.35 +void vmx_hooks_assist(struct vcpu *d)
   35.36  {
   35.37      vcpu_iodata_t *vio = (vcpu_iodata_t *) d->arch.arch_vmx.vmx_platform.shared_page_va;
   35.38      ioreq_t *p = &vio->vp_ioreq;
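
register_io_handler() above is how intercept_pit_io() gets first crack at
guest port accesses before they are forwarded to the user-space device
model. A hypothetical call site (the PIT port base 0x40, the range of four
ports, and treating the handler as the intercept_action_t argument are all
assumptions, not taken from this changeset):

    /* Claim the PIT's i/o ports (0x40-0x43 assumed) for the
     * in-hypervisor PIT model. */
    register_io_handler(0x40, 4, intercept_pit_io);
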
    36.1 --- a/xen/arch/x86/vmx_io.c	Thu Jun 02 19:19:24 2005 +0000
    36.2 +++ b/xen/arch/x86/vmx_io.c	Thu Jun 02 21:05:33 2005 +0000
    36.3 @@ -179,7 +179,7 @@ static void set_reg_value (int size, int
    36.4  }
    36.5  #endif
    36.6  
    36.7 -void vmx_io_assist(struct exec_domain *ed) 
    36.8 +void vmx_io_assist(struct vcpu *v) 
    36.9  {
   36.10      vcpu_iodata_t *vio;
   36.11      ioreq_t *p;
   36.12 @@ -189,10 +189,10 @@ void vmx_io_assist(struct exec_domain *e
   36.13      struct mi_per_cpu_info *mpci_p;
   36.14      struct cpu_user_regs *inst_decoder_regs;
   36.15  
   36.16 -    mpci_p = &ed->arch.arch_vmx.vmx_platform.mpci;
   36.17 +    mpci_p = &v->arch.arch_vmx.vmx_platform.mpci;
   36.18      inst_decoder_regs = mpci_p->inst_decoder_regs;
   36.19  
   36.20 -    vio = (vcpu_iodata_t *) ed->arch.arch_vmx.vmx_platform.shared_page_va;
   36.21 +    vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
   36.22      if (vio == 0) {
   36.23          VMX_DBG_LOG(DBG_LEVEL_1, 
   36.24                      "bad shared page: %lx", (unsigned long) vio);
   36.25 @@ -201,18 +201,18 @@ void vmx_io_assist(struct exec_domain *e
   36.26      p = &vio->vp_ioreq;
   36.27  
   36.28      if (p->state == STATE_IORESP_HOOK){
   36.29 -        vmx_hooks_assist(ed);
   36.30 +        vmx_hooks_assist(v);
   36.31      }
   36.32  
   36.33      /* clear IO wait VMX flag */
   36.34 -    if (test_bit(ARCH_VMX_IO_WAIT, &ed->arch.arch_vmx.flags)) {
   36.35 +    if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
   36.36          if (p->state != STATE_IORESP_READY) {
   36.37                  /* An interrupt send event raced us */
   36.38                  return;
   36.39          } else {
   36.40              p->state = STATE_INVALID;
   36.41          }
   36.42 -        clear_bit(ARCH_VMX_IO_WAIT, &ed->arch.arch_vmx.flags);
   36.43 +        clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
   36.44      } else {
   36.45          return;
   36.46      }
   36.47 @@ -228,10 +228,10 @@ void vmx_io_assist(struct exec_domain *e
   36.48              }
   36.49              int size = -1, index = -1;
   36.50  
   36.51 -            size = operand_size(ed->arch.arch_vmx.vmx_platform.mpci.mmio_target);
   36.52 -            index = operand_index(ed->arch.arch_vmx.vmx_platform.mpci.mmio_target);
   36.53 +            size = operand_size(v->arch.arch_vmx.vmx_platform.mpci.mmio_target);
   36.54 +            index = operand_index(v->arch.arch_vmx.vmx_platform.mpci.mmio_target);
   36.55  
   36.56 -            if (ed->arch.arch_vmx.vmx_platform.mpci.mmio_target & WZEROEXTEND) {
   36.57 +            if (v->arch.arch_vmx.vmx_platform.mpci.mmio_target & WZEROEXTEND) {
   36.58                  p->u.data = p->u.data & 0xffff;
   36.59              }        
   36.60              set_reg_value(size, index, 0, regs, p->u.data);
   36.61 @@ -272,17 +272,17 @@ void vmx_io_assist(struct exec_domain *e
   36.62      }
   36.63  }
   36.64  
   36.65 -int vmx_clear_pending_io_event(struct exec_domain *ed) 
   36.66 +int vmx_clear_pending_io_event(struct vcpu *v) 
   36.67  {
   36.68 -    struct domain *d = ed->domain;
   36.69 +    struct domain *d = v->domain;
   36.70  
   36.71      /* evtchn_pending is shared by other event channels in 0-31 range */
   36.72      if (!d->shared_info->evtchn_pending[IOPACKET_PORT>>5])
   36.73 -        clear_bit(IOPACKET_PORT>>5, &ed->vcpu_info->evtchn_pending_sel);
   36.74 +        clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
   36.75  
   36.76      /* Note: VMX domains may need upcalls as well */
   36.77 -    if (!ed->vcpu_info->evtchn_pending_sel) 
   36.78 -        ed->vcpu_info->evtchn_upcall_pending = 0;
   36.79 +    if (!v->vcpu_info->evtchn_pending_sel) 
   36.80 +        v->vcpu_info->evtchn_upcall_pending = 0;
   36.81  
   36.82      /* clear the pending bit for IOPACKET_PORT */
   36.83      return test_and_clear_bit(IOPACKET_PORT, 
   36.84 @@ -295,7 +295,7 @@ int vmx_clear_pending_io_event(struct ex
   36.85   * interrupts are guaranteed to be checked before resuming guest. 
   36.86   * VMX upcalls have been already arranged for if necessary. 
   36.87   */
   36.88 -void vmx_check_events(struct exec_domain *d) 
   36.89 +void vmx_check_events(struct vcpu *d) 
   36.90  {
   36.91      /* clear the event *before* checking for work. This should avoid 
   36.92         the set-and-check races */
   36.93 @@ -383,7 +383,7 @@ static __inline__ int find_highest_irq(u
   36.94   * Return 0-255 for pending irq.
   36.95   *        -1 when no pending.
   36.96   */
   36.97 -static inline int find_highest_pending_irq(struct exec_domain *d)
   36.98 +static inline int find_highest_pending_irq(struct vcpu *d)
   36.99  {
  36.100      vcpu_iodata_t *vio;
  36.101  
  36.102 @@ -397,7 +397,7 @@ static inline int find_highest_pending_i
  36.103      return find_highest_irq((unsigned int *)&vio->vp_intr[0]);
  36.104  }
  36.105  
  36.106 -static inline void clear_highest_bit(struct exec_domain *d, int vector)
  36.107 +static inline void clear_highest_bit(struct vcpu *d, int vector)
  36.108  {
  36.109      vcpu_iodata_t *vio;
  36.110  
  36.111 @@ -416,7 +416,7 @@ static inline int irq_masked(unsigned lo
  36.112      return ((eflags & X86_EFLAGS_IF) == 0);
  36.113  }
  36.114  
  36.115 -void vmx_intr_assist(struct exec_domain *d) 
  36.116 +void vmx_intr_assist(struct vcpu *d) 
  36.117  {
  36.118      int highest_vector = find_highest_pending_irq(d);
  36.119      unsigned long intr_fields, eflags;
  36.120 @@ -462,7 +462,7 @@ void vmx_intr_assist(struct exec_domain 
  36.121      return;
  36.122  }
  36.123  
  36.124 -void vmx_do_resume(struct exec_domain *d) 
  36.125 +void vmx_do_resume(struct vcpu *d) 
  36.126  {
  36.127      vmx_stts();
  36.128      if ( vmx_paging_enabled(d) )
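
find_highest_pending_irq() above scans the 256-entry pending vector that the
device model shares in vio->vp_intr. A sketch of the underlying bit scan
(assuming a generic fls() helper is available; the real find_highest_irq()
may differ in detail):

    /* Return the highest pending vector in a 256-bit bitmap, -1 if none. */
    static int find_highest_irq_sketch(unsigned int *pintr)
    {
        int word;
        for ( word = (256 / 32) - 1; word >= 0; word-- )
            if ( pintr[word] != 0 )
                return (word * 32) + fls(pintr[word]) - 1;
        return -1;
    }
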
    37.1 --- a/xen/arch/x86/vmx_platform.c	Thu Jun 02 19:19:24 2005 +0000
    37.2 +++ b/xen/arch/x86/vmx_platform.c	Thu Jun 02 21:05:33 2005 +0000
    37.3 @@ -481,7 +481,7 @@ static int read_from_mmio(struct instruc
    37.4  static void send_mmio_req(unsigned long gpa, 
    37.5                     struct instruction *inst_p, long value, int dir, int pvalid)
    37.6  {
    37.7 -    struct exec_domain *d = current;
    37.8 +    struct vcpu *d = current;
    37.9      vcpu_iodata_t *vio;
   37.10      ioreq_t *p;
   37.11      int vm86;
    38.1 --- a/xen/arch/x86/vmx_vmcs.c	Thu Jun 02 19:19:24 2005 +0000
    38.2 +++ b/xen/arch/x86/vmx_vmcs.c	Thu Jun 02 21:05:33 2005 +0000
    38.3 @@ -102,7 +102,7 @@ struct host_execution_env {
    38.4  
    38.5  #define round_pgdown(_p) ((_p)&PAGE_MASK) /* coped from domain.c */
    38.6  
    38.7 -int vmx_setup_platform(struct exec_domain *d, struct cpu_user_regs *regs)
    38.8 +int vmx_setup_platform(struct vcpu *d, struct cpu_user_regs *regs)
    38.9  {
   38.10      int i;
   38.11      unsigned int n;
   38.12 @@ -156,7 +156,7 @@ int vmx_setup_platform(struct exec_domai
   38.13      return 0;
   38.14  }
   38.15  
   38.16 -void vmx_do_launch(struct exec_domain *ed) 
   38.17 +void vmx_do_launch(struct vcpu *v) 
   38.18  {
   38.19  /* Update CR3, GDT, LDT, TR */
   38.20      unsigned int tr, cpu, error = 0;
   38.21 @@ -167,14 +167,14 @@ void vmx_do_launch(struct exec_domain *e
   38.22      struct cpu_user_regs *regs = guest_cpu_user_regs();
   38.23  
   38.24      vmx_stts();
   38.25 -    set_bit(_VCPUF_guest_stts, &ed->vcpu_flags);
   38.26 +    set_bit(_VCPUF_guest_stts, &v->vcpu_flags);
   38.27  
   38.28      cpu = smp_processor_id();
   38.29  
   38.30      page = (struct pfn_info *) alloc_domheap_page(NULL);
   38.31      pfn = (unsigned long) (page - frame_table);
   38.32  
   38.33 -    vmx_setup_platform(ed, regs);
   38.34 +    vmx_setup_platform(v, regs);
   38.35  
   38.36      __asm__ __volatile__ ("sgdt  (%0) \n" :: "a"(&desc) : "memory");
   38.37      host_env.gdtr_limit = desc.size;
   38.38 @@ -196,11 +196,11 @@ void vmx_do_launch(struct exec_domain *e
   38.39      error |= __vmwrite(GUEST_TR_BASE, 0);
   38.40      error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
   38.41  
   38.42 -    __vmwrite(GUEST_CR3, pagetable_get_paddr(ed->arch.guest_table));
   38.43 -    __vmwrite(HOST_CR3, pagetable_get_paddr(ed->arch.monitor_table));
   38.44 +    __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.guest_table));
   38.45 +    __vmwrite(HOST_CR3, pagetable_get_paddr(v->arch.monitor_table));
   38.46      __vmwrite(HOST_ESP, (unsigned long)get_stack_bottom());
   38.47  
   38.48 -    ed->arch.schedule_tail = arch_vmx_do_resume;
   38.49 +    v->arch.schedule_tail = arch_vmx_do_resume;
   38.50  }
   38.51  
   38.52  /*
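
The last assignment in vmx_do_launch() registers the vcpu's resume hook.
What consumes it (a sketch; the function name below is illustrative, only
schedule_tail and arch_vmx_do_resume appear in the hunks): once a context
switch completes on the incoming vcpu's stack, the scheduler tail-calls the
hook, and for VMX vcpus arch_vmx_do_resume() reloads the VMCS and re-enters
the guest.

    /* Consumer side of v->arch.schedule_tail, condensed. */
    static void finish_context_switch(struct vcpu *next)
    {
        next->arch.schedule_tail(next);   /* arch_vmx_do_resume for VMX */
    }
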
    39.1 --- a/xen/arch/x86/x86_32/asm-offsets.c	Thu Jun 02 19:19:24 2005 +0000
    39.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c	Thu Jun 02 21:05:33 2005 +0000
    39.3 @@ -47,21 +47,21 @@ void __dummy__(void)
    39.4      DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
    39.5      BLANK();
    39.6  
    39.7 -    OFFSET(EDOMAIN_processor, struct exec_domain, processor);
    39.8 -    OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info);
    39.9 -    OFFSET(EDOMAIN_trap_bounce, struct exec_domain, arch.trap_bounce);
   39.10 -    OFFSET(EDOMAIN_thread_flags, struct exec_domain, arch.flags);
   39.11 -    OFFSET(EDOMAIN_event_sel, struct exec_domain,
   39.12 +    OFFSET(VCPU_processor, struct vcpu, processor);
   39.13 +    OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
   39.14 +    OFFSET(VCPU_trap_bounce, struct vcpu, arch.trap_bounce);
   39.15 +    OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
   39.16 +    OFFSET(VCPU_event_sel, struct vcpu,
   39.17             arch.guest_context.event_callback_cs);
   39.18 -    OFFSET(EDOMAIN_event_addr, struct exec_domain, 
   39.19 +    OFFSET(VCPU_event_addr, struct vcpu, 
   39.20             arch.guest_context.event_callback_eip);
   39.21 -    OFFSET(EDOMAIN_failsafe_sel, struct exec_domain,
   39.22 +    OFFSET(VCPU_failsafe_sel, struct vcpu,
   39.23             arch.guest_context.failsafe_callback_cs);
   39.24 -    OFFSET(EDOMAIN_failsafe_addr, struct exec_domain,
   39.25 +    OFFSET(VCPU_failsafe_addr, struct vcpu,
   39.26             arch.guest_context.failsafe_callback_eip);
   39.27 -    OFFSET(EDOMAIN_kernel_ss, struct exec_domain,
   39.28 +    OFFSET(VCPU_kernel_ss, struct vcpu,
   39.29             arch.guest_context.kernel_ss);
   39.30 -    OFFSET(EDOMAIN_kernel_sp, struct exec_domain,
   39.31 +    OFFSET(VCPU_kernel_sp, struct vcpu,
   39.32             arch.guest_context.kernel_sp);
   39.33      BLANK();
   39.34  
    40.1 --- a/xen/arch/x86/x86_32/entry.S	Thu Jun 02 19:19:24 2005 +0000
    40.2 +++ b/xen/arch/x86/x86_32/entry.S	Thu Jun 02 21:05:33 2005 +0000
    40.3 @@ -139,7 +139,7 @@ vmx_test_all_events:
    40.4          notl %ecx
    40.5          cli                             # tests must not race interrupts
    40.6  /*test_softirqs:*/  
    40.7 -        movl EDOMAIN_processor(%ebx),%eax
    40.8 +        movl VCPU_processor(%ebx),%eax
    40.9          shl  $IRQSTAT_shift,%eax
   40.10          test %ecx,irq_stat(%eax,1)
   40.11          jnz  vmx_process_softirqs
   40.12 @@ -216,10 +216,10 @@ DBLFLT1:GET_CURRENT(%ebx)
   40.13          jmp   test_all_events
   40.14  failsafe_callback:
   40.15          GET_CURRENT(%ebx)
   40.16 -        leal  EDOMAIN_trap_bounce(%ebx),%edx
   40.17 -        movl  EDOMAIN_failsafe_addr(%ebx),%eax
   40.18 +        leal  VCPU_trap_bounce(%ebx),%edx
   40.19 +        movl  VCPU_failsafe_addr(%ebx),%eax
   40.20          movl  %eax,TRAPBOUNCE_eip(%edx)
   40.21 -        movl  EDOMAIN_failsafe_sel(%ebx),%eax
   40.22 +        movl  VCPU_failsafe_sel(%ebx),%eax
   40.23          movw  %ax,TRAPBOUNCE_cs(%edx)
   40.24          movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
   40.25          call  create_bounce_frame
   40.26 @@ -269,22 +269,22 @@ test_all_events:
   40.27          notl %ecx
   40.28          cli                             # tests must not race interrupts
   40.29  /*test_softirqs:*/  
   40.30 -        movl EDOMAIN_processor(%ebx),%eax
   40.31 +        movl VCPU_processor(%ebx),%eax
   40.32          shl  $IRQSTAT_shift,%eax
   40.33          test %ecx,irq_stat(%eax,1)
   40.34          jnz  process_softirqs
   40.35  /*test_guest_events:*/
   40.36 -        movl EDOMAIN_vcpu_info(%ebx),%eax
   40.37 +        movl VCPU_vcpu_info(%ebx),%eax
   40.38          testb $0xFF,VCPUINFO_upcall_mask(%eax)
   40.39          jnz  restore_all_guest
   40.40          testb $0xFF,VCPUINFO_upcall_pending(%eax)
   40.41          jz   restore_all_guest
   40.42  /*process_guest_events:*/
   40.43          sti
   40.44 -        leal EDOMAIN_trap_bounce(%ebx),%edx
   40.45 -        movl EDOMAIN_event_addr(%ebx),%eax
   40.46 +        leal VCPU_trap_bounce(%ebx),%edx
   40.47 +        movl VCPU_event_addr(%ebx),%eax
   40.48          movl %eax,TRAPBOUNCE_eip(%edx)
   40.49 -        movl EDOMAIN_event_sel(%ebx),%eax
   40.50 +        movl VCPU_event_sel(%ebx),%eax
   40.51          movw %ax,TRAPBOUNCE_cs(%edx)
   40.52          movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
   40.53          call create_bounce_frame
   40.54 @@ -298,15 +298,15 @@ process_softirqs:
   40.55                  
   40.56  /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
   40.57  /*   {EIP, CS, EFLAGS, [ESP, SS]}                                        */
   40.58 -/* %edx == trap_bounce, %ebx == struct exec_domain                       */
   40.59 +/* %edx == trap_bounce, %ebx == struct vcpu                       */
   40.60  /* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp. */
   40.61  create_bounce_frame:
   40.62          movl UREGS_eflags+4(%esp),%ecx
   40.63          movb UREGS_cs+4(%esp),%cl
   40.64          testl $(2|X86_EFLAGS_VM),%ecx
   40.65          jz   ring1 /* jump if returning to an existing ring-1 activation */
   40.66 -        movl EDOMAIN_kernel_sp(%ebx),%esi
   40.67 -FLT6:   movl EDOMAIN_kernel_ss(%ebx),%gs
   40.68 +        movl VCPU_kernel_sp(%ebx),%esi
   40.69 +FLT6:   movl VCPU_kernel_ss(%ebx),%gs
   40.70          testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
   40.71          jz   nvm86_1
   40.72          subl $16,%esi       /* push ES/DS/FS/GS (VM86 stack frame) */
   40.73 @@ -334,7 +334,7 @@ 1:      /* Construct a stack frame: EFLA
   40.74          test %eax,%eax
   40.75          jz   domain_crash_synchronous
   40.76  FLT14:  movl %eax,%gs:(%esi) 
   40.77 -        movl EDOMAIN_vcpu_info(%ebx),%eax
   40.78 +        movl VCPU_vcpu_info(%ebx),%eax
   40.79          pushl VCPUINFO_upcall_mask(%eax)
   40.80          testb $TBF_INTERRUPT,%cl
   40.81          setnz VCPUINFO_upcall_mask(%eax) # TBF_INTERRUPT -> clear upcall mask
   40.82 @@ -407,7 +407,7 @@ nvm86_3:/* Rewrite our stack frame and r
   40.83  
   40.84          ALIGN
   40.85  process_guest_exception_and_events:
   40.86 -        leal EDOMAIN_trap_bounce(%ebx),%edx
   40.87 +        leal VCPU_trap_bounce(%ebx),%edx
   40.88          testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
   40.89          jz   test_all_events
   40.90          call create_bounce_frame
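
GET_CURRENT(%ebx), used throughout this listing, recovers the struct vcpu
pointer that Xen caches at the top of each per-cpu hypervisor stack. A C
rendering of the masking trick (the exact slot location is an assumption;
the real macro lives in the x86 current.h touched by this changeset):

    static inline struct vcpu *sketch_get_current(unsigned long esp)
    {
        /* Round up to the last 32-bit slot of this stack. */
        unsigned long slot = (esp | (STACK_SIZE - 1)) & ~3UL;
        return *(struct vcpu **)slot;
    }
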
    41.1 --- a/xen/arch/x86/x86_32/mm.c	Thu Jun 02 19:19:24 2005 +0000
    41.2 +++ b/xen/arch/x86/x86_32/mm.c	Thu Jun 02 21:05:33 2005 +0000
    41.3 @@ -69,7 +69,7 @@ void __init paging_init(void)
    41.4      printk("PAE disabled.\n");
    41.5  #endif
    41.6  
    41.7 -    idle0_exec_domain.arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
    41.8 +    idle0_vcpu.arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
    41.9  
   41.10      /*
   41.11       * Allocate and map the machine-to-phys table and create read-only mapping 
    42.1 --- a/xen/arch/x86/x86_32/seg_fixup.c	Thu Jun 02 19:19:24 2005 +0000
    42.2 +++ b/xen/arch/x86/x86_32/seg_fixup.c	Thu Jun 02 21:05:33 2005 +0000
    42.3 @@ -106,7 +106,7 @@ static unsigned char insn_decode[256] = 
    42.4   */
    42.5  int get_baselimit(u16 seg, unsigned long *base, unsigned long *limit)
    42.6  {
    42.7 -    struct exec_domain *d = current;
    42.8 +    struct vcpu *d = current;
    42.9      unsigned long *table, a, b;
   42.10      int            ldt = !!(seg & 4);
   42.11      int            idx = (seg >> 3) & 8191;
   42.12 @@ -172,7 +172,7 @@ int linearise_address(u16 seg, unsigned 
   42.13  
   42.14  int fixup_seg(u16 seg, unsigned long offset)
   42.15  {
   42.16 -    struct exec_domain *d = current;
   42.17 +    struct vcpu *d = current;
   42.18      unsigned long *table, a, b, base, limit;
   42.19      int            ldt = !!(seg & 4);
   42.20      int            idx = (seg >> 3) & 8191;
   42.21 @@ -265,7 +265,7 @@ int fixup_seg(u16 seg, unsigned long off
   42.22   */
   42.23  int gpf_emulate_4gb(struct cpu_user_regs *regs)
   42.24  {
   42.25 -    struct exec_domain *d = current;
   42.26 +    struct vcpu *d = current;
   42.27      trap_info_t   *ti;
   42.28      struct trap_bounce *tb;
   42.29      u8            modrm, mod, reg, rm, decode;
    43.1 --- a/xen/arch/x86/x86_32/traps.c	Thu Jun 02 19:19:24 2005 +0000
    43.2 +++ b/xen/arch/x86/x86_32/traps.c	Thu Jun 02 21:05:33 2005 +0000
    43.3 @@ -195,9 +195,9 @@ void __init percpu_traps_init(void)
    43.4      set_task_gate(TRAP_double_fault, __DOUBLEFAULT_TSS_ENTRY<<3);
    43.5  }
    43.6  
    43.7 -void init_int80_direct_trap(struct exec_domain *ed)
    43.8 +void init_int80_direct_trap(struct vcpu *v)
    43.9  {
   43.10 -    trap_info_t *ti = &ed->arch.guest_context.trap_ctxt[0x80];
   43.11 +    trap_info_t *ti = &v->arch.guest_context.trap_ctxt[0x80];
   43.12  
   43.13      /*
   43.14       * We can't virtualise interrupt gates, as there's no way to get
   43.15 @@ -206,12 +206,12 @@ void init_int80_direct_trap(struct exec_
   43.16      if ( TI_GET_IF(ti) )
   43.17          return;
   43.18  
   43.19 -    ed->arch.int80_desc.a = (ti->cs << 16) | (ti->address & 0xffff);
   43.20 -    ed->arch.int80_desc.b =
   43.21 +    v->arch.int80_desc.a = (ti->cs << 16) | (ti->address & 0xffff);
   43.22 +    v->arch.int80_desc.b =
   43.23          (ti->address & 0xffff0000) | 0x8f00 | ((TI_GET_DPL(ti) & 3) << 13);
   43.24  
   43.25 -    if ( ed == current )
   43.26 -        set_int80_direct_trap(ed);
   43.27 +    if ( v == current )
   43.28 +        set_int80_direct_trap(v);
   43.29  }
   43.30  
   43.31  long do_set_callbacks(unsigned long event_selector,
   43.32 @@ -219,7 +219,7 @@ long do_set_callbacks(unsigned long even
   43.33                        unsigned long failsafe_selector,
   43.34                        unsigned long failsafe_address)
   43.35  {
   43.36 -    struct exec_domain *d = current;
   43.37 +    struct vcpu *d = current;
   43.38  
   43.39      if ( !VALID_CODESEL(event_selector) || !VALID_CODESEL(failsafe_selector) )
   43.40          return -EPERM;
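
init_int80_direct_trap() above packs a complete IDT entry by hand: the low
word is selector:offset[15:0] and the high word carries offset[31:16] plus
the access bits, where 0x8f00 marks a present 32-bit trap gate and the
guest's DPL lands in bits 13-14. The same packing as a standalone helper
(illustrative wrapper; the packing itself is taken from the hunk):

    static inline void pack_trap_gate(u16 cs, u32 addr, u32 dpl,
                                      u32 *a, u32 *b)
    {
        *a = ((u32)cs << 16) | (addr & 0xffff);
        *b = (addr & 0xffff0000) | 0x8f00 | ((dpl & 3) << 13);
    }
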
    44.1 --- a/xen/arch/x86/x86_64/asm-offsets.c	Thu Jun 02 19:19:24 2005 +0000
    44.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c	Thu Jun 02 21:05:33 2005 +0000
    44.3 @@ -51,17 +51,17 @@ void __dummy__(void)
    44.4      DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
    44.5      BLANK();
    44.6  
    44.7 -    OFFSET(EDOMAIN_processor, struct exec_domain, processor);
    44.8 -    OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info);
    44.9 -    OFFSET(EDOMAIN_trap_bounce, struct exec_domain, arch.trap_bounce);
   44.10 -    OFFSET(EDOMAIN_thread_flags, struct exec_domain, arch.flags);
   44.11 -    OFFSET(EDOMAIN_event_addr, struct exec_domain,
   44.12 +    OFFSET(VCPU_processor, struct vcpu, processor);
   44.13 +    OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
   44.14 +    OFFSET(VCPU_trap_bounce, struct vcpu, arch.trap_bounce);
   44.15 +    OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
   44.16 +    OFFSET(VCPU_event_addr, struct vcpu,
   44.17             arch.guest_context.event_callback_eip);
   44.18 -    OFFSET(EDOMAIN_failsafe_addr, struct exec_domain,
   44.19 +    OFFSET(VCPU_failsafe_addr, struct vcpu,
   44.20             arch.guest_context.failsafe_callback_eip);
   44.21 -    OFFSET(EDOMAIN_syscall_addr, struct exec_domain,
   44.22 +    OFFSET(VCPU_syscall_addr, struct vcpu,
   44.23             arch.guest_context.syscall_callback_eip);
   44.24 -    OFFSET(EDOMAIN_kernel_sp, struct exec_domain,
   44.25 +    OFFSET(VCPU_kernel_sp, struct vcpu,
   44.26             arch.guest_context.kernel_sp);
   44.27      BLANK();
   44.28  
    45.1 --- a/xen/arch/x86/x86_64/entry.S	Thu Jun 02 19:19:24 2005 +0000
    45.2 +++ b/xen/arch/x86/x86_64/entry.S	Thu Jun 02 21:05:33 2005 +0000
    45.3 @@ -19,15 +19,15 @@
    45.4          movq (reg),reg;
    45.5  
    45.6          ALIGN
    45.7 -/* %rbx: struct exec_domain, interrupts disabled */
    45.8 +/* %rbx: struct vcpu, interrupts disabled */
    45.9  switch_to_kernel:
   45.10 -        leaq  EDOMAIN_trap_bounce(%rbx),%rdx
   45.11 -        movq  EDOMAIN_syscall_addr(%rbx),%rax
   45.12 +        leaq  VCPU_trap_bounce(%rbx),%rdx
   45.13 +        movq  VCPU_syscall_addr(%rbx),%rax
   45.14          movq  %rax,TRAPBOUNCE_eip(%rdx)
   45.15          movw  $0,TRAPBOUNCE_flags(%rdx)
   45.16          call  create_bounce_frame
   45.17  
   45.18 -/* %rbx: struct exec_domain */
   45.19 +/* %rbx: struct vcpu */
   45.20  restore_all_guest:
   45.21          RESTORE_ALL
   45.22          testw $TRAP_syscall,4(%rsp)
   45.23 @@ -68,8 +68,8 @@ DBLFLT1:GET_CURRENT(%rbx)
   45.24          jmp   test_all_events
   45.25  failsafe_callback:
   45.26          GET_CURRENT(%rbx)
   45.27 -        leaq  EDOMAIN_trap_bounce(%rbx),%rdx
   45.28 -        movq  EDOMAIN_failsafe_addr(%rbx),%rax
   45.29 +        leaq  VCPU_trap_bounce(%rbx),%rdx
   45.30 +        movq  VCPU_failsafe_addr(%rbx),%rax
   45.31          movq  %rax,TRAPBOUNCE_eip(%rdx)
   45.32          movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
   45.33          call  create_bounce_frame
   45.34 @@ -113,7 +113,7 @@ ENTRY(syscall_enter)
   45.35          movl  $TRAP_syscall,4(%rsp)
   45.36          SAVE_ALL
   45.37          GET_CURRENT(%rbx)
   45.38 -        testb $TF_kernel_mode,EDOMAIN_thread_flags(%rbx)
   45.39 +        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
   45.40          jz    switch_to_kernel
   45.41  
   45.42  /*hypercall:*/
   45.43 @@ -125,25 +125,25 @@ ENTRY(syscall_enter)
   45.44          callq *(%r10,%rax,8)
   45.45          movq %rax,UREGS_rax(%rsp)       # save the return value
   45.46  
   45.47 -/* %rbx: struct exec_domain */
   45.48 +/* %rbx: struct vcpu */
   45.49  test_all_events:
   45.50          cli                             # tests must not race interrupts
   45.51  /*test_softirqs:*/  
   45.52 -        movl  EDOMAIN_processor(%rbx),%eax
   45.53 +        movl  VCPU_processor(%rbx),%eax
   45.54          shl   $IRQSTAT_shift,%rax
   45.55          leaq  irq_stat(%rip),%rcx
   45.56          testl $~0,(%rcx,%rax,1)
   45.57          jnz   process_softirqs
   45.58  /*test_guest_events:*/
   45.59 -        movq  EDOMAIN_vcpu_info(%rbx),%rax
   45.60 +        movq  VCPU_vcpu_info(%rbx),%rax
   45.61          testb $0xFF,VCPUINFO_upcall_mask(%rax)
   45.62          jnz   restore_all_guest
   45.63          testb $0xFF,VCPUINFO_upcall_pending(%rax)
   45.64          jz    restore_all_guest
   45.65  /*process_guest_events:*/
   45.66          sti
   45.67 -        leaq  EDOMAIN_trap_bounce(%rbx),%rdx
   45.68 -        movq  EDOMAIN_event_addr(%rbx),%rax
   45.69 +        leaq  VCPU_trap_bounce(%rbx),%rdx
   45.70 +        movq  VCPU_event_addr(%rbx),%rax
   45.71          movq  %rax,TRAPBOUNCE_eip(%rdx)
   45.72          movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
   45.73          call  create_bounce_frame
   45.74 @@ -231,7 +231,7 @@ vmx_test_all_events:
   45.75  /* test_all_events: */
   45.76          cli                             # tests must not race interrupts
   45.77  /*test_softirqs:*/  
   45.78 -        movl  EDOMAIN_processor(%rbx),%eax
   45.79 +        movl  VCPU_processor(%rbx),%eax
   45.80          shl   $IRQSTAT_shift,%rax
   45.81          leaq  irq_stat(%rip), %rdx
   45.82          testl $~0,(%rdx,%rax,1)
   45.83 @@ -274,7 +274,7 @@ vmx_process_softirqs:
   45.84  #endif
   45.85  
   45.86          ALIGN
   45.87 -/* %rbx: struct exec_domain */
   45.88 +/* %rbx: struct vcpu */
   45.89  process_softirqs:
   45.90          sti       
   45.91          call do_softirq
   45.92 @@ -282,17 +282,17 @@ process_softirqs:
   45.93  
   45.94  /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK:                     */
   45.95  /*   { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }   */
   45.96 -/* %rdx: trap_bounce, %rbx: struct exec_domain                           */
   45.97 +/* %rdx: trap_bounce, %rbx: struct vcpu                           */
   45.98  /* On return only %rbx is guaranteed non-clobbered.                      */
   45.99  create_bounce_frame:
  45.100 -        testb $TF_kernel_mode,EDOMAIN_thread_flags(%rbx)
  45.101 +        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
  45.102          jnz   1f
  45.103          /* Push new frame at registered guest-OS stack base. */
  45.104          pushq %rdx
  45.105          movq  %rbx,%rdi
  45.106          call  toggle_guest_mode
  45.107          popq  %rdx
  45.108 -        movq  EDOMAIN_kernel_sp(%rbx),%rsi
  45.109 +        movq  VCPU_kernel_sp(%rbx),%rsi
  45.110          jmp   2f
  45.111  1:      /* In kernel context already: push new frame at existing %rsp. */
  45.112          movq  UREGS_rsp+8(%rsp),%rsi
  45.113 @@ -311,7 +311,7 @@ FLT2:   movq  %rax,32(%rsi)             
  45.114  FLT3:   movq  %rax,24(%rsi)             # RSP
  45.115          movq  UREGS_eflags+8(%rsp),%rax
  45.116  FLT4:   movq  %rax,16(%rsi)             # RFLAGS
  45.117 -        movq  EDOMAIN_vcpu_info(%rbx),%rax
  45.118 +        movq  VCPU_vcpu_info(%rbx),%rax
  45.119          pushq VCPUINFO_upcall_mask(%rax)
  45.120          testb $TBF_INTERRUPT,%cl
  45.121          setnz VCPUINFO_upcall_mask(%rax)# TBF_INTERRUPT -> clear upcall mask
  45.122 @@ -371,9 +371,9 @@ FLT14:  movq  %rax,(%rsi)               
  45.123  .previous
  45.124  
  45.125          ALIGN
  45.126 -/* %rbx: struct exec_domain */
  45.127 +/* %rbx: struct vcpu */
  45.128  process_guest_exception_and_events:
  45.129 -        leaq  EDOMAIN_trap_bounce(%rbx),%rdx
  45.130 +        leaq  VCPU_trap_bounce(%rbx),%rdx
  45.131          testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
  45.132          jz    test_all_events
  45.133          call  create_bounce_frame
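
create_bounce_frame above starts by deciding which stack receives the guest
exception frame. A C-level sketch of that decision (field spellings are
inferred from the VCPU_* offsets the listing uses, so treat them as
assumptions):

    static unsigned long frame_base_sketch(struct vcpu *v,
                                           struct cpu_user_regs *regs)
    {
        if ( !(v->arch.flags & TF_kernel_mode) )
        {
            toggle_guest_mode(v);                    /* user -> kernel */
            return v->arch.guest_context.kernel_sp;  /* VCPU_kernel_sp */
        }
        return regs->rsp;            /* already in kernel: reuse %rsp */
    }
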
    46.1 --- a/xen/arch/x86/x86_64/mm.c	Thu Jun 02 19:19:24 2005 +0000
    46.2 +++ b/xen/arch/x86/x86_64/mm.c	Thu Jun 02 21:05:33 2005 +0000
    46.3 @@ -78,7 +78,7 @@ void __init paging_init(void)
    46.4      l2_pgentry_t *l2_ro_mpt;
    46.5      struct pfn_info *pg;
    46.6  
    46.7 -    idle0_exec_domain.arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
    46.8 +    idle0_vcpu.arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
    46.9  
   46.10      /* Create user-accessible L2 directory to map the MPT for guests. */
   46.11      l3_ro_mpt = (l3_pgentry_t *)alloc_xenheap_page();
   46.12 @@ -181,7 +181,7 @@ long do_stack_switch(unsigned long ss, u
   46.13  
   46.14  long do_set_segment_base(unsigned int which, unsigned long base)
   46.15  {
   46.16 -    struct exec_domain *ed = current;
   46.17 +    struct vcpu *v = current;
   46.18      long ret = 0;
   46.19  
   46.20      switch ( which )
   46.21 @@ -190,21 +190,21 @@ long do_set_segment_base(unsigned int wh
   46.22          if ( wrmsr_user(MSR_FS_BASE, base, base>>32) )
   46.23              ret = -EFAULT;
   46.24          else
   46.25 -            ed->arch.guest_context.fs_base = base;
   46.26 +            v->arch.guest_context.fs_base = base;
   46.27          break;
   46.28  
   46.29      case SEGBASE_GS_USER:
   46.30          if ( wrmsr_user(MSR_SHADOW_GS_BASE, base, base>>32) )
   46.31              ret = -EFAULT;
   46.32          else
   46.33 -            ed->arch.guest_context.gs_base_user = base;
   46.34 +            v->arch.guest_context.gs_base_user = base;
   46.35          break;
   46.36  
   46.37      case SEGBASE_GS_KERNEL:
   46.38          if ( wrmsr_user(MSR_GS_BASE, base, base>>32) )
   46.39              ret = -EFAULT;
   46.40          else
   46.41 -            ed->arch.guest_context.gs_base_kernel = base;
   46.42 +            v->arch.guest_context.gs_base_kernel = base;
   46.43          break;
   46.44  
   46.45      case SEGBASE_GS_USER_SEL:
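
The -EFAULT paths in do_set_segment_base() exist because WRMSR on
MSR_FS_BASE/MSR_GS_BASE raises #GP for a non-canonical base; wrmsr_user() is
the fixup-protected variant, so the fault surfaces as a nonzero return
rather than a hypervisor crash. The guest-visible rule, for reference
(standard 48-bit x86_64 canonical addressing):

    /* True iff va is canonical, i.e. sign-extended from bit 47. */
    static inline int is_canonical_address(unsigned long va)
    {
        return (long)va == (((long)va << 16) >> 16);
    }
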
    47.1 --- a/xen/arch/x86/x86_64/traps.c	Thu Jun 02 19:19:24 2005 +0000
    47.2 +++ b/xen/arch/x86/x86_64/traps.c	Thu Jun 02 21:05:33 2005 +0000
    47.3 @@ -181,7 +181,7 @@ long do_set_callbacks(unsigned long even
    47.4                        unsigned long failsafe_address,
    47.5                        unsigned long syscall_address)
    47.6  {
    47.7 -    struct exec_domain *d = current;
    47.8 +    struct vcpu *d = current;
    47.9  
   47.10      d->arch.guest_context.event_callback_eip    = event_address;
   47.11      d->arch.guest_context.failsafe_callback_eip = failsafe_address;
    48.1 --- a/xen/common/dom0_ops.c	Thu Jun 02 19:19:24 2005 +0000
    48.2 +++ b/xen/common/dom0_ops.c	Thu Jun 02 21:05:33 2005 +0000
    48.3 @@ -21,7 +21,7 @@
    48.4  
    48.5  extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op);
    48.6  extern void arch_getdomaininfo_ctxt(
    48.7 -    struct exec_domain *, struct vcpu_guest_context *);
    48.8 +    struct vcpu *, struct vcpu_guest_context *);
    48.9  
   48.10  static inline int is_free_domid(domid_t dom)
   48.11  {
   48.12 @@ -152,11 +152,11 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   48.13  
   48.14      case DOM0_CREATEDOMAIN:
   48.15      {
   48.16 -        struct domain      *d;
   48.17 -        unsigned int        pro;
   48.18 -        domid_t             dom;
   48.19 -        struct exec_domain *ed;
   48.20 -        unsigned int        i, cnt[NR_CPUS] = { 0 };
   48.21 +        struct domain *d;
   48.22 +        unsigned int   pro;
   48.23 +        domid_t        dom;
   48.24 +        struct vcpu   *v;
   48.25 +        unsigned int   i, cnt[NR_CPUS] = { 0 };
   48.26  
   48.27  
   48.28          dom = op->u.createdomain.domain;
   48.29 @@ -174,8 +174,8 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   48.30          /* Do an initial CPU placement. Pick the least-populated CPU. */
   48.31          read_lock(&domlist_lock);
   48.32          for_each_domain ( d )
   48.33 -            for_each_exec_domain ( d, ed )
   48.34 -                cnt[ed->processor]++;
   48.35 +            for_each_vcpu ( d, v )
   48.36 +                cnt[v->processor]++;
   48.37          read_unlock(&domlist_lock);
   48.38          
   48.39          /*
   48.40 @@ -220,7 +220,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   48.41      {
   48.42          domid_t dom = op->u.pincpudomain.domain;
   48.43          struct domain *d = find_domain_by_id(dom);
   48.44 -        struct exec_domain *ed;
   48.45 +        struct vcpu *v;
   48.46          cpumap_t cpumap;
   48.47  
   48.48  
   48.49 @@ -231,22 +231,22 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   48.50          }
   48.51          
   48.52          if ( (op->u.pincpudomain.vcpu >= MAX_VIRT_CPUS) ||
   48.53 -             !d->exec_domain[op->u.pincpudomain.vcpu] )
   48.54 +             !d->vcpu[op->u.pincpudomain.vcpu] )
   48.55          {
   48.56              ret = -EINVAL;
   48.57              put_domain(d);
   48.58              break;
   48.59          }
   48.60  
   48.61 -        ed = d->exec_domain[op->u.pincpudomain.vcpu];
   48.62 -        if ( ed == NULL )
   48.63 +        v = d->vcpu[op->u.pincpudomain.vcpu];
   48.64 +        if ( v == NULL )
   48.65          {
   48.66              ret = -ESRCH;
   48.67              put_domain(d);
   48.68              break;
   48.69          }
   48.70  
   48.71 -        if ( ed == current )
   48.72 +        if ( v == current )
   48.73          {
   48.74              ret = -EINVAL;
   48.75              put_domain(d);
   48.76 @@ -261,22 +261,22 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   48.77              break;
   48.78          }
   48.79  
   48.80 -        /* update cpumap for this ed */
   48.81 -        ed->cpumap = cpumap;
   48.82 +        /* update cpumap for this vcpu */
   48.83 +        v->cpumap = cpumap;
   48.84  
   48.85          if ( cpumap == CPUMAP_RUNANYWHERE )
   48.86 -            clear_bit(_VCPUF_cpu_pinned, &ed->vcpu_flags);
   48.87 +            clear_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
   48.88          else
   48.89          {
   48.90              /* pick a new cpu from the usable map */
   48.91              int new_cpu = (int)find_first_set_bit(cpumap) % num_online_cpus();
   48.92  
   48.93 -            exec_domain_pause(ed);
   48.94 -            if ( ed->processor != new_cpu )
   48.95 -                set_bit(_VCPUF_cpu_migrated, &ed->vcpu_flags);
   48.96 -            set_bit(_VCPUF_cpu_pinned, &ed->vcpu_flags);
   48.97 -            ed->processor = new_cpu;
   48.98 -            exec_domain_unpause(ed);
   48.99 +            vcpu_pause(v);
  48.100 +            if ( v->processor != new_cpu )
  48.101 +                set_bit(_VCPUF_cpu_migrated, &v->vcpu_flags);
  48.102 +            set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
  48.103 +            v->processor = new_cpu;
  48.104 +            vcpu_unpause(v);
  48.105          }
  48.106  
  48.107          put_domain(d);
  48.108 @@ -299,8 +299,8 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  48.109  
  48.110      case DOM0_GETDOMAININFO:
  48.111      { 
  48.112 -        struct domain             *d;
  48.113 -        struct exec_domain        *ed;
  48.114 +        struct domain *d;
  48.115 +        struct vcpu   *v;
  48.116          u64 cpu_time = 0;
  48.117          int vcpu_count = 0;
  48.118          int flags = DOMFLAGS_PAUSED | DOMFLAGS_BLOCKED;
  48.119 @@ -334,17 +334,17 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  48.120           *   are paused or blocked 
  48.121           * - domain is marked as running if any of its vcpus is running
  48.122           */
  48.123 -        for_each_exec_domain ( d, ed ) {
  48.124 -            op->u.getdomaininfo.vcpu_to_cpu[ed->vcpu_id] = ed->processor;
  48.125 -            op->u.getdomaininfo.cpumap[ed->vcpu_id]      = ed->cpumap;
  48.126 -            if ( !(ed->vcpu_flags & VCPUF_ctrl_pause) )
  48.127 +        for_each_vcpu ( d, v ) {
  48.128 +            op->u.getdomaininfo.vcpu_to_cpu[v->vcpu_id] = v->processor;
  48.129 +            op->u.getdomaininfo.cpumap[v->vcpu_id]      = v->cpumap;
  48.130 +            if ( !(v->vcpu_flags & VCPUF_ctrl_pause) )
  48.131                  flags &= ~DOMFLAGS_PAUSED;
  48.132 -            if ( !(ed->vcpu_flags & VCPUF_blocked) )
  48.133 +            if ( !(v->vcpu_flags & VCPUF_blocked) )
  48.134                  flags &= ~DOMFLAGS_BLOCKED;
  48.135 -            if ( ed->vcpu_flags & VCPUF_running )
  48.136 +            if ( v->vcpu_flags & VCPUF_running )
  48.137                  flags |= DOMFLAGS_RUNNING;
  48.138 -            if ( ed->cpu_time > cpu_time )
  48.139 -                cpu_time += ed->cpu_time;
  48.140 +            if ( v->cpu_time > cpu_time )
  48.141 +                cpu_time += v->cpu_time;
  48.142              vcpu_count++;
  48.143          }
  48.144  
  48.145 @@ -372,7 +372,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  48.146      { 
  48.147          struct vcpu_guest_context *c;
  48.148          struct domain             *d;
  48.149 -        struct exec_domain        *ed;
  48.150 +        struct vcpu               *v;
  48.151  
  48.152          d = find_domain_by_id(op->u.getvcpucontext.domain);
  48.153          if ( d == NULL )
  48.154 @@ -388,15 +388,15 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  48.155              break;
  48.156          }
  48.157          
  48.158 -        ed = d->exec_domain[op->u.getvcpucontext.vcpu];
  48.159 -        if ( ed == NULL )
  48.160 +        v = d->vcpu[op->u.getvcpucontext.vcpu];
  48.161 +        if ( v == NULL )
  48.162          {
  48.163              ret = -ESRCH;
  48.164              put_domain(d);
  48.165              break;
  48.166          }
  48.167  
  48.168 -        op->u.getvcpucontext.cpu_time = ed->cpu_time;
  48.169 +        op->u.getvcpucontext.cpu_time = v->cpu_time;
  48.170  
  48.171          if ( op->u.getvcpucontext.ctxt != NULL )
  48.172          {
  48.173 @@ -407,13 +407,13 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  48.174                  break;
  48.175              }
  48.176  
  48.177 -            if ( ed != current )
  48.178 -                exec_domain_pause(ed);
  48.179 +            if ( v != current )
  48.180 +                vcpu_pause(v);
  48.181  
  48.182 -            arch_getdomaininfo_ctxt(ed,c);
  48.183 +            arch_getdomaininfo_ctxt(v,c);
  48.184  
  48.185 -            if ( ed != current )
  48.186 -                exec_domain_unpause(ed);
  48.187 +            if ( v != current )
  48.188 +                vcpu_unpause(v);
  48.189  
  48.190              if ( copy_to_user(op->u.getvcpucontext.ctxt, c, sizeof(*c)) )
  48.191                  ret = -EINVAL;
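
The DOM0_CREATEDOMAIN placement above builds cnt[] by walking every vcpu of
every domain, then picks the least-populated cpu. The selection step,
condensed into an illustrative helper around the logic the hunk renames:

    /* Return the index of the smallest entry in cnt[0..ncpus-1]. */
    static unsigned int pick_least_populated(unsigned int cnt[],
                                             unsigned int ncpus)
    {
        unsigned int i, best = 0;
        for ( i = 1; i < ncpus; i++ )
            if ( cnt[i] < cnt[best] )
                best = i;
        return best;
    }
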
    49.1 --- a/xen/common/domain.c	Thu Jun 02 19:19:24 2005 +0000
    49.2 +++ b/xen/common/domain.c	Thu Jun 02 21:05:33 2005 +0000
    49.3 @@ -29,18 +29,18 @@ struct domain *dom0;
    49.4  struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
    49.5  {
    49.6      struct domain *d, **pd;
    49.7 -    struct exec_domain *ed;
    49.8 +    struct vcpu *v;
    49.9  
   49.10      if ( (d = alloc_domain_struct()) == NULL )
   49.11          return NULL;
   49.12  
   49.13 -    ed = d->exec_domain[0];
   49.14 +    v = d->vcpu[0];
   49.15  
   49.16      atomic_set(&d->refcnt, 1);
   49.17 -    atomic_set(&ed->pausecnt, 0);
   49.18 +    atomic_set(&v->pausecnt, 0);
   49.19  
   49.20      d->domain_id   = dom_id;
   49.21 -    ed->processor  = cpu;
   49.22 +    v->processor  = cpu;
   49.23   
   49.24      spin_lock_init(&d->time_lock);
   49.25  
   49.26 @@ -61,9 +61,9 @@ struct domain *do_createdomain(domid_t d
   49.27          return NULL;
   49.28      }
   49.29      
   49.30 -    arch_do_createdomain(ed);
   49.31 +    arch_do_createdomain(v);
   49.32      
   49.33 -    sched_add_domain(ed);
   49.34 +    sched_add_domain(v);
   49.35  
   49.36      if ( !is_idle_task(d) )
   49.37      {
   49.38 @@ -107,13 +107,13 @@ struct domain *find_domain_by_id(domid_t
   49.39  
   49.40  void domain_kill(struct domain *d)
   49.41  {
   49.42 -    struct exec_domain *ed;
   49.43 +    struct vcpu *v;
   49.44  
   49.45      domain_pause(d);
   49.46      if ( !test_and_set_bit(_DOMF_dying, &d->domain_flags) )
   49.47      {
   49.48 -        for_each_exec_domain(d, ed)
   49.49 -            sched_rem_domain(ed);
   49.50 +        for_each_vcpu(d, v)
   49.51 +            sched_rem_domain(v);
   49.52          domain_relinquish_resources(d);
   49.53          put_domain(d);
   49.54      }
   49.55 @@ -151,7 +151,7 @@ static struct domain *domain_shuttingdow
   49.56  static void domain_shutdown_finalise(void)
   49.57  {
   49.58      struct domain *d;
   49.59 -    struct exec_domain *ed;
   49.60 +    struct vcpu *v;
   49.61  
   49.62      d = domain_shuttingdown[smp_processor_id()];
   49.63      domain_shuttingdown[smp_processor_id()] = NULL;
   49.64 @@ -162,8 +162,8 @@ static void domain_shutdown_finalise(voi
   49.65      BUG_ON(test_bit(_DOMF_shutdown, &d->domain_flags));
   49.66  
   49.67      /* Make sure that every vcpu is descheduled before we finalise. */
   49.68 -    for_each_exec_domain ( d, ed )
   49.69 -        while ( test_bit(_VCPUF_running, &ed->vcpu_flags) )
   49.70 +    for_each_vcpu ( d, v )
   49.71 +        while ( test_bit(_VCPUF_running, &v->vcpu_flags) )
   49.72              cpu_relax();
   49.73  
   49.74      sync_lazy_execstate_cpuset(d->cpuset);
   49.75 @@ -174,7 +174,7 @@ static void domain_shutdown_finalise(voi
   49.76      set_bit(_DOMF_shutdown, &d->domain_flags);
   49.77      clear_bit(_DOMF_shuttingdown, &d->domain_flags);
   49.78  
   49.79 -    send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
   49.80 +    send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
   49.81  }
   49.82  
   49.83  static __init int domain_shutdown_finaliser_init(void)
   49.84 @@ -188,7 +188,7 @@ static __init int domain_shutdown_finali
   49.85  void domain_shutdown(u8 reason)
   49.86  {
   49.87      struct domain *d = current->domain;
   49.88 -    struct exec_domain *ed;
   49.89 +    struct vcpu *v;
   49.90  
   49.91      if ( d->domain_id == 0 )
   49.92      {
   49.93 @@ -219,8 +219,8 @@ void domain_shutdown(u8 reason)
   49.94      }
   49.95  
   49.96      /* Put every vcpu to sleep, but don't wait (avoids inter-vcpu deadlock). */
   49.97 -    for_each_exec_domain ( d, ed )
   49.98 -        domain_sleep_nosync(ed);
   49.99 +    for_each_vcpu ( d, v )
  49.100 +        domain_sleep_nosync(v);
  49.101  }
  49.102  
  49.103  
  49.104 @@ -259,63 +259,63 @@ void domain_destruct(struct domain *d)
  49.105  
  49.106      free_domain_struct(d);
  49.107  
  49.108 -    send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
  49.109 +    send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
  49.110  }
  49.111  
  49.112 -void exec_domain_pause(struct exec_domain *ed)
  49.113 +void vcpu_pause(struct vcpu *v)
  49.114  {
  49.115 -    BUG_ON(ed == current);
  49.116 -    atomic_inc(&ed->pausecnt);
  49.117 -    domain_sleep_sync(ed);
  49.118 +    BUG_ON(v == current);
  49.119 +    atomic_inc(&v->pausecnt);
  49.120 +    domain_sleep_sync(v);
  49.121  }
  49.122  
  49.123  void domain_pause(struct domain *d)
  49.124  {
  49.125 -    struct exec_domain *ed;
  49.126 +    struct vcpu *v;
  49.127  
  49.128 -    for_each_exec_domain( d, ed )
  49.129 +    for_each_vcpu( d, v )
  49.130      {
  49.131 -        BUG_ON(ed == current);
  49.132 -        atomic_inc(&ed->pausecnt);
  49.133 -        domain_sleep_sync(ed);
  49.134 +        BUG_ON(v == current);
  49.135 +        atomic_inc(&v->pausecnt);
  49.136 +        domain_sleep_sync(v);
  49.137      }
  49.138  }
  49.139  
  49.140 -void exec_domain_unpause(struct exec_domain *ed)
  49.141 +void vcpu_unpause(struct vcpu *v)
  49.142  {
  49.143 -    BUG_ON(ed == current);
  49.144 -    if ( atomic_dec_and_test(&ed->pausecnt) )
  49.145 -        domain_wake(ed);
  49.146 +    BUG_ON(v == current);
  49.147 +    if ( atomic_dec_and_test(&v->pausecnt) )
  49.148 +        domain_wake(v);
  49.149  }
  49.150  
  49.151  void domain_unpause(struct domain *d)
  49.152  {
  49.153 -    struct exec_domain *ed;
  49.154 +    struct vcpu *v;
  49.155  
  49.156 -    for_each_exec_domain( d, ed )
  49.157 -        exec_domain_unpause(ed);
  49.158 +    for_each_vcpu( d, v )
  49.159 +        vcpu_unpause(v);
  49.160  }
  49.161  
  49.162  void domain_pause_by_systemcontroller(struct domain *d)
  49.163  {
  49.164 -    struct exec_domain *ed;
  49.165 +    struct vcpu *v;
  49.166  
  49.167 -    for_each_exec_domain ( d, ed )
  49.168 +    for_each_vcpu ( d, v )
  49.169      {
  49.170 -        BUG_ON(ed == current);
  49.171 -        if ( !test_and_set_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags) )
  49.172 -            domain_sleep_sync(ed);
  49.173 +        BUG_ON(v == current);
  49.174 +        if ( !test_and_set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
  49.175 +            domain_sleep_sync(v);
  49.176      }
  49.177  }
  49.178  
  49.179  void domain_unpause_by_systemcontroller(struct domain *d)
  49.180  {
  49.181 -    struct exec_domain *ed;
  49.182 +    struct vcpu *v;
  49.183  
  49.184 -    for_each_exec_domain ( d, ed )
  49.185 +    for_each_vcpu ( d, v )
  49.186      {
  49.187 -        if ( test_and_clear_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags) )
  49.188 -            domain_wake(ed);
  49.189 +        if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
  49.190 +            domain_wake(v);
  49.191      }
  49.192  }
  49.193  
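
The renamed vcpu_pause()/vcpu_unpause() pair above is a counted pause:
pauses nest via pausecnt, and only the unpause that drops the count back
to zero wakes the vcpu. The _VCPUF_ctrl_pause bit used by the
systemcontroller variants is a separate, non-counted mechanism. A toy
model of the counted variant, assuming C11 atomics in place of Xen's
atomic_t, with domain_sleep_sync()/domain_wake() replaced by prints:

    #include <stdatomic.h>
    #include <stdio.h>

    struct toy_vcpu { atomic_int pausecnt; };

    static void toy_pause(struct toy_vcpu *v)
    {
        atomic_fetch_add(&v->pausecnt, 1);
        puts("sleep (sync)");                 /* domain_sleep_sync() stand-in */
    }

    static void toy_unpause(struct toy_vcpu *v)
    {
        /* atomic_dec_and_test(): wake only when the count hits zero. */
        if (atomic_fetch_sub(&v->pausecnt, 1) == 1)
            puts("wake");                     /* domain_wake() stand-in */
    }

    int main(void)
    {
        struct toy_vcpu v = { 0 };
        toy_pause(&v);
        toy_pause(&v);
        toy_unpause(&v);                      /* still paused: count is 1 */
        toy_unpause(&v);                      /* count hits 0: wakes */
        return 0;
    }
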
  49.194 @@ -330,13 +330,13 @@ int set_info_guest(struct domain *d, dom
  49.195      int rc = 0;
  49.196      struct vcpu_guest_context *c = NULL;
  49.197      unsigned long vcpu = setdomaininfo->vcpu;
  49.198 -    struct exec_domain *ed; 
  49.199 +    struct vcpu *v; 
  49.200  
  49.201 -    if ( (vcpu >= MAX_VIRT_CPUS) || ((ed = d->exec_domain[vcpu]) == NULL) )
  49.202 +    if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
  49.203          return -EINVAL;
  49.204      
  49.205      if (test_bit(_DOMF_constructed, &d->domain_flags) && 
  49.206 -        !test_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags))
  49.207 +        !test_bit(_VCPUF_ctrl_pause, &v->vcpu_flags))
  49.208          return -EINVAL;
  49.209  
  49.210      if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
  49.211 @@ -348,7 +348,7 @@ int set_info_guest(struct domain *d, dom
  49.212          goto out;
  49.213      }
  49.214      
  49.215 -    if ( (rc = arch_set_info_guest(ed, c)) != 0 )
  49.216 +    if ( (rc = arch_set_info_guest(v, c)) != 0 )
  49.217          goto out;
  49.218  
  49.219      set_bit(_DOMF_constructed, &d->domain_flags);
  49.220 @@ -366,14 +366,14 @@ int set_info_guest(struct domain *d, dom
  49.221  long do_boot_vcpu(unsigned long vcpu, struct vcpu_guest_context *ctxt) 
  49.222  {
  49.223      struct domain *d = current->domain;
  49.224 -    struct exec_domain *ed;
  49.225 +    struct vcpu *v;
  49.226      int rc = 0;
  49.227      struct vcpu_guest_context *c;
  49.228  
  49.229 -    if ( (vcpu >= MAX_VIRT_CPUS) || (d->exec_domain[vcpu] != NULL) )
  49.230 +    if ( (vcpu >= MAX_VIRT_CPUS) || (d->vcpu[vcpu] != NULL) )
  49.231          return -EINVAL;
  49.232  
  49.233 -    if ( alloc_exec_domain_struct(d, vcpu) == NULL )
  49.234 +    if ( alloc_vcpu_struct(d, vcpu) == NULL )
  49.235          return -ENOMEM;
  49.236  
  49.237      if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
  49.238 @@ -388,31 +388,31 @@ long do_boot_vcpu(unsigned long vcpu, st
  49.239          goto out;
  49.240      }
  49.241  
  49.242 -    ed = d->exec_domain[vcpu];
  49.243 -
  49.244 -    atomic_set(&ed->pausecnt, 0);
  49.245 -    ed->cpumap = CPUMAP_RUNANYWHERE;
  49.246 +    v = d->vcpu[vcpu];
  49.247  
  49.248 -    memcpy(&ed->arch, &idle0_exec_domain.arch, sizeof(ed->arch));
  49.249 +    atomic_set(&v->pausecnt, 0);
  49.250 +    v->cpumap = CPUMAP_RUNANYWHERE;
  49.251  
  49.252 -    arch_do_boot_vcpu(ed);
  49.253 +    memcpy(&v->arch, &idle0_vcpu.arch, sizeof(v->arch));
  49.254  
  49.255 -    if ( (rc = arch_set_info_guest(ed, c)) != 0 )
  49.256 +    arch_do_boot_vcpu(v);
  49.257 +
  49.258 +    if ( (rc = arch_set_info_guest(v, c)) != 0 )
  49.259          goto out;
  49.260  
  49.261 -    sched_add_domain(ed);
  49.262 +    sched_add_domain(v);
  49.263  
  49.264      /* domain_unpause_by_systemcontroller */
  49.265 -    if ( test_and_clear_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags) )
  49.266 -        domain_wake(ed);
  49.267 +    if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
  49.268 +        domain_wake(v);
  49.269  
  49.270      xfree(c);
  49.271      return 0;
  49.272  
  49.273   out:
  49.274      xfree(c);
  49.275 -    arch_free_exec_domain_struct(d->exec_domain[vcpu]);
  49.276 -    d->exec_domain[vcpu] = NULL;
  49.277 +    arch_free_vcpu_struct(d->vcpu[vcpu]);
  49.278 +    d->vcpu[vcpu] = NULL;
  49.279      return rc;
  49.280  }
  49.281  
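
do_boot_vcpu() above follows an allocate-then-roll-back shape: reserve the
vcpu slot, copy the guest-supplied context in, initialise, and on any
failure free the slot and clear the pointer so the same vcpu can be booted
again later. A compressed sketch of that shape; the toy_* names are
stand-ins, and the copy step models copy_from_user():

    #include <stdlib.h>
    #include <string.h>

    struct toy_ctx  { int entry_point; };
    struct toy_vcpu { struct toy_ctx ctx; };

    static int toy_boot_vcpu(struct toy_vcpu **slot, const struct toy_ctx *uctx)
    {
        struct toy_ctx *c;
        int rc;

        if (*slot != NULL)                       /* vcpu already booted */
            return -1;
        if ((*slot = calloc(1, sizeof **slot)) == NULL)
            return -2;
        if ((c = malloc(sizeof *c)) == NULL) {
            rc = -2;
            goto out;
        }

        memcpy(c, uctx, sizeof *c);              /* copy_from_user() step */
        (*slot)->ctx = *c;                       /* arch_set_info_guest() step */
        free(c);
        return 0;

     out:                                        /* roll back the allocation */
        free(*slot);
        *slot = NULL;
        return rc;
    }

    int main(void)
    {
        struct toy_vcpu *v = NULL;
        struct toy_ctx c = { 42 };
        return toy_boot_vcpu(&v, &c) ? 1 : 0;
    }
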
    50.1 --- a/xen/common/event_channel.c	Thu Jun 02 19:19:24 2005 +0000
    50.2 +++ b/xen/common/event_channel.c	Thu Jun 02 21:05:33 2005 +0000
    50.3 @@ -31,16 +31,16 @@
    50.4  #define EVENT_CHANNELS_SPREAD 32
    50.5  
    50.6  
    50.7 -static int get_free_port(struct exec_domain *ed)
    50.8 +static int get_free_port(struct vcpu *v)
    50.9  {
   50.10 -    struct domain *d = ed->domain;
   50.11 +    struct domain *d = v->domain;
   50.12      int max, port;
   50.13      event_channel_t *chn;
   50.14  
   50.15      max = d->max_event_channel;
   50.16      chn = d->event_channel;
   50.17  
   50.18 -    for ( port = ed->vcpu_id * EVENT_CHANNELS_SPREAD; port < max; port++ )
   50.19 +    for ( port = v->vcpu_id * EVENT_CHANNELS_SPREAD; port < max; port++ )
   50.20          if ( chn[port].state == ECS_FREE )
   50.21              break;
   50.22  
   50.23 @@ -102,7 +102,7 @@ static long evtchn_bind_interdomain(evtc
   50.24  {
   50.25  #define ERROR_EXIT(_errno) do { rc = (_errno); goto out; } while ( 0 )
   50.26      struct domain *d1, *d2;
   50.27 -    struct exec_domain *ed1, *ed2;
   50.28 +    struct vcpu   *v1, *v2;
   50.29      int            port1 = bind->port1, port2 = bind->port2;
   50.30      domid_t        dom1 = bind->dom1, dom2 = bind->dom2;
   50.31      long           rc = 0;
   50.32 @@ -126,8 +126,8 @@ static long evtchn_bind_interdomain(evtc
   50.33          return -ESRCH;
   50.34      }
   50.35  
   50.36 -    ed1 = d1->exec_domain[0];   /* XXX */
   50.37 -    ed2 = d2->exec_domain[0];   /* XXX */
   50.38 +    v1 = d1->vcpu[0];   /* XXX */
   50.39 +    v2 = d2->vcpu[0];   /* XXX */
   50.40  
   50.41      /* Avoid deadlock by first acquiring lock of domain with smaller id. */
   50.42      if ( d1 < d2 )
   50.43 @@ -145,7 +145,7 @@ static long evtchn_bind_interdomain(evtc
   50.44      /* Obtain, or ensure that we already have, a valid <port1>. */
   50.45      if ( port1 == 0 )
   50.46      {
   50.47 -        if ( (port1 = get_free_port(ed1)) < 0 )
   50.48 +        if ( (port1 = get_free_port(v1)) < 0 )
   50.49              ERROR_EXIT(port1);
   50.50      }
   50.51      else if ( port1 >= d1->max_event_channel )
   50.52 @@ -157,7 +157,7 @@ static long evtchn_bind_interdomain(evtc
   50.53          /* Make port1 non-free while we allocate port2 (in case dom1==dom2). */
   50.54          u16 tmp = d1->event_channel[port1].state;
   50.55          d1->event_channel[port1].state = ECS_INTERDOMAIN;
   50.56 -        port2 = get_free_port(ed2);
   50.57 +        port2 = get_free_port(v2);
   50.58          d1->event_channel[port1].state = tmp;
   50.59          if ( port2 < 0 )
   50.60              ERROR_EXIT(port2);
   50.61 @@ -177,7 +177,7 @@ static long evtchn_bind_interdomain(evtc
   50.62          break;
   50.63  
   50.64      case ECS_INTERDOMAIN:
   50.65 -        if ( d1->event_channel[port1].u.interdomain.remote_dom != ed2 )
   50.66 +        if ( d1->event_channel[port1].u.interdomain.remote_dom != v2 )
   50.67              ERROR_EXIT(-EINVAL);
   50.68          if ( (d1->event_channel[port1].u.interdomain.remote_port != port2) &&
   50.69               (bind->port2 != 0) )
   50.70 @@ -203,7 +203,7 @@ static long evtchn_bind_interdomain(evtc
   50.71          break;
   50.72  
   50.73      case ECS_INTERDOMAIN:
   50.74 -        if ( d2->event_channel[port2].u.interdomain.remote_dom != ed1 )
   50.75 +        if ( d2->event_channel[port2].u.interdomain.remote_dom != v1 )
   50.76              ERROR_EXIT(-EINVAL);
   50.77          if ( (d2->event_channel[port2].u.interdomain.remote_port != port1) &&
   50.78               (bind->port1 != 0) )
   50.79 @@ -219,11 +219,11 @@ static long evtchn_bind_interdomain(evtc
   50.80       * Everything checked out okay -- bind <dom1,port1> to <dom2,port2>.
   50.81       */
   50.82  
   50.83 -    d1->event_channel[port1].u.interdomain.remote_dom  = ed2;
   50.84 +    d1->event_channel[port1].u.interdomain.remote_dom  = v2;
   50.85      d1->event_channel[port1].u.interdomain.remote_port = (u16)port2;
   50.86      d1->event_channel[port1].state                     = ECS_INTERDOMAIN;
   50.87      
   50.88 -    d2->event_channel[port2].u.interdomain.remote_dom  = ed1;
   50.89 +    d2->event_channel[port2].u.interdomain.remote_dom  = v1;
   50.90      d2->event_channel[port2].u.interdomain.remote_port = (u16)port1;
   50.91      d2->event_channel[port2].state                     = ECS_INTERDOMAIN;
   50.92  
   50.93 @@ -245,11 +245,11 @@ static long evtchn_bind_interdomain(evtc
   50.94  
   50.95  static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
   50.96  {
   50.97 -    struct exec_domain *ed = current;
   50.98 -    struct domain *d = ed->domain;
   50.99 +    struct vcpu   *v = current;
  50.100 +    struct domain *d = v->domain;
  50.101      int            port, virq = bind->virq;
  50.102  
  50.103 -    if ( virq >= ARRAY_SIZE(ed->virq_to_evtchn) )
  50.104 +    if ( virq >= ARRAY_SIZE(v->virq_to_evtchn) )
  50.105          return -EINVAL;
  50.106  
  50.107      spin_lock(&d->event_channel_lock);
  50.108 @@ -258,14 +258,14 @@ static long evtchn_bind_virq(evtchn_bind
  50.109       * Port 0 is the fallback port for VIRQs that haven't been explicitly
  50.110       * bound yet.
  50.111       */
  50.112 -    if ( ((port = ed->virq_to_evtchn[virq]) != 0) ||
  50.113 -         ((port = get_free_port(ed)) < 0) )
  50.114 +    if ( ((port = v->virq_to_evtchn[virq]) != 0) ||
  50.115 +         ((port = get_free_port(v)) < 0) )
  50.116          goto out;
  50.117  
  50.118      d->event_channel[port].state  = ECS_VIRQ;
  50.119      d->event_channel[port].u.virq = virq;
  50.120  
  50.121 -    ed->virq_to_evtchn[virq] = port;
  50.122 +    v->virq_to_evtchn[virq] = port;
  50.123  
  50.124   out:
  50.125      spin_unlock(&d->event_channel_lock);
  50.126 @@ -279,19 +279,19 @@ static long evtchn_bind_virq(evtchn_bind
  50.127  
  50.128  static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
  50.129  {
  50.130 -    struct exec_domain *ed = current;
  50.131 -    struct domain *d = ed->domain;
  50.132 -    int            port, ipi_edom = bind->ipi_edom;
  50.133 +    struct vcpu   *v = current;
  50.134 +    struct domain *d = v->domain;
  50.135 +    int            port, ipi_vcpu = bind->ipi_vcpu;
  50.136  
  50.137 -    if ( ipi_edom >= MAX_VIRT_CPUS )
  50.138 +    if ( ipi_vcpu >= MAX_VIRT_CPUS )
  50.139          return -EINVAL;
  50.140  
  50.141      spin_lock(&d->event_channel_lock);
  50.142  
  50.143 -    if ( (port = get_free_port(ed)) >= 0 )
  50.144 +    if ( (port = get_free_port(v)) >= 0 )
  50.145      {
  50.146          d->event_channel[port].state      = ECS_IPI;
  50.147 -        d->event_channel[port].u.ipi_edom = ipi_edom;
  50.148 +        d->event_channel[port].u.ipi_vcpu = ipi_vcpu;
  50.149      }
  50.150  
  50.151      spin_unlock(&d->event_channel_lock);
  50.152 @@ -344,7 +344,7 @@ static long evtchn_bind_pirq(evtchn_bind
  50.153  static long __evtchn_close(struct domain *d1, int port1)
  50.154  {
  50.155      struct domain   *d2 = NULL;
  50.156 -    struct exec_domain *ed;
  50.157 +    struct vcpu *v;
  50.158      event_channel_t *chn1, *chn2;
  50.159      int              port2;
  50.160      long             rc = 0;
  50.161 @@ -376,10 +376,10 @@ static long __evtchn_close(struct domain
  50.162          break;
  50.163  
  50.164      case ECS_VIRQ:
  50.165 -        /* XXX could store exec_domain in chn1[port1].u */
  50.166 -        for_each_exec_domain(d1, ed)
  50.167 -            if (ed->virq_to_evtchn[chn1[port1].u.virq] == port1)
  50.168 -                ed->virq_to_evtchn[chn1[port1].u.virq] = 0;
  50.169 +        /* XXX could store vcpu in chn1[port1].u */
  50.170 +        for_each_vcpu ( d1, v )
  50.171 +            if (v->virq_to_evtchn[chn1[port1].u.virq] == port1)
  50.172 +                v->virq_to_evtchn[chn1[port1].u.virq] = 0;
  50.173          break;
  50.174  
  50.175      case ECS_IPI:
  50.176 @@ -476,7 +476,7 @@ static long evtchn_close(evtchn_close_t 
  50.177  long evtchn_send(int lport)
  50.178  {
  50.179      struct domain *ld = current->domain;
  50.180 -    struct exec_domain *rd;
  50.181 +    struct vcpu *rd;
  50.182      int            rport, ret = 0;
  50.183  
  50.184      spin_lock(&ld->event_channel_lock);
  50.185 @@ -497,7 +497,7 @@ long evtchn_send(int lport)
  50.186          evtchn_set_pending(rd, rport);
  50.187          break;
  50.188      case ECS_IPI:
  50.189 -        rd = ld->exec_domain[ld->event_channel[lport].u.ipi_edom];
  50.190 +        rd = ld->vcpu[ld->event_channel[lport].u.ipi_vcpu];
  50.191          if ( rd  )
  50.192              evtchn_set_pending(rd, lport);
  50.193          else
  50.194 @@ -565,7 +565,7 @@ static long evtchn_status(evtchn_status_
  50.195          break;
  50.196      case ECS_IPI:
  50.197          status->status     = EVTCHNSTAT_ipi;
  50.198 -        status->u.ipi_edom = chn[port].u.ipi_edom;
  50.199 +        status->u.ipi_vcpu = chn[port].u.ipi_vcpu;
  50.200          break;
  50.201      default:
  50.202          BUG();
  50.203 @@ -645,7 +645,7 @@ int init_event_channels(struct domain *d
  50.204  {
  50.205      spin_lock_init(&d->event_channel_lock);
  50.206      /* Call get_free_port to initialize d->event_channel */
  50.207 -    if ( get_free_port(d->exec_domain[0]) != 0 )
  50.208 +    if ( get_free_port(d->vcpu[0]) != 0 )
  50.209          return -EINVAL;
  50.210      d->event_channel[0].state = ECS_RESERVED;
  50.211      return 0;
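
get_free_port() above starts its scan at vcpu_id * EVENT_CHANNELS_SPREAD,
so different vcpus tend to allocate ports from different regions of the
table rather than all contending for the low ports. A toy version of the
scan; the table size and state encoding are simplified, and the real
function grows the table instead of failing when it runs out:

    #include <stdio.h>

    #define SPREAD    32     /* EVENT_CHANNELS_SPREAD in the code above */
    #define MAX_PORTS 128    /* made-up fixed size for the sketch */

    /* Return the first free port at or after the caller's spread origin. */
    static int toy_get_free_port(const int state[], int vcpu_id)
    {
        for (int port = vcpu_id * SPREAD; port < MAX_PORTS; port++)
            if (state[port] == 0)                /* 0 plays ECS_FREE here */
                return port;
        return -1;
    }

    int main(void)
    {
        int state[MAX_PORTS] = { 0 };
        state[32] = 1;                           /* vcpu 1's first slot taken */
        printf("vcpu0 -> %d, vcpu1 -> %d\n",
               toy_get_free_port(state, 0),      /* 0 */
               toy_get_free_port(state, 1));     /* 33 */
        return 0;
    }
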
    51.1 --- a/xen/common/grant_table.c	Thu Jun 02 19:19:24 2005 +0000
    51.2 +++ b/xen/common/grant_table.c	Thu Jun 02 21:05:33 2005 +0000
    51.3 @@ -62,7 +62,7 @@ put_maptrack_handle(
    51.4  static int
    51.5  __gnttab_activate_grant_ref(
    51.6      struct domain          *mapping_d,          /* IN */
    51.7 -    struct exec_domain     *mapping_ed,
    51.8 +    struct vcpu     *mapping_ed,
    51.9      struct domain          *granting_d,
   51.10      grant_ref_t             ref,
   51.11      u16                     dev_hst_ro_flags,
   51.12 @@ -319,7 +319,7 @@ static int
   51.13      domid_t               dom;
   51.14      grant_ref_t           ref;
   51.15      struct domain        *ld, *rd;
   51.16 -    struct exec_domain   *led;
   51.17 +    struct vcpu   *led;
   51.18      u16                   dev_hst_ro_flags;
   51.19      int                   handle;
   51.20      unsigned long         frame = 0, host_virt_addr;
    52.1 --- a/xen/common/keyhandler.c	Thu Jun 02 19:19:24 2005 +0000
    52.2 +++ b/xen/common/keyhandler.c	Thu Jun 02 21:05:33 2005 +0000
    52.3 @@ -99,7 +99,7 @@ static void halt_machine(unsigned char k
    52.4  static void do_task_queues(unsigned char key)
    52.5  {
    52.6      struct domain *d;
    52.7 -    struct exec_domain *ed;
    52.8 +    struct vcpu *v;
    52.9      s_time_t       now = NOW();
   52.10  
   52.11      printk("'%c' pressed -> dumping task queues (now=0x%X:%08X)\n", key,
   52.12 @@ -115,24 +115,24 @@ static void do_task_queues(unsigned char
   52.13  
   52.14          dump_pageframe_info(d);
   52.15                 
   52.16 -        for_each_exec_domain ( d, ed ) {
   52.17 +        for_each_vcpu ( d, v ) {
   52.18              printk("Guest: %p CPU %d [has=%c] flags=%lx "
   52.19 -                   "upcall_pend = %02x, upcall_mask = %02x\n", ed,
   52.20 -                   ed->processor,
   52.21 -                   test_bit(_VCPUF_running, &ed->vcpu_flags) ? 'T':'F',
   52.22 -                   ed->vcpu_flags,
   52.23 -                   ed->vcpu_info->evtchn_upcall_pending, 
   52.24 -                   ed->vcpu_info->evtchn_upcall_mask);
   52.25 -            printk("Notifying guest... %d/%d\n", d->domain_id, ed->vcpu_id); 
   52.26 +                   "upcall_pend = %02x, upcall_mask = %02x\n", v,
   52.27 +                   v->processor,
   52.28 +                   test_bit(_VCPUF_running, &v->vcpu_flags) ? 'T':'F',
   52.29 +                   v->vcpu_flags,
   52.30 +                   v->vcpu_info->evtchn_upcall_pending, 
   52.31 +                   v->vcpu_info->evtchn_upcall_mask);
   52.32 +            printk("Notifying guest... %d/%d\n", d->domain_id, v->vcpu_id); 
   52.33              printk("port %d/%d stat %d %d %d\n",
   52.34 -                   VIRQ_DEBUG, ed->virq_to_evtchn[VIRQ_DEBUG],
   52.35 -                   test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], 
   52.36 +                   VIRQ_DEBUG, v->virq_to_evtchn[VIRQ_DEBUG],
   52.37 +                   test_bit(v->virq_to_evtchn[VIRQ_DEBUG], 
   52.38                              &d->shared_info->evtchn_pending[0]),
   52.39 -                   test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], 
   52.40 +                   test_bit(v->virq_to_evtchn[VIRQ_DEBUG], 
   52.41                              &d->shared_info->evtchn_mask[0]),
   52.42 -                   test_bit(ed->virq_to_evtchn[VIRQ_DEBUG]>>5, 
   52.43 -                            &ed->vcpu_info->evtchn_pending_sel));
   52.44 -            send_guest_virq(ed, VIRQ_DEBUG);
   52.45 +                   test_bit(v->virq_to_evtchn[VIRQ_DEBUG]>>5, 
   52.46 +                            &v->vcpu_info->evtchn_pending_sel));
   52.47 +            send_guest_virq(v, VIRQ_DEBUG);
   52.48          }
   52.49      }
   52.50  
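
The debug-key dump above tests three bitmaps for a VIRQ's port: the shared
evtchn_pending and evtchn_mask arrays, indexed by port, and the per-vcpu
evtchn_pending_sel selector, tested at bit port>>5, i.e. one selector bit
per 32-bit word of the pending array. A small sketch of that indexing,
with made-up sizes:

    #include <stdint.h>
    #include <stdio.h>

    /* Test bit 'port' in a bitmap stored as an array of 32-bit words. */
    static int bit_is_set(const uint32_t *bm, unsigned port)
    {
        return (bm[port / 32] >> (port % 32)) & 1;
    }

    int main(void)
    {
        uint32_t pending[4] = { 0 };
        unsigned port = 37;

        pending[port / 32] |= 1u << (port % 32);  /* mark port pending */
        printf("port %u: pending=%d selector bit=%u\n",
               port, bit_is_set(pending, port), port >> 5);
        return 0;
    }
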
    53.1 --- a/xen/common/sched_bvt.c	Thu Jun 02 19:19:24 2005 +0000
    53.2 +++ b/xen/common/sched_bvt.c	Thu Jun 02 21:05:33 2005 +0000
    53.3 @@ -27,12 +27,12 @@
    53.4  #include <xen/softirq.h>
    53.5  
    53.6  /* all per-domain BVT-specific scheduling info is stored here */
    53.7 -struct bvt_edom_info
    53.8 +struct bvt_vcpu_info
    53.9  {
   53.10      struct list_head    run_list;         /* runqueue list pointers */
   53.11      u32                 avt;              /* actual virtual time */
   53.12      u32                 evt;              /* effective virtual time */
   53.13 -    struct exec_domain  *exec_domain;
   53.14 +    struct vcpu  *vcpu;
   53.15      struct bvt_dom_info *inf;
   53.16  };
   53.17  
   53.18 @@ -49,7 +49,7 @@ struct bvt_dom_info
   53.19      s_time_t            warpu;            /* unwarp time requirement */
   53.20      struct ac_timer     unwarp_timer;     /* deals with warpu */
   53.21  
   53.22 -    struct bvt_edom_info ed_inf[MAX_VIRT_CPUS];
   53.23 +    struct bvt_vcpu_info vcpu_inf[MAX_VIRT_CPUS];
   53.24  };
   53.25  
   53.26  struct bvt_cpu_info
   53.27 @@ -59,7 +59,7 @@ struct bvt_cpu_info
   53.28  };
   53.29  
   53.30  #define BVT_INFO(p)   ((struct bvt_dom_info *)(p)->sched_priv)
   53.31 -#define EBVT_INFO(p)  ((struct bvt_edom_info *)(p)->sched_priv)
   53.32 +#define EBVT_INFO(p)  ((struct bvt_vcpu_info *)(p)->sched_priv)
   53.33  #define CPU_INFO(cpu) ((struct bvt_cpu_info *)(schedule_data[cpu]).sched_priv)
   53.34  #define RUNLIST(p)    ((struct list_head *)&(EBVT_INFO(p)->run_list))
   53.35  #define RUNQUEUE(cpu) ((struct list_head *)&(CPU_INFO(cpu)->runqueue))
   53.36 @@ -70,24 +70,24 @@ struct bvt_cpu_info
   53.37  #define TIME_SLOP      (s32)MICROSECS(50)     /* allow time to slip a bit */
   53.38  static s32 ctx_allow = (s32)MILLISECS(5);     /* context switch allowance */
   53.39  
   53.40 -static inline void __add_to_runqueue_head(struct exec_domain *d)
   53.41 +static inline void __add_to_runqueue_head(struct vcpu *d)
   53.42  {
   53.43      list_add(RUNLIST(d), RUNQUEUE(d->processor));
   53.44  }
   53.45  
   53.46 -static inline void __add_to_runqueue_tail(struct exec_domain *d)
   53.47 +static inline void __add_to_runqueue_tail(struct vcpu *d)
   53.48  {
   53.49      list_add_tail(RUNLIST(d), RUNQUEUE(d->processor));
   53.50  }
   53.51  
   53.52 -static inline void __del_from_runqueue(struct exec_domain *d)
   53.53 +static inline void __del_from_runqueue(struct vcpu *d)
   53.54  {
   53.55      struct list_head *runlist = RUNLIST(d);
   53.56      list_del(runlist);
   53.57      runlist->next = NULL;
   53.58  }
   53.59  
   53.60 -static inline int __task_on_runqueue(struct exec_domain *d)
   53.61 +static inline int __task_on_runqueue(struct vcpu *d)
   53.62  {
   53.63      return (RUNLIST(d))->next != NULL;
   53.64  }
   53.65 @@ -97,7 +97,7 @@ static inline int __task_on_runqueue(str
   53.66  static void warp_timer_fn(void *data)
   53.67  {
   53.68      struct bvt_dom_info *inf = data;
   53.69 -    unsigned int cpu = inf->domain->exec_domain[0]->processor;
   53.70 +    unsigned int cpu = inf->domain->vcpu[0]->processor;
   53.71      
   53.72      spin_lock_irq(&schedule_data[cpu].schedule_lock);
   53.73  
   53.74 @@ -118,7 +118,7 @@ static void warp_timer_fn(void *data)
   53.75  static void unwarp_timer_fn(void *data)
   53.76  {
   53.77      struct bvt_dom_info *inf = data;
   53.78 -    unsigned int cpu = inf->domain->exec_domain[0]->processor;
   53.79 +    unsigned int cpu = inf->domain->vcpu[0]->processor;
   53.80  
   53.81      spin_lock_irq(&schedule_data[cpu].schedule_lock);
   53.82  
   53.83 @@ -131,11 +131,11 @@ static void unwarp_timer_fn(void *data)
   53.84      spin_unlock_irq(&schedule_data[cpu].schedule_lock);
   53.85  }
   53.86  
   53.87 -static inline u32 calc_avt(struct exec_domain *d, s_time_t now)
   53.88 +static inline u32 calc_avt(struct vcpu *d, s_time_t now)
   53.89  {
   53.90      u32 ranfor, mcus;
   53.91      struct bvt_dom_info *inf = BVT_INFO(d->domain);
   53.92 -    struct bvt_edom_info *einf = EBVT_INFO(d);
   53.93 +    struct bvt_vcpu_info *einf = EBVT_INFO(d);
   53.94      
   53.95      ranfor = (u32)(now - d->lastschd);
   53.96      mcus = (ranfor + MCU - 1)/MCU;
   53.97 @@ -147,7 +147,7 @@ static inline u32 calc_avt(struct exec_d
   53.98   * Calculate the effective virtual time for a domain. Take into account 
   53.99   * warping limits
  53.100   */
  53.101 -static inline u32 calc_evt(struct exec_domain *d, u32 avt)
  53.102 +static inline u32 calc_evt(struct vcpu *d, u32 avt)
  53.103  {
  53.104      struct bvt_dom_info *inf = BVT_INFO(d->domain);
  53.105      /* TODO The warp routines need to be rewritten GM */
  53.106 @@ -164,9 +164,9 @@ static inline u32 calc_evt(struct exec_d
  53.107   *
  53.108   * Returns non-zero on failure.
  53.109   */
  53.110 -static int bvt_alloc_task(struct exec_domain *ed)
  53.111 +static int bvt_alloc_task(struct vcpu *v)
  53.112  {
  53.113 -    struct domain *d = ed->domain;
  53.114 +    struct domain *d = v->domain;
  53.115  
  53.116      if ( (d->sched_priv == NULL) )
  53.117      {
  53.118 @@ -175,10 +175,10 @@ static int bvt_alloc_task(struct exec_do
  53.119          memset(d->sched_priv, 0, sizeof(struct bvt_dom_info));
  53.120      }
  53.121  
  53.122 -    ed->sched_priv = &BVT_INFO(d)->ed_inf[ed->vcpu_id];
  53.123 +    v->sched_priv = &BVT_INFO(d)->vcpu_inf[v->vcpu_id];
  53.124  
  53.125 -    BVT_INFO(d)->ed_inf[ed->vcpu_id].inf = BVT_INFO(d);
  53.126 -    BVT_INFO(d)->ed_inf[ed->vcpu_id].exec_domain = ed;
  53.127 +    BVT_INFO(d)->vcpu_inf[v->vcpu_id].inf = BVT_INFO(d);
  53.128 +    BVT_INFO(d)->vcpu_inf[v->vcpu_id].vcpu = v;
  53.129  
  53.130      return 0;
  53.131  }
  53.132 @@ -186,26 +186,26 @@ static int bvt_alloc_task(struct exec_do
  53.133  /*
  53.134   * Add and remove a domain
  53.135   */
  53.136 -static void bvt_add_task(struct exec_domain *d) 
  53.137 +static void bvt_add_task(struct vcpu *v) 
  53.138  {
  53.139 -    struct bvt_dom_info *inf = BVT_INFO(d->domain);
  53.140 -    struct bvt_edom_info *einf = EBVT_INFO(d);
  53.141 +    struct bvt_dom_info *inf = BVT_INFO(v->domain);
  53.142 +    struct bvt_vcpu_info *einf = EBVT_INFO(v);
  53.143      ASSERT(inf != NULL);
  53.144 -    ASSERT(d   != NULL);
  53.145 +    ASSERT(v   != NULL);
  53.146  
  53.147      /* Allocate per-CPU context if this is the first domain to be added. */
  53.148 -    if ( CPU_INFO(d->processor) == NULL )
  53.149 +    if ( CPU_INFO(v->processor) == NULL )
  53.150      {
  53.151 -        schedule_data[d->processor].sched_priv = xmalloc(struct bvt_cpu_info);
  53.152 -        BUG_ON(CPU_INFO(d->processor) == NULL);
  53.153 -        INIT_LIST_HEAD(RUNQUEUE(d->processor));
  53.154 -        CPU_SVT(d->processor) = 0;
  53.155 +        schedule_data[v->processor].sched_priv = xmalloc(struct bvt_cpu_info);
  53.156 +        BUG_ON(CPU_INFO(v->processor) == NULL);
  53.157 +        INIT_LIST_HEAD(RUNQUEUE(v->processor));
  53.158 +        CPU_SVT(v->processor) = 0;
  53.159      }
  53.160  
  53.161 -    if ( d->vcpu_id == 0 )
  53.162 +    if ( v->vcpu_id == 0 )
  53.163      {
  53.164          inf->mcu_advance = MCU_ADVANCE;
  53.165 -        inf->domain      = d->domain;
  53.166 +        inf->domain      = v->domain;
  53.167          inf->warpback    = 0;
  53.168          /* Set some default values here. */
  53.169          inf->warp        = 0;
  53.170 @@ -213,49 +213,49 @@ static void bvt_add_task(struct exec_dom
  53.171          inf->warpl       = MILLISECS(2000);
  53.172          inf->warpu       = MILLISECS(1000);
  53.173          /* Initialise the warp timers. */
  53.174 -        init_ac_timer(&inf->warp_timer, warp_timer_fn, inf, d->processor);
  53.175 -        init_ac_timer(&inf->unwarp_timer, unwarp_timer_fn, inf, d->processor);
  53.176 +        init_ac_timer(&inf->warp_timer, warp_timer_fn, inf, v->processor);
  53.177 +        init_ac_timer(&inf->unwarp_timer, unwarp_timer_fn, inf, v->processor);
  53.178      }
  53.179  
  53.180 -    einf->exec_domain = d;
  53.181 +    einf->vcpu = v;
  53.182  
  53.183 -    if ( is_idle_task(d->domain) )
  53.184 +    if ( is_idle_task(v->domain) )
  53.185      {
  53.186          einf->avt = einf->evt = ~0U;
  53.187 -        BUG_ON(__task_on_runqueue(d));
  53.188 -        __add_to_runqueue_head(d);
  53.189 +        BUG_ON(__task_on_runqueue(v));
  53.190 +        __add_to_runqueue_head(v);
  53.191      } 
  53.192      else 
  53.193      {
  53.194          /* Set avt and evt to system virtual time. */
  53.195 -        einf->avt = CPU_SVT(d->processor);
  53.196 -        einf->evt = CPU_SVT(d->processor);
  53.197 +        einf->avt = CPU_SVT(v->processor);
  53.198 +        einf->evt = CPU_SVT(v->processor);
  53.199      }
  53.200  }
  53.201  
  53.202 -static void bvt_wake(struct exec_domain *ed)
  53.203 +static void bvt_wake(struct vcpu *v)
  53.204  {
  53.205 -    struct bvt_edom_info *einf = EBVT_INFO(ed);
  53.206 -    struct exec_domain  *curr;
  53.207 +    struct bvt_vcpu_info *einf = EBVT_INFO(v);
  53.208 +    struct vcpu  *curr;
  53.209      s_time_t            now, r_time;
  53.210 -    int                 cpu = ed->processor;
  53.211 +    int                 cpu = v->processor;
  53.212      u32                 curr_evt;
  53.213  
  53.214 -    if ( unlikely(__task_on_runqueue(ed)) )
  53.215 +    if ( unlikely(__task_on_runqueue(v)) )
  53.216          return;
  53.217  
  53.218 -    __add_to_runqueue_head(ed);
  53.219 +    __add_to_runqueue_head(v);
  53.220  
  53.221      now = NOW();
  53.222  
  53.223      /* Set the BVT parameters. AVT should always be updated 
  53.224         if CPU migration ocurred.*/
  53.225      if ( einf->avt < CPU_SVT(cpu) || 
  53.226 -         unlikely(test_bit(_VCPUF_cpu_migrated, &ed->vcpu_flags)) )
  53.227 +         unlikely(test_bit(_VCPUF_cpu_migrated, &v->vcpu_flags)) )
  53.228          einf->avt = CPU_SVT(cpu);
  53.229  
  53.230      /* Deal with warping here. */
  53.231 -    einf->evt = calc_evt(ed, einf->avt);
  53.232 +    einf->evt = calc_evt(v, einf->avt);
  53.233      
  53.234      curr = schedule_data[cpu].curr;
  53.235      curr_evt = calc_evt(curr, calc_avt(curr, now));
  53.236 @@ -272,12 +272,12 @@ static void bvt_wake(struct exec_domain 
  53.237  }
  53.238  
  53.239  
  53.240 -static void bvt_sleep(struct exec_domain *ed)
  53.241 +static void bvt_sleep(struct vcpu *v)
  53.242  {
  53.243 -    if ( test_bit(_VCPUF_running, &ed->vcpu_flags) )
  53.244 -        cpu_raise_softirq(ed->processor, SCHEDULE_SOFTIRQ);
  53.245 -    else  if ( __task_on_runqueue(ed) )
  53.246 -        __del_from_runqueue(ed);
  53.247 +    if ( test_bit(_VCPUF_running, &v->vcpu_flags) )
  53.248 +        cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
  53.249 +    else  if ( __task_on_runqueue(v) )
  53.250 +        __del_from_runqueue(v);
  53.251  }
  53.252  
  53.253  /**
  53.254 @@ -361,15 +361,15 @@ static int bvt_adjdom(
  53.255  static struct task_slice bvt_do_schedule(s_time_t now)
  53.256  {
  53.257      struct domain *d;
  53.258 -    struct exec_domain      *prev = current, *next = NULL, *next_prime, *ed; 
  53.259 +    struct vcpu      *prev = current, *next = NULL, *next_prime, *ed; 
  53.260      int                 cpu = prev->processor;
  53.261      s32                 r_time;     /* time for new dom to run */
  53.262      u32                 next_evt, next_prime_evt, min_avt;
  53.263      struct bvt_dom_info *prev_inf       = BVT_INFO(prev->domain);
  53.264 -    struct bvt_edom_info *prev_einf       = EBVT_INFO(prev);
  53.265 -    struct bvt_edom_info *p_einf          = NULL;
  53.266 -    struct bvt_edom_info *next_einf       = NULL;
  53.267 -    struct bvt_edom_info *next_prime_einf = NULL;
  53.268 +    struct bvt_vcpu_info *prev_einf       = EBVT_INFO(prev);
  53.269 +    struct bvt_vcpu_info *p_einf          = NULL;
  53.270 +    struct bvt_vcpu_info *next_einf       = NULL;
  53.271 +    struct bvt_vcpu_info *next_prime_einf = NULL;
  53.272      struct task_slice     ret;
  53.273  
  53.274      ASSERT(prev->sched_priv != NULL);
  53.275 @@ -435,8 +435,8 @@ static struct task_slice bvt_do_schedule
  53.276          set_ac_timer(&next_einf->inf->warp_timer, now + next_einf->inf->warpl);
  53.277     
  53.278      /* Extract the domain pointers from the dom infos */
  53.279 -    next        = next_einf->exec_domain;
  53.280 -    next_prime  = next_prime_einf->exec_domain;
  53.281 +    next        = next_einf->vcpu;
  53.282 +    next_prime  = next_prime_einf->vcpu;
  53.283      
  53.284      /* Update system virtual time. */
  53.285      if ( min_avt != ~0U )
  53.286 @@ -451,7 +451,7 @@ static struct task_slice bvt_do_schedule
  53.287          
  53.288          for_each_domain ( d )
  53.289          {
  53.290 -            for_each_exec_domain (d, ed) {
  53.291 +            for_each_vcpu (d, ed) {
  53.292                  if ( ed->processor == cpu )
  53.293                  {
  53.294                      p_einf = EBVT_INFO(ed);
  53.295 @@ -499,9 +499,9 @@ static struct task_slice bvt_do_schedule
  53.296  }
  53.297  
  53.298  
  53.299 -static void bvt_dump_runq_el(struct exec_domain *p)
  53.300 +static void bvt_dump_runq_el(struct vcpu *p)
  53.301  {
  53.302 -    struct bvt_edom_info *inf = EBVT_INFO(p);
  53.303 +    struct bvt_vcpu_info *inf = EBVT_INFO(p);
  53.304      
  53.305      printk("mcua=%d ev=0x%08X av=0x%08X ",
  53.306             inf->inf->mcu_advance, inf->evt, inf->avt);
  53.307 @@ -516,8 +516,8 @@ static void bvt_dump_cpu_state(int i)
  53.308  {
  53.309      struct list_head *queue;
  53.310      int loop = 0;
  53.311 -    struct bvt_edom_info *ed_inf;
  53.312 -    struct exec_domain *ed;
  53.313 +    struct bvt_vcpu_info *vcpu_inf;
  53.314 +    struct vcpu *v;
  53.315      
  53.316      printk("svt=0x%08lX ", CPU_SVT(i));
  53.317  
  53.318 @@ -525,15 +525,16 @@ static void bvt_dump_cpu_state(int i)
  53.319      printk("QUEUE rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
  53.320             (unsigned long) queue->next, (unsigned long) queue->prev);
  53.321  
  53.322 -    list_for_each_entry ( ed_inf, queue, run_list )
  53.323 +    list_for_each_entry ( vcpu_inf, queue, run_list )
  53.324      {
  53.325 -        ed = ed_inf->exec_domain;
  53.326 -        printk("%3d: %u has=%c ", loop++, ed->domain->domain_id,
  53.327 -               test_bit(_VCPUF_running, &ed->vcpu_flags) ? 'T':'F');
  53.328 -        bvt_dump_runq_el(ed);
  53.329 -        printk("c=0x%X%08X\n", (u32)(ed->cpu_time>>32), (u32)ed->cpu_time);
  53.330 +        v = vcpu_inf->vcpu;
  53.331 +        printk("%3d: %u has=%c ", loop++, v->domain->domain_id,
  53.332 +               test_bit(_VCPUF_running, &v->vcpu_flags) ? 'T':'F');
  53.333 +        bvt_dump_runq_el(v);
  53.334 +        printk("c=0x%X%08X\n", (u32)(v->cpu_time>>32), (u32)v->cpu_time);
  53.335          printk("         l: %p n: %p  p: %p\n",
  53.336 -               &ed_inf->run_list, ed_inf->run_list.next, ed_inf->run_list.prev);
  53.337 +               &vcpu_inf->run_list, vcpu_inf->run_list.next,
  53.338 +               vcpu_inf->run_list.prev);
  53.339      }
  53.340  }
  53.341  
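
The charge in calc_avt() above rounds the time a vcpu actually ran up to
whole MCUs (minimum charging units) and advances its actual virtual time
by mcus * mcu_advance, so a larger mcu_advance makes virtual time pass
faster and yields a smaller CPU share. A toy version of just that
arithmetic; the MCU value here is made up:

    #include <stdint.h>
    #include <stdio.h>

    #define MCU 1000u   /* illustrative "minimum charging unit", in ns */

    static uint32_t charge_avt(uint32_t avt, uint32_t ranfor,
                               uint32_t mcu_advance)
    {
        uint32_t mcus = (ranfor + MCU - 1) / MCU;   /* ceiling division */
        return avt + mcus * mcu_advance;
    }

    int main(void)
    {
        /* Ran 2500ns -> 3 MCUs; at mcu_advance 10 the avt moves by 30. */
        printf("%u\n", charge_avt(0, 2500, 10));
        return 0;
    }
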
    54.1 --- a/xen/common/sched_sedf.c	Thu Jun 02 19:19:24 2005 +0000
    54.2 +++ b/xen/common/sched_sedf.c	Thu Jun 02 21:05:33 2005 +0000
    54.3 @@ -64,9 +64,9 @@
    54.4  struct sedf_dom_info {
    54.5      struct domain  *domain;
    54.6  };
    54.7 -struct sedf_edom_info
    54.8 +struct sedf_vcpu_info
    54.9  {
   54.10 -    struct exec_domain *exec_domain;
   54.11 +    struct vcpu *vcpu;
   54.12      struct list_head list;
   54.13      struct list_head extralist[2];
   54.14   
   54.15 @@ -119,14 +119,14 @@ struct sedf_cpu_info {
   54.16      s_time_t         current_slice_expires;
   54.17  };
   54.18  
   54.19 -#define EDOM_INFO(d)  ((struct sedf_edom_info *)((d)->sched_priv))
   54.20 +#define EDOM_INFO(d)  ((struct sedf_vcpu_info *)((d)->sched_priv))
   54.21  #define CPU_INFO(cpu) ((struct sedf_cpu_info *)schedule_data[cpu].sched_priv)
   54.22  #define LIST(d)   (&EDOM_INFO(d)->list)
   54.23  #define EXTRALIST(d,i)  (&(EDOM_INFO(d)->extralist[i]))
   54.24  #define RUNQ(cpu)     (&CPU_INFO(cpu)->runnableq)
   54.25  #define WAITQ(cpu)     (&CPU_INFO(cpu)->waitq)
   54.26  #define EXTRAQ(cpu,i)    (&(CPU_INFO(cpu)->extraq[i]))
   54.27 -#define IDLETASK(cpu)  ((struct exec_domain *)schedule_data[cpu].idle)
   54.28 +#define IDLETASK(cpu)  ((struct vcpu *)schedule_data[cpu].idle)
   54.29  
   54.30  #define PERIOD_BEGIN(inf) ((inf)->deadl_abs - (inf)->period)
   54.31  
   54.32 @@ -140,24 +140,24 @@ struct sedf_cpu_info {
   54.33  
   54.34  static void sedf_dump_cpu_state(int i);
   54.35  
   54.36 -static inline int extraq_on(struct exec_domain *d, int i) {
   54.37 +static inline int extraq_on(struct vcpu *d, int i) {
   54.38      return ((EXTRALIST(d,i)->next != NULL) &&
   54.39              (EXTRALIST(d,i)->next != EXTRALIST(d,i)));
   54.40  }
   54.41  
   54.42 -static inline void extraq_add_head(struct exec_domain *d, int i)
   54.43 +static inline void extraq_add_head(struct vcpu *d, int i)
   54.44  {
   54.45      list_add(EXTRALIST(d,i), EXTRAQ(d->processor,i));
   54.46      ASSERT(extraq_on(d, i));
   54.47  }
   54.48  
   54.49 -static inline void extraq_add_tail(struct exec_domain *d, int i)
   54.50 +static inline void extraq_add_tail(struct vcpu *d, int i)
   54.51  {
   54.52      list_add_tail(EXTRALIST(d,i), EXTRAQ(d->processor,i));
   54.53      ASSERT(extraq_on(d, i));
   54.54  }
   54.55  
   54.56 -static inline void extraq_del(struct exec_domain *d, int i)
   54.57 +static inline void extraq_del(struct vcpu *d, int i)
   54.58  {
   54.59      struct list_head *list = EXTRALIST(d,i);
   54.60      ASSERT(extraq_on(d,i));
   54.61 @@ -174,9 +174,9 @@ static inline void extraq_del(struct exe
   54.62     each entry, in order to avoid overflow. The algorithm works by simply
   54.63    charging each domain that received extratime with an inverse of its weight.
   54.64   */ 
   54.65 -static inline void extraq_add_sort_update(struct exec_domain *d, int i, int sub) {
   54.66 +static inline void extraq_add_sort_update(struct vcpu *d, int i, int sub) {
   54.67      struct list_head      *cur;
   54.68 -    struct sedf_edom_info *curinf;
   54.69 +    struct sedf_vcpu_info *curinf;
   54.70   
   54.71      ASSERT(!extraq_on(d,i));
   54.72      PRINT(3, "Adding domain %i.%i (score= %i, short_pen= %"PRIi64")"
   54.73 @@ -186,14 +186,14 @@ static inline void extraq_add_sort_updat
   54.74      /*iterate through all elements to find our "hole" and on our way
   54.75        update all the other scores*/
   54.76      list_for_each(cur,EXTRAQ(d->processor,i)){
   54.77 -        curinf = list_entry(cur,struct sedf_edom_info,extralist[i]);
   54.78 +        curinf = list_entry(cur,struct sedf_vcpu_info,extralist[i]);
   54.79          curinf->score[i] -= sub;
   54.80          if (EDOM_INFO(d)->score[i] < curinf->score[i])
   54.81              break;
   54.82          else
   54.83              PRINT(4,"\tbehind domain %i.%i (score= %i)\n",
   54.84 -                  curinf->exec_domain->domain->domain_id,
   54.85 -                  curinf->exec_domain->vcpu_id, curinf->score[i]);
   54.86 +                  curinf->vcpu->domain->domain_id,
   54.87 +                  curinf->vcpu->vcpu_id, curinf->score[i]);
   54.88      }
   54.89      /*cur now contains the element, before which we'll enqueue*/
   54.90      PRINT(3, "\tlist_add to %p\n", cur->prev);
   54.91 @@ -203,16 +203,16 @@ static inline void extraq_add_sort_updat
   54.92      if ((cur != EXTRAQ(d->processor,i)) && sub)
   54.93          for (cur = cur->next; cur != EXTRAQ(d->processor,i);
   54.94               cur = cur-> next) {
   54.95 -            curinf = list_entry(cur,struct sedf_edom_info,
   54.96 +            curinf = list_entry(cur,struct sedf_vcpu_info,
   54.97                                  extralist[i]);
   54.98              curinf->score[i] -= sub;
   54.99              PRINT(4, "\tupdating domain %i.%i (score= %u)\n",
  54.100 -                  curinf->exec_domain->domain->domain_id, 
  54.101 -                  curinf->exec_domain->vcpu_id, curinf->score[i]);
  54.102 +                  curinf->vcpu->domain->domain_id, 
  54.103 +                  curinf->vcpu->vcpu_id, curinf->score[i]);
  54.104          }
  54.105      ASSERT(extraq_on(d,i));
  54.106  }
  54.107 -static inline void extraq_check(struct exec_domain *d) {
  54.108 +static inline void extraq_check(struct vcpu *d) {
  54.109      if (extraq_on(d, EXTRA_UTIL_Q)) {
  54.110          PRINT(2,"Dom %i.%i is on L1 extraQ\n",d->domain->domain_id, d->vcpu_id);
  54.111          if (!(EDOM_INFO(d)->status & EXTRA_AWARE) &&
  54.112 @@ -240,9 +240,9 @@ static inline void extraq_check(struct e
  54.113      }
  54.114  }
  54.115  
  54.116 -static inline void extraq_check_add_unblocked(struct exec_domain *d, 
  54.117 +static inline void extraq_check_add_unblocked(struct vcpu *d, 
  54.118                                                int priority) {
  54.119 -    struct sedf_edom_info *inf = EDOM_INFO(d);
  54.120 +    struct sedf_vcpu_info *inf = EDOM_INFO(d);
  54.121      if (inf->status & EXTRA_AWARE) 
  54.122  #if (EXTRA == EXTRA_ROUNDR)
  54.123          if (priority)
  54.124 @@ -259,10 +259,10 @@ static inline void extraq_check_add_unbl
  54.125  #endif
  54.126  }
  54.127  
  54.128 -static inline int __task_on_queue(struct exec_domain *d) {
  54.129 +static inline int __task_on_queue(struct vcpu *d) {
  54.130      return (((LIST(d))->next != NULL) && (LIST(d)->next != LIST(d)));
  54.131  }
  54.132 -static inline void __del_from_queue(struct exec_domain *d)
  54.133 +static inline void __del_from_queue(struct vcpu *d)
  54.134  {
  54.135      struct list_head *list = LIST(d);
  54.136      ASSERT(__task_on_queue(d));
  54.137 @@ -290,9 +290,9 @@ static inline void list_insert_sort(stru
  54.138  #define DOMAIN_COMPARER(name, field, comp1, comp2)          \
  54.139  int name##_comp(struct list_head* el1, struct list_head* el2) \
  54.140  {                                                           \
  54.141 - struct sedf_edom_info *d1, *d2;                     \
  54.142 - d1 = list_entry(el1,struct sedf_edom_info, field);  \
  54.143 - d2 = list_entry(el2,struct sedf_edom_info, field);  \
  54.144 + struct sedf_vcpu_info *d1, *d2;                     \
  54.145 + d1 = list_entry(el1,struct sedf_vcpu_info, field);  \
  54.146 + d2 = list_entry(el2,struct sedf_vcpu_info, field);  \
  54.147   if ((comp1) == (comp2))                             \
  54.148    return 0;                                   \
  54.149   if ((comp1) < (comp2))                              \
  54.150 @@ -305,7 +305,7 @@ int name##_comp(struct list_head* el1, s
  54.151     absol. deadline - period
  54.152   */ 
  54.153  DOMAIN_COMPARER(waitq, list, PERIOD_BEGIN(d1), PERIOD_BEGIN(d2))
  54.154 -    static inline void __add_to_waitqueue_sort(struct exec_domain *d) {
  54.155 +    static inline void __add_to_waitqueue_sort(struct vcpu *d) {
  54.156      ASSERT(!__task_on_queue(d));
  54.157      PRINT(3,"Adding domain %i.%i (bop= %"PRIu64") to waitq\n",
  54.158            d->domain->domain_id, d->vcpu_id, PERIOD_BEGIN(EDOM_INFO(d)));
  54.159 @@ -319,14 +319,14 @@ DOMAIN_COMPARER(waitq, list, PERIOD_BEGI
  54.160     task will run. As we are implementing EDF, this list is sorted by deadlines.
  54.161   */ 
  54.162  DOMAIN_COMPARER(runq, list, d1->deadl_abs, d2->deadl_abs)
  54.163 -    static inline void __add_to_runqueue_sort(struct exec_domain *d) {
  54.164 +    static inline void __add_to_runqueue_sort(struct vcpu *d) {
  54.165      PRINT(3,"Adding domain %i.%i (deadl= %"PRIu64") to runq\n",
  54.166            d->domain->domain_id, d->vcpu_id, EDOM_INFO(d)->deadl_abs);
  54.167      list_insert_sort(RUNQ(d->processor), LIST(d), runq_comp);
  54.168  }
  54.169  
  54.170  /* Allocates memory for per domain private scheduling data*/
  54.171 -static int sedf_alloc_task(struct exec_domain *d) {
  54.172 +static int sedf_alloc_task(struct vcpu *d) {
  54.173      PRINT(2,"sedf_alloc_task was called, domain-id %i.%i\n",d->domain->domain_id,
  54.174            d->vcpu_id);
  54.175      if (d->domain->sched_priv == NULL) {
  54.176 @@ -335,17 +335,17 @@ static int sedf_alloc_task(struct exec_d
  54.177              return -1;
  54.178          memset(d->domain->sched_priv, 0, sizeof(struct sedf_dom_info));
  54.179      }
  54.180 -    if ((d->sched_priv = xmalloc(struct sedf_edom_info)) == NULL )
  54.181 +    if ((d->sched_priv = xmalloc(struct sedf_vcpu_info)) == NULL )
  54.182          return -1;
  54.183 -    memset(d->sched_priv, 0, sizeof(struct sedf_edom_info));
  54.184 +    memset(d->sched_priv, 0, sizeof(struct sedf_vcpu_info));
  54.185      return 0;
  54.186  }
  54.187  
  54.188  /* Setup the sedf_dom_info */
  54.189 -static void sedf_add_task(struct exec_domain *d)
  54.190 +static void sedf_add_task(struct vcpu *d)
  54.191  {
  54.192 -    struct sedf_edom_info *inf = EDOM_INFO(d);
  54.193 -    inf->exec_domain = d;
  54.194 +    struct sedf_vcpu_info *inf = EDOM_INFO(d);
  54.195 +    inf->vcpu = d;
  54.196   
  54.197      PRINT(2,"sedf_add_task was called, domain-id %i.%i\n",d->domain->domain_id,
  54.198            d->vcpu_id);
  54.199 @@ -401,15 +401,15 @@ static void sedf_free_task(struct domain
  54.200      xfree(d->sched_priv);
  54.201   
  54.202      for (i = 0; i < MAX_VIRT_CPUS; i++)
  54.203 -        if ( d->exec_domain[i] ) {
  54.204 -            ASSERT(d->exec_domain[i]->sched_priv != NULL);
  54.205 -            xfree(d->exec_domain[i]->sched_priv);
  54.206 +        if ( d->vcpu[i] ) {
  54.207 +            ASSERT(d->vcpu[i]->sched_priv != NULL);
  54.208 +            xfree(d->vcpu[i]->sched_priv);
  54.209          }
  54.210  }
  54.211  
  54.212  /* handles the rescheduling and bookkeeping of domains running in their realtime :)*/
  54.213 -static inline void desched_edf_dom (s_time_t now, struct exec_domain* d) {
  54.214 -    struct sedf_edom_info* inf = EDOM_INFO(d);
  54.215 +static inline void desched_edf_dom (s_time_t now, struct vcpu* d) {
  54.216 +    struct sedf_vcpu_info* inf = EDOM_INFO(d);
  54.217      /*current domain is running in real time mode*/
  54.218   
  54.219      ASSERT(__task_on_queue(d));
  54.220 @@ -470,18 +470,18 @@ static inline void desched_edf_dom (s_ti
  54.221  static inline void update_queues(s_time_t now, struct list_head* runq, 
  54.222                                   struct list_head* waitq) {
  54.223      struct list_head     *cur,*tmp;
  54.224 -    struct sedf_edom_info *curinf;
  54.225 +    struct sedf_vcpu_info *curinf;
  54.226   
  54.227      PRINT(3,"Updating waitq..\n");
  54.228      /*check the first elements of the waitqueue to see whether their
  54.229        next period has already started*/
  54.230      list_for_each_safe(cur, tmp, waitq) {
  54.231 -        curinf = list_entry(cur, struct sedf_edom_info, list);
  54.232 +        curinf = list_entry(cur, struct sedf_vcpu_info, list);
  54.233          PRINT(4,"\tLooking @ dom %i.%i\n",
  54.234 -              curinf->exec_domain->domain->domain_id, curinf->exec_domain->vcpu_id);
  54.235 +              curinf->vcpu->domain->domain_id, curinf->vcpu->vcpu_id);
  54.236          if (PERIOD_BEGIN(curinf) <= now) {
  54.237 -            __del_from_queue(curinf->exec_domain);
  54.238 -            __add_to_runqueue_sort(curinf->exec_domain);
  54.239 +            __del_from_queue(curinf->vcpu);
  54.240 +            __add_to_runqueue_sort(curinf->vcpu);
  54.241          }
  54.242          else
  54.243              break;
  54.244 @@ -491,20 +491,20 @@ static inline void update_queues(s_time_
  54.245      /*process the runq, find domains that are on
  54.246        the runqueue which shouldn't be there*/
  54.247      list_for_each_safe(cur, tmp, runq) {
  54.248 -        curinf = list_entry(cur,struct sedf_edom_info,list);
  54.249 +        curinf = list_entry(cur,struct sedf_vcpu_info,list);
  54.250          PRINT(4,"\tLooking @ dom %i.%i\n",
  54.251 -              curinf->exec_domain->domain->domain_id, curinf->exec_domain->vcpu_id);
  54.252 +              curinf->vcpu->domain->domain_id, curinf->vcpu->vcpu_id);
  54.253          if (unlikely(curinf->slice == 0)) {
  54.254              /*ignore domains with empty slice*/
  54.255              PRINT(4,"\tUpdating zero-slice domain %i.%i\n",
  54.256 -                  curinf->exec_domain->domain->domain_id,
  54.257 -                  curinf->exec_domain->vcpu_id);
  54.258 -            __del_from_queue(curinf->exec_domain);
  54.259 +                  curinf->vcpu->domain->domain_id,
  54.260 +                  curinf->vcpu->vcpu_id);
  54.261 +            __del_from_queue(curinf->vcpu);
  54.262     
  54.263              /*move them to their next period*/
  54.264              curinf->deadl_abs += curinf->period;
  54.265              /*and put them back into the queue*/
  54.266 -            __add_to_waitqueue_sort(curinf->exec_domain);
  54.267 +            __add_to_waitqueue_sort(curinf->vcpu);
  54.268              continue;
  54.269          }
  54.270          if (unlikely((curinf->deadl_abs < now) ||
  54.271 @@ -515,11 +515,11 @@ static inline void update_queues(s_time_
  54.272 +            PRINT(4,"\tDomain %i.%i exceeded its deadline/"
  54.273                    "slice (%"PRIu64" / %"PRIu64") now: %"PRIu64
  54.274                    " cputime: %"PRIu64"\n",
  54.275 -                  curinf->exec_domain->domain->domain_id,
  54.276 -                  curinf->exec_domain->vcpu_id,
  54.277 +                  curinf->vcpu->domain->domain_id,
  54.278 +                  curinf->vcpu->vcpu_id,
  54.279                    curinf->deadl_abs, curinf->slice, now,
  54.280                    curinf->cputime);
  54.281 -            __del_from_queue(curinf->exec_domain);
  54.282 +            __del_from_queue(curinf->vcpu);
  54.283              /*common case: we miss one period!*/
  54.284              curinf->deadl_abs += curinf->period;
  54.285     
  54.286 @@ -534,9 +534,9 @@ static inline void update_queues(s_time_
  54.287              /*give a fresh slice*/
  54.288              curinf->cputime = 0;
  54.289              if (PERIOD_BEGIN(curinf) > now)
  54.290 -                __add_to_waitqueue_sort(curinf->exec_domain);
  54.291 +                __add_to_waitqueue_sort(curinf->vcpu);
  54.292              else
  54.293 -                __add_to_runqueue_sort(curinf->exec_domain);
  54.294 +                __add_to_runqueue_sort(curinf->vcpu);
  54.295          }
  54.296          else
  54.297              break;
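
When update_queues() above finds a domain past its deadline, it pushes
deadl_abs forward (the hunk shows the common single-period step), grants a
fresh slice, and re-sorts the domain into the wait or run queue by
PERIOD_BEGIN. A toy version of the deadline arithmetic; the struct is a
stand-in, and the multi-period catch-up branch is an illustrative
extrapolation, not code shown in the hunk:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_edf { uint64_t deadl_abs, period, cputime; };

    static void toy_requeue(struct toy_edf *d, uint64_t now)
    {
        d->deadl_abs += d->period;          /* common case: one missed period */
        if (d->deadl_abs < now)             /* far overdue: catch up in one go */
            d->deadl_abs += ((now - d->deadl_abs + d->period - 1)
                             / d->period) * d->period;
        d->cputime = 0;                     /* fresh slice */
    }

    int main(void)
    {
        struct toy_edf d = { .deadl_abs = 100, .period = 50, .cputime = 7 };
        toy_requeue(&d, 400);
        printf("deadline=%llu\n", (unsigned long long)d.deadl_abs); /* 400 */
        return 0;
    }
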
  54.298 @@ -551,8 +551,8 @@ static inline void update_queues(s_time_
  54.299       weighted ext.: insert in sorted list by score
  54.300     if the domain is blocked / has regained its short-block-loss
  54.301     time it is not put on any queue */
  54.302 -static inline void desched_extra_dom(s_time_t now, struct exec_domain* d) {
  54.303 -    struct sedf_edom_info *inf = EDOM_INFO(d);
  54.304 +static inline void desched_extra_dom(s_time_t now, struct vcpu* d) {
  54.305 +    struct sedf_vcpu_info *inf = EDOM_INFO(d);
  54.306      int    i    = extra_get_cur_q(inf);
  54.307   
  54.308  #if (EXTRA == EXTRA_SLICE_WEIGHT || EXTRA == EXTRA_BLOCK_WEIGHT)
  54.309 @@ -582,11 +582,11 @@ static inline void desched_extra_dom(s_t
  54.310          /*inf->short_block_lost_tot -= EXTRA_QUANTUM;*/
  54.311          inf->short_block_lost_tot -= now - inf->sched_start_abs;
  54.312          PRINT(3,"Domain %i.%i: Short_block_loss: %"PRIi64"\n", 
  54.313 -              inf->exec_domain->domain->domain_id, inf->exec_domain->vcpu_id,
  54.314 +              inf->vcpu->domain->domain_id, inf->vcpu->vcpu_id,
  54.315                inf->short_block_lost_tot);
  54.316          if (inf->short_block_lost_tot <= 0) {
  54.317              PRINT(4,"Domain %i.%i compensated short block loss!\n",
  54.318 -                  inf->exec_domain->domain->domain_id, inf->exec_domain->vcpu_id);
  54.319 +                  inf->vcpu->domain->domain_id, inf->vcpu->vcpu_id);
  54.320              /*we have (over-)compensated our block penalty*/
  54.321              inf->short_block_lost_tot = 0;
  54.322              /*we don't want a place on the penalty queue anymore!*/
  54.323 @@ -646,7 +646,7 @@ static inline void desched_extra_dom(s_t
  54.324  static inline struct task_slice sedf_do_extra_schedule (s_time_t now,
  54.325                                                          s_time_t end_xt, struct list_head *extraq[], int cpu) {
  54.326      struct task_slice   ret;
  54.327 -    struct sedf_edom_info *runinf;
  54.328 +    struct sedf_vcpu_info *runinf;
  54.329   
  54.330      /* Enough time left to use for extratime? */
  54.331      if (end_xt - now < EXTRA_QUANTUM)
  54.332 @@ -656,9 +656,9 @@ static inline struct task_slice sedf_do_
  54.333          /*we still have elements on the level 0 extraq 
  54.334            => let those run first!*/
  54.335          runinf   = list_entry(extraq[EXTRA_PEN_Q]->next, 
  54.336 -                              struct sedf_edom_info, extralist[EXTRA_PEN_Q]);
  54.337 +                              struct sedf_vcpu_info, extralist[EXTRA_PEN_Q]);
  54.338          runinf->status |= EXTRA_RUN_PEN;
  54.339 -        ret.task = runinf->exec_domain;
  54.340 +        ret.task = runinf->vcpu;
  54.341          ret.time = EXTRA_QUANTUM;
  54.342  #ifdef SEDF_STATS
  54.343          runinf->pen_extra_slices++;
  54.344 @@ -668,9 +668,9 @@ static inline struct task_slice sedf_do_
  54.345          if (!list_empty(extraq[EXTRA_UTIL_Q])) {
  54.346              /*use elements from the normal extraqueue*/
  54.347              runinf   = list_entry(extraq[EXTRA_UTIL_Q]->next,
  54.348 -                                  struct sedf_edom_info, extralist[EXTRA_UTIL_Q]);
  54.349 +                                  struct sedf_vcpu_info, extralist[EXTRA_UTIL_Q]);
  54.350              runinf->status |= EXTRA_RUN_UTIL;
  54.351 -            ret.task = runinf->exec_domain;
  54.352 +            ret.task = runinf->vcpu;
  54.353              ret.time = EXTRA_QUANTUM;
  54.354          }
  54.355          else
  54.356 @@ -698,7 +698,7 @@ static struct task_slice sedf_do_schedul
  54.357      struct list_head     *runq     = RUNQ(cpu);
  54.358      struct list_head     *waitq    = WAITQ(cpu);
  54.359  #if (EXTRA > EXTRA_OFF)
  54.360 -    struct sedf_edom_info *inf     = EDOM_INFO(current);
  54.361 +    struct sedf_vcpu_info *inf     = EDOM_INFO(current);
  54.362      struct list_head     *extraq[] = {EXTRAQ(cpu, EXTRA_PEN_Q),
  54.363                                        EXTRAQ(cpu, EXTRA_UTIL_Q)};
  54.364  #endif
  54.365 @@ -732,14 +732,14 @@ static struct task_slice sedf_do_schedul
  54.366   
  54.367      /*now simply pick the first domain from the runqueue, which has the
  54.368        earliest deadline, because the list is sorted*/
  54.369 -    struct sedf_edom_info *runinf, *waitinf;
  54.370 +    struct sedf_vcpu_info *runinf, *waitinf;
  54.371   
  54.372      if (!list_empty(runq)) {
  54.373 -        runinf   = list_entry(runq->next,struct sedf_edom_info,list);
  54.374 -        ret.task = runinf->exec_domain;
  54.375 +        runinf   = list_entry(runq->next,struct sedf_vcpu_info,list);
  54.376 +        ret.task = runinf->vcpu;
  54.377          if (!list_empty(waitq)) {
  54.378              waitinf  = list_entry(waitq->next,
  54.379 -                                  struct sedf_edom_info,list);
  54.380 +                                  struct sedf_vcpu_info,list);
  54.381              /*rerun scheduler, when scheduled domain reaches its
  54.382                end of slice or the first domain from the waitqueue
  54.383                gets ready*/
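
Editor's note: because the runqueue is kept sorted by absolute deadline, EDF selection here is simply "take the head"; the subtle part is capping the slice so the scheduler reruns when either the slice ends or the head of the waitqueue becomes runnable, whichever comes first. A simplified, illustrative model of that computation (not the in-tree code; PERIOD_BEGIN is again assumed to be deadl_abs - period):

    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t s_time_t;

    struct vinfo { s_time_t deadl_abs, slice, cputime, period; };
    #define PERIOD_BEGIN(i) ((i)->deadl_abs - (i)->period)

    /* Pick the head of a deadline-sorted runqueue and compute how long it
     * may run: until its slice is exhausted, or until the head of the
     * waitqueue becomes runnable, whichever is earlier. */
    static s_time_t pick_time(const struct vinfo *run, const struct vinfo *wait,
                              s_time_t now)
    {
        s_time_t t = run->slice - run->cputime;          /* remaining slice */
        if (wait && PERIOD_BEGIN(wait) - now < t)
            t = PERIOD_BEGIN(wait) - now;                /* rerun scheduler then */
        return t;
    }

    int main(void)
    {
        struct vinfo run  = { .deadl_abs = 30, .slice = 5, .cputime = 1, .period = 20 };
        struct vinfo wait = { .deadl_abs = 27, .period = 20 };   /* begins at 7 */
        printf("run head for %lld time units\n", (long long)pick_time(&run, &wait, 5));
        return 0;
    }
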
  54.384 @@ -754,7 +754,7 @@ static struct task_slice sedf_do_schedul
  54.385      }
  54.386   
  54.387      if (!list_empty(waitq)) {
  54.388 -        waitinf  = list_entry(waitq->next,struct sedf_edom_info, list);
  54.389 +        waitinf  = list_entry(waitq->next,struct sedf_vcpu_info, list);
  54.390          /*we could not find any suitable domain 
  54.391            => look for domains that are aware of extratime*/
  54.392  #if (EXTRA > EXTRA_OFF)
  54.393 @@ -789,7 +789,7 @@ static struct task_slice sedf_do_schedul
  54.394      return ret;
  54.395  }
  54.396  
  54.397 -static void sedf_sleep(struct exec_domain *d) {
  54.398 +static void sedf_sleep(struct vcpu *d) {
  54.399      PRINT(2,"sedf_sleep was called, domain-id %i.%i\n",d->domain->domain_id, d->vcpu_id);
  54.400   
  54.401      if (is_idle_task(d->domain))
  54.402 @@ -886,12 +886,12 @@ static void sedf_sleep(struct exec_domai
  54.403   *      opposed to approaches 1,2a,2b
  54.404   */
  54.405  static inline void unblock_short_vcons
  54.406 -(struct sedf_edom_info* inf, s_time_t now) {
  54.407 +(struct sedf_vcpu_info* inf, s_time_t now) {
  54.408      inf->deadl_abs += inf->period;
  54.409      inf->cputime = 0;
  54.410  }
  54.411  
  54.412 -static inline void unblock_short_cons(struct sedf_edom_info* inf, s_time_t now)
  54.413 +static inline void unblock_short_cons(struct sedf_vcpu_info* inf, s_time_t now)
  54.414  {
  54.415      /*treat blocked time as consumed by the domain*/
  54.416      inf->cputime += now - inf->block_abs; 
  54.417 @@ -905,7 +905,7 @@ static inline void unblock_short_cons(st
  54.418          inf->short_cont++;
  54.419  #endif
  54.420  }
  54.421 -static inline void unblock_short_extra_support (struct sedf_edom_info* inf,
  54.422 +static inline void unblock_short_extra_support (struct sedf_vcpu_info* inf,
  54.423                                                  s_time_t now) {
  54.424      /*this unblocking scheme tries to support the domain, by assigning it
  54.425      a priority in extratime distribution according to the loss of time
  54.426 @@ -933,9 +933,9 @@ static inline void unblock_short_extra_s
  54.427  #ifdef SEDF_STATS
  54.428              inf->pen_extra_blocks++;
  54.429  #endif
  54.430 -            if (extraq_on(inf->exec_domain, EXTRA_PEN_Q))
  54.431 +            if (extraq_on(inf->vcpu, EXTRA_PEN_Q))
  54.432                  /*remove domain for possible resorting!*/
  54.433 -                extraq_del(inf->exec_domain, EXTRA_PEN_Q);
  54.434 +                extraq_del(inf->vcpu, EXTRA_PEN_Q);
  54.435              else
  54.436                  /*remember that we want to be on the penalty q
  54.437                    so that we can continue when we (un-)block
  54.438 @@ -943,14 +943,14 @@ static inline void unblock_short_extra_s
  54.439                  inf->status |= EXTRA_WANT_PEN_Q;
  54.440     
  54.441              /*(re-)add domain to the penalty extraq*/
  54.442 -            extraq_add_sort_update(inf->exec_domain,
  54.443 +            extraq_add_sort_update(inf->vcpu,
  54.444                                     EXTRA_PEN_Q, 0);
  54.445          }
  54.446      }
  54.447      /*give it a fresh slice in the next period!*/
  54.448      inf->cputime = 0;
  54.449  }
  54.450 -static inline void unblock_long_vcons(struct sedf_edom_info* inf, s_time_t now)
  54.451 +static inline void unblock_long_vcons(struct sedf_vcpu_info* inf, s_time_t now)
  54.452  {
  54.453      /* align to next future period */
  54.454      inf->deadl_abs += (DIV_UP(now - inf->deadl_abs, inf->period) +1)
  54.455 @@ -958,7 +958,7 @@ static inline void unblock_long_vcons(st
  54.456      inf->cputime = 0;
  54.457  }
  54.458  
  54.459 -static inline void unblock_long_cons_a (struct sedf_edom_info* inf,
  54.460 +static inline void unblock_long_cons_a (struct sedf_vcpu_info* inf,
  54.461                                          s_time_t now) {
  54.462      /*treat the time the domain was blocked in the
  54.463     CURRENT period as consumed by the domain*/
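
Editor's note: unblock_long_vcons above keeps the period grid fixed by rounding the stale deadline forward a whole number of periods past now; unblock_long_cons_a falls back to it when the consumed-time accounting leaves the deadline in the past. A worked instance of the formula, assuming the conventional DIV_UP(x, y) = (x + y - 1) / y (the macro's definition is not shown in this hunk):

    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t s_time_t;
    #define DIV_UP(x, y) (((x) + (y) - 1) / (y))   /* assumed definition */

    int main(void)
    {
        s_time_t period = 10, deadl_abs = 23, now = 57;
        /* Skip enough whole periods that the new deadline lies strictly in
         * the future, without shifting the period grid. */
        deadl_abs += (DIV_UP(now - deadl_abs, period) + 1) * period;
        printf("new deadline: %lld\n", (long long)deadl_abs);  /* 23+(4+1)*10 = 73 */
        return 0;
    }
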
  54.464 @@ -969,13 +969,13 @@ static inline void unblock_long_cons_a (
  54.465          unblock_long_vcons(inf, now);
  54.466      }
  54.467  }
  54.468 -static inline void unblock_long_cons_b(struct sedf_edom_info* inf,s_time_t now) {
  54.469 +static inline void unblock_long_cons_b(struct sedf_vcpu_info* inf,s_time_t now) {
  54.470      /*Conservative 2b*/
  54.471      /*Treat the unblocking time as a start of a new period */
  54.472      inf->deadl_abs = now + inf->period;
  54.473      inf->cputime = 0;
  54.474  }
  54.475 -static inline void unblock_long_cons_c(struct sedf_edom_info* inf,s_time_t now) {
  54.476 +static inline void unblock_long_cons_c(struct sedf_vcpu_info* inf,s_time_t now) {
  54.477      if (likely(inf->latency)) {
  54.478          /*scale the slice and period according to the latency hint*/
  54.479          /*reduce period temporarily to the latency hint*/
  54.480 @@ -995,7 +995,7 @@ static inline void unblock_long_cons_c(s
  54.481      }
  54.482  }
  54.483  /*a new idea of dealing with short blocks: burst period scaling*/
  54.484 -static inline void unblock_short_burst(struct sedf_edom_info* inf, s_time_t now)
  54.485 +static inline void unblock_short_burst(struct sedf_vcpu_info* inf, s_time_t now)
  54.486  {
  54.487      /*treat blocked time as consumed by the domain*/
  54.488      inf->cputime += now - inf->block_abs;
  54.489 @@ -1035,7 +1035,7 @@ static inline void unblock_short_burst(s
  54.490      }
  54.491      inf->unblock_abs = now;
  54.492  }
  54.493 -static inline void unblock_long_burst(struct sedf_edom_info* inf, s_time_t now) {
  54.494 +static inline void unblock_long_burst(struct sedf_vcpu_info* inf, s_time_t now) {
  54.495      if (unlikely(inf->latency && (inf->period > inf->latency))) {
  54.496          /*scale the slice and period according to the latency hint*/
  54.497          inf->period = inf->latency;
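
Editor's note: the _burst and _cons_c unblocking variants temporarily shrink the period to the latency hint so a latency-sensitive vcpu is reconsidered sooner after a long block. The slice has to shrink with it or the vcpu would gain bandwidth; the exact in-tree scaling is cut off by this hunk, so the sketch below assumes slice/period is held equal to slice_orig/period_orig:

    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t s_time_t;

    struct vinfo { s_time_t period, slice, period_orig, slice_orig, latency; };

    /* Shrink the period to the latency hint while preserving the original
     * bandwidth slice_orig/period_orig, so the domain is scheduled sooner
     * after unblocking without gaining CPU share. */
    static void scale_to_latency(struct vinfo *inf)
    {
        if (inf->latency && inf->period > inf->latency) {
            inf->period = inf->latency;
            inf->slice  = (inf->period * inf->slice_orig) / inf->period_orig;
        }
    }

    int main(void)
    {
        struct vinfo inf = { .period = 100, .slice = 20,
                             .period_orig = 100, .slice_orig = 20, .latency = 25 };
        scale_to_latency(&inf);
        printf("period=%lld slice=%lld\n",  /* 25 and 5: same 20% share */
               (long long)inf.period, (long long)inf.slice);
        return 0;
    }
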
  54.498 @@ -1062,8 +1062,8 @@ static inline void unblock_long_burst(st
  54.499  #define DOMAIN_EXTRA_PEN  2
  54.500  #define DOMAIN_EXTRA_UTIL  3
  54.501  #define DOMAIN_IDLE   4
  54.502 -static inline int get_run_type(struct exec_domain* d) {
  54.503 -    struct sedf_edom_info* inf = EDOM_INFO(d);
  54.504 +static inline int get_run_type(struct vcpu* d) {
  54.505 +    struct sedf_vcpu_info* inf = EDOM_INFO(d);
  54.506      if (is_idle_task(d->domain))
  54.507          return DOMAIN_IDLE;
  54.508      if (inf->status & EXTRA_RUN_PEN)
  54.509 @@ -1081,9 +1081,9 @@ static inline int get_run_type(struct ex
  54.510    In the same class priorities are assigned as following:
  54.511     EDF: early deadline > late deadline
  54.512     L0 extra-time: lower score > higher score*/
  54.513 -static inline int should_switch(struct exec_domain* cur,
  54.514 -                                struct exec_domain* other, s_time_t now) {
  54.515 -    struct sedf_edom_info *cur_inf, *other_inf;
  54.516 +static inline int should_switch(struct vcpu* cur,
  54.517 +                                struct vcpu* other, s_time_t now) {
  54.518 +    struct sedf_vcpu_info *cur_inf, *other_inf;
  54.519      cur_inf   = EDOM_INFO(cur);
  54.520      other_inf = EDOM_INFO(other);
  54.521   
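
Editor's note: per the comment above, preemption is decided first by class (EDF ahead of L0 extra-time penalty, ahead of extra-time utility, ahead of idle) and only within the EDF class by the earlier deadline. A condensed standalone model of that two-level test; DOMAIN_RUNNING = 1 is an assumption, since that define sits above the hunk:

    #include <stdio.h>

    #define DOMAIN_RUNNING    1   /* assumed; the define is cut off above */
    #define DOMAIN_EXTRA_PEN  2
    #define DOMAIN_EXTRA_UTIL 3
    #define DOMAIN_IDLE       4

    struct vinfo { int run_type; long long deadline; };

    /* Lower class number = higher priority; within the EDF class the
     * earlier absolute deadline wins. Returns nonzero if `other` should
     * preempt `cur`. */
    static int should_switch(const struct vinfo *cur, const struct vinfo *other)
    {
        if (other->run_type != cur->run_type)
            return other->run_type < cur->run_type;
        if (cur->run_type == DOMAIN_RUNNING)
            return other->deadline < cur->deadline;
        return 0;  /* same non-EDF class: keep running */
    }

    int main(void)
    {
        struct vinfo cur   = { DOMAIN_EXTRA_UTIL, 0 };
        struct vinfo other = { DOMAIN_RUNNING, 100 };
        printf("preempt: %d\n", should_switch(&cur, &other));  /* 1 */
        return 0;
    }
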
  54.522 @@ -1115,9 +1115,9 @@ static inline int should_switch(struct e
  54.523      }
  54.524      return 1;
  54.525  }
  54.526 -void sedf_wake(struct exec_domain *d) {
  54.527 +void sedf_wake(struct vcpu *d) {
  54.528      s_time_t              now = NOW();
  54.529 -    struct sedf_edom_info* inf = EDOM_INFO(d);
  54.530 +    struct sedf_vcpu_info* inf = EDOM_INFO(d);
  54.531   
  54.532      PRINT(3, "sedf_wake was called, domain-id %i.%i\n",d->domain->domain_id, d->vcpu_id);
  54.533   
  54.534 @@ -1231,7 +1231,7 @@ void sedf_wake(struct exec_domain *d) {
  54.535  }
  54.536  
  54.537  /*Print a lot of use-{full, less} information about the domains in the system*/
  54.538 -static void sedf_dump_domain(struct exec_domain *d) {
  54.539 +static void sedf_dump_domain(struct vcpu *d) {
  54.540      printk("%i.%i has=%c ", d->domain->domain_id, d->vcpu_id,
  54.541             test_bit(_VCPUF_running, &d->vcpu_flags) ? 'T':'F');
  54.542      printk("p=%"PRIu64" sl=%"PRIu64" ddl=%"PRIu64" w=%hu c=%"PRIu64" sc=%i xtr(%s)=%"PRIu64" ew=%hu",
  54.543 @@ -1267,9 +1267,9 @@ static void sedf_dump_domain(struct exec
  54.544  static void sedf_dump_cpu_state(int i)
  54.545  {
  54.546      struct list_head      *list, *queue, *tmp;
  54.547 -    struct sedf_edom_info *d_inf;
  54.548 +    struct sedf_vcpu_info *d_inf;
  54.549      struct domain         *d;
  54.550 -    struct exec_domain    *ed;
  54.551 +    struct vcpu    *ed;
  54.552      int loop = 0;
  54.553   
  54.554      printk("now=%"PRIu64"\n",NOW());
  54.555 @@ -1278,8 +1278,8 @@ static void sedf_dump_cpu_state(int i)
  54.556             (unsigned long) queue->next, (unsigned long) queue->prev);
  54.557      list_for_each_safe ( list, tmp, queue ) {
  54.558          printk("%3d: ",loop++);
  54.559 -        d_inf = list_entry(list, struct sedf_edom_info, list);
  54.560 -        sedf_dump_domain(d_inf->exec_domain);
  54.561 +        d_inf = list_entry(list, struct sedf_vcpu_info, list);
  54.562 +        sedf_dump_domain(d_inf->vcpu);
  54.563      }
  54.564   
  54.565      queue = WAITQ(i); loop = 0;
  54.566 @@ -1287,8 +1287,8 @@ static void sedf_dump_cpu_state(int i)
  54.567             (unsigned long) queue->next, (unsigned long) queue->prev);
  54.568      list_for_each_safe ( list, tmp, queue ) {
  54.569          printk("%3d: ",loop++);
  54.570 -        d_inf = list_entry(list, struct sedf_edom_info, list);
  54.571 -        sedf_dump_domain(d_inf->exec_domain);
  54.572 +        d_inf = list_entry(list, struct sedf_vcpu_info, list);
  54.573 +        sedf_dump_domain(d_inf->vcpu);
  54.574      }
  54.575   
  54.576      queue = EXTRAQ(i,EXTRA_PEN_Q); loop = 0;
  54.577 @@ -1296,10 +1296,10 @@ static void sedf_dump_cpu_state(int i)
  54.578             (unsigned long)queue, (unsigned long) queue->next,
  54.579             (unsigned long) queue->prev);
  54.580      list_for_each_safe ( list, tmp, queue ) {
  54.581 -        d_inf = list_entry(list, struct sedf_edom_info,
  54.582 +        d_inf = list_entry(list, struct sedf_vcpu_info,
  54.583                             extralist[EXTRA_PEN_Q]);
  54.584          printk("%3d: ",loop++);
  54.585 -        sedf_dump_domain(d_inf->exec_domain);
  54.586 +        sedf_dump_domain(d_inf->vcpu);
  54.587      }
  54.588   
  54.589      queue = EXTRAQ(i,EXTRA_UTIL_Q); loop = 0;
  54.590 @@ -1307,16 +1307,16 @@ static void sedf_dump_cpu_state(int i)
  54.591             (unsigned long)queue, (unsigned long) queue->next,
  54.592             (unsigned long) queue->prev);
  54.593      list_for_each_safe ( list, tmp, queue ) {
  54.594 -        d_inf = list_entry(list, struct sedf_edom_info,
  54.595 +        d_inf = list_entry(list, struct sedf_vcpu_info,
  54.596                             extralist[EXTRA_UTIL_Q]);
  54.597          printk("%3d: ",loop++);
  54.598 -        sedf_dump_domain(d_inf->exec_domain);
  54.599 +        sedf_dump_domain(d_inf->vcpu);
  54.600      }
  54.601   
  54.602      loop = 0;
  54.603      printk("\nnot on Q\n");
  54.604      for_each_domain(d)
  54.605 -        for_each_exec_domain(d, ed)
  54.606 +        for_each_vcpu(d, ed)
  54.607      {
  54.608          if (!__task_on_queue(ed) && (ed->processor == i)) {
  54.609              printk("%3d: ",loop++);
  54.610 @@ -1326,7 +1326,7 @@ static void sedf_dump_cpu_state(int i)
  54.611  }
  54.612  /*Adjusts periods and slices of the domains accordingly to their weights*/
  54.613  static inline int sedf_adjust_weights(struct sched_adjdom_cmd *cmd) {
  54.614 -    struct exec_domain *p;
  54.615 +    struct vcpu *p;
  54.616      struct domain      *d;
  54.617      int                 sumw[NR_CPUS];
  54.618      s_time_t            sumt[NR_CPUS];
  54.619 @@ -1338,7 +1338,7 @@ static inline int sedf_adjust_weights(st
  54.620      }
  54.621      /*sum up all weights*/
  54.622      for_each_domain(d)
  54.623 -        for_each_exec_domain(d, p) {
  54.624 +        for_each_vcpu(d, p) {
  54.625          if (EDOM_INFO(p)->weight)
  54.626              sumw[p->processor] += EDOM_INFO(p)->weight;
  54.627          else {
  54.628 @@ -1356,7 +1356,7 @@ static inline int sedf_adjust_weights(st
  54.629      }
  54.630      /*adjust all slices (and periods) to the new weight*/
  54.631      for_each_domain(d) 
  54.632 -        for_each_exec_domain(d, p) {
  54.633 +        for_each_vcpu(d, p) {
  54.634          if (EDOM_INFO(p)->weight) {
  54.635              EDOM_INFO(p)->period_orig = 
  54.636                  EDOM_INFO(p)->period  = WEIGHT_PERIOD;
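
Editor's note: sedf_adjust_weights first sums weights per CPU (sumw) and, implicitly, the time already reserved by time-driven domains (sumt), then gives every weight-driven vcpu the common WEIGHT_PERIOD and a slice proportional to its weight. The exact slice expression is truncated by this hunk; the sketch below is one plausible reading, with WEIGHT_PERIOD's value invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t s_time_t;
    #define WEIGHT_PERIOD 1000  /* assumed value, for illustration */

    /* Proportional-share slice: of the WEIGHT_PERIOD not already reserved
     * on this CPU by time-driven domains (sumt), hand each weight-driven
     * vcpu its weight's share of the remainder. */
    static s_time_t weight_slice(int weight, int sumw, s_time_t sumt)
    {
        return (s_time_t)weight * (WEIGHT_PERIOD - sumt) / sumw;
    }

    int main(void)
    {
        /* three vcpus with weights 1,2,3 and 100 units already reserved */
        int w[] = { 1, 2, 3 }, sumw = 6;
        for (int i = 0; i < 3; i++)
            printf("weight %d -> slice %lld of %d\n",
                   w[i], (long long)weight_slice(w[i], sumw, 100), WEIGHT_PERIOD);
        return 0;
    }
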
  54.637 @@ -1372,7 +1372,7 @@ static inline int sedf_adjust_weights(st
  54.638  
  54.639  /* set or fetch domain scheduling parameters */
  54.640  static int sedf_adjdom(struct domain *p, struct sched_adjdom_cmd *cmd) {
  54.641 -    struct exec_domain *ed;
  54.642 +    struct vcpu *v;
  54.643  
  54.644      PRINT(2,"sedf_adjdom was called, domain-id %i new period %"PRIu64" "\
  54.645            "new slice %"PRIu64"\nlatency %"PRIu64" extra:%s\n",
  54.646 @@ -1387,51 +1387,51 @@ static int sedf_adjdom(struct domain *p,
  54.647              if ((cmd->u.sedf.extratime & EXTRA_AWARE) &&
  54.648                  (! cmd->u.sedf.period)) {
  54.649                  /*weight driven domains with xtime ONLY!*/
  54.650 -                for_each_exec_domain(p, ed) {
  54.651 -                    EDOM_INFO(ed)->extraweight = cmd->u.sedf.weight;
  54.652 -                    EDOM_INFO(ed)->weight = 0;
  54.653 -                    EDOM_INFO(ed)->slice = 0;
  54.654 -                    EDOM_INFO(ed)->period = WEIGHT_PERIOD;
  54.655 +                for_each_vcpu(p, v) {
  54.656 +                    EDOM_INFO(v)->extraweight = cmd->u.sedf.weight;
  54.657 +                    EDOM_INFO(v)->weight = 0;
  54.658 +                    EDOM_INFO(v)->slice = 0;
  54.659 +                    EDOM_INFO(v)->period = WEIGHT_PERIOD;
  54.660                  }
  54.661              } else {
  54.662                  /*weight driven domains with real-time execution*/
  54.663 -                for_each_exec_domain(p, ed)
  54.664 -                    EDOM_INFO(ed)->weight = cmd->u.sedf.weight;
  54.665 +                for_each_vcpu(p, v)
  54.666 +                    EDOM_INFO(v)->weight = cmd->u.sedf.weight;
  54.667              }
  54.668          }
  54.669          else {
  54.670              /*time driven domains*/
  54.671 -            for_each_exec_domain(p, ed) {
  54.672 +            for_each_vcpu(p, v) {
  54.673                  /* sanity checking! */
  54.674                  if(cmd->u.sedf.slice > cmd->u.sedf.period )
  54.675                      return -EINVAL;
  54.676 -                EDOM_INFO(ed)->weight = 0;
  54.677 -                EDOM_INFO(ed)->extraweight = 0;
  54.678 -                EDOM_INFO(ed)->period_orig = 
  54.679 -                    EDOM_INFO(ed)->period   = cmd->u.sedf.period;
  54.680 -                EDOM_INFO(ed)->slice_orig  = 
  54.681 -                    EDOM_INFO(ed)->slice    = cmd->u.sedf.slice;
  54.682 +                EDOM_INFO(v)->weight = 0;
  54.683 +                EDOM_INFO(v)->extraweight = 0;
  54.684 +                EDOM_INFO(v)->period_orig = 
  54.685 +                    EDOM_INFO(v)->period   = cmd->u.sedf.period;
  54.686 +                EDOM_INFO(v)->slice_orig  = 
  54.687 +                    EDOM_INFO(v)->slice    = cmd->u.sedf.slice;
  54.688              }
  54.689          }
  54.690          if (sedf_adjust_weights(cmd))
  54.691              return -EINVAL;
  54.692     
  54.693 -        for_each_exec_domain(p, ed) {
  54.694 -            EDOM_INFO(ed)->status  = 
  54.695 -                (EDOM_INFO(ed)->status &
  54.696 +        for_each_vcpu(p, v) {
  54.697 +            EDOM_INFO(v)->status  = 
  54.698 +                (EDOM_INFO(v)->status &
  54.699                   ~EXTRA_AWARE) | (cmd->u.sedf.extratime & EXTRA_AWARE);
  54.700 -            EDOM_INFO(ed)->latency = cmd->u.sedf.latency;
  54.701 -            extraq_check(ed);
  54.702 +            EDOM_INFO(v)->latency = cmd->u.sedf.latency;
  54.703 +            extraq_check(v);
  54.704          }
  54.705      }
  54.706      else if ( cmd->direction == SCHED_INFO_GET )
  54.707      {
  54.708 -        cmd->u.sedf.period    = EDOM_INFO(p->exec_domain[0])->period;
  54.709 -        cmd->u.sedf.slice     = EDOM_INFO(p->exec_domain[0])->slice;
  54.710 -        cmd->u.sedf.extratime = EDOM_INFO(p->exec_domain[0])->status
  54.711 +        cmd->u.sedf.period    = EDOM_INFO(p->vcpu[0])->period;
  54.712 +        cmd->u.sedf.slice     = EDOM_INFO(p->vcpu[0])->slice;
  54.713 +        cmd->u.sedf.extratime = EDOM_INFO(p->vcpu[0])->status
  54.714              & EXTRA_AWARE;
  54.715 -        cmd->u.sedf.latency   = EDOM_INFO(p->exec_domain[0])->latency;
  54.716 -        cmd->u.sedf.weight    = EDOM_INFO(p->exec_domain[0])->weight;
  54.717 +        cmd->u.sedf.latency   = EDOM_INFO(p->vcpu[0])->latency;
  54.718 +        cmd->u.sedf.weight    = EDOM_INFO(p->vcpu[0])->weight;
  54.719      }
  54.720      PRINT(2,"sedf_adjdom_finished\n");
  54.721      return 0;
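
Editor's note: after the rename, sedf_adjdom's SET path applies parameters to every vcpu via for_each_vcpu, while the GET path reports vcpu[0] only. Below, a mock caller's view of the command, flattening the in-tree cmd->u.sedf union for brevity; SCHED_INFO_PUT and the constants' values are assumptions (only SCHED_INFO_GET appears in the hunk):

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_INFO_PUT 0   /* assumed encoding */
    #define SCHED_INFO_GET 1
    #define EXTRA_AWARE    1   /* assumed flag value */

    struct sched_adjdom_cmd {
        int direction;
        struct { uint64_t period, slice, latency; int extratime, weight; } sedf;
    };

    int main(void)
    {
        /* time-driven domain: 10ms slice in a 100ms period, extratime on */
        struct sched_adjdom_cmd cmd = {
            .direction = SCHED_INFO_PUT,
            .sedf = { .period = 100000000ULL, .slice = 10000000ULL,
                      .latency = 0, .extratime = EXTRA_AWARE, .weight = 0 },
        };
        if (cmd.sedf.slice > cmd.sedf.period)   /* same sanity check as above */
            return 1;
        printf("would set %llu/%llu on every vcpu of the domain\n",
               (unsigned long long)cmd.sedf.slice,
               (unsigned long long)cmd.sedf.period);
        return 0;
    }
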
    55.1 --- a/xen/common/schedule.c	Thu Jun 02 19:19:24 2005 +0000
    55.2 +++ b/xen/common/schedule.c	Thu Jun 02 21:05:33 2005 +0000
    55.3 @@ -83,57 +83,57 @@ void free_domain_struct(struct domain *d
    55.4  
    55.5      SCHED_OP(free_task, d);
    55.6      for (i = 0; i < MAX_VIRT_CPUS; i++)
    55.7 -        if ( d->exec_domain[i] )
    55.8 -            arch_free_exec_domain_struct(d->exec_domain[i]);
    55.9 +        if ( d->vcpu[i] )
   55.10 +            arch_free_vcpu_struct(d->vcpu[i]);
   55.11  
   55.12      xfree(d);
   55.13  }
   55.14  
   55.15 -struct exec_domain *alloc_exec_domain_struct(
   55.16 +struct vcpu *alloc_vcpu_struct(
   55.17      struct domain *d, unsigned long vcpu)
   55.18  {
   55.19 -    struct exec_domain *ed, *edc;
   55.20 +    struct vcpu *v, *vc;
   55.21  
   55.22 -    ASSERT( d->exec_domain[vcpu] == NULL );
   55.23 +    ASSERT( d->vcpu[vcpu] == NULL );
   55.24  
   55.25 -    if ( (ed = arch_alloc_exec_domain_struct()) == NULL )
   55.26 +    if ( (v = arch_alloc_vcpu_struct()) == NULL )
   55.27          return NULL;
   55.28  
   55.29 -    memset(ed, 0, sizeof(*ed));
   55.30 +    memset(v, 0, sizeof(*v));
   55.31  
   55.32 -    d->exec_domain[vcpu] = ed;
   55.33 -    ed->domain = d;
   55.34 -    ed->vcpu_id = vcpu;
   55.35 +    d->vcpu[vcpu] = v;
   55.36 +    v->domain = d;
   55.37 +    v->vcpu_id = vcpu;
   55.38  
   55.39 -    if ( SCHED_OP(alloc_task, ed) < 0 )
   55.40 +    if ( SCHED_OP(alloc_task, v) < 0 )
   55.41          goto out;
   55.42  
   55.43      if ( vcpu != 0 )
   55.44      {
   55.45 -        ed->vcpu_info = &d->shared_info->vcpu_data[ed->vcpu_id];
   55.46 +        v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
   55.47  
   55.48 -        for_each_exec_domain( d, edc )
   55.49 +        for_each_vcpu( d, vc )
   55.50          {
   55.51 -            if ( (edc->next_in_list == NULL) ||
   55.52 -                 (edc->next_in_list->vcpu_id > vcpu) )
   55.53 +            if ( (vc->next_in_list == NULL) ||
   55.54 +                 (vc->next_in_list->vcpu_id > vcpu) )
   55.55                  break;
   55.56          }
   55.57 -        ed->next_in_list  = edc->next_in_list;
   55.58 -        edc->next_in_list = ed;
   55.59 +        v->next_in_list  = vc->next_in_list;
   55.60 +        vc->next_in_list = v;
   55.61  
   55.62 -        if (test_bit(_VCPUF_cpu_pinned, &edc->vcpu_flags)) {
   55.63 -            ed->processor = (edc->processor + 1) % num_online_cpus();
   55.64 -            set_bit(_VCPUF_cpu_pinned, &ed->vcpu_flags);
   55.65 +        if (test_bit(_VCPUF_cpu_pinned, &vc->vcpu_flags)) {
   55.66 +            v->processor = (vc->processor + 1) % num_online_cpus();
   55.67 +            set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
   55.68          } else {
   55.69 -            ed->processor = (edc->processor + 1) % num_online_cpus();
   55.70 +            v->processor = (vc->processor + 1) % num_online_cpus();
   55.71          }
   55.72      }
   55.73  
   55.74 -    return ed;
   55.75 +    return v;
   55.76  
   55.77   out:
   55.78 -    d->exec_domain[vcpu] = NULL;
   55.79 -    arch_free_exec_domain_struct(ed);
   55.80 +    d->vcpu[vcpu] = NULL;
   55.81 +    arch_free_vcpu_struct(v);
   55.82  
   55.83      return NULL;
   55.84  }
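
Editor's note: alloc_vcpu_struct splices the new vcpu into the domain's singly linked, vcpu_id-sorted next_in_list chain: walk until the successor is NULL or has a larger id, then link in. The same insertion in isolation:

    #include <stdio.h>

    struct vcpu {
        int vcpu_id;
        struct vcpu *next_in_list;
    };

    /* Insert v into the sorted chain headed by d0 (vcpu 0 always exists),
     * mirroring the for_each_vcpu walk: stop at the last node whose
     * successor is NULL or has a larger vcpu_id, then splice v in. */
    static void insert_sorted(struct vcpu *d0, struct vcpu *v)
    {
        struct vcpu *vc = d0;
        while (vc->next_in_list && vc->next_in_list->vcpu_id <= v->vcpu_id)
            vc = vc->next_in_list;
        v->next_in_list  = vc->next_in_list;
        vc->next_in_list = v;
    }

    int main(void)
    {
        struct vcpu v0 = { 0, 0 }, v2 = { 2, 0 }, v1 = { 1, 0 };
        insert_sorted(&v0, &v2);
        insert_sorted(&v0, &v1);
        for (struct vcpu *p = &v0; p; p = p->next_in_list)
            printf("%d ", p->vcpu_id);   /* 0 1 2 */
        printf("\n");
        return 0;
    }
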
   55.85 @@ -147,7 +147,7 @@ struct domain *alloc_domain_struct(void)
   55.86      
   55.87      memset(d, 0, sizeof(*d));
   55.88  
   55.89 -    if ( alloc_exec_domain_struct(d, 0) == NULL )
   55.90 +    if ( alloc_vcpu_struct(d, 0) == NULL )
   55.91          goto out;
   55.92  
   55.93      return d;
   55.94 @@ -160,92 +160,92 @@ struct domain *alloc_domain_struct(void)
   55.95  /*
   55.96   * Add and remove a domain
   55.97   */
   55.98 -void sched_add_domain(struct exec_domain *ed) 
   55.99 +void sched_add_domain(struct vcpu *v) 
  55.100  {
  55.101 -    struct domain *d = ed->domain;
  55.102 +    struct domain *d = v->domain;
  55.103  
  55.104      /* Initialise the per-domain timer. */
  55.105 -    init_ac_timer(&ed->timer, dom_timer_fn, ed, ed->processor);
  55.106 +    init_ac_timer(&v->timer, dom_timer_fn, v, v->processor);
  55.107  
  55.108      if ( is_idle_task(d) )
  55.109      {
  55.110 -        schedule_data[ed->processor].curr = ed;
  55.111 -        schedule_data[ed->processor].idle = ed;
  55.112 -        set_bit(_VCPUF_running, &ed->vcpu_flags);
  55.113 +        schedule_data[v->processor].curr = v;
  55.114 +        schedule_data[v->processor].idle = v;
  55.115 +        set_bit(_VCPUF_running, &v->vcpu_flags);
  55.116      }
  55.117      else
  55.118      {
  55.119          /* Must be unpaused by control software to start execution. */
  55.120 -        set_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags);
  55.121 +        set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);
  55.122      }
  55.123  
  55.124 -    SCHED_OP(add_task, ed);
  55.125 -    TRACE_2D(TRC_SCHED_DOM_ADD, d->domain_id, ed->vcpu_id);
  55.126 +    SCHED_OP(add_task, v);
  55.127 +    TRACE_2D(TRC_SCHED_DOM_ADD, d->domain_id, v->vcpu_id);
  55.128  }
  55.129  
  55.130 -void sched_rem_domain(struct exec_domain *ed) 
  55.131 +void sched_rem_domain(struct vcpu *v) 
  55.132  {
  55.133 -    rem_ac_timer(&ed->timer);
  55.134 -    SCHED_OP(rem_task, ed);
  55.135 -    TRACE_2D(TRC_SCHED_DOM_REM, ed->domain->domain_id, ed->vcpu_id);
  55.136 +    rem_ac_timer(&v->timer);
  55.137 +    SCHED_OP(rem_task, v);
  55.138 +    TRACE_2D(TRC_SCHED_DOM_REM, v->domain->domain_id, v->vcpu_id);
  55.139  }
  55.140  
  55.141 -void domain_sleep_nosync(struct exec_domain *ed)
  55.142 +void domain_sleep_nosync(struct vcpu *v)
  55.143  {
  55.144      unsigned long flags;
  55.145  
  55.146 -    spin_lock_irqsave(&schedule_data[ed->processor].schedule_lock, flags);
  55.147 -    if ( likely(!domain_runnable(ed)) )
  55.148 -        SCHED_OP(sleep, ed);
  55.149 -    spin_unlock_irqrestore(&schedule_data[ed->processor].schedule_lock, flags);
  55.150 +    spin_lock_irqsave(&schedule_data[v->processor].schedule_lock, flags);
  55.151 +    if ( likely(!domain_runnable(v)) )
  55.152 +        SCHED_OP(sleep, v);
  55.153 +    spin_unlock_irqrestore(&schedule_data[v->processor].schedule_lock, flags);
  55.154  
  55.155 -    TRACE_2D(TRC_SCHED_SLEEP, ed->domain->domain_id, ed->vcpu_id);
  55.156 +    TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
  55.157  } 
  55.158  
  55.159 -void domain_sleep_sync(struct exec_domain *ed)
  55.160 +void domain_sleep_sync(struct vcpu *v)
  55.161  {
  55.162 -    domain_sleep_nosync(ed);
  55.163 +    domain_sleep_nosync(v);
  55.164  
  55.165 -    while ( test_bit(_VCPUF_running, &ed->vcpu_flags) && !domain_runnable(ed) )
  55.166 +    while ( test_bit(_VCPUF_running, &v->vcpu_flags) && !domain_runnable(v) )
  55.167          cpu_relax();
  55.168  
  55.169 -    sync_lazy_execstate_cpuset(ed->domain->cpuset & (1UL << ed->processor));
  55.170 +    sync_lazy_execstate_cpuset(v->domain->cpuset & (1UL << v->processor));
  55.171  }
  55.172  
  55.173 -void domain_wake(struct exec_domain *ed)
  55.174 +void domain_wake(struct vcpu *v)
  55.175  {
  55.176      unsigned long flags;
  55.177  
  55.178 -    spin_lock_irqsave(&schedule_data[ed->processor].schedule_lock, flags);
  55.179 -    if ( likely(domain_runnable(ed)) )
  55.180 +    spin_lock_irqsave(&schedule_data[v->processor].schedule_lock, flags);
  55.181 +    if ( likely(domain_runnable(v)) )
  55.182      {
  55.183 -        SCHED_OP(wake, ed);
  55.184 +        SCHED_OP(wake, v);
  55.185  #ifdef WAKE_HISTO
  55.186 -        ed->wokenup = NOW();
  55.187 +        v->wokenup = NOW();
  55.188  #endif
  55.189      }
  55.190 -    clear_bit(_VCPUF_cpu_migrated, &ed->vcpu_flags);
  55.191 -    spin_unlock_irqrestore(&schedule_data[ed->processor].schedule_lock, flags);
  55.192 +    clear_bit(_VCPUF_cpu_migrated, &v->vcpu_flags);
  55.193 +    spin_unlock_irqrestore(&schedule_data[v->processor].schedule_lock, flags);
  55.194  
  55.195 -    TRACE_2D(TRC_SCHED_WAKE, ed->domain->domain_id, ed->vcpu_id);
  55.196 +    TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
  55.197  }
  55.198  
  55.199  /* Block the currently-executing domain until a pertinent event occurs. */
  55.200  long do_block(void)
  55.201  {
  55.202 -    struct exec_domain *ed = current;
  55.203 +    struct vcpu *v = current;
  55.204  
  55.205 -    ed->vcpu_info->evtchn_upcall_mask = 0;
  55.206 -    set_bit(_VCPUF_blocked, &ed->vcpu_flags);
  55.207 +    v->vcpu_info->evtchn_upcall_mask = 0;
  55.208 +    set_bit(_VCPUF_blocked, &v->vcpu_flags);
  55.209  
  55.210      /* Check for events /after/ blocking: avoids wakeup waiting race. */
  55.211 -    if ( event_pending(ed) )
  55.212 +    if ( event_pending(v) )
  55.213      {
  55.214 -        clear_bit(_VCPUF_blocked, &ed->vcpu_flags);
  55.215 +        clear_bit(_VCPUF_blocked, &v->vcpu_flags);
  55.216      }
  55.217      else
  55.218      {
  55.219 -        TRACE_2D(TRC_SCHED_BLOCK, ed->domain->domain_id, ed->vcpu_id);
  55.220 +        TRACE_2D(TRC_SCHED_BLOCK, v->domain->domain_id, v->vcpu_id);
  55.221          __enter_scheduler();
  55.222      }
  55.223  
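
Editor's note: do_block above closes the classic lost-wakeup window by publishing the blocked flag before re-checking for pending events, so an event that fires in between is observed rather than slept through. The bare pattern, with C11 atomics standing in for the hypervisor's bitops:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool blocked, event_pending;

    /* A wakeup path sets the event and clears the blocked flag; the block
     * path must set blocked *before* sampling event_pending, otherwise an
     * event that fires between the check and the flag set is lost. */
    static void do_block(void)
    {
        atomic_store(&blocked, true);
        if (atomic_load(&event_pending))
            atomic_store(&blocked, false);   /* event beat us: don't sleep */
        else
            printf("entering scheduler to sleep\n");
    }

    int main(void)
    {
        atomic_store(&event_pending, true);  /* simulate a racing event */
        do_block();
        printf("blocked=%d\n", (int)atomic_load(&blocked));  /* 0 */
        return 0;
    }
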
  55.224 @@ -300,12 +300,12 @@ long do_sched_op(unsigned long op)
  55.225  /* Per-domain one-shot-timer hypercall. */
  55.226  long do_set_timer_op(s_time_t timeout)
  55.227  {
  55.228 -    struct exec_domain *ed = current;
  55.229 +    struct vcpu *v = current;
  55.230  
  55.231      if ( timeout == 0 )
  55.232 -        rem_ac_timer(&ed->timer);
  55.233 +        rem_ac_timer(&v->timer);
  55.234      else
  55.235 -        set_ac_timer(&ed->timer, timeout);
  55.236 +        set_ac_timer(&v->timer, timeout);
  55.237  
  55.238      return 0;
  55.239  }
  55.240 @@ -331,7 +331,7 @@ long sched_ctl(struct sched_ctl_cmd *cmd
  55.241  long sched_adjdom(struct sched_adjdom_cmd *cmd)
  55.242  {
  55.243      struct domain *d;
  55.244 -    struct exec_domain *ed;
  55.245 +    struct vcpu *v;
  55.246      int cpu;
  55.247  #if NR_CPUS <=32
  55.248      unsigned long have_lock;
  55.249 @@ -354,12 +354,12 @@ long sched_adjdom(struct sched_adjdom_cm
  55.250      if ( d == NULL )
  55.251          return -ESRCH;
  55.252  
  55.253 -    /* acquire locks on all CPUs on which exec_domains of this domain run */
  55.254 +    /* acquire locks on all CPUs on which vcpus of this domain run */
  55.255      do {
  55.256          succ = 0;
  55.257          __clear_cpu_bits(have_lock);
  55.258 -        for_each_exec_domain(d, ed) {
  55.259 -            cpu = ed->processor;
  55.260 +        for_each_vcpu(d, v) {
  55.261 +            cpu = v->processor;
  55.262              if (!__get_cpu_bit(cpu, have_lock)) {
  55.263                  /* if we don't have a lock on this CPU: acquire it*/
  55.264                  if (spin_trylock(&schedule_data[cpu].schedule_lock)) {
  55.265 @@ -379,9 +379,9 @@ long sched_adjdom(struct sched_adjdom_cm
  55.266              }
  55.267          }
  55.268      } while (!succ);
  55.269 -    //spin_lock_irq(&schedule_data[d->exec_domain[0]->processor].schedule_lock);
  55.270 +    //spin_lock_irq(&schedule_data[d->vcpu[0]->processor].schedule_lock);
  55.271      SCHED_OP(adjdom, d, cmd);
  55.272 -    //spin_unlock_irq(&schedule_data[d->exec_domain[0]->processor].schedule_lock);
  55.273 +    //spin_unlock_irq(&schedule_data[d->vcpu[0]->processor].schedule_lock);
  55.274      for (cpu = 0; cpu < NR_CPUS; cpu++)
  55.275          if (__get_cpu_bit(cpu, have_lock))
  55.276              spin_unlock(&schedule_data[cpu].schedule_lock);
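
Editor's note: to adjust scheduling parameters safely, sched_adjdom must hold the schedule_lock of every CPU any vcpu of the domain runs on. It avoids lock-ordering deadlock by trylocking and, on any failure, releasing everything it holds and retrying from scratch. A pthread rendition of that acquire-all-or-back-off loop:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NCPUS 4
    static pthread_mutex_t lock[NCPUS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    /* Acquire the locks for all CPUs named in `need`; if any trylock
     * fails, release what we hold and start over rather than blocking
     * while holding locks in arbitrary order. */
    static void lock_cpus(const bool need[NCPUS], bool have[NCPUS])
    {
        bool succ;
        do {
            succ = true;
            for (int c = 0; c < NCPUS; c++) have[c] = false;
            for (int c = 0; c < NCPUS && succ; c++) {
                if (!need[c]) continue;
                if (pthread_mutex_trylock(&lock[c]) == 0)
                    have[c] = true;
                else
                    succ = false;     /* back off and retry */
            }
            if (!succ)
                for (int c = 0; c < NCPUS; c++)
                    if (have[c]) pthread_mutex_unlock(&lock[c]);
        } while (!succ);
    }

    int main(void)
    {
        bool need[NCPUS] = { true, false, true, false }, have[NCPUS];
        lock_cpus(need, have);
        printf("holding CPUs 0 and 2\n");
        for (int c = 0; c < NCPUS; c++)
            if (have[c]) pthread_mutex_unlock(&lock[c]);
        return 0;
    }
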
  55.277 @@ -399,7 +399,7 @@ long sched_adjdom(struct sched_adjdom_cm
  55.278   */
  55.279  static void __enter_scheduler(void)
  55.280  {
  55.281 -    struct exec_domain *prev = current, *next = NULL;
  55.282 +    struct vcpu *prev = current, *next = NULL;
  55.283      int                 cpu = prev->processor;
  55.284      s_time_t            now;
  55.285      struct task_slice   next_slice;
  55.286 @@ -477,7 +477,7 @@ static void __enter_scheduler(void)
  55.287  /* No locking needed -- pointer comparison is safe :-) */
  55.288  int idle_cpu(int cpu)
  55.289  {
  55.290 -    struct exec_domain *p = schedule_data[cpu].curr;
  55.291 +    struct vcpu *p = schedule_data[cpu].curr;
  55.292      return p == idle_task[cpu];
  55.293  }
  55.294  
  55.295 @@ -499,15 +499,15 @@ static void s_timer_fn(void *unused)
  55.296  /* Periodic tick timer: send timer event to current domain */
  55.297  static void t_timer_fn(void *unused)
  55.298  {
  55.299 -    struct exec_domain *ed  = current;
  55.300 -    unsigned int        cpu = ed->processor;
  55.301 +    struct vcpu  *v  = current;
  55.302 +    unsigned int  cpu = v->processor;
  55.303  
  55.304      schedule_data[cpu].tick++;
  55.305  
  55.306 -    if ( !is_idle_task(ed->domain) )
  55.307 +    if ( !is_idle_task(v->domain) )
  55.308      {
  55.309 -        update_dom_time(ed);
  55.310 -        send_guest_virq(ed, VIRQ_TIMER);
  55.311 +        update_dom_time(v);
  55.312 +        send_guest_virq(v, VIRQ_TIMER);
  55.313      }
  55.314  
  55.315      page_scrub_schedule_work();
  55.316 @@ -518,10 +518,10 @@ static void t_timer_fn(void *unused)
  55.317  /* Domain timer function, sends a virtual timer interrupt to domain */
  55.318  static void dom_timer_fn(void *data)
  55.319  {
  55.320 -    struct exec_domain *ed = data;
  55.321 +    struct vcpu *v = data;
  55.322  
  55.323 -    update_dom_time(ed);
  55.324 -    send_guest_virq(ed, VIRQ_TIMER);
  55.325 +    update_dom_time(v);
  55.326 +    send_guest_virq(v, VIRQ_TIMER);
  55.327  }
  55.328  
  55.329  /* Initialise the data structures. */
    56.1 --- a/xen/drivers/char/console.c	Thu Jun 02 19:19:24 2005 +0000
    56.2 +++ b/xen/drivers/char/console.c	Thu Jun 02 21:05:33 2005 +0000
    56.3 @@ -269,7 +269,7 @@ static void __serial_rx(char c, struct c
    56.4      if ( (serial_rx_prod-serial_rx_cons) != SERIAL_RX_SIZE )
    56.5          serial_rx_ring[SERIAL_RX_MASK(serial_rx_prod++)] = c;
    56.6      /* Always notify the guest: prevents receive path from getting stuck. */
    56.7 -    send_guest_virq(dom0->exec_domain[0], VIRQ_CONSOLE);
    56.8 +    send_guest_virq(dom0->vcpu[0], VIRQ_CONSOLE);
    56.9  }
   56.10  
   56.11  static void serial_rx(char c, struct cpu_user_regs *regs)
    57.1 --- a/xen/include/asm-ia64/config.h	Thu Jun 02 19:19:24 2005 +0000
    57.2 +++ b/xen/include/asm-ia64/config.h	Thu Jun 02 21:05:33 2005 +0000
    57.3 @@ -107,13 +107,13 @@ struct page;
    57.4  
    57.5  // initial task has a different name in Xen
    57.6  //#define	idle0_task	init_task
    57.7 -#define	idle0_exec_domain	init_task
    57.8 +#define	idle0_vcpu	init_task
    57.9  
   57.10  // avoid redefining task_t in asm/thread_info.h
   57.11  #define task_t	struct domain
   57.12  
   57.13  // avoid redefining task_struct in asm/current.h
   57.14 -#define task_struct exec_domain
   57.15 +#define task_struct vcpu
   57.16  
   57.17  // linux/include/asm-ia64/machvec.h (linux/arch/ia64/lib/io.c)
   57.18  #define platform_inb	__ia64_inb
    58.1 --- a/xen/include/asm-ia64/domain.h	Thu Jun 02 19:19:24 2005 +0000
    58.2 +++ b/xen/include/asm-ia64/domain.h	Thu Jun 02 21:05:33 2005 +0000
    58.3 @@ -8,10 +8,10 @@
    58.4  #include <asm/regionreg.h>
    58.5  #endif // CONFIG_VTI
    58.6  
    58.7 -extern void arch_do_createdomain(struct exec_domain *);
    58.8 +extern void arch_do_createdomain(struct vcpu *);
    58.9  
   58.10  extern int arch_final_setup_guestos(
   58.11 -    struct exec_domain *, struct vcpu_guest_context *);
   58.12 +    struct vcpu *, struct vcpu_guest_context *);
   58.13  
   58.14  extern void domain_relinquish_resources(struct domain *);
   58.15  
   58.16 @@ -55,7 +55,7 @@ struct arch_domain {
   58.17  #define xen_vaend arch.xen_vaend
   58.18  #define shared_info_va arch.shared_info_va
   58.19  
   58.20 -struct arch_exec_domain {
   58.21 +struct arch_vcpu {
   58.22  #if 1
   58.23  	TR_ENTRY itrs[NITRS];
   58.24  	TR_ENTRY dtrs[NDTRS];
   58.25 @@ -77,7 +77,7 @@ struct arch_exec_domain {
   58.26      struct mm_struct *active_mm;
   58.27      struct thread_struct _thread;	// this must be last
   58.28  #ifdef CONFIG_VTI
   58.29 -    void (*schedule_tail) (struct exec_domain *);
   58.30 +    void (*schedule_tail) (struct vcpu *);
   58.31      struct trap_bounce trap_bounce;
   58.32      thash_cb_t *vtlb;
   58.33      //for phycial  emulation
    59.1 --- a/xen/include/asm-ia64/vcpu.h	Thu Jun 02 19:19:24 2005 +0000
    59.2 +++ b/xen/include/asm-ia64/vcpu.h	Thu Jun 02 21:05:33 2005 +0000
    59.3 @@ -10,8 +10,8 @@
    59.4  typedef	unsigned long UINT64;
    59.5  typedef	unsigned int UINT;
    59.6  typedef	int BOOLEAN;
    59.7 -struct exec_domain;
    59.8 -typedef	struct exec_domain VCPU;
    59.9 +struct vcpu;
   59.10 +typedef	struct vcpu VCPU;
   59.11  
   59.12  // NOTE: The actual VCPU structure (struct virtualcpu) is defined in
   59.13  // thread.h.  Moving it to here caused a lot of files to change, so
    60.1 --- a/xen/include/asm-ia64/vmmu.h	Thu Jun 02 19:19:24 2005 +0000
    60.2 +++ b/xen/include/asm-ia64/vmmu.h	Thu Jun 02 21:05:33 2005 +0000
    60.3 @@ -155,7 +155,7 @@ typedef u64 *(TTAG_FN)(PTA pta, u64 va, 
    60.4  typedef u64 *(GET_MFN_FN)(domid_t d, u64 gpfn, u64 pages);
    60.5  typedef void *(REM_NOTIFIER_FN)(struct hash_cb *hcb, thash_data_t *entry);
    60.6  typedef void (RECYCLE_FN)(struct hash_cb *hc, u64 para);
    60.7 -typedef rr_t (GET_RR_FN)(struct exec_domain *vcpu, u64 reg);
    60.8 +typedef rr_t (GET_RR_FN)(struct vcpu *vcpu, u64 reg);
    60.9  typedef thash_data_t *(FIND_OVERLAP_FN)(struct thash_cb *hcb, 
   60.10          u64 va, u64 ps, int rid, char cl, search_section_t s_sect);
   60.11  typedef thash_data_t *(FIND_NEXT_OVL_FN)(struct thash_cb *hcb);
   60.12 @@ -204,7 +204,7 @@ typedef struct thash_cb {
   60.13          GET_RR_FN       *get_rr_fn;
   60.14          RECYCLE_FN      *recycle_notifier;
   60.15          thash_cch_mem_t *cch_freelist;
   60.16 -        struct exec_domain *vcpu;
   60.17 +        struct vcpu *vcpu;
   60.18          PTA     pta;
   60.19          /* VTLB/VHPT common information */
   60.20          FIND_OVERLAP_FN *find_overlap;
   60.21 @@ -306,7 +306,7 @@ extern void thash_purge_entries_ex(thash
   60.22                          u64 rid, u64 va, u64 sz, 
   60.23                          search_section_t p_sect, 
   60.24                          CACHE_LINE_TYPE cl);
   60.25 -extern thash_cb_t *init_domain_tlb(struct exec_domain *d);
   60.26 +extern thash_cb_t *init_domain_tlb(struct vcpu *d);
   60.27  
   60.28  /*
   60.29   * Purge all TCs or VHPT entries including those in Hash table.
   60.30 @@ -330,8 +330,8 @@ extern thash_data_t *vtlb_lookup_ex(thas
   60.31  extern u64 machine_ttag(PTA pta, u64 va, u64 rid, u64 ps);
   60.32  extern u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps);
   60.33  extern void purge_machine_tc_by_domid(domid_t domid);
   60.34 -extern void machine_tlb_insert(struct exec_domain *d, thash_data_t *tlb);
   60.35 -extern rr_t vmmu_get_rr(struct exec_domain *vcpu, u64 va);
   60.36 +extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb);
   60.37 +extern rr_t vmmu_get_rr(struct vcpu *vcpu, u64 va);
   60.38  
   60.39  #define   VTLB_DEBUG
   60.40  #ifdef   VTLB_DEBUG
    61.1 --- a/xen/include/asm-ia64/vmx.h	Thu Jun 02 19:19:24 2005 +0000
    61.2 +++ b/xen/include/asm-ia64/vmx.h	Thu Jun 02 21:05:33 2005 +0000
    61.3 @@ -29,10 +29,10 @@ extern unsigned int vmx_enabled;
    61.4  extern void vmx_init_env(void);
    61.5  extern void vmx_final_setup_domain(struct domain *d);
    61.6  extern void vmx_init_double_mapping_stub(void);
    61.7 -extern void vmx_save_state(struct exec_domain *ed);
    61.8 -extern void vmx_load_state(struct exec_domain *ed);
    61.9 +extern void vmx_save_state(struct vcpu *v);
   61.10 +extern void vmx_load_state(struct vcpu *v);
   61.11  extern vmx_insert_double_mapping(u64,u64,u64,u64,u64);
   61.12  extern void vmx_purge_double_mapping(u64, u64, u64);
   61.13 -extern void vmx_change_double_mapping(struct exec_domain *ed, u64 oldrr7, u64 newrr7);
   61.14 +extern void vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7);
   61.15  
   61.16  #endif /* _ASM_IA64_VT_H */
    62.1 --- a/xen/include/asm-ia64/vmx_vpd.h	Thu Jun 02 19:19:24 2005 +0000
    62.2 +++ b/xen/include/asm-ia64/vmx_vpd.h	Thu Jun 02 21:05:33 2005 +0000
    62.3 @@ -113,9 +113,9 @@ typedef struct vpd {
    62.4  
    62.5  void vmx_enter_scheduler(void);
    62.6  
    62.7 -//FIXME: Map for LID to exec_domain, Eddie
    62.8 +//FIXME: Map for LID to vcpu, Eddie
    62.9  #define	MAX_NUM_LPS		(1UL<<16)
   62.10 -extern struct exec_domain	*lid_edt[MAX_NUM_LPS];
   62.11 +extern struct vcpu	*lid_edt[MAX_NUM_LPS];
   62.12  
   62.13  struct arch_vmx_struct {
   62.14  //    struct virutal_platform_def     vmx_platform;
    63.1 --- a/xen/include/asm-x86/current.h	Thu Jun 02 19:19:24 2005 +0000
    63.2 +++ b/xen/include/asm-x86/current.h	Thu Jun 02 21:05:33 2005 +0000
    63.3 @@ -11,12 +11,12 @@
    63.4  #include <public/xen.h>
    63.5  #include <asm/page.h>
    63.6  
    63.7 -struct exec_domain;
    63.8 +struct vcpu;
    63.9  
   63.10  struct cpu_info {
   63.11      struct cpu_user_regs guest_cpu_user_regs;
   63.12      unsigned int         processor_id;
   63.13 -    struct exec_domain  *current_ed;
   63.14 +    struct vcpu  *current_ed;
   63.15  };
   63.16  
   63.17  static inline struct cpu_info *get_cpu_info(void)
    64.1 --- a/xen/include/asm-x86/debugger.h	Thu Jun 02 19:19:24 2005 +0000
    64.2 +++ b/xen/include/asm-x86/debugger.h	Thu Jun 02 21:05:33 2005 +0000
    64.3 @@ -60,7 +60,7 @@ static inline int debugger_trap_fatal(
    64.4  static inline int debugger_trap_entry(
    64.5      unsigned int vector, struct cpu_user_regs *regs)
    64.6  {
    64.7 -    struct exec_domain *ed = current;
    64.8 +    struct vcpu *v = current;
    64.9  
    64.10 -    if ( !KERNEL_MODE(ed, regs) || (ed->domain->domain_id == 0) )
    64.11 +    if ( !KERNEL_MODE(v, regs) || (v->domain->domain_id == 0) )
    64.12          return 0;
    65.1 --- a/xen/include/asm-x86/domain.h	Thu Jun 02 19:19:24 2005 +0000
    65.2 +++ b/xen/include/asm-x86/domain.h	Thu Jun 02 21:05:33 2005 +0000
    65.3 @@ -66,13 +66,13 @@ struct arch_domain
    65.4  
    65.5  } __cacheline_aligned;
    65.6  
    65.7 -struct arch_exec_domain
    65.8 +struct arch_vcpu
    65.9  {
   65.10      struct vcpu_guest_context guest_context;
   65.11  
   65.12      unsigned long      flags; /* TF_ */
   65.13  
   65.14 -    void (*schedule_tail) (struct exec_domain *);
   65.15 +    void (*schedule_tail) (struct vcpu *);
   65.16  
   65.17      /* Bounce information for propagating an exception to guest OS. */
   65.18      struct trap_bounce trap_bounce;
    66.1 --- a/xen/include/asm-x86/i387.h	Thu Jun 02 19:19:24 2005 +0000
    66.2 +++ b/xen/include/asm-x86/i387.h	Thu Jun 02 21:05:33 2005 +0000
    66.3 @@ -15,8 +15,8 @@
    66.4  #include <asm/processor.h>
    66.5  
    66.6  extern void init_fpu(void);
    66.7 -extern void save_init_fpu(struct exec_domain *tsk);
    66.8 -extern void restore_fpu(struct exec_domain *tsk);
    66.9 +extern void save_init_fpu(struct vcpu *tsk);
   66.10 +extern void restore_fpu(struct vcpu *tsk);
   66.11  
   66.12  #define unlazy_fpu(_tsk) do { \
   66.13      if ( test_bit(_VCPUF_fpu_dirtied, &(_tsk)->vcpu_flags) ) \
   66.14 @@ -29,12 +29,12 @@ extern void restore_fpu(struct exec_doma
   66.15  } while ( 0 )
   66.16  
   66.17  /* Make domain the FPU owner */
   66.18 -static inline void setup_fpu(struct exec_domain *ed)
   66.19 +static inline void setup_fpu(struct vcpu *v)
   66.20  {
   66.21 -    if ( !test_and_set_bit(_VCPUF_fpu_dirtied, &ed->vcpu_flags) )
   66.22 +    if ( !test_and_set_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
   66.23      {
   66.24 -        if ( test_bit(_VCPUF_fpu_initialised, &ed->vcpu_flags) )
   66.25 -            restore_fpu(ed);
   66.26 +        if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
   66.27 +            restore_fpu(v);
   66.28          else
   66.29              init_fpu();
   66.30      }
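
Editor's note: setup_fpu is the lazy-FPU gate: the first FPU touch after a context switch atomically sets fpu_dirtied and pays for a restore (or a first-time init); subsequent touches fall straight through. A minimal model of that gate, with printfs in place of the real restore_fpu/init_fpu:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct vcpu { atomic_flag fpu_dirtied; bool fpu_initialised; };

    /* First caller per interval wins the flag and does the expensive
     * restore/init; everyone after that falls straight through. The
     * scheduler would clear the flag (and save state) on context switch. */
    static void setup_fpu(struct vcpu *v)
    {
        if (!atomic_flag_test_and_set(&v->fpu_dirtied)) {
            if (v->fpu_initialised)
                printf("restore_fpu\n");
            else {
                printf("init_fpu\n");
                v->fpu_initialised = true;
            }
        }
    }

    int main(void)
    {
        struct vcpu v = { ATOMIC_FLAG_INIT, false };
        setup_fpu(&v);   /* init_fpu */
        setup_fpu(&v);   /* no output: already dirtied */
        return 0;
    }
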
    67.1 --- a/xen/include/asm-x86/ldt.h	Thu Jun 02 19:19:24 2005 +0000
    67.2 +++ b/xen/include/asm-x86/ldt.h	Thu Jun 02 21:05:33 2005 +0000
    67.3 @@ -4,13 +4,13 @@
    67.4  
    67.5  #ifndef __ASSEMBLY__
    67.6  
    67.7 -static inline void load_LDT(struct exec_domain *ed)
    67.8 +static inline void load_LDT(struct vcpu *v)
    67.9  {
   67.10      unsigned int cpu;
   67.11      struct desc_struct *desc;
   67.12      unsigned long ents;
   67.13  
   67.14 -    if ( (ents = ed->arch.guest_context.ldt_ents) == 0 )
   67.15 +    if ( (ents = v->arch.guest_context.ldt_ents) == 0 )
   67.16      {
   67.17          __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );
   67.18      }
   67.19 @@ -18,9 +18,9 @@ static inline void load_LDT(struct exec_
   67.20      {
   67.21          cpu = smp_processor_id();
   67.22          desc = gdt_table + __LDT(cpu) - FIRST_RESERVED_GDT_ENTRY;
   67.23 -        desc->a = ((LDT_VIRT_START(ed)&0xffff)<<16) | (ents*8-1);
   67.24 -        desc->b = (LDT_VIRT_START(ed)&(0xff<<24)) | 0x8200 |
   67.25 -            ((LDT_VIRT_START(ed)&0xff0000)>>16);
   67.26 +        desc->a = ((LDT_VIRT_START(v)&0xffff)<<16) | (ents*8-1);
   67.27 +        desc->b = (LDT_VIRT_START(v)&(0xff<<24)) | 0x8200 |
   67.28 +            ((LDT_VIRT_START(v)&0xff0000)>>16);
   67.29          __asm__ __volatile__ ( "lldt %%ax" : : "a" (__LDT(cpu)<<3) );
   67.30      }
   67.31  }
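
Editor's note: load_LDT hand-builds the two 32-bit words of the LDT segment descriptor: word a packs base[15:0] with the byte limit (ents*8-1), and word b scatters base[31:24] and base[23:16] around 0x8200 (present, DPL 0, LDT type). A round-trip check of that packing, with a hypothetical base:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Pack an LDT descriptor exactly as load_LDT does: 0x8200 marks a
     * present, system-type (LDT) descriptor. */
    static void pack(uint32_t base, uint32_t limit, uint32_t *a, uint32_t *b)
    {
        *a = ((base & 0xffff) << 16) | (limit & 0xffff);
        *b = (base & (0xffu << 24)) | 0x8200 | ((base & 0xff0000) >> 16);
    }

    static uint32_t unpack_base(uint32_t a, uint32_t b)
    {
        return (b & 0xff000000) | ((b & 0xff) << 16) | (a >> 16);
    }

    int main(void)
    {
        uint32_t a, b;
        uint32_t base = 0xdeadb000, ents = 32;   /* hypothetical values */
        pack(base, ents * 8 - 1, &a, &b);
        assert(unpack_base(a, b) == base);
        printf("a=%08x b=%08x\n", a, b);
        return 0;
    }
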
    68.1 --- a/xen/include/asm-x86/mm.h	Thu Jun 02 19:19:24 2005 +0000
    68.2 +++ b/xen/include/asm-x86/mm.h	Thu Jun 02 21:05:33 2005 +0000
    68.3 @@ -146,7 +146,7 @@ void init_frametable(void);
    68.4  
    68.5  int alloc_page_type(struct pfn_info *page, unsigned int type);
    68.6  void free_page_type(struct pfn_info *page, unsigned int type);
    68.7 -extern void invalidate_shadow_ldt(struct exec_domain *d);
    68.8 +extern void invalidate_shadow_ldt(struct vcpu *d);
    68.9  extern int shadow_remove_all_write_access(
   68.10      struct domain *d, unsigned long gpfn, unsigned long gmfn);
   68.11  extern u32 shadow_remove_all_access( struct domain *d, unsigned long gmfn);
   68.12 @@ -299,7 +299,7 @@ struct ptwr_info {
   68.13      /* Info about last ptwr update batch. */
   68.14      unsigned int prev_nr_updates;
   68.15      /* Exec domain which created writable mapping. */
   68.16 -    struct exec_domain *ed;
   68.17 +    struct vcpu *vcpu;
   68.18  };
   68.19  
   68.20  #define PTWR_PT_ACTIVE 0
   68.21 @@ -348,5 +348,5 @@ void propagate_page_fault(unsigned long 
   68.22  int update_grant_va_mapping(unsigned long va,
   68.23                              l1_pgentry_t _nl1e, 
   68.24                              struct domain *d,
   68.25 -                            struct exec_domain *ed);
   68.26 +                            struct vcpu *v);
   68.27  #endif /* __ASM_X86_MM_H__ */
    69.1 --- a/xen/include/asm-x86/processor.h	Thu Jun 02 19:19:24 2005 +0000
    69.2 +++ b/xen/include/asm-x86/processor.h	Thu Jun 02 21:05:33 2005 +0000
    69.3 @@ -125,14 +125,14 @@
    69.4  #define TBF_INTERRUPT          8
    69.5  #define TBF_FAILSAFE          16
    69.6  
    69.7 -/* 'arch_exec_domain' flags values */
    69.8 +/* 'arch_vcpu' flags values */
    69.9  #define _TF_kernel_mode        0
   69.10  #define TF_kernel_mode         (1<<_TF_kernel_mode)
   69.11  
   69.12  #ifndef __ASSEMBLY__
   69.13  
   69.14  struct domain;
   69.15 -struct exec_domain;
   69.16 +struct vcpu;
   69.17  
   69.18  /*
   69.19   * Default implementation of macro that returns current
   69.20 @@ -401,7 +401,7 @@ extern struct tss_struct init_tss[NR_CPU
   69.21  
   69.22  #ifdef CONFIG_X86_32
   69.23  
   69.24 -extern void init_int80_direct_trap(struct exec_domain *ed);
   69.25 +extern void init_int80_direct_trap(struct vcpu *v);
   69.26  #define set_int80_direct_trap(_ed)                  \
   69.27      (memcpy(idt_tables[(_ed)->processor] + 0x80,    \
   69.28              &((_ed)->arch.int80_desc), 8))
   69.29 @@ -415,14 +415,14 @@ extern void init_int80_direct_trap(struc
   69.30  
   69.31  extern int gpf_emulate_4gb(struct cpu_user_regs *regs);
   69.32  
   69.33 -extern void write_ptbase(struct exec_domain *ed);
   69.34 +extern void write_ptbase(struct vcpu *v);
   69.35  
   69.36 -void destroy_gdt(struct exec_domain *d);
   69.37 -long set_gdt(struct exec_domain *d, 
   69.38 +void destroy_gdt(struct vcpu *d);
   69.39 +long set_gdt(struct vcpu *d, 
   69.40               unsigned long *frames, 
   69.41               unsigned int entries);
   69.42  
   69.43 -long set_debugreg(struct exec_domain *p, int reg, unsigned long value);
   69.44 +long set_debugreg(struct vcpu *p, int reg, unsigned long value);
   69.45  
   69.46  struct microcode_header {
   69.47      unsigned int hdrver;
    70.1 --- a/xen/include/asm-x86/shadow.h	Thu Jun 02 19:19:24 2005 +0000
    70.2 +++ b/xen/include/asm-x86/shadow.h	Thu Jun 02 21:05:33 2005 +0000
    70.3 @@ -58,7 +58,7 @@
    70.4  #define shadow_linear_pg_table ((l1_pgentry_t *)SH_LINEAR_PT_VIRT_START)
    70.5  #define __shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \
    70.6       (SH_LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))
    70.7 -#define shadow_linear_l2_table(_ed) ((_ed)->arch.shadow_vtable)
    70.8 +#define shadow_linear_l2_table(_v) ((_v)->arch.shadow_vtable)
    70.9  
   70.10  // easy access to the hl2 table (for translated but not external modes only)
   70.11  #define __linear_hl2_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START + \
   70.12 @@ -112,12 +112,12 @@ extern void shadow_mode_init(void);
   70.13  extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
   70.14  extern int shadow_fault(unsigned long va, struct cpu_user_regs *regs);
   70.15  extern int shadow_mode_enable(struct domain *p, unsigned int mode);
   70.16 -extern void shadow_invlpg(struct exec_domain *, unsigned long);
   70.17 +extern void shadow_invlpg(struct vcpu *, unsigned long);
   70.18  extern struct out_of_sync_entry *shadow_mark_mfn_out_of_sync(
   70.19 -    struct exec_domain *ed, unsigned long gpfn, unsigned long mfn);
   70.20 -extern void free_monitor_pagetable(struct exec_domain *ed);
   70.21 +    struct vcpu *v, unsigned long gpfn, unsigned long mfn);
   70.22 +extern void free_monitor_pagetable(struct vcpu *v);
   70.23  extern void __shadow_sync_all(struct domain *d);
   70.24 -extern int __shadow_out_of_sync(struct exec_domain *ed, unsigned long va);
   70.25 +extern int __shadow_out_of_sync(struct vcpu *v, unsigned long va);
   70.26  extern int set_p2m_entry(
   70.27      struct domain *d, unsigned long pfn, unsigned long mfn,
   70.28      struct map_dom_mem_cache *l2cache,
   70.29 @@ -142,12 +142,12 @@ extern void shadow_l4_normal_pt_update(s
   70.30  #endif
   70.31  extern int shadow_do_update_va_mapping(unsigned long va,
   70.32                                         l1_pgentry_t val,
   70.33 -                                       struct exec_domain *ed);
   70.34 +                                       struct vcpu *v);
   70.35  
   70.36  
   70.37  static inline unsigned long __shadow_status(
   70.38      struct domain *d, unsigned long gpfn, unsigned long stype);
   70.39 -static inline void update_hl2e(struct exec_domain *ed, unsigned long va);
   70.40 +static inline void update_hl2e(struct vcpu *v, unsigned long va);
   70.41  
   70.42  extern void vmx_shadow_clear_state(struct domain *);
   70.43  
   70.44 @@ -198,23 +198,23 @@ static void inline
   70.45  }
   70.46  
   70.47  static void inline
   70.48 -__shadow_sync_va(struct exec_domain *ed, unsigned long va)
   70.49 +__shadow_sync_va(struct vcpu *v, unsigned long va)
   70.50  {
   70.51 -    struct domain *d = ed->domain;
   70.52 +    struct domain *d = v->domain;
   70.53  
   70.54 -    if ( d->arch.out_of_sync && __shadow_out_of_sync(ed, va) )
   70.55 +    if ( d->arch.out_of_sync && __shadow_out_of_sync(v, va) )
   70.56      {
   70.57          perfc_incrc(shadow_sync_va);
   70.58  
   70.59          // XXX - could be smarter
   70.60          //
   70.61 -        __shadow_sync_all(ed->domain);
   70.62 +        __shadow_sync_all(v->domain);
   70.63      }
   70.64  
   70.65      // Also make sure the HL2 is up-to-date for this address.
   70.66      //
   70.67 -    if ( unlikely(shadow_mode_translate(ed->domain)) )
   70.68 -        update_hl2e(ed, va);
   70.69 +    if ( unlikely(shadow_mode_translate(v->domain)) )
   70.70 +        update_hl2e(v, va);
   70.71  }
   70.72  
   70.73  static void inline
   70.74 @@ -238,13 +238,13 @@ shadow_sync_all(struct domain *d)
   70.75  //          This probably shouldn't exist.
   70.76  //
   70.77  static void inline
   70.78 -shadow_sync_va(struct exec_domain *ed, unsigned long gva)
   70.79 +shadow_sync_va(struct vcpu *v, unsigned long gva)
   70.80  {
   70.81 -    struct domain *d = ed->domain;
   70.82 +    struct domain *d = v->domain;
   70.83      if ( unlikely(shadow_mode_enabled(d)) )
   70.84      {
   70.85          shadow_lock(d);
   70.86 -        __shadow_sync_va(ed, gva);
   70.87 +        __shadow_sync_va(v, gva);
   70.88          shadow_unlock(d);
   70.89      }
   70.90  }
   70.91 @@ -505,56 +505,56 @@ static inline int mark_dirty(struct doma
   70.92  
   70.93  static inline void
   70.94  __shadow_get_l2e(
   70.95 -    struct exec_domain *ed, unsigned long va, l2_pgentry_t *psl2e)
   70.96 +    struct vcpu *v, unsigned long va, l2_pgentry_t *psl2e)
   70.97  {
   70.98 -    ASSERT(shadow_mode_enabled(ed->domain));
   70.99 +    ASSERT(shadow_mode_enabled(v->domain));
  70.100  
  70.101 -    *psl2e = ed->arch.shadow_vtable[l2_table_offset(va)];
  70.102 +    *psl2e = v->arch.shadow_vtable[l2_table_offset(va)];
  70.103  }
  70.104  
  70.105  static inline void
  70.106  __shadow_set_l2e(
  70.107 -    struct exec_domain *ed, unsigned long va, l2_pgentry_t value)
  70.108 +    struct vcpu *v, unsigned long va, l2_pgentry_t value)
  70.109  {
  70.110 -    ASSERT(shadow_mode_enabled(ed->domain));
  70.111 +    ASSERT(shadow_mode_enabled(v->domain));
  70.112  
  70.113 -    ed->arch.shadow_vtable[l2_table_offset(va)] = value;
  70.114 +    v->arch.shadow_vtable[l2_table_offset(va)] = value;
  70.115  }
  70.116  
  70.117  static inline void
  70.118  __guest_get_l2e(
  70.119 -    struct exec_domain *ed, unsigned long va, l2_pgentry_t *pl2e)
  70.120 +    struct vcpu *v, unsigned long va, l2_pgentry_t *pl2e)
  70.121  {
  70.122 -    *pl2e = ed->arch.guest_vtable[l2_table_offset(va)];
  70.123 +    *pl2e = v->arch.guest_vtable[l2_table_offset(va)];
  70.124  }
  70.125  
  70.126  static inline void
  70.127  __guest_set_l2e(
  70.128 -    struct exec_domain *ed, unsigned long va, l2_pgentry_t value)
  70.129 +    struct vcpu *v, unsigned long va, l2_pgentry_t value)
  70.130  {
  70.131 -    struct domain *d = ed->domain;
  70.132 +    struct domain *d = v->domain;
  70.133  
  70.134 -    ed->arch.guest_vtable[l2_table_offset(va)] = value;
  70.135 +    v->arch.guest_vtable[l2_table_offset(va)] = value;
  70.136  
  70.137      if ( unlikely(shadow_mode_translate(d)) )
  70.138 -        update_hl2e(ed, va);
  70.139 +        update_hl2e(v, va);
  70.140  
  70.141      if ( unlikely(shadow_mode_log_dirty(d)) )
  70.142 -        __mark_dirty(d, pagetable_get_pfn(ed->arch.guest_table));
  70.143 +        __mark_dirty(d, pagetable_get_pfn(v->arch.guest_table));
  70.144  }
  70.145  
  70.146  static inline void
  70.147 -update_hl2e(struct exec_domain *ed, unsigned long va)
  70.148 +update_hl2e(struct vcpu *v, unsigned long va)
  70.149  {
  70.150      int index = l2_table_offset(va);
  70.151      unsigned long mfn;
  70.152 -    l2_pgentry_t gl2e = ed->arch.guest_vtable[index];
  70.153 +    l2_pgentry_t gl2e = v->arch.guest_vtable[index];
  70.154      l1_pgentry_t old_hl2e, new_hl2e;
  70.155      int need_flush = 0;
  70.156  
  70.157 -    ASSERT(shadow_mode_translate(ed->domain));
  70.158 +    ASSERT(shadow_mode_translate(v->domain));
  70.159  
  70.160 -    old_hl2e = ed->arch.hl2_vtable[index];
  70.161 +    old_hl2e = v->arch.hl2_vtable[index];
  70.162  
  70.163      if ( (l2e_get_flags(gl2e) & _PAGE_PRESENT) &&
  70.164           VALID_MFN(mfn = phys_to_machine_mapping(l2e_get_pfn(gl2e))) )
  70.165 @@ -567,16 +567,16 @@ update_hl2e(struct exec_domain *ed, unsi
  70.166      if ( (l1e_has_changed(old_hl2e, new_hl2e, PAGE_FLAG_MASK)) )
  70.167      {
  70.168          if ( (l1e_get_flags(new_hl2e) & _PAGE_PRESENT) &&
  70.169 -             !shadow_get_page(ed->domain, pfn_to_page(l1e_get_pfn(new_hl2e)),
  70.170 -                              ed->domain) )
  70.171 +             !shadow_get_page(v->domain, pfn_to_page(l1e_get_pfn(new_hl2e)),
  70.172 +                              v->domain) )
  70.173              new_hl2e = l1e_empty();
  70.174          if ( l1e_get_flags(old_hl2e) & _PAGE_PRESENT )
  70.175          {
  70.176 -            shadow_put_page(ed->domain, pfn_to_page(l1e_get_pfn(old_hl2e)));
  70.177 +            shadow_put_page(v->domain, pfn_to_page(l1e_get_pfn(old_hl2e)));
  70.178              need_flush = 1;
  70.179          }
  70.180  
  70.181 -        ed->arch.hl2_vtable[l2_table_offset(va)] = new_hl2e;
  70.182 +        v->arch.hl2_vtable[l2_table_offset(va)] = new_hl2e;
  70.183  
  70.184          if ( need_flush )
  70.185          {
  70.186 @@ -712,14 +712,14 @@ shadow_unpin(unsigned long smfn)
  70.187  /************************************************************************/
  70.188  
  70.189  extern void shadow_mark_va_out_of_sync(
  70.190 -    struct exec_domain *ed, unsigned long gpfn, unsigned long mfn,
  70.191 +    struct vcpu *v, unsigned long gpfn, unsigned long mfn,
  70.192      unsigned long va);
  70.193  
  70.194  static inline int l1pte_write_fault(
  70.195 -    struct exec_domain *ed, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p,
  70.196 +    struct vcpu *v, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p,
  70.197      unsigned long va)
  70.198  {
  70.199 -    struct domain *d = ed->domain;
  70.200 +    struct domain *d = v->domain;
  70.201      l1_pgentry_t gpte = *gpte_p;
  70.202      l1_pgentry_t spte;
  70.203      unsigned long gpfn = l1e_get_pfn(gpte);
  70.204 @@ -745,7 +745,7 @@ static inline int l1pte_write_fault(
  70.205          __mark_dirty(d, gmfn);
  70.206  
  70.207      if ( mfn_is_page_table(gmfn) )
  70.208 -        shadow_mark_va_out_of_sync(ed, gpfn, gmfn, va);
  70.209 +        shadow_mark_va_out_of_sync(v, gpfn, gmfn, va);
  70.210  
  70.211      *gpte_p = gpte;
  70.212      *spte_p = spte;
  70.213 @@ -1541,11 +1541,11 @@ extern void shadow_map_l1_into_current_l
   70.214  static inline void
  70.215  shadow_set_l1e(unsigned long va, l1_pgentry_t new_spte, int create_l1_shadow)
  70.216  {
  70.217 -    struct exec_domain *ed = current;
  70.218 -    struct domain *d = ed->domain;
  70.219 +    struct vcpu *v = current;
  70.220 +    struct domain *d = v->domain;
  70.221      l2_pgentry_t sl2e;
  70.222  
  70.223 -    __shadow_get_l2e(ed, va, &sl2e);
  70.224 +    __shadow_get_l2e(v, va, &sl2e);
  70.225      if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
  70.226      {
  70.227          /*
  70.228 @@ -1559,7 +1559,7 @@ shadow_set_l1e(unsigned long va, l1_pgen
  70.229          }
  70.230          else /* check to see if it exists; if so, link it in */
  70.231          {
  70.232 -            l2_pgentry_t gpde = linear_l2_table(ed)[l2_table_offset(va)];
  70.233 +            l2_pgentry_t gpde = linear_l2_table(v)[l2_table_offset(va)];
  70.234              unsigned long gl1pfn = l2e_get_pfn(gpde);
  70.235              unsigned long sl1mfn = __shadow_status(d, gl1pfn, PGT_l1_shadow);
  70.236  
  70.237 @@ -1571,8 +1571,8 @@ shadow_set_l1e(unsigned long va, l1_pgen
  70.238                  if ( !get_shadow_ref(sl1mfn) )
  70.239                      BUG();
  70.240                  l2pde_general(d, &gpde, &sl2e, sl1mfn);
  70.241 -                __guest_set_l2e(ed, va, gpde);
  70.242 -                __shadow_set_l2e(ed, va, sl2e);
  70.243 +                __guest_set_l2e(v, va, gpde);
  70.244 +                __shadow_set_l2e(v, va, sl2e);
  70.245              }
  70.246              else
  70.247              {
  70.248 @@ -1631,18 +1631,18 @@ static inline l1_pgentry_t gva_to_gpte(u
  70.249  {
  70.250      l2_pgentry_t gpde;
  70.251      l1_pgentry_t gpte;
  70.252 -    struct exec_domain *ed = current;
  70.253 +    struct vcpu *v = current;
  70.254  
  70.255      ASSERT( shadow_mode_translate(current->domain) );
  70.256  
  70.257 -    __guest_get_l2e(ed, gva, &gpde);
  70.258 +    __guest_get_l2e(v, gva, &gpde);
  70.259      if ( unlikely(!(l2e_get_flags(gpde) & _PAGE_PRESENT)) )
   70.260          return l1e_empty();
  70.261  
  70.262      // This is actually overkill - we only need to make sure the hl2
  70.263      // is in-sync.
  70.264      //
  70.265 -    shadow_sync_va(ed, gva);
  70.266 +    shadow_sync_va(v, gva);
  70.267  
  70.268      if ( unlikely(__copy_from_user(&gpte,
  70.269                                     &linear_pg_table[gva >> PAGE_SHIFT],
  70.270 @@ -1668,22 +1668,22 @@ static inline unsigned long gva_to_gpa(u
  70.271  
  70.272  /************************************************************************/
  70.273  
  70.274 -extern void __update_pagetables(struct exec_domain *ed);
  70.275 -static inline void update_pagetables(struct exec_domain *ed)
  70.276 +extern void __update_pagetables(struct vcpu *v);
  70.277 +static inline void update_pagetables(struct vcpu *v)
  70.278  {
  70.279 -    struct domain *d = ed->domain;
  70.280 +    struct domain *d = v->domain;
  70.281      int paging_enabled;
  70.282  
  70.283  #ifdef CONFIG_VMX
  70.284 -    if ( VMX_DOMAIN(ed) )
  70.285 -        paging_enabled = vmx_paging_enabled(ed);
  70.286 +    if ( VMX_DOMAIN(v) )
  70.287 +        paging_enabled = vmx_paging_enabled(v);
  70.288              
  70.289      else
  70.290  #endif
  70.291          // HACK ALERT: there's currently no easy way to figure out if a domU
   70.292          // has set its arch.guest_table to zero, vs. not yet having initialized it.
  70.293          //
  70.294 -        paging_enabled = !!pagetable_get_paddr(ed->arch.guest_table);
  70.295 +        paging_enabled = !!pagetable_get_paddr(v->arch.guest_table);
  70.296  
  70.297      /*
  70.298       * We don't call __update_pagetables() when vmx guest paging is
  70.299 @@ -1694,33 +1694,33 @@ static inline void update_pagetables(str
  70.300      if ( unlikely(shadow_mode_enabled(d)) && paging_enabled )
  70.301      {
  70.302          shadow_lock(d);
  70.303 -        __update_pagetables(ed);
  70.304 +        __update_pagetables(v);
  70.305          shadow_unlock(d);
  70.306      }
  70.307  
  70.308      if ( likely(!shadow_mode_external(d)) )
  70.309      {
  70.310  #ifdef __x86_64__
  70.311 -        if ( !(ed->arch.flags & TF_kernel_mode) )
  70.312 -            ed->arch.monitor_table = ed->arch.guest_table_user;
  70.313 +        if ( !(v->arch.flags & TF_kernel_mode) )
  70.314 +            v->arch.monitor_table = v->arch.guest_table_user;
  70.315          else
  70.316  #endif
  70.317          if ( shadow_mode_enabled(d) )
  70.318 -            ed->arch.monitor_table = ed->arch.shadow_table;
  70.319 +            v->arch.monitor_table = v->arch.shadow_table;
  70.320          else
  70.321 -            ed->arch.monitor_table = ed->arch.guest_table;
  70.322 +            v->arch.monitor_table = v->arch.guest_table;
  70.323      }
  70.324  }
  70.325  
  70.326  #if SHADOW_DEBUG
  70.327 -extern int _check_pagetable(struct exec_domain *ed, char *s);
  70.328 -extern int _check_all_pagetables(struct exec_domain *ed, char *s);
  70.329 +extern int _check_pagetable(struct vcpu *v, char *s);
  70.330 +extern int _check_all_pagetables(struct vcpu *v, char *s);
  70.331  
  70.332 -#define check_pagetable(_ed, _s) _check_pagetable(_ed, _s)
  70.333 -//#define check_pagetable(_ed, _s) _check_all_pagetables(_ed, _s)
  70.334 +#define check_pagetable(_v, _s) _check_pagetable(_v, _s)
  70.335 +//#define check_pagetable(_v, _s) _check_all_pagetables(_v, _s)
  70.336  
  70.337  #else
  70.338 -#define check_pagetable(_ed, _s) ((void)0)
  70.339 +#define check_pagetable(_v, _s) ((void)0)
  70.340  #endif
  70.341  
  70.342  #endif /* XEN_SHADOW_H */
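
The update_pagetables() hunk above ends by choosing which table becomes the
vcpu's monitor table. A compilable sketch of just that decision; the x86_64
guest_table_user branch is omitted and the stub fields are assumptions, so
treat this as an illustration of the logic rather than the real structures:

#include <stdio.h>

/* Stand-in types: the real struct vcpu/domain carry far more state. */
struct domain { int shadow_enabled, shadow_external; };
struct vcpu {
    struct domain *domain;
    unsigned long guest_table;   /* the guest's own top-level table */
    unsigned long shadow_table;  /* hypervisor-maintained shadow    */
    unsigned long monitor_table; /* table the CPU actually runs on  */
};

/* Mirrors the tail of update_pagetables(): unless the monitor table is
 * managed externally, run on the shadow when shadowing is enabled,
 * otherwise directly on the guest's own table. */
static void pick_monitor_table(struct vcpu *v)
{
    if (v->domain->shadow_external)
        return;
    v->monitor_table = v->domain->shadow_enabled ? v->shadow_table
                                                 : v->guest_table;
}

int main(void)
{
    struct domain d = { .shadow_enabled = 1, .shadow_external = 0 };
    struct vcpu   v = { .domain = &d, .guest_table = 0x1000,
                        .shadow_table = 0x2000 };
    pick_monitor_table(&v);
    printf("monitor_table = %#lx\n", v.monitor_table); /* -> 0x2000 */
    return 0;
}
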
    71.1 --- a/xen/include/asm-x86/vmx.h	Thu Jun 02 19:19:24 2005 +0000
    71.2 +++ b/xen/include/asm-x86/vmx.h	Thu Jun 02 21:05:33 2005 +0000
    71.3 @@ -29,10 +29,10 @@
    71.4  extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
    71.5  extern void vmx_asm_do_resume(void);
    71.6  extern void vmx_asm_do_launch(void);
    71.7 -extern void vmx_intr_assist(struct exec_domain *d);
    71.8 +extern void vmx_intr_assist(struct vcpu *v);
    71.9  
   71.10 -extern void arch_vmx_do_launch(struct exec_domain *);
   71.11 -extern void arch_vmx_do_resume(struct exec_domain *);
   71.12 +extern void arch_vmx_do_launch(struct vcpu *);
   71.13 +extern void arch_vmx_do_resume(struct vcpu *);
   71.14  
   71.15  extern int vmcs_size;
   71.16  extern unsigned int cpu_rev;
   71.17 @@ -296,7 +296,7 @@ static inline void vmx_stts()
   71.18  }
   71.19  
   71.20  /* Works only for v == current */
   71.21 -static inline int vmx_paging_enabled(struct exec_domain *ed)
   71.22 +static inline int vmx_paging_enabled(struct vcpu *v)
   71.23  {
   71.24      unsigned long cr0;
   71.25  
    72.1 --- a/xen/include/asm-x86/vmx_platform.h	Thu Jun 02 19:19:24 2005 +0000
    72.2 +++ b/xen/include/asm-x86/vmx_platform.h	Thu Jun 02 21:05:33 2005 +0000
    72.3 @@ -86,7 +86,7 @@ struct virutal_platform_def {
    72.4  
    72.5  extern void handle_mmio(unsigned long, unsigned long);
    72.6  extern void vmx_wait_io(void);
    72.7 -extern int vmx_setup_platform(struct exec_domain *, struct cpu_user_regs *);
    72.8 +extern int vmx_setup_platform(struct vcpu *, struct cpu_user_regs *);
    72.9  
   72.10  // XXX - think about this -- maybe use bit 30 of the mfn to signify an MMIO frame.
   72.11  #define mmio_space(gpa) (!VALID_MFN(phys_to_machine_mapping((gpa) >> PAGE_SHIFT)))
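
mmio_space() classifies a guest-physical address as emulated MMIO when no
machine frame backs it. A standalone sketch, assuming VALID_MFN(mfn) just
means mfn != INVALID_MFN and with a tiny fixed array standing in for the
real per-domain phys-to-machine table:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define INVALID_MFN (~0UL)

/* Toy p2m table: guest frame 2 deliberately has no machine frame. */
static unsigned long p2m[4] = { 7, 8, INVALID_MFN, 9 };

static unsigned long phys_to_machine_mapping(unsigned long gpfn)
{
    return (gpfn < 4) ? p2m[gpfn] : INVALID_MFN;
}

/* Same idea as the mmio_space() macro above: unmapped guest-physical
 * addresses are treated as device memory and sent to the emulator. */
static int mmio_space(uint64_t gpa)
{
    return phys_to_machine_mapping(gpa >> PAGE_SHIFT) == INVALID_MFN;
}

int main(void)
{
    printf("%d %d\n", mmio_space(0x1234), mmio_space(0x2000)); /* 0 1 */
    return 0;
}
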
    73.1 --- a/xen/include/asm-x86/vmx_virpit.h	Thu Jun 02 19:19:24 2005 +0000
    73.2 +++ b/xen/include/asm-x86/vmx_virpit.h	Thu Jun 02 21:05:33 2005 +0000
    73.3 @@ -37,6 +37,6 @@ struct vmx_virpit_t {
    73.4  } ;
    73.5  
    73.6  /* to hook the ioreq packet to get the PIT initialization info */
    73.7 -extern void vmx_hooks_assist(struct exec_domain *d);
    73.8 +extern void vmx_hooks_assist(struct vcpu *v);
    73.9  
   73.10  #endif /* _VMX_VIRPIT_H_ */
    74.1 --- a/xen/include/asm-x86/vmx_vmcs.h	Thu Jun 02 19:19:24 2005 +0000
    74.2 +++ b/xen/include/asm-x86/vmx_vmcs.h	Thu Jun 02 21:05:33 2005 +0000
    74.3 @@ -55,8 +55,8 @@ struct arch_vmx_struct {
    74.4  #define ARCH_VMX_VMCS_RESUME    2       /* Needs VMCS resume */
    74.5  #define ARCH_VMX_IO_WAIT        3       /* Waiting for I/O completion */
    74.6  
    74.7 -void vmx_do_launch(struct exec_domain *); 
    74.8 -void vmx_do_resume(struct exec_domain *); 
    74.9 +void vmx_do_launch(struct vcpu *); 
   74.10 +void vmx_do_resume(struct vcpu *); 
   74.11  
   74.12  struct vmcs_struct *alloc_vmcs(void);
   74.13  void free_vmcs(struct vmcs_struct *);
    75.1 --- a/xen/include/public/event_channel.h	Thu Jun 02 19:19:24 2005 +0000
    75.2 +++ b/xen/include/public/event_channel.h	Thu Jun 02 21:05:33 2005 +0000
    75.3 @@ -141,7 +141,7 @@ typedef struct {
    75.4          } PACKED interdomain; /* EVTCHNSTAT_interdomain */
    75.5          u32 pirq;      /* EVTCHNSTAT_pirq        */   /* 12 */
    75.6          u32 virq;      /* EVTCHNSTAT_virq        */   /* 12 */
    75.7 -        u32 ipi_edom;  /* EVTCHNSTAT_ipi         */   /* 12 */
    75.8 +        u32 ipi_vcpu;  /* EVTCHNSTAT_ipi         */   /* 12 */
    75.9      } PACKED u;
   75.10  } PACKED evtchn_status_t; /* 20 bytes */
   75.11  
   75.12 @@ -151,7 +151,7 @@ typedef struct {
   75.13  #define EVTCHNOP_bind_ipi         7
   75.14  typedef struct {
   75.15      /* IN parameters. */
   75.16 -    u32 ipi_edom;                     /*  0 */
   75.17 +    u32 ipi_vcpu;                     /*  0 */
   75.18      /* OUT parameters. */
   75.19      u32 port;                         /*  4 */
   75.20  } PACKED evtchn_bind_ipi_t; /* 8 bytes */
    76.1 --- a/xen/include/xen/domain.h	Thu Jun 02 19:19:24 2005 +0000
    76.2 +++ b/xen/include/xen/domain.h	Thu Jun 02 21:05:33 2005 +0000
    76.3 @@ -6,16 +6,16 @@
    76.4   * Arch-specifics.
    76.5   */
    76.6  
    76.7 -struct exec_domain *arch_alloc_exec_domain_struct(void);
    76.8 +struct vcpu *arch_alloc_vcpu_struct(void);
    76.9  
   76.10 -extern void arch_free_exec_domain_struct(struct exec_domain *ed);
   76.11 +extern void arch_free_vcpu_struct(struct vcpu *v);
   76.12  
   76.13 -extern void arch_do_createdomain(struct exec_domain *ed);
   76.14 +extern void arch_do_createdomain(struct vcpu *v);
   76.15  
   76.16 -extern void arch_do_boot_vcpu(struct exec_domain *ed);
   76.17 +extern void arch_do_boot_vcpu(struct vcpu *v);
   76.18  
   76.19  extern int  arch_set_info_guest(
   76.20 -    struct exec_domain *d, struct vcpu_guest_context *c);
   76.21 +    struct vcpu *v, struct vcpu_guest_context *c);
   76.22  
   76.23  extern void free_perdomain_pt(struct domain *d);
   76.24  
    77.1 --- a/xen/include/xen/event.h	Thu Jun 02 19:19:24 2005 +0000
    77.2 +++ b/xen/include/xen/event.h	Thu Jun 02 21:05:33 2005 +0000
    77.3 @@ -20,19 +20,19 @@
    77.4   * may require explicit memory barriers.
    77.5   */
    77.6  
    77.7 -static inline void evtchn_set_pending(struct exec_domain *ed, int port)
    77.8 +static inline void evtchn_set_pending(struct vcpu *v, int port)
    77.9  {
   77.10 -    struct domain *d = ed->domain;
   77.11 +    struct domain *d = v->domain;
   77.12      shared_info_t *s = d->shared_info;
   77.13      int            running;
   77.14  
   77.15      /* These three operations must happen in strict order. */
   77.16      if ( !test_and_set_bit(port,    &s->evtchn_pending[0]) &&
   77.17           !test_bit        (port,    &s->evtchn_mask[0])    &&
   77.18 -         !test_and_set_bit(port>>5, &ed->vcpu_info->evtchn_pending_sel) )
   77.19 +         !test_and_set_bit(port>>5, &v->vcpu_info->evtchn_pending_sel) )
   77.20      {
   77.21          /* The VCPU pending flag must be set /after/ update to evtchn-pend. */
   77.22 -        set_bit(0, &ed->vcpu_info->evtchn_upcall_pending);
   77.23 +        set_bit(0, &v->vcpu_info->evtchn_upcall_pending);
   77.24  
   77.25          /*
   77.26           * NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
   77.27 @@ -43,10 +43,10 @@ static inline void evtchn_set_pending(st
   77.28           * NB2. We save VCPUF_running across the unblock to avoid a needless
   77.29           * IPI for domains that we IPI'd to unblock.
   77.30           */
   77.31 -        running = test_bit(_VCPUF_running, &ed->vcpu_flags);
   77.32 -        exec_domain_unblock(ed);
   77.33 +        running = test_bit(_VCPUF_running, &v->vcpu_flags);
   77.34 +        vcpu_unblock(v);
   77.35          if ( running )
   77.36 -            smp_send_event_check_cpu(ed->processor);
   77.37 +            smp_send_event_check_cpu(v->processor);
   77.38      }
   77.39  }
   77.40  
   77.41 @@ -55,12 +55,12 @@ static inline void evtchn_set_pending(st
   77.42   *  @v:        VCPU to which virtual IRQ should be sent
   77.43   *  @virq:     Virtual IRQ number (VIRQ_*)
   77.44   */
   77.45 -static inline void send_guest_virq(struct exec_domain *ed, int virq)
   77.46 +static inline void send_guest_virq(struct vcpu *v, int virq)
   77.47  {
   77.48 -    int port = ed->virq_to_evtchn[virq];
   77.49 +    int port = v->virq_to_evtchn[virq];
   77.50  
   77.51      if ( likely(port != 0) )
   77.52 -        evtchn_set_pending(ed, port);
   77.53 +        evtchn_set_pending(v, port);
   77.54  }
   77.55  
   77.56  /*
   77.57 @@ -68,9 +68,9 @@ static inline void send_guest_virq(struc
   77.58   *  @v:        VCPU to which physical IRQ should be sent
   77.59   *  @pirq:     Physical IRQ number
   77.60   */
   77.61 -static inline void send_guest_pirq(struct exec_domain *ed, int pirq)
   77.62 +static inline void send_guest_pirq(struct vcpu *v, int pirq)
   77.63  {
   77.64 -    evtchn_set_pending(ed, ed->domain->pirq_to_evtchn[pirq]);
   77.65 +    evtchn_set_pending(v, v->domain->pirq_to_evtchn[pirq]);
   77.66  }
   77.67  
   77.68  #define event_pending(_d)                                     \
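
The "strict order" comment in evtchn_set_pending() above is the heart of
event delivery: set the pending bit, then check the mask, then set the
per-vcpu selector, and raise the upcall flag only after all three. A
single-threaded toy model of that chain; the real code uses atomic
test_and_set_bit across multiple word-sized bitmaps and finishes by
unblocking or IPI'ing the vcpu, all of which is elided here (assumes
port < 32 so the selector bit is always bit 0):

#include <stdio.h>

struct shared_info { unsigned int evtchn_pending, evtchn_mask; };
struct vcpu_info   { unsigned int pending_sel, upcall_pending; };

static void set_pending(struct shared_info *s, struct vcpu_info *vi, int port)
{
    unsigned int bit = 1u << port;
    if (s->evtchn_pending & bit) return;  /* 1: already pending          */
    s->evtchn_pending |= bit;
    if (s->evtchn_mask & bit) return;     /* 2: masked, no notification  */
    if (vi->pending_sel & 1u) return;     /* 3: selector already set     */
    vi->pending_sel |= 1u;
    vi->upcall_pending = 1;               /* set only after evtchn-pend  */
}

int main(void)
{
    struct shared_info s = { 0, 0 };
    struct vcpu_info  vi = { 0, 0 };
    set_pending(&s, &vi, 3);
    printf("pending=%#x sel=%#x upcall=%u\n",
           s.evtchn_pending, vi.pending_sel, vi.upcall_pending);
    return 0;
}
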
    78.1 --- a/xen/include/xen/irq.h	Thu Jun 02 19:19:24 2005 +0000
    78.2 +++ b/xen/include/xen/irq.h	Thu Jun 02 21:05:33 2005 +0000
    78.3 @@ -67,9 +67,9 @@ extern hw_irq_controller no_irq_type;
    78.4  extern void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs);
    78.5  
    78.6  struct domain;
    78.7 -struct exec_domain;
    78.8 +struct vcpu;
    78.9  extern int pirq_guest_unmask(struct domain *p);
   78.10 -extern int pirq_guest_bind(struct exec_domain *p, int irq, int will_share);
   78.11 +extern int pirq_guest_bind(struct vcpu *p, int irq, int will_share);
   78.12  extern int pirq_guest_unbind(struct domain *p, int irq);
   78.13  extern int pirq_guest_bindable(int irq, int will_share);
   78.14  
    79.1 --- a/xen/include/xen/sched-if.h	Thu Jun 02 19:19:24 2005 +0000
    79.2 +++ b/xen/include/xen/sched-if.h	Thu Jun 02 21:05:33 2005 +0000
    79.3 @@ -13,8 +13,8 @@
    79.4  
    79.5  struct schedule_data {
    79.6      spinlock_t          schedule_lock;  /* spinlock protecting curr        */
    79.7 -    struct exec_domain *curr;           /* current task                    */
    79.8 -    struct exec_domain *idle;           /* idle task for this cpu          */
    79.8 +    struct vcpu        *curr;           /* current task                    */
    79.9 +    struct vcpu        *idle;           /* idle task for this cpu          */
   79.11      void               *sched_priv;
   79.12      struct ac_timer     s_timer;        /* scheduling timer                */
   79.13      unsigned long       tick;           /* current periodic 'tick'         */
   79.14 @@ -24,7 +24,7 @@ struct schedule_data {
   79.15  } __cacheline_aligned;
   79.16  
   79.17  struct task_slice {
   79.18 -    struct exec_domain *task;
   79.19 +    struct vcpu        *task;
   79.20      s_time_t            time;
   79.21  };
   79.22  
   79.23 @@ -33,12 +33,12 @@ struct scheduler {
   79.24      char *opt_name;         /* option name for this scheduler    */
   79.25      unsigned int sched_id;  /* ID for this scheduler             */
   79.26  
   79.27 -    int          (*alloc_task)     (struct exec_domain *);
   79.28 -    void         (*add_task)       (struct exec_domain *);
   79.29 +    int          (*alloc_task)     (struct vcpu *);
   79.30 +    void         (*add_task)       (struct vcpu *);
   79.31      void         (*free_task)      (struct domain *);
   79.32 -    void         (*rem_task)       (struct exec_domain *);
   79.33 -    void         (*sleep)          (struct exec_domain *);
   79.34 -    void         (*wake)           (struct exec_domain *);
   79.35 +    void         (*rem_task)       (struct vcpu *);
   79.36 +    void         (*sleep)          (struct vcpu *);
   79.37 +    void         (*wake)           (struct vcpu *);
   79.38      struct task_slice (*do_schedule) (s_time_t);
   79.39      int          (*control)        (struct sched_ctl_cmd *);
   79.40      int          (*adjdom)         (struct domain *,
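
Every hook in struct scheduler now takes a struct vcpu. A trimmed,
compilable mirror of the table with hypothetical no-op hooks, just to show
how a policy wires itself in (the field subset and all names beyond the
hunk are assumptions):

#include <stddef.h>

struct vcpu;  /* opaque, as forward-declared in xen/include/xen/types.h */

/* Trimmed mirror of struct scheduler above; the real table also has
 * free_task, rem_task, do_schedule, control, adjdom, and more. */
struct scheduler_min {
    const char  *name;
    const char  *opt_name;
    unsigned int sched_id;
    int  (*alloc_task)(struct vcpu *);
    void (*add_task)  (struct vcpu *);
    void (*sleep)     (struct vcpu *);
    void (*wake)      (struct vcpu *);
};

/* Hypothetical no-op hooks: a real policy would keep run queues here. */
static int  nop_alloc(struct vcpu *v) { (void)v; return 0; }
static void nop_hook (struct vcpu *v) { (void)v; }

static struct scheduler_min sched_nop = {
    .name = "no-op scheduler", .opt_name = "nop", .sched_id = 0,
    .alloc_task = nop_alloc, .add_task = nop_hook,
    .sleep = nop_hook, .wake = nop_hook,
};

int main(void) { return sched_nop.alloc_task(NULL); }
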
    80.1 --- a/xen/include/xen/sched.h	Thu Jun 02 19:19:24 2005 +0000
    80.2 +++ b/xen/include/xen/sched.h	Thu Jun 02 21:05:33 2005 +0000
    80.3 @@ -46,21 +46,21 @@ typedef struct event_channel_st
    80.4          } __attribute__ ((packed)) unbound; /* state == ECS_UNBOUND */
    80.5          struct {
    80.6              u16                 remote_port;
    80.7 -            struct exec_domain *remote_dom;
    80.8 +            struct vcpu        *remote_dom;
    80.9          } __attribute__ ((packed)) interdomain; /* state == ECS_INTERDOMAIN */
   80.10          u16 pirq; /* state == ECS_PIRQ */
   80.11          u16 virq; /* state == ECS_VIRQ */
   80.12 -        u32 ipi_edom; /* state == ECS_IPI */
   80.13 +        u32 ipi_vcpu; /* state == ECS_IPI */
   80.14      } u;
   80.15  } event_channel_t;
   80.16  
   80.17  int  init_event_channels(struct domain *d);
   80.18  void destroy_event_channels(struct domain *d);
   80.19 -int  init_exec_domain_event_channels(struct exec_domain *ed);
   80.20 +int  init_vcpu_event_channels(struct vcpu *v);
   80.21  
   80.22  #define CPUMAP_RUNANYWHERE 0xFFFFFFFF
   80.23  
   80.24 -struct exec_domain 
   80.25 +struct vcpu 
   80.26  {
   80.27      int              vcpu_id;
   80.28  
   80.29 @@ -69,7 +69,7 @@ struct exec_domain
   80.30      vcpu_info_t     *vcpu_info;
   80.31  
   80.32      struct domain   *domain;
   80.33 -    struct exec_domain *next_in_list;
   80.34 +    struct vcpu     *next_in_list;
   80.35  
   80.36      struct ac_timer  timer;         /* one-shot timer for timeout values */
   80.37      unsigned long    sleep_tick;    /* tick at which this vcpu started sleep */
   80.38 @@ -88,7 +88,7 @@ struct exec_domain
   80.39  
   80.40      cpumap_t         cpumap;        /* which cpus this domain can run on */
   80.41  
   80.42 -    struct arch_exec_domain arch;
   80.43 +    struct arch_vcpu arch;
   80.44  };
   80.45  
   80.46  /* Per-domain lock can be recursively acquired in fault handlers. */
   80.47 @@ -140,7 +140,7 @@ struct domain
   80.48  
   80.49      atomic_t         refcnt;
   80.50  
   80.51 -    struct exec_domain *exec_domain[MAX_VIRT_CPUS];
   80.52 +    struct vcpu *vcpu[MAX_VIRT_CPUS];
   80.53  
   80.54      /* Bitmask of CPUs on which this domain is running. */
   80.55      unsigned long cpuset;
   80.56 @@ -170,13 +170,13 @@ struct domain_setup_info
   80.57  #include <asm/uaccess.h> /* for KERNEL_DS */
   80.58  
   80.59  extern struct domain idle0_domain;
   80.60 -extern struct exec_domain idle0_exec_domain;
   80.61 +extern struct vcpu idle0_vcpu;
   80.62  
   80.63 -extern struct exec_domain *idle_task[NR_CPUS];
   80.64 +extern struct vcpu *idle_task[NR_CPUS];
   80.65  #define IDLE_DOMAIN_ID   (0x7FFFU)
   80.66  #define is_idle_task(_d) (test_bit(_DOMF_idle_domain, &(_d)->domain_flags))
   80.67  
   80.68 -struct exec_domain *alloc_exec_domain_struct(struct domain *d,
   80.69 +struct vcpu *alloc_vcpu_struct(struct domain *d,
   80.70                                               unsigned long vcpu);
   80.71  
   80.72  void free_domain_struct(struct domain *d);
   80.73 @@ -241,7 +241,7 @@ extern void domain_crash(void);
   80.74   */
   80.75  extern void domain_crash_synchronous(void) __attribute__((noreturn));
   80.76  
   80.77 -void new_thread(struct exec_domain *d,
   80.78 +void new_thread(struct vcpu *v,
   80.79                  unsigned long start_pc,
   80.80                  unsigned long start_stack,
   80.81                  unsigned long start_info);
   80.82 @@ -249,14 +249,14 @@ void new_thread(struct exec_domain *d,
   80.83  #define set_current_state(_s) do { current->state = (_s); } while (0)
   80.84  void scheduler_init(void);
   80.85  void schedulers_start(void);
   80.86 -void sched_add_domain(struct exec_domain *);
   80.87 -void sched_rem_domain(struct exec_domain *);
   80.88 +void sched_add_domain(struct vcpu *);
   80.89 +void sched_rem_domain(struct vcpu *);
   80.90  long sched_ctl(struct sched_ctl_cmd *);
   80.91  long sched_adjdom(struct sched_adjdom_cmd *);
   80.92  int  sched_id();
   80.93 -void domain_wake(struct exec_domain *d);
   80.94 -void domain_sleep_nosync(struct exec_domain *d);
   80.95 -void domain_sleep_sync(struct exec_domain *d);
   80.96 +void domain_wake(struct vcpu *v);
   80.97 +void domain_sleep_nosync(struct vcpu *v);
   80.98 +void domain_sleep_sync(struct vcpu *v);
   80.99  
  80.100  /*
  80.101   * Force loading of currently-executing domain state on the specified set
  80.102 @@ -266,14 +266,14 @@ extern void sync_lazy_execstate_cpuset(u
  80.103  extern void sync_lazy_execstate_all(void);
  80.104  extern int __sync_lazy_execstate(void);
  80.105  
  80.106 -/* Called by the scheduler to switch to another exec_domain. */
  80.107 +/* Called by the scheduler to switch to another vcpu. */
  80.108  extern void context_switch(
  80.109 -    struct exec_domain *prev, 
  80.110 -    struct exec_domain *next);
  80.111 +    struct vcpu *prev, 
  80.112 +    struct vcpu *next);
  80.113  
  80.114 -/* Called by the scheduler to continue running the current exec_domain. */
  80.115 +/* Called by the scheduler to continue running the current vcpu. */
  80.116  extern void continue_running(
  80.117 -    struct exec_domain *same);
  80.118 +    struct vcpu *same);
  80.119  
  80.120  void domain_init(void);
  80.121  
  80.122 @@ -322,8 +322,8 @@ extern struct domain *domain_list;
  80.123  #define for_each_domain(_d) \
  80.124   for ( (_d) = domain_list; (_d) != NULL; (_d) = (_d)->next_in_list )
  80.125  
  80.126 -#define for_each_exec_domain(_d,_ed) \
  80.127 - for ( (_ed) = (_d)->exec_domain[0]; \
  80.128 +#define for_each_vcpu(_d,_ed) \
  80.129 + for ( (_ed) = (_d)->vcpu[0]; \
  80.130         (_ed) != NULL;                \
  80.131         (_ed) = (_ed)->next_in_list )
  80.132  
  80.133 @@ -383,24 +383,24 @@ extern struct domain *domain_list;
  80.134  #define _DOMF_dying            6
  80.135  #define DOMF_dying             (1UL<<_DOMF_dying)
  80.136  
  80.137 -static inline int domain_runnable(struct exec_domain *ed)
  80.138 +static inline int domain_runnable(struct vcpu *v)
  80.139  {
  80.140 -    return ( (atomic_read(&ed->pausecnt) == 0) &&
  80.141 -             !(ed->vcpu_flags & (VCPUF_blocked|VCPUF_ctrl_pause)) &&
  80.142 -             !(ed->domain->domain_flags & (DOMF_shutdown|DOMF_shuttingdown)) );
  80.143 +    return ( (atomic_read(&v->pausecnt) == 0) &&
  80.144 +             !(v->vcpu_flags & (VCPUF_blocked|VCPUF_ctrl_pause)) &&
  80.145 +             !(v->domain->domain_flags & (DOMF_shutdown|DOMF_shuttingdown)) );
  80.146  }
  80.147  
  80.148 -void exec_domain_pause(struct exec_domain *ed);
  80.149 +void vcpu_pause(struct vcpu *v);
  80.150  void domain_pause(struct domain *d);
  80.151 -void exec_domain_unpause(struct exec_domain *ed);
  80.152 +void vcpu_unpause(struct vcpu *v);
  80.153  void domain_unpause(struct domain *d);
  80.154  void domain_pause_by_systemcontroller(struct domain *d);
  80.155  void domain_unpause_by_systemcontroller(struct domain *d);
  80.156  
  80.157 -static inline void exec_domain_unblock(struct exec_domain *ed)
  80.158 +static inline void vcpu_unblock(struct vcpu *v)
  80.159  {
  80.160 -    if ( test_and_clear_bit(_VCPUF_blocked, &ed->vcpu_flags) )
  80.161 -        domain_wake(ed);
  80.162 +    if ( test_and_clear_bit(_VCPUF_blocked, &v->vcpu_flags) )
  80.163 +        domain_wake(v);
  80.164  }
  80.165  
  80.166  #define IS_PRIV(_d)                                         \
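
The renamed for_each_vcpu walks a domain's VCPUs: the head lives in
d->vcpu[0] and the chain is threaded through next_in_list, as the struct
above shows. A self-contained sketch with stub types; the iterator
parameter is spelled _v here, where the tree at this point still says _ed:

#include <stdio.h>
#include <stddef.h>

#define MAX_VIRT_CPUS 4

/* Minimal stand-ins for the fields the macro touches. */
struct vcpu   { int vcpu_id; struct vcpu *next_in_list; };
struct domain { struct vcpu *vcpu[MAX_VIRT_CPUS]; };

/* Same expansion as the macro in the hunk above. */
#define for_each_vcpu(_d, _v) \
    for ((_v) = (_d)->vcpu[0]; (_v) != NULL; (_v) = (_v)->next_in_list)

int main(void)
{
    struct vcpu v1 = { 1, NULL }, v0 = { 0, &v1 };
    struct domain d = { { &v0, &v1, NULL, NULL } };
    struct vcpu *v;
    for_each_vcpu(&d, v)
        printf("vcpu %d\n", v->vcpu_id);   /* prints vcpu 0, vcpu 1 */
    return 0;
}
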
    81.1 --- a/xen/include/xen/time.h	Thu Jun 02 19:19:24 2005 +0000
    81.2 +++ b/xen/include/xen/time.h	Thu Jun 02 21:05:33 2005 +0000
    81.3 @@ -54,7 +54,7 @@ s_time_t get_s_time(void);
    81.4  #define MILLISECS(_ms)  (((s_time_t)(_ms)) * 1000000ULL )
    81.5  #define MICROSECS(_us)  (((s_time_t)(_us)) * 1000ULL )
    81.6  
    81.7 -extern void update_dom_time(struct exec_domain *ed);
    81.8 +extern void update_dom_time(struct vcpu *v);
    81.9  extern void do_settime(
   81.10      unsigned long secs, unsigned long usecs, u64 system_time_base);
   81.11  
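
For reference, s_time_t counts nanoseconds, which is what MILLISECS() and
MICROSECS() scale into. A tiny standalone check; the int64_t typedef is an
assumption standing in for the header's own definition:

#include <stdint.h>
#include <stdio.h>

typedef int64_t s_time_t;  /* system time in nanoseconds */

#define MILLISECS(_ms)  (((s_time_t)(_ms)) * 1000000ULL )
#define MICROSECS(_us)  (((s_time_t)(_us)) * 1000ULL )

int main(void)
{
    /* A 10ms scheduling quantum expressed as an s_time_t delta. */
    s_time_t quantum = MILLISECS(10);
    printf("%lld ns = %lld us\n",
           (long long)quantum, (long long)(quantum / MICROSECS(1)));
    return 0;
}
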
    82.1 --- a/xen/include/xen/types.h	Thu Jun 02 19:19:24 2005 +0000
    82.2 +++ b/xen/include/xen/types.h	Thu Jun 02 21:05:33 2005 +0000
    82.3 @@ -51,6 +51,6 @@ typedef         __u64           uint64_t
    82.4  
    82.5  
    82.6  struct domain;
    82.7 -struct exec_domain;
    82.8 +struct vcpu;
    82.9  
   82.10  #endif /* __TYPES_H__ */