ia64/xen-unstable

changeset 5986:1ec2225aa8c6

First step in removing CONFIG_VTI, toward finally supporting xen0+xenU+xenVTI at runtime. This changeset mainly addresses common code such as domain creation and the RID allocation policy, including:

- Boot-time VTI feature detection
- Unified arch_do_createdomain, new_thread, arch_set_info_guest, and construct_dom0. The function-level CONFIG_VTI #ifdefs have been removed, with several specific lines still protected by CONFIG_VTI; as more features are cleaned up later, these remaining lines will be freed up gradually (see the runtime-detection sketch below).
- Use the same RID allocation policy, including for physical-mode emulation
- Remove the duplicated definition rr_t in favor of the existing ia64_rr (see the union sketch below).

Verified to break nothing. ;-)
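
The overall pattern -- probe the VTI capability once at boot, then branch at
runtime instead of at compile time -- can be sketched as below. This is a
minimal, self-contained illustration, not Xen code: probe_vti(), struct
guest_ctx, and GCF_VMX_GUEST are hypothetical stand-ins for the real
identify_vmx_feature(), vcpu_guest_context, and VGCF_VMX_GUEST seen in the
diff.

#include <stdio.h>
#include <stdbool.h>

static bool vmx_enabled;                /* set once at boot, read at runtime */

/* Boot-time probe; hypothetical stand-in for identify_vmx_feature(). */
static void probe_vti(void)
{
        vmx_enabled = true;             /* pretend PAL reported VT-i support */
}

struct guest_ctx { unsigned long flags; };      /* hypothetical context */
#define GCF_VMX_GUEST 0x1UL

/* One creation path for all domain types: the VTI-specific work becomes a
 * runtime branch rather than a compile-time #ifdef. */
static int create_domain(struct guest_ctx *c)
{
        /* ... setup common to xen0/xenU/VTI domains ... */
        if (c->flags & GCF_VMX_GUEST) {
                if (!vmx_enabled) {
                        fprintf(stderr, "No VMX hardware feature for vmx domain.\n");
                        return -1;
                }
                /* ... VTI-only setup: vTLB, platform emulation, ... */
        }
        return 0;
}

int main(void)
{
        probe_vti();
        struct guest_ctx c = { .flags = GCF_VMX_GUEST };
        return create_domain(&c) ? 1 : 0;
}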
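
Similarly, the rr_t cleanup works because a single union overlay of the
64-bit region register serves both uses (raw rrval plus typed fields). The
sketch below follows the architected rr field layout (ve at bit 0, ps at
bits 2-7, rid at bits 8-31); take the exact widths as an assumption here
rather than a copy of the Xen header.

#include <stdio.h>

typedef union {                 /* illustrative stand-in for ia64_rr */
        unsigned long rrval;    /* raw 64-bit register image (was rr_t.value) */
        struct {
                unsigned long ve  :  1;  /* VHPT walker enable */
                unsigned long rsv :  1;  /* reserved */
                unsigned long ps  :  6;  /* log2 page size */
                unsigned long rid : 24;  /* region identifier */
                unsigned long pad : 32;
        } bits;
} demo_rr;

int main(void)
{
        demo_rr rr = { .rrval = 0 };
        rr.bits.ve  = 1;
        rr.bits.ps  = 12;       /* 4K pages, like EMUL_PHY_PAGE_SHIFT */
        rr.bits.rid = 0x1234;
        printf("rrval = 0x%lx\n", rr.rrval);    /* one raw value, typed fields */
        return 0;
}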

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author fred@xuni-t01.sc.intel.com
date Fri Aug 19 21:19:39 2005 -0800 (2005-08-19)
parents 97675c2dbb40
children 5f1ed597f107
files xen/arch/ia64/Makefile xen/arch/ia64/domain.c xen/arch/ia64/linux-xen/setup.c xen/arch/ia64/regionreg.c xen/arch/ia64/vcpu.c xen/arch/ia64/vmmu.c xen/arch/ia64/vmx_init.c xen/arch/ia64/vmx_phy_mode.c xen/arch/ia64/vmx_vcpu.c xen/arch/ia64/vtlb.c xen/arch/ia64/xenmem.c xen/arch/ia64/xensetup.c xen/include/asm-ia64/domain.h xen/include/asm-ia64/linux-xen/asm/pal.h xen/include/asm-ia64/mmu_context.h xen/include/asm-ia64/privop.h xen/include/asm-ia64/regionreg.h xen/include/asm-ia64/tlb.h xen/include/asm-ia64/vmmu.h xen/include/asm-ia64/vmx.h xen/include/asm-ia64/vmx_vcpu.h xen/include/asm-ia64/vmx_vpd.h xen/include/asm-ia64/xenprocessor.h
line diff
     1.1 --- a/xen/arch/ia64/Makefile	Fri Aug 19 20:45:43 2005 -0800
     1.2 +++ b/xen/arch/ia64/Makefile	Fri Aug 19 21:19:39 2005 -0800
     1.3 @@ -14,8 +14,11 @@ OBJS = xensetup.o setup.o time.o irq.o i
     1.4  	irq_ia64.o irq_lsapic.o vhpt.o xenasm.o hyperprivop.o dom_fw.o \
     1.5  	grant_table.o sn_console.o
     1.6  
     1.7 +# TMP holder to contain *.o moved out of CONFIG_VTI
     1.8 +OBJS += vmx_init.o
     1.9 +
    1.10  ifeq ($(CONFIG_VTI),y)
    1.11 -OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \
    1.12 +OBJS += vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o\
    1.13  	vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
    1.14  	vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o vmx_support.o pal_emul.o
    1.15  endif
     2.1 --- a/xen/arch/ia64/domain.c	Fri Aug 19 20:45:43 2005 -0800
     2.2 +++ b/xen/arch/ia64/domain.c	Fri Aug 19 21:19:39 2005 -0800
     2.3 @@ -38,25 +38,17 @@
     2.4  
     2.5  #include <asm/vcpu.h>   /* for function declarations */
     2.6  #include <public/arch-ia64.h>
     2.7 -#ifdef CONFIG_VTI
     2.8  #include <asm/vmx.h>
     2.9  #include <asm/vmx_vcpu.h>
    2.10  #include <asm/vmx_vpd.h>
    2.11  #include <asm/pal.h>
    2.12  #include <public/io/ioreq.h>
    2.13 -#endif // CONFIG_VTI
    2.14  
    2.15  #define CONFIG_DOMAIN0_CONTIGUOUS
    2.16  unsigned long dom0_start = -1L;
    2.17 -#ifdef CONFIG_VTI
    2.18  unsigned long dom0_size = 512*1024*1024; //FIXME: Should be configurable
    2.19  //FIXME: alignment should be 256MB, lest Linux use a 256MB page size
    2.20  unsigned long dom0_align = 256*1024*1024;
    2.21 -#else // CONFIG_VTI
    2.22 -unsigned long dom0_size = 512*1024*1024; //FIXME: Should be configurable
    2.23 -//FIXME: alignment should be 256MB, lest Linux use a 256MB page size
    2.24 -unsigned long dom0_align = 64*1024*1024;
    2.25 -#endif // CONFIG_VTI
    2.26  #ifdef DOMU_BUILD_STAGING
    2.27  unsigned long domU_staging_size = 32*1024*1024; //FIXME: Should be configurable
    2.28  unsigned long domU_staging_start;
    2.29 @@ -187,60 +179,6 @@ static void init_switch_stack(struct vcp
    2.30  	memset(v->arch._thread.fph,0,sizeof(struct ia64_fpreg)*96);
    2.31  }
    2.32  
    2.33 -#ifdef CONFIG_VTI
    2.34 -void arch_do_createdomain(struct vcpu *v)
    2.35 -{
    2.36 -	struct domain *d = v->domain;
    2.37 -	struct thread_info *ti = alloc_thread_info(v);
    2.38 -
    2.39 -	/* Clear thread_info to clear some important fields, like preempt_count */
    2.40 -	memset(ti, 0, sizeof(struct thread_info));
    2.41 -	init_switch_stack(v);
    2.42 -
    2.43 - 	/* Shared info area is required to be allocated at domain
    2.44 - 	 * creation, since control panel will write some I/O info
    2.45 - 	 * between front end and back end to that area. However for
    2.46 - 	 * vmx domain, our design is to let domain itself to allcoate
    2.47 - 	 * shared info area, to keep machine page contiguous. So this
    2.48 - 	 * page will be released later when domainN issues request
    2.49 - 	 * after up.
    2.50 - 	 */
    2.51 - 	d->shared_info = (void *)alloc_xenheap_page();
    2.52 -	/* Now assume all vcpu info and event indicators can be
    2.53 -	 * held in one shared page. Definitely later we need to
    2.54 -	 * consider more about it
    2.55 -	 */
    2.56 -
    2.57 -	memset(d->shared_info, 0, PAGE_SIZE);
    2.58 -	d->shared_info->vcpu_data[v->vcpu_id].arch.privregs = 
    2.59 -			alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
    2.60 -	printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[0].arch.privregs);
    2.61 -	memset(d->shared_info->vcpu_data[v->vcpu_id].arch.privregs, 0, PAGE_SIZE);
    2.62 -	v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
    2.63 -	/* Mask all events, and specific port will be unmasked
    2.64 -	 * when customer subscribes to it.
    2.65 -	 */
    2.66 -	if(v == d->vcpu[0]) {
    2.67 -	    memset(&d->shared_info->evtchn_mask[0], 0xff,
    2.68 -		sizeof(d->shared_info->evtchn_mask));
    2.69 -	}
    2.70 -
    2.71 -	/* Allocate per-domain vTLB and vhpt */
    2.72 -	v->arch.vtlb = init_domain_tlb(v);
    2.73 -
    2.74 -	/* Physical->machine page table will be allocated when 
    2.75 -	 * final setup, since we have no the maximum pfn number in 
    2.76 -	 * this stage
    2.77 -	 */
    2.78 -
    2.79 -	/* FIXME: This is identity mapped address for xenheap. 
    2.80 -	 * Do we need it at all?
    2.81 -	 */
    2.82 -	d->xen_vastart = XEN_START_ADDR;
    2.83 -	d->xen_vaend = XEN_END_ADDR;
    2.84 -	d->arch.breakimm = 0x1000;
    2.85 -}
    2.86 -#else // CONFIG_VTI
    2.87  void arch_do_createdomain(struct vcpu *v)
    2.88  {
    2.89  	struct domain *d = v->domain;
    2.90 @@ -263,11 +201,26 @@ void arch_do_createdomain(struct vcpu *v
    2.91  	v->vcpu_info = &(d->shared_info->vcpu_data[0]);
    2.92  
    2.93  	d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
    2.94 -	if ((d->arch.metaphysical_rr0 = allocate_metaphysical_rr0()) == -1UL)
    2.95 +
    2.96 +#ifdef CONFIG_VTI
    2.97 +	/* Per-domain vTLB and vhpt implementation. Now vmx domain will stick
    2.98 +	 * to this solution. Maybe it can be deferred until we know the
    2.99 +	 * created one is a vmx domain */
   2.100 +	v->arch.vtlb = init_domain_tlb(v);
   2.101 +#endif
   2.102 +
   2.103 +	/* We may also need emulation rid for region4, though it's unlikely
   2.104 +	 * to see guest issue uncacheable access in metaphysical mode. But
   2.105 +	 * keeping such info here may be more sane.
   2.106 +	 */
   2.107 +	if (((d->arch.metaphysical_rr0 = allocate_metaphysical_rr()) == -1UL)
   2.108 +	 || ((d->arch.metaphysical_rr4 = allocate_metaphysical_rr()) == -1UL))
   2.109  		BUG();
   2.110  	VCPU(v, metaphysical_mode) = 1;
   2.111  	v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
   2.112 +	v->arch.metaphysical_rr4 = d->arch.metaphysical_rr4;
   2.113  	v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
   2.114 +	v->arch.metaphysical_saved_rr4 = d->arch.metaphysical_rr4;
   2.115  #define DOMAIN_RID_BITS_DEFAULT 18
   2.116  	if (!allocate_rid_range(d,DOMAIN_RID_BITS_DEFAULT)) // FIXME
   2.117  		BUG();
   2.118 @@ -292,7 +245,6 @@ void arch_do_createdomain(struct vcpu *v
   2.119  		return -ENOMEM;
   2.120  	}
   2.121  }
   2.122 -#endif // CONFIG_VTI
   2.123  
   2.124  void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c)
   2.125  {
   2.126 @@ -312,16 +264,28 @@ void arch_getdomaininfo_ctxt(struct vcpu
   2.127  	c->shared = v->domain->shared_info->arch;
   2.128  }
   2.129  
   2.130 -#ifndef CONFIG_VTI
   2.131  int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c)
   2.132  {
   2.133  	struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
   2.134 +	struct domain *d = v->domain;
   2.135 +	int i, rc, ret;
   2.136 +	unsigned long progress = 0;
   2.137  
   2.138  	printf("arch_set_info_guest\n");
   2.139 +	if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
   2.140 +            return 0;
   2.141 +
   2.142 +	if (c->flags & VGCF_VMX_GUEST) {
   2.143 +	    if (!vmx_enabled) {
   2.144 +		printk("No VMX hardware feature for vmx domain.\n");
   2.145 +		return -EINVAL;
   2.146 +	    }
   2.147 +
   2.148 +	    vmx_setup_platform(v, c);
   2.149 +	}
   2.150 +
   2.151  	*regs = c->regs;
   2.152 -	regs->cr_ipsr = IA64_PSR_IT|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IC|IA64_PSR_I|IA64_PSR_DFH|IA64_PSR_BN|IA64_PSR_SP|IA64_PSR_DI;
   2.153 -	regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
   2.154 -	regs->ar_rsc |= (2 << 2); /* force PL2/3 */
   2.155 +	new_thread(v, regs->cr_iip, 0, 0);
   2.156  
   2.157   	v->vcpu_info->arch.evtchn_vector = c->vcpu.evtchn_vector;
   2.158  	if ( c->vcpu.privregs && copy_from_user(v->vcpu_info->arch.privregs,
   2.159 @@ -330,100 +294,13 @@ int arch_set_info_guest(struct vcpu *v, 
   2.160  	    return -EFAULT;
   2.161  	}
   2.162  
   2.163 -	init_all_rr(v);
   2.164 +	v->arch.domain_itm_last = -1L;
   2.165 +	d->shared_info->arch = c->shared;
   2.166  
   2.167 -	// this should be in userspace
   2.168 -	regs->r28 = dom_fw_setup(v->domain,"nomca nosmp xencons=tty0 console=tty0 root=/dev/hda1",256L);  //FIXME
   2.169 -	v->arch.domain_itm_last = -1L;
   2.170 - 	VCPU(v, banknum) = 1;
   2.171 - 	VCPU(v, metaphysical_mode) = 1;
   2.172 -
   2.173 -	v->domain->shared_info->arch = c->shared;
   2.174 +	/* Don't redo final setup */
   2.175 +	set_bit(_VCPUF_initialised, &v->vcpu_flags);
   2.176  	return 0;
   2.177  }
   2.178 -#else // CONFIG_VTI
   2.179 -int arch_set_info_guest(
   2.180 -    struct vcpu *v, struct vcpu_guest_context *c)
   2.181 -{
   2.182 -    struct domain *d = v->domain;
   2.183 -    int i, rc, ret;
   2.184 -    unsigned long progress = 0;
   2.185 -    shared_iopage_t *sp;
   2.186 -
   2.187 -    if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
   2.188 -        return 0;
   2.189 -
   2.190 -    /* Lazy FP not implemented yet */
   2.191 -    clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
   2.192 -    if ( c->flags & VGCF_FPU_VALID )
   2.193 -        set_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
   2.194 -
   2.195 -    /* Sync d/i cache conservatively, after domain N is loaded */
   2.196 -    ret = ia64_pal_cache_flush(3, 0, &progress, NULL);
   2.197 -    if (ret != PAL_STATUS_SUCCESS)
   2.198 -            panic("PAL CACHE FLUSH failed for dom[%d].\n",
   2.199 -		v->domain->domain_id);
   2.200 -    DPRINTK("Sync i/d cache for dom%d image SUCC\n",
   2.201 -		v->domain->domain_id);
   2.202 -
   2.203 -    /* Physical mode emulation initialization, including
   2.204 -     * emulation ID allcation and related memory request
   2.205 -     */
   2.206 -    physical_mode_init(v);
   2.207 -
   2.208 -    /* FIXME: only support PMT table continuously by far */
   2.209 -    d->arch.pmt = __va(c->pt_base);
   2.210 -    d->arch.max_pfn = c->pt_max_pfn;
   2.211 -    d->arch.vmx_platform.shared_page_va = __va(c->share_io_pg);
   2.212 -    sp = get_sp(d);
   2.213 -    memset((char *)sp,0,PAGE_SIZE);
   2.214 -    /* FIXME: temp due to old CP */
   2.215 -    sp->sp_global.eport = 2;
   2.216 -#ifdef V_IOSAPIC_READY
   2.217 -    sp->vcpu_number = 1;
   2.218 -#endif
   2.219 -    /* TEMP */
   2.220 -    d->arch.vmx_platform.pib_base = 0xfee00000UL;
   2.221 -    
   2.222 -
   2.223 -    if (c->flags & VGCF_VMX_GUEST) {
   2.224 -	if (!vmx_enabled)
   2.225 -	    panic("No VMX hardware feature for vmx domain.\n");
   2.226 -
   2.227 -	vmx_final_setup_domain(d);
   2.228 -
   2.229 -	/* One more step to enable interrupt assist */
   2.230 -	set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
   2.231 -    }
   2.232 -
   2.233 -    vlsapic_reset(v);
   2.234 -    vtm_init(v);
   2.235 -
   2.236 -    /* Only open one port for I/O and interrupt emulation */
   2.237 -    if (v == d->vcpu[0]) {
   2.238 -	memset(&d->shared_info->evtchn_mask[0], 0xff,
   2.239 -		sizeof(d->shared_info->evtchn_mask));
   2.240 -	clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
   2.241 -    }
   2.242 -    /* Setup domain context. Actually IA-64 is a bit different with
   2.243 -     * x86, with almost all system resources better managed by HV
   2.244 -     * directly. CP only needs to provide start IP of guest, which
   2.245 -     * ideally is the load address of guest Firmware.
   2.246 -     */
   2.247 -    new_thread(v, c->guest_iip, 0, 0);
   2.248 -
   2.249 -
   2.250 -    d->xen_vastart = XEN_START_ADDR;
   2.251 -    d->xen_vaend = XEN_END_ADDR;
   2.252 -    d->arch.breakimm = 0x1000 + d->domain_id;
   2.253 -    v->arch._thread.on_ustack = 0;
   2.254 -
   2.255 -    /* Don't redo final setup */
   2.256 -    set_bit(_VCPUF_initialised, &v->vcpu_flags);
   2.257 -
   2.258 -    return 0;
   2.259 -}
   2.260 -#endif // CONFIG_VTI
   2.261  
   2.262  void arch_do_boot_vcpu(struct vcpu *v)
   2.263  {
   2.264 @@ -443,7 +320,8 @@ void domain_relinquish_resources(struct 
   2.265  	printf("domain_relinquish_resources: not implemented\n");
   2.266  }
   2.267  
   2.268 -#ifdef CONFIG_VTI
   2.269 +// heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
   2.270 +// and linux/arch/ia64/kernel/process.c:kernel_thread()
   2.271  void new_thread(struct vcpu *v,
   2.272                  unsigned long start_pc,
   2.273                  unsigned long start_stack,
   2.274 @@ -453,7 +331,6 @@ void new_thread(struct vcpu *v,
   2.275  	struct pt_regs *regs;
   2.276  	struct ia64_boot_param *bp;
   2.277  	extern char saved_command_line[];
   2.278 -	//char *dom0_cmdline = "BOOT_IMAGE=scsi0:\EFI\redhat\xenlinux nomca root=/dev/sdb1 ro";
   2.279  
   2.280  
   2.281  #ifdef CONFIG_DOMAIN0_CONTIGUOUS
   2.282 @@ -471,61 +348,31 @@ void new_thread(struct vcpu *v,
   2.283  		regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2
   2.284  	}
   2.285  	regs->cr_iip = start_pc;
   2.286 -	regs->cr_ifs = 0; /* why? - matthewc */
   2.287 +	regs->cr_ifs = 1UL << 63; /* or clear? */
   2.288  	regs->ar_fpsr = FPSR_DEFAULT;
   2.289 -	if (VMX_DOMAIN(v)) {
   2.290 -		vmx_init_all_rr(v);
   2.291 -	} else
   2.292 -		init_all_rr(v);
   2.293  
   2.294  	if (VMX_DOMAIN(v)) {
   2.295 -		if (d == dom0) {
   2.296 +#ifdef CONFIG_VTI
   2.297 +		vmx_init_all_rr(v);
   2.298 +		if (d == dom0)
   2.299  		    VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
   2.300 -		    printk("new_thread, done with dom_fw_setup\n");
   2.301 -		}
   2.302  		/* Virtual processor context setup */
   2.303  		VMX_VPD(v, vpsr) = IA64_PSR_BN;
   2.304  		VPD_CR(v, dcr) = 0;
   2.305 +#endif
   2.306  	} else {
   2.307 -		regs->r28 = dom_fw_setup(d,saved_command_line,256L);
   2.308 +		init_all_rr(v);
   2.309 +		if (d == dom0) 
   2.310 +		    regs->r28 = dom_fw_setup(d,saved_command_line,256L);
   2.311 +		else {
   2.312 +		    regs->ar_rsc |= (2 << 2); /* force PL2/3 */
   2.313 +		    regs->r28 = dom_fw_setup(d,"nomca nosmp xencons=tty0 console=tty0 root=/dev/hda1",256L);  //FIXME
   2.314 +		}
   2.315  		VCPU(v, banknum) = 1;
   2.316  		VCPU(v, metaphysical_mode) = 1;
   2.317  		d->shared_info->arch.flags = (d == dom0) ? (SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN) : 0;
   2.318  	}
   2.319  }
   2.320 -#else // CONFIG_VTI
   2.321 -
   2.322 -// heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
   2.323 -// and linux/arch/ia64/kernel/process.c:kernel_thread()
   2.324 -void new_thread(struct vcpu *v,
   2.325 -	            unsigned long start_pc,
   2.326 -	            unsigned long start_stack,
   2.327 -	            unsigned long start_info)
   2.328 -{
   2.329 -	struct domain *d = v->domain;
   2.330 -	struct pt_regs *regs;
   2.331 -	struct ia64_boot_param *bp;
   2.332 -	extern char saved_command_line[];
   2.333 -
   2.334 -#ifdef CONFIG_DOMAIN0_CONTIGUOUS
   2.335 -	if (d == dom0) start_pc += dom0_start;
   2.336 -#endif
   2.337 -
   2.338 -	regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
   2.339 -	regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
   2.340 -		| IA64_PSR_BITS_TO_SET | IA64_PSR_BN
   2.341 -		& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS);
   2.342 -	regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2
   2.343 -	regs->cr_iip = start_pc;
   2.344 -	regs->cr_ifs = 1UL << 63;
   2.345 -	regs->ar_fpsr = FPSR_DEFAULT;
   2.346 -	init_all_rr(v);
   2.347 -	regs->r28 = dom_fw_setup(d,saved_command_line,256L);  //FIXME
   2.348 -	VCPU(v, banknum) = 1;
   2.349 -	VCPU(v, metaphysical_mode) = 1;
   2.350 -	d->shared_info->arch.flags = (d == dom0) ? (SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN) : 0;
   2.351 -}
   2.352 -#endif // CONFIG_VTI
   2.353  
   2.354  static struct page * map_new_domain0_page(unsigned long mpaddr)
   2.355  {
   2.356 @@ -903,44 +750,6 @@ domU_staging_write_32(unsigned long at, 
   2.357  }
   2.358  #endif
   2.359  
   2.360 -#ifdef CONFIG_VTI
   2.361 -/* Up to whether domain is vmx one, different context may be setup
   2.362 - * here.
   2.363 - */
   2.364 -void
   2.365 -post_arch_do_create_domain(struct vcpu *v, int vmx_domain)
   2.366 -{
   2.367 -    struct domain *d = v->domain;
   2.368 -
   2.369 -    if (!vmx_domain) {
   2.370 -	d->shared_info = (void*)alloc_xenheap_page();
   2.371 -	if (!d->shared_info)
   2.372 -		panic("Allocate share info for non-vmx domain failed.\n");
   2.373 -	d->shared_info_va = 0xfffd000000000000;
   2.374 -
   2.375 -	printk("Build shared info for non-vmx domain\n");
   2.376 -	build_shared_info(d);
   2.377 -	/* Setup start info area */
   2.378 -    }
   2.379 -}
   2.380 -
   2.381 -/* For VMX domain, this is invoked when kernel model in domain
   2.382 - * request actively
   2.383 - */
   2.384 -void build_shared_info(struct domain *d)
   2.385 -{
   2.386 -    int i;
   2.387 -
   2.388 -    /* Set up shared-info area. */
   2.389 -    update_dom_time(d);
   2.390 -
   2.391 -    /* Mask all upcalls... */
   2.392 -    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   2.393 -        d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
   2.394 -
   2.395 -    /* ... */
   2.396 -}
   2.397 -
   2.398  /*
   2.399   * Domain 0 has direct access to all devices absolutely. However
   2.400   * the major point of this stub here, is to allow alloc_dom_mem
   2.401 @@ -959,182 +768,12 @@ int construct_dom0(struct domain *d,
   2.402  	               unsigned long initrd_start, unsigned long initrd_len,
   2.403  	               char *cmdline)
   2.404  {
   2.405 -    char *dst;
   2.406 -    int i, rc;
   2.407 -    unsigned long pfn, mfn;
   2.408 -    unsigned long nr_pt_pages;
   2.409 -    unsigned long count;
   2.410 -    unsigned long alloc_start, alloc_end;
   2.411 -    struct pfn_info *page = NULL;
   2.412 -    start_info_t *si;
   2.413 -    struct vcpu *v = d->vcpu[0];
   2.414 -    struct domain_setup_info dsi;
   2.415 -    unsigned long p_start;
   2.416 -    unsigned long pkern_start;
   2.417 -    unsigned long pkern_entry;
   2.418 -    unsigned long pkern_end;
   2.419 -    unsigned long ret;
   2.420 -    unsigned long progress = 0;
   2.421 -
   2.422 -//printf("construct_dom0: starting\n");
   2.423 -    /* Sanity! */
   2.424 -#ifndef CLONE_DOMAIN0
   2.425 -    if ( d != dom0 ) 
   2.426 -        BUG();
   2.427 -    if ( test_bit(_DOMF_constructed, &d->domain_flags) ) 
   2.428 -        BUG();
   2.429 -#endif
   2.430 -
   2.431 -    printk("##Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
   2.432 -    memset(&dsi, 0, sizeof(struct domain_setup_info));
   2.433 -
   2.434 -    printk("*** LOADING DOMAIN 0 ***\n");
   2.435 -
   2.436 -    alloc_start = dom0_start;
   2.437 -    alloc_end = dom0_start + dom0_size;
   2.438 -    d->tot_pages = d->max_pages = (alloc_end - alloc_start)/PAGE_SIZE;
   2.439 -    image_start = __va(ia64_boot_param->initrd_start);
   2.440 -    image_len = ia64_boot_param->initrd_size;
   2.441 -
   2.442 -    dsi.image_addr = (unsigned long)image_start;
   2.443 -    dsi.image_len  = image_len;
   2.444 -    rc = parseelfimage(&dsi);
   2.445 -    if ( rc != 0 )
   2.446 -        return rc;
   2.447 -
   2.448 -    /* Temp workaround */
   2.449 -    if (running_on_sim)
   2.450 -	dsi.xen_section_string = (char *)1;
   2.451 -
   2.452 -    if ((!vmx_enabled) && !dsi.xen_section_string) {
   2.453 -	printk("Lack of hardware support for unmodified vmx dom0\n");
   2.454 -	panic("");
   2.455 -    }
   2.456 -
   2.457 -    if (vmx_enabled && !dsi.xen_section_string) {
   2.458 -	printk("Dom0 is vmx domain!\n");
   2.459 -	vmx_dom0 = 1;
   2.460 -    }
   2.461 -
   2.462 -    p_start = dsi.v_start;
   2.463 -    pkern_start = dsi.v_kernstart;
   2.464 -    pkern_end = dsi.v_kernend;
   2.465 -    pkern_entry = dsi.v_kernentry;
   2.466 -
   2.467 -    printk("p_start=%lx, pkern_start=%lx, pkern_end=%lx, pkern_entry=%lx\n",
   2.468 -	p_start,pkern_start,pkern_end,pkern_entry);
   2.469 -
   2.470 -    if ( (p_start & (PAGE_SIZE-1)) != 0 )
   2.471 -    {
   2.472 -        printk("Initial guest OS must load to a page boundary.\n");
   2.473 -        return -EINVAL;
   2.474 -    }
   2.475 -
   2.476 -    printk("METAPHYSICAL MEMORY ARRANGEMENT:\n"
   2.477 -           " Kernel image:  %lx->%lx\n"
   2.478 -           " Entry address: %lx\n"
   2.479 -           " Init. ramdisk:   (NOT IMPLEMENTED YET)\n",
   2.480 -           pkern_start, pkern_end, pkern_entry);
   2.481 -
   2.482 -    if ( (pkern_end - pkern_start) > (d->max_pages * PAGE_SIZE) )
   2.483 -    {
   2.484 -        printk("Initial guest OS requires too much space\n"
   2.485 -               "(%luMB is greater than %luMB limit)\n",
   2.486 -               (pkern_end-pkern_start)>>20, (d->max_pages<<PAGE_SHIFT)>>20);
   2.487 -        return -ENOMEM;
   2.488 -    }
   2.489 -
   2.490 -    // Other sanity check about Dom0 image
   2.491 -
   2.492 -    /* Construct a frame-allocation list for the initial domain, since these
   2.493 -     * pages are allocated by boot allocator and pfns are not set properly
   2.494 -     */
   2.495 -    for ( mfn = (alloc_start>>PAGE_SHIFT); 
   2.496 -          mfn < (alloc_end>>PAGE_SHIFT); 
   2.497 -          mfn++ )
   2.498 -    {
   2.499 -        page = &frame_table[mfn];
   2.500 -        page_set_owner(page, d);
   2.501 -        page->u.inuse.type_info = 0;
   2.502 -        page->count_info        = PGC_allocated | 1;
   2.503 -        list_add_tail(&page->list, &d->page_list);
   2.504 -
   2.505 -	/* Construct 1:1 mapping */
   2.506 -	machine_to_phys_mapping[mfn] = mfn;
   2.507 -    }
   2.508 -
   2.509 -    post_arch_do_create_domain(v, vmx_dom0);
   2.510 -
   2.511 -    /* Load Dom0 image to its own memory */
   2.512 -    loaddomainelfimage(d,image_start);
   2.513 -
   2.514 -    /* Copy the initial ramdisk. */
   2.515 -
   2.516 -    /* Sync d/i cache conservatively */
   2.517 -    ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
   2.518 -    if (ret != PAL_STATUS_SUCCESS)
   2.519 -            panic("PAL CACHE FLUSH failed for dom0.\n");
   2.520 -    printk("Sync i/d cache for dom0 image SUCC\n");
   2.521 -
   2.522 -    /* Physical mode emulation initialization, including
   2.523 -     * emulation ID allcation and related memory request
   2.524 -     */
   2.525 -    physical_mode_init(v);
   2.526 -    /* Dom0's pfn is equal to mfn, so there's no need to allocate pmt
   2.527 -     * for dom0
   2.528 -     */
   2.529 -    d->arch.pmt = NULL;
   2.530 -
   2.531 -    /* Give up the VGA console if DOM0 is configured to grab it. */
   2.532 -    if (cmdline != NULL)
   2.533 -    	console_endboot(strstr(cmdline, "tty0") != NULL);
   2.534 -
   2.535 -    /* VMX specific construction for Dom0, if hardware supports VMX
   2.536 -     * and Dom0 is unmodified image
   2.537 -     */
   2.538 -    printk("Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
   2.539 -    if (vmx_dom0)
   2.540 -	vmx_final_setup_domain(dom0);
   2.541 -    
   2.542 -    /* vpd is ready now */
   2.543 -    vlsapic_reset(v);
   2.544 -    vtm_init(v);
   2.545 -
   2.546 -    set_bit(_DOMF_constructed, &d->domain_flags);
   2.547 -    new_thread(v, pkern_entry, 0, 0);
   2.548 -
   2.549 -    physdev_init_dom0(d);
   2.550 -    // FIXME: Hack for keyboard input
   2.551 -#ifdef CLONE_DOMAIN0
   2.552 -if (d == dom0)
   2.553 -#endif
   2.554 -    serial_input_init();
   2.555 -    if (d == dom0) {
   2.556 -    	VCPU(v, delivery_mask[0]) = -1L;
   2.557 -    	VCPU(v, delivery_mask[1]) = -1L;
   2.558 -    	VCPU(v, delivery_mask[2]) = -1L;
   2.559 -    	VCPU(v, delivery_mask[3]) = -1L;
   2.560 -    }
   2.561 -    else __set_bit(0x30,VCPU(v, delivery_mask));
   2.562 -
   2.563 -    return 0;
   2.564 -}
   2.565 -
   2.566 -
   2.567 -#else //CONFIG_VTI
   2.568 -
   2.569 -int construct_dom0(struct domain *d, 
   2.570 -	               unsigned long image_start, unsigned long image_len, 
   2.571 -	               unsigned long initrd_start, unsigned long initrd_len,
   2.572 -	               char *cmdline)
   2.573 -{
   2.574  	char *dst;
   2.575  	int i, rc;
   2.576  	unsigned long pfn, mfn;
   2.577  	unsigned long nr_pt_pages;
   2.578  	unsigned long count;
   2.579 -	//l2_pgentry_t *l2tab, *l2start;
   2.580 -	//l1_pgentry_t *l1tab = NULL, *l1start = NULL;
   2.581 +	unsigned long alloc_start, alloc_end;
   2.582  	struct pfn_info *page = NULL;
   2.583  	start_info_t *si;
   2.584  	struct vcpu *v = d->vcpu[0];
   2.585 @@ -1144,6 +783,7 @@ int construct_dom0(struct domain *d,
   2.586  	unsigned long pkern_start;
   2.587  	unsigned long pkern_entry;
   2.588  	unsigned long pkern_end;
   2.589 +	unsigned long ret, progress = 0;
   2.590  
   2.591  //printf("construct_dom0: starting\n");
   2.592  	/* Sanity! */
   2.593 @@ -1158,7 +798,9 @@ int construct_dom0(struct domain *d,
   2.594  
   2.595  	printk("*** LOADING DOMAIN 0 ***\n");
   2.596  
   2.597 -	d->max_pages = dom0_size/PAGE_SIZE;
   2.598 +	alloc_start = dom0_start;
   2.599 +	alloc_end = dom0_start + dom0_size;
   2.600 +	d->tot_pages = d->max_pages = dom0_size/PAGE_SIZE;
   2.601  	image_start = __va(ia64_boot_param->initrd_start);
   2.602  	image_len = ia64_boot_param->initrd_size;
   2.603  //printk("image_start=%lx, image_len=%lx\n",image_start,image_len);
   2.604 @@ -1171,6 +813,23 @@ int construct_dom0(struct domain *d,
   2.605  	if ( rc != 0 )
   2.606  	    return rc;
   2.607  
   2.608 +#ifdef CONFIG_VTI
   2.609 +	/* Temp workaround */
   2.610 +	if (running_on_sim)
   2.611 +	    dsi.xen_section_string = (char *)1;
   2.612 +
   2.613 +	/* Check whether dom0 is vti domain */
   2.614 +	if ((!vmx_enabled) && !dsi.xen_section_string) {
   2.615 +	    printk("Lack of hardware support for unmodified vmx dom0\n");
   2.616 +	    panic("");
   2.617 +	}
   2.618 +
   2.619 +	if (vmx_enabled && !dsi.xen_section_string) {
   2.620 +	    printk("Dom0 is vmx domain!\n");
   2.621 +	    vmx_dom0 = 1;
   2.622 +	}
   2.623 +#endif
   2.624 +
   2.625  	p_start = dsi.v_start;
   2.626  	pkern_start = dsi.v_kernstart;
   2.627  	pkern_end = dsi.v_kernend;
   2.628 @@ -1214,14 +873,43 @@ int construct_dom0(struct domain *d,
   2.629  	for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   2.630  	    d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
   2.631  
   2.632 +#ifdef CONFIG_VTI
   2.633 +	/* Construct a frame-allocation list for the initial domain, since these
   2.634 +	 * pages are allocated by boot allocator and pfns are not set properly
   2.635 +	 */
   2.636 +	for ( mfn = (alloc_start>>PAGE_SHIFT); 
   2.637 +	      mfn < (alloc_end>>PAGE_SHIFT); 
   2.638 +	      mfn++ )
   2.639 +	{
   2.640 +            page = &frame_table[mfn];
   2.641 +            page_set_owner(page, d);
   2.642 +            page->u.inuse.type_info = 0;
   2.643 +            page->count_info        = PGC_allocated | 1;
   2.644 +            list_add_tail(&page->list, &d->page_list);
   2.645 +
   2.646 +	    /* Construct 1:1 mapping */
   2.647 +	    machine_to_phys_mapping[mfn] = mfn;
   2.648 +	}
   2.649 +
   2.650 +	/* Dom0's pfn is equal to mfn, so there's no need to allocate pmt
   2.651 +	 * for dom0
   2.652 +	 */
   2.653 +	d->arch.pmt = NULL;
   2.654 +#endif
   2.655 +
   2.656  	/* Copy the OS image. */
   2.657 -	//(void)loadelfimage(image_start);
   2.658  	loaddomainelfimage(d,image_start);
   2.659  
   2.660  	/* Copy the initial ramdisk. */
   2.661  	//if ( initrd_len != 0 )
   2.662  	//    memcpy((void *)vinitrd_start, initrd_start, initrd_len);
   2.663  
   2.664 +	/* Sync d/i cache conservatively */
   2.665 +	ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
   2.666 +	if (ret != PAL_STATUS_SUCCESS)
   2.667 +	    panic("PAL CACHE FLUSH failed for dom0.\n");
   2.668 +	printk("Sync i/d cache for dom0 image SUCC\n");
   2.669 +
   2.670  #if 0
   2.671  	/* Set up start info area. */
   2.672  	//si = (start_info_t *)vstartinfo_start;
   2.673 @@ -1257,14 +945,21 @@ int construct_dom0(struct domain *d,
   2.674  #endif
   2.675  	
   2.676  	/* Give up the VGA console if DOM0 is configured to grab it. */
   2.677 -#ifdef IA64
   2.678  	if (cmdline != NULL)
   2.679 -#endif
   2.680 -	console_endboot(strstr(cmdline, "tty0") != NULL);
   2.681 +	    console_endboot(strstr(cmdline, "tty0") != NULL);
   2.682 +
   2.683 +	/* VMX specific construction for Dom0, if hardware supports VMX
   2.684 +	 * and Dom0 is unmodified image
   2.685 +	 */
   2.686 +	printk("Dom0: 0x%lx, domain: 0x%lx\n", (u64)dom0, (u64)d);
   2.687 +	if (vmx_dom0)
   2.688 +	    vmx_final_setup_domain(dom0);
   2.689  
   2.690  	set_bit(_DOMF_constructed, &d->domain_flags);
   2.691  
   2.692  	new_thread(v, pkern_entry, 0, 0);
   2.693 +	physdev_init_dom0(d);
   2.694 +
   2.695  	// FIXME: Hack for keyboard input
   2.696  #ifdef CLONE_DOMAIN0
   2.697  if (d == dom0)
   2.698 @@ -1280,7 +975,6 @@ if (d == dom0)
   2.699  
   2.700  	return 0;
   2.701  }
   2.702 -#endif // CONFIG_VTI
   2.703  
   2.704  // FIXME: When dom0 can construct domains, this goes away (or is rewritten)
   2.705  int construct_domU(struct domain *d,
     3.1 --- a/xen/arch/ia64/linux-xen/setup.c	Fri Aug 19 20:45:43 2005 -0800
     3.2 +++ b/xen/arch/ia64/linux-xen/setup.c	Fri Aug 19 21:19:39 2005 -0800
     3.3 @@ -51,9 +51,7 @@
     3.4  #include <asm/smp.h>
     3.5  #include <asm/system.h>
     3.6  #include <asm/unistd.h>
     3.7 -#ifdef CONFIG_VTI
     3.8  #include <asm/vmx.h>
     3.9 -#endif // CONFIG_VTI
    3.10  #include <asm/io.h>
    3.11  
    3.12  #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
    3.13 @@ -402,9 +400,9 @@ late_setup_arch (char **cmdline_p)
    3.14  	cpu_physical_id(0) = hard_smp_processor_id();
    3.15  #endif
    3.16  
    3.17 -#ifdef CONFIG_VTI
    3.18 +#ifdef XEN
    3.19  	identify_vmx_feature();
    3.20 -#endif // CONFIG_VTI
    3.21 +#endif
    3.22  
    3.23  	cpu_init();	/* initialize the bootstrap CPU */
    3.24  
    3.25 @@ -600,7 +598,7 @@ identify_cpu (struct cpuinfo_ia64 *c)
    3.26  	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
    3.27  	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
    3.28  
    3.29 -#ifdef CONFIG_VTI
    3.30 +#ifdef XEN
    3.31  	/* If vmx feature is on, do necessary initialization for vmx */
    3.32  	if (vmx_enabled)
    3.33  		vmx_init_env();
     4.1 --- a/xen/arch/ia64/regionreg.c	Fri Aug 19 20:45:43 2005 -0800
     4.2 +++ b/xen/arch/ia64/regionreg.c	Fri Aug 19 21:19:39 2005 -0800
     4.3 @@ -29,9 +29,6 @@ extern void ia64_new_rr7(unsigned long r
     4.4  #define	MAX_RID_BLOCKS	(1 << (IA64_MAX_IMPL_RID_BITS-IA64_MIN_IMPL_RID_BITS))
     4.5  #define RIDS_PER_RIDBLOCK MIN_RIDS
     4.6  
     4.7 -// This is the one global memory representation of the default Xen region reg
     4.8 -ia64_rr xen_rr;
     4.9 -
    4.10  #if 0
    4.11  // following already defined in include/asm-ia64/gcc_intrin.h
    4.12  // it should probably be ifdef'd out from there to ensure all region
    4.13 @@ -65,7 +62,7 @@ unsigned long allocate_reserved_rid(void
    4.14  
    4.15  
    4.16  // returns -1 if none available
    4.17 -unsigned long allocate_metaphysical_rr0(void)
    4.18 +unsigned long allocate_metaphysical_rr(void)
    4.19  {
    4.20  	ia64_rr rrv;
    4.21  
    4.22 @@ -81,17 +78,6 @@ int deallocate_metaphysical_rid(unsigned
    4.23  	return 1;
    4.24  }
    4.25  
    4.26 -
    4.27 -void init_rr(void)
    4.28 -{
    4.29 -	xen_rr.rrval = 0;
    4.30 -	xen_rr.ve = 0;
    4.31 -	xen_rr.rid = allocate_reserved_rid();
    4.32 -	xen_rr.ps = PAGE_SHIFT;
    4.33 -
    4.34 -	printf("initialized xen_rr.rid=0x%lx\n", xen_rr.rid);
    4.35 -}
    4.36 -
    4.37  /*************************************
    4.38    Region Block setup/management
    4.39  *************************************/
    4.40 @@ -187,34 +173,6 @@ int deallocate_rid_range(struct domain *
    4.41  }
    4.42  
    4.43  
    4.44 -// This function is purely for performance... apparently scrambling
    4.45 -//  bits in the region id makes for better hashing, which means better
    4.46 -//  use of the VHPT, which means better performance
    4.47 -// Note that the only time a RID should be mangled is when it is stored in
    4.48 -//  a region register; anytime it is "viewable" outside of this module,
    4.49 -//  it should be unmangled
    4.50 -
    4.51 -// NOTE: this function is also implemented in assembly code in hyper_set_rr!!
    4.52 -// Must ensure these two remain consistent!
    4.53 -static inline unsigned long
    4.54 -vmMangleRID(unsigned long RIDVal)
    4.55 -{
    4.56 -	union bits64 { unsigned char bytes[4]; unsigned long uint; };
    4.57 -
    4.58 -	union bits64 t;
    4.59 -	unsigned char tmp;
    4.60 -
    4.61 -	t.uint = RIDVal;
    4.62 -	tmp = t.bytes[1];
    4.63 -	t.bytes[1] = t.bytes[3];
    4.64 -	t.bytes[3] = tmp;
    4.65 -
    4.66 -	return t.uint;
    4.67 -}
    4.68 -
    4.69 -// since vmMangleRID is symmetric, use it for unmangling also
    4.70 -#define vmUnmangleRID(x)	vmMangleRID(x)
    4.71 -
    4.72  static inline void
    4.73  set_rr_no_srlz(unsigned long rr, unsigned long rrval)
    4.74  {
     5.1 --- a/xen/arch/ia64/vcpu.c	Fri Aug 19 20:45:43 2005 -0800
     5.2 +++ b/xen/arch/ia64/vcpu.c	Fri Aug 19 21:19:39 2005 -0800
     5.3 @@ -14,9 +14,7 @@
     5.4  #include <asm/tlb.h>
     5.5  #include <asm/processor.h>
     5.6  #include <asm/delay.h>
     5.7 -#ifdef CONFIG_VTI
     5.8  #include <asm/vmx_vcpu.h>
     5.9 -#endif // CONFIG_VTI
    5.10  
    5.11  typedef	union {
    5.12  	struct ia64_psr ia64_psr;
     6.1 --- a/xen/arch/ia64/vmmu.c	Fri Aug 19 20:45:43 2005 -0800
     6.2 +++ b/xen/arch/ia64/vmmu.c	Fri Aug 19 21:19:39 2005 -0800
     6.3 @@ -81,10 +81,10 @@ u64 get_mfn(domid_t domid, u64 gpfn, u64
     6.4  /*
     6.5   * The VRN bits of va stand for which rr to get.
     6.6   */
     6.7 -rr_t vmmu_get_rr(VCPU *vcpu, u64 va)
     6.8 +ia64_rr vmmu_get_rr(VCPU *vcpu, u64 va)
     6.9  {
    6.10 -    rr_t   vrr;
    6.11 -    vmx_vcpu_get_rr(vcpu, va, &vrr.value);
    6.12 +    ia64_rr   vrr;
    6.13 +    vmx_vcpu_get_rr(vcpu, va, &vrr.rrval);
    6.14      return vrr;
    6.15  }
    6.16  
    6.17 @@ -240,7 +240,7 @@ void machine_tlb_insert(struct vcpu *d, 
    6.18      u64     saved_itir, saved_ifa, saved_rr;
    6.19      u64     pages;
    6.20      thash_data_t    mtlb;
    6.21 -    rr_t    vrr;
    6.22 +    ia64_rr vrr;
    6.23      unsigned int    cl = tlb->cl;
    6.24  
    6.25      mtlb.ifa = tlb->vadr;
    6.26 @@ -264,7 +264,7 @@ void machine_tlb_insert(struct vcpu *d, 
    6.27      /* Only access memory stack which is mapped by TR,
    6.28       * after rr is switched.
    6.29       */
    6.30 -    ia64_set_rr(mtlb.ifa, vmx_vrrtomrr(d, vrr.value));
    6.31 +    ia64_set_rr(mtlb.ifa, vmx_vrrtomrr(d, vrr.rrval));
    6.32      ia64_srlz_d();
    6.33      if ( cl == ISIDE_TLB ) {
    6.34          ia64_itci(mtlb.page_flags);
    6.35 @@ -287,12 +287,12 @@ u64 machine_thash(PTA pta, u64 va, u64 r
    6.36      u64     hash_addr, tag;
    6.37      unsigned long psr;
    6.38      struct vcpu *v = current;
    6.39 -    rr_t    vrr;
    6.40 +    ia64_rr vrr;
    6.41  
    6.42      
    6.43      saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
    6.44      saved_rr0 = ia64_get_rr(0);
    6.45 -    vrr.value = saved_rr0;
    6.46 +    vrr.rrval = saved_rr0;
    6.47      vrr.rid = rid;
    6.48      vrr.ps = ps;
    6.49  
    6.50 @@ -300,7 +300,7 @@ u64 machine_thash(PTA pta, u64 va, u64 r
    6.51      // TODO: Set to enforce lazy mode
    6.52      local_irq_save(psr);
    6.53      ia64_setreg(_IA64_REG_CR_PTA, pta.val);
    6.54 -    ia64_set_rr(0, vmx_vrrtomrr(v, vrr.value));
    6.55 +    ia64_set_rr(0, vmx_vrrtomrr(v, vrr.rrval));
    6.56      ia64_srlz_d();
    6.57  
    6.58      hash_addr = ia64_thash(va);
    6.59 @@ -318,19 +318,19 @@ u64 machine_ttag(PTA pta, u64 va, u64 ri
    6.60      u64     hash_addr, tag;
    6.61      u64     psr;
    6.62      struct vcpu *v = current;
    6.63 -    rr_t    vrr;
    6.64 +    ia64_rr vrr;
    6.65  
    6.66      // TODO: Set to enforce lazy mode    
    6.67      saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
    6.68      saved_rr0 = ia64_get_rr(0);
    6.69 -    vrr.value = saved_rr0;
    6.70 +    vrr.rrval = saved_rr0;
    6.71      vrr.rid = rid;
    6.72      vrr.ps = ps;
    6.73  
    6.74      va = (va << 3) >> 3;    // set VRN to 0.
    6.75      local_irq_save(psr);
    6.76      ia64_setreg(_IA64_REG_CR_PTA, pta.val);
    6.77 -    ia64_set_rr(0, vmx_vrrtomrr(v, vrr.value));
    6.78 +    ia64_set_rr(0, vmx_vrrtomrr(v, vrr.rrval));
    6.79      ia64_srlz_d();
    6.80  
    6.81      tag = ia64_ttag(va);
    6.82 @@ -354,15 +354,15 @@ void machine_tlb_purge(u64 rid, u64 va, 
    6.83  {
    6.84      u64       saved_rr0;
    6.85      u64       psr;
    6.86 -    rr_t      vrr;
    6.87 +    ia64_rr vrr;
    6.88  
    6.89      va = (va << 3) >> 3;    // set VRN to 0.
    6.90      saved_rr0 = ia64_get_rr(0);
    6.91 -    vrr.value = saved_rr0;
    6.92 +    vrr.rrval = saved_rr0;
    6.93      vrr.rid = rid;
    6.94      vrr.ps = ps;
    6.95      local_irq_save(psr);
    6.96 -    ia64_set_rr( 0, vmx_vrrtomrr(current,vrr.value) );
    6.97 +    ia64_set_rr( 0, vmx_vrrtomrr(current,vrr.rrval) );
    6.98      ia64_srlz_d();
    6.99      ia64_ptcl(va, ps << 2);
   6.100      ia64_set_rr( 0, saved_rr0 );
   6.101 @@ -421,14 +421,14 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
   6.102      u64     gpip;   // guest physical IP
   6.103      u64     mpa;
   6.104      thash_data_t    *tlb;
   6.105 -    rr_t    vrr;
   6.106 +    ia64_rr vrr;
   6.107      u64     mfn;
   6.108      
   6.109      if ( !(VMX_VPD(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
   6.110          gpip = gip;
   6.111      }
   6.112      else {
   6.113 -        vmx_vcpu_get_rr(vcpu, gip, &vrr.value);
   6.114 +        vmx_vcpu_get_rr(vcpu, gip, &vrr.rrval);
   6.115          tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu), 
   6.116                  vrr.rid, gip, ISIDE_TLB );
   6.117          if ( tlb == NULL ) panic("No entry found in ITLB\n");
   6.118 @@ -448,7 +448,7 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
   6.119      thash_data_t data, *ovl;
   6.120      thash_cb_t  *hcb;
   6.121      search_section_t sections;
   6.122 -    rr_t    vrr;
   6.123 +    ia64_rr vrr;
   6.124  
   6.125      hcb = vmx_vcpu_get_vtlb(vcpu);
   6.126      data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
   6.127 @@ -481,7 +481,7 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
   6.128      thash_data_t data, *ovl;
   6.129      thash_cb_t  *hcb;
   6.130      search_section_t sections;
   6.131 -    rr_t    vrr;
   6.132 +    ia64_rr vrr;
   6.133  
   6.134      hcb = vmx_vcpu_get_vtlb(vcpu);
   6.135      data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
   6.136 @@ -511,7 +511,7 @@ int vmx_lock_guest_dtc (VCPU *vcpu, UINT
   6.137  {
   6.138  
   6.139      thash_cb_t  *hcb;
   6.140 -    rr_t  vrr;
   6.141 +    ia64_rr vrr;
   6.142      u64	  preferred_size;
   6.143  
   6.144      vmx_vcpu_get_rr(vcpu, va, &vrr);
   6.145 @@ -527,7 +527,7 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UIN
   6.146      thash_data_t data, *ovl;
   6.147      thash_cb_t  *hcb;
   6.148      search_section_t sections;
   6.149 -    rr_t    vrr;
   6.150 +    ia64_rr vrr;
   6.151  
   6.152      hcb = vmx_vcpu_get_vtlb(vcpu);
   6.153      data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
   6.154 @@ -559,7 +559,7 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UIN
   6.155      thash_data_t data, *ovl;
   6.156      thash_cb_t  *hcb;
   6.157      search_section_t sections;
   6.158 -    rr_t    vrr;
   6.159 +    ia64_rr    vrr;
   6.160  
   6.161  
   6.162      hcb = vmx_vcpu_get_vtlb(vcpu);
     7.1 --- a/xen/arch/ia64/vmx_init.c	Fri Aug 19 20:45:43 2005 -0800
     7.2 +++ b/xen/arch/ia64/vmx_init.c	Fri Aug 19 21:19:39 2005 -0800
     7.3 @@ -22,6 +22,9 @@
     7.4   */
     7.5  
     7.6  /*
     7.7 + * 05/08/16 Kun tian (Kevin Tian) <kevin.tian@intel.com>:
     7.8 + * Disable doubling mapping
     7.9 + *
    7.10   * 05/03/23 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
    7.11   * Simplied design in first step:
    7.12   *	- One virtual environment
    7.13 @@ -39,6 +42,7 @@
    7.14  #include <xen/lib.h>
    7.15  #include <asm/vmmu.h>
    7.16  #include <public/arch-ia64.h>
    7.17 +#include <public/io/ioreq.h>
    7.18  #include <asm/vmx_phy_mode.h>
    7.19  #include <asm/processor.h>
    7.20  #include <asm/vmx.h>
    7.21 @@ -126,8 +130,43 @@ vmx_init_env(void)
    7.22  	else
    7.23  		ASSERT(tmp_base != __vsa_base);
    7.24  
    7.25 +#ifdef XEN_DBL_MAPPING
    7.26  	/* Init stub for rr7 switch */
    7.27  	vmx_init_double_mapping_stub();
    7.28 +#endif 
    7.29 +}
    7.30 +
    7.31 +void vmx_setup_platform(struct vcpu *v, struct vcpu_guest_context *c)
    7.32 +{
    7.33 +	struct domain *d = v->domain;
    7.34 +	shared_iopage_t *sp;
    7.35 +
    7.36 +	ASSERT(d != dom0); /* only for non-privileged vti domain */
    7.37 +	d->arch.vmx_platform.shared_page_va = __va(c->share_io_pg);
    7.38 +	sp = get_sp(d);
    7.39 +	memset((char *)sp,0,PAGE_SIZE);
    7.40 +	/* FIXME: temp due to old CP */
    7.41 +	sp->sp_global.eport = 2;
    7.42 +#ifdef V_IOSAPIC_READY
    7.43 +	sp->vcpu_number = 1;
    7.44 +#endif
    7.45 +	/* TEMP */
    7.46 +	d->arch.vmx_platform.pib_base = 0xfee00000UL;
    7.47 +
    7.48 +	/* One more step to enable interrupt assist */
    7.49 +	set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
    7.50 +	/* Only open one port for I/O and interrupt emulation */
    7.51 +	if (v == d->vcpu[0]) {
    7.52 +	    memset(&d->shared_info->evtchn_mask[0], 0xff,
    7.53 +		sizeof(d->shared_info->evtchn_mask));
    7.54 +	    clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);
    7.55 +	}
    7.56 +
    7.57 +	/* FIXME: only support PMT table continuously by far */
    7.58 +	d->arch.pmt = __va(c->pt_base);
    7.59 +	d->arch.max_pfn = c->pt_max_pfn;
    7.60 +
    7.61 +	vmx_final_setup_domain(d);
    7.62  }
    7.63  
    7.64  typedef union {
    7.65 @@ -171,7 +210,7 @@ static vpd_t *alloc_vpd(void)
    7.66  }
    7.67  
    7.68  
    7.69 -
    7.70 +#ifdef CONFIG_VTI
    7.71  /*
    7.72   * Create a VP on intialized VMX environment.
    7.73   */
    7.74 @@ -190,6 +229,7 @@ vmx_create_vp(struct vcpu *v)
    7.75  		panic("ia64_pal_vp_create failed. \n");
    7.76  }
    7.77  
    7.78 +#ifdef XEN_DBL_MAPPING
    7.79  void vmx_init_double_mapping_stub(void)
    7.80  {
    7.81  	u64 base, psr;
    7.82 @@ -206,6 +246,7 @@ void vmx_init_double_mapping_stub(void)
    7.83  	ia64_srlz_i();
    7.84  	printk("Add TR mapping for rr7 switch stub, with physical: 0x%lx\n", (u64)(__pa(base)));
    7.85  }
    7.86 +#endif
    7.87  
    7.88  /* Other non-context related tasks can be done in context switch */
    7.89  void
    7.90 @@ -219,12 +260,14 @@ vmx_save_state(struct vcpu *v)
    7.91  	if (status != PAL_STATUS_SUCCESS)
    7.92  		panic("Save vp status failed\n");
    7.93  
    7.94 +#ifdef XEN_DBL_MAPPING
    7.95  	/* FIXME: Do we really need purge double mapping for old vcpu?
    7.96  	 * Since rid is completely different between prev and next,
    7.97  	 * it's not overlap and thus no MCA possible... */
    7.98  	dom_rr7 = vmx_vrrtomrr(v, VMX(v, vrr[7]));
    7.99          vmx_purge_double_mapping(dom_rr7, KERNEL_START,
   7.100  				 (u64)v->arch.vtlb->ts->vhpt->hash);
   7.101 +#endif
   7.102  
   7.103  	/* Need to save KR when domain switch, though HV itself doesn;t
   7.104  	 * use them.
   7.105 @@ -252,12 +295,14 @@ vmx_load_state(struct vcpu *v)
   7.106  	if (status != PAL_STATUS_SUCCESS)
   7.107  		panic("Restore vp status failed\n");
   7.108  
   7.109 +#ifdef XEN_DBL_MAPPING
   7.110  	dom_rr7 = vmx_vrrtomrr(v, VMX(v, vrr[7]));
   7.111  	pte_xen = pte_val(pfn_pte((xen_pstart >> PAGE_SHIFT), PAGE_KERNEL));
   7.112  	pte_vhpt = pte_val(pfn_pte((__pa(v->arch.vtlb->ts->vhpt->hash) >> PAGE_SHIFT), PAGE_KERNEL));
   7.113  	vmx_insert_double_mapping(dom_rr7, KERNEL_START,
   7.114  				  (u64)v->arch.vtlb->ts->vhpt->hash,
   7.115  				  pte_xen, pte_vhpt);
   7.116 +#endif
   7.117  
   7.118  	ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
   7.119  	ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
   7.120 @@ -271,6 +316,7 @@ vmx_load_state(struct vcpu *v)
   7.121  	 * anchored in vcpu */
   7.122  }
   7.123  
   7.124 +#ifdef XEN_DBL_MAPPING
   7.125  /* Purge old double mapping and insert new one, due to rr7 change */
   7.126  void
   7.127  vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7)
   7.128 @@ -287,6 +333,8 @@ vmx_change_double_mapping(struct vcpu *v
   7.129  				  vhpt_base,
   7.130  				  pte_xen, pte_vhpt);
   7.131  }
   7.132 +#endif // XEN_DBL_MAPPING
   7.133 +#endif // CONFIG_VTI
   7.134  
   7.135  /*
   7.136   * Initialize VMX envirenment for guest. Only the 1st vp/vcpu
   7.137 @@ -307,12 +355,21 @@ vmx_final_setup_domain(struct domain *d)
   7.138  	v->arch.arch_vmx.vpd = vpd;
   7.139  	vpd->virt_env_vaddr = vm_buffer;
   7.140  
   7.141 +#ifdef CONFIG_VTI
   7.142  	/* v->arch.schedule_tail = arch_vmx_do_launch; */
   7.143  	vmx_create_vp(v);
   7.144  
   7.145  	/* Set this ed to be vmx */
   7.146  	set_bit(ARCH_VMX_VMCS_LOADED, &v->arch.arch_vmx.flags);
   7.147  
   7.148 +	/* Physical mode emulation initialization, including
    7.149 +	* emulation ID allocation and related memory request
   7.150 +	*/
   7.151 +	physical_mode_init(v);
   7.152 +
   7.153 +	vlsapic_reset(v);
   7.154 +	vtm_init(v);
   7.155 +#endif
   7.156 +
   7.157  	/* Other vmx specific initialization work */
   7.158  }
   7.159 -
     8.1 --- a/xen/arch/ia64/vmx_phy_mode.c	Fri Aug 19 20:45:43 2005 -0800
     8.2 +++ b/xen/arch/ia64/vmx_phy_mode.c	Fri Aug 19 21:19:39 2005 -0800
     8.3 @@ -104,22 +104,8 @@ physical_mode_init(VCPU *vcpu)
     8.4      UINT64 psr;
     8.5      struct domain * d = vcpu->domain;
     8.6  
     8.7 -    vcpu->domain->arch.emul_phy_rr0.rid = XEN_RR7_RID+((d->domain_id)<<3);
     8.8 -    /* FIXME */
     8.9 -#if 0
    8.10 -    vcpu->domain->arch.emul_phy_rr0.ps = 28;  /* set page size to 256M */
    8.11 -#endif
    8.12 -	vcpu->domain->arch.emul_phy_rr0.ps = EMUL_PHY_PAGE_SHIFT;  /* set page size to 4k */
    8.13 -    vcpu->domain->arch.emul_phy_rr0.ve = 1; /* enable VHPT walker on this region */
    8.14 -
    8.15 -    vcpu->domain->arch.emul_phy_rr4.rid = XEN_RR7_RID + ((d->domain_id)<<3) + 4;
    8.16 -    vcpu->domain->arch.emul_phy_rr4.ps = EMUL_PHY_PAGE_SHIFT;  /* set page size to 4k */
    8.17 -    vcpu->domain->arch.emul_phy_rr4.ve = 1; /* enable VHPT walker on this region */
    8.18 -
    8.19      vcpu->arch.old_rsc = 0;
    8.20      vcpu->arch.mode_flags = GUEST_IN_PHY;
    8.21 -
    8.22 -    return;
    8.23  }
    8.24  
    8.25  extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages);
    8.26 @@ -246,19 +232,23 @@ void
    8.27  vmx_load_all_rr(VCPU *vcpu)
    8.28  {
    8.29  	unsigned long psr;
    8.30 +	ia64_rr phy_rr;
    8.31  
    8.32  	psr = ia64_clear_ic();
    8.33  
    8.34 +	phy_rr.ps = EMUL_PHY_PAGE_SHIFT; 
    8.35 +	phy_rr.ve = 1;
    8.36 +
    8.37  	/* WARNING: not allow co-exist of both virtual mode and physical
    8.38  	 * mode in same region
    8.39  	 */
    8.40  	if (is_physical_mode(vcpu)) {
    8.41  		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
    8.42  			panic("Unexpected domain switch in phy emul\n");
    8.43 -		ia64_set_rr((VRN0 << VRN_SHIFT),
    8.44 -			     vcpu->domain->arch.emul_phy_rr0.rrval);
    8.45 -		ia64_set_rr((VRN4 << VRN_SHIFT),
    8.46 -			     vcpu->domain->arch.emul_phy_rr4.rrval);
    8.47 +		phy_rr.rid = vcpu->domain->arch.metaphysical_rr0;
    8.48 +		ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval);
    8.49 +		phy_rr.rid = vcpu->domain->arch.metaphysical_rr4;
    8.50 +		ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval);
    8.51  	} else {
    8.52  		ia64_set_rr((VRN0 << VRN_SHIFT),
    8.53  			     vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0])));
    8.54 @@ -284,13 +274,18 @@ void
    8.55  switch_to_physical_rid(VCPU *vcpu)
    8.56  {
    8.57      UINT64 psr;
    8.58 +    ia64_rr phy_rr;
    8.59 +
    8.60 +    phy_rr.ps = EMUL_PHY_PAGE_SHIFT; 
    8.61 +    phy_rr.ve = 1;
    8.62  
    8.63      /* Save original virtual mode rr[0] and rr[4] */
    8.64 -
    8.65      psr=ia64_clear_ic();
    8.66 -    ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->domain->arch.emul_phy_rr0.rrval);
    8.67 +    phy_rr.rid = vcpu->domain->arch.metaphysical_rr0;
    8.68 +    ia64_set_rr(VRN0<<VRN_SHIFT, phy_rr.rrval);
    8.69      ia64_srlz_d();
    8.70 -    ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->domain->arch.emul_phy_rr4.rrval);
    8.71 +    phy_rr.rid = vcpu->domain->arch.metaphysical_rr4;
    8.72 +    ia64_set_rr(VRN4<<VRN_SHIFT, phy_rr.rrval);
    8.73      ia64_srlz_d();
    8.74  
    8.75      ia64_set_psr(psr);
     9.1 --- a/xen/arch/ia64/vmx_vcpu.c	Fri Aug 19 20:45:43 2005 -0800
     9.2 +++ b/xen/arch/ia64/vmx_vcpu.c	Fri Aug 19 21:19:39 2005 -0800
     9.3 @@ -234,9 +234,11 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UI
     9.4      case VRN7:
     9.5          VMX(vcpu,mrr7)=vmx_vrrtomrr(vcpu,val);
     9.6          /* Change double mapping for this domain */
     9.7 +#ifdef XEN_DBL_MAPPING
     9.8          vmx_change_double_mapping(vcpu,
     9.9                        vmx_vrrtomrr(vcpu,oldrr.rrval),
    9.10                        vmx_vrrtomrr(vcpu,newrr.rrval));
    9.11 +#endif
    9.12          break;
    9.13      default:
    9.14          ia64_set_rr(reg,vmx_vrrtomrr(vcpu,val));
    10.1 --- a/xen/arch/ia64/vtlb.c	Fri Aug 19 20:45:43 2005 -0800
    10.2 +++ b/xen/arch/ia64/vtlb.c	Fri Aug 19 21:19:39 2005 -0800
    10.3 @@ -283,7 +283,7 @@ int __tlb_to_vhpt(thash_cb_t *hcb,
    10.4              thash_data_t *vhpt)
    10.5  {
    10.6      u64 pages,mfn;
    10.7 -    rr_t vrr;
    10.8 +    ia64_rr vrr;
    10.9  
   10.10      ASSERT ( hcb->ht == THASH_VHPT );
   10.11      vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
   10.12 @@ -361,7 +361,7 @@ void vtlb_insert(thash_cb_t *hcb, thash_
   10.13  {
   10.14      thash_data_t    *hash_table, *cch;
   10.15      int flag;
   10.16 -    rr_t  vrr;
   10.17 +    ia64_rr vrr;
   10.18      u64 gppn;
   10.19      u64 ppns, ppne;
   10.20      
   10.21 @@ -397,7 +397,7 @@ void vtlb_insert(thash_cb_t *hcb, thash_
   10.22  static void vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
   10.23  {
   10.24      thash_data_t    *hash_table, *cch;
   10.25 -    rr_t  vrr;
   10.26 +    ia64_rr vrr;
   10.27      
   10.28      hash_table = (hcb->hash_func)(hcb->pta,
   10.29                          va, entry->rid, entry->ps);
   10.30 @@ -425,7 +425,7 @@ static void vhpt_insert(thash_cb_t *hcb,
   10.31  void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
   10.32  {
   10.33      thash_data_t    *hash_table;
   10.34 -    rr_t  vrr;
   10.35 +    ia64_rr vrr;
   10.36      
   10.37      vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr);
   10.38      if ( entry->ps != vrr.ps && entry->tc ) {
   10.39 @@ -556,7 +556,7 @@ static thash_data_t *vtlb_find_overlap(t
   10.40      thash_data_t    *hash_table;
   10.41      thash_internal_t *priv = &hcb->priv;
   10.42      u64     tag;
   10.43 -    rr_t    vrr;
   10.44 +    ia64_rr vrr;
   10.45  
   10.46      priv->_curva = va & ~(size-1);
   10.47      priv->_eva = priv->_curva + size;
   10.48 @@ -580,7 +580,7 @@ static thash_data_t *vhpt_find_overlap(t
   10.49      thash_data_t    *hash_table;
   10.50      thash_internal_t *priv = &hcb->priv;
   10.51      u64     tag;
   10.52 -    rr_t    vrr;
   10.53 +    ia64_rr vrr;
   10.54  
   10.55      priv->_curva = va & ~(size-1);
   10.56      priv->_eva = priv->_curva + size;
   10.57 @@ -633,7 +633,7 @@ static thash_data_t *vtlb_next_overlap(t
   10.58      thash_data_t    *ovl;
   10.59      thash_internal_t *priv = &hcb->priv;
   10.60      u64 addr,rr_psize;
   10.61 -    rr_t  vrr;
   10.62 +    ia64_rr vrr;
   10.63  
   10.64      if ( priv->s_sect.tr ) {
   10.65          ovl = vtr_find_next_overlap (hcb);
   10.66 @@ -665,7 +665,7 @@ static thash_data_t *vhpt_next_overlap(t
   10.67      thash_data_t    *ovl;
   10.68      thash_internal_t *priv = &hcb->priv;
   10.69      u64 addr,rr_psize;
   10.70 -    rr_t  vrr;
   10.71 +    ia64_rr vrr;
   10.72  
   10.73      vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva);
   10.74      rr_psize = PSIZE(vrr.ps);
   10.75 @@ -800,7 +800,7 @@ thash_data_t *vtlb_lookup_ex(thash_cb_t 
   10.76  {
   10.77      thash_data_t    *hash_table, *cch;
   10.78      u64     tag;
   10.79 -    rr_t    vrr;
   10.80 +    ia64_rr vrr;
   10.81     
   10.82      ASSERT ( hcb->ht == THASH_VTLB );
   10.83      
    11.1 --- a/xen/arch/ia64/xenmem.c	Fri Aug 19 20:45:43 2005 -0800
    11.2 +++ b/xen/arch/ia64/xenmem.c	Fri Aug 19 21:19:39 2005 -0800
    11.3 @@ -30,8 +30,8 @@ static unsigned long num_dma_physpages;
    11.4   */
    11.5  #ifdef CONFIG_VTI
    11.6  unsigned long *mpt_table;
    11.7 -unsigned long *mpt_table_size;
    11.8 -#endif
    11.9 +unsigned long mpt_table_size;
   11.10 +#endif // CONFIG_VTI
   11.11  
   11.12  void
   11.13  paging_init (void)
   11.14 @@ -53,21 +53,6 @@ paging_init (void)
   11.15  
   11.16  	printk("machine to physical table: 0x%lx\n", (u64)mpt_table);
   11.17  	memset(mpt_table, INVALID_M2P_ENTRY, mpt_table_size);
   11.18 -
   11.19 -	/* Any more setup here? On VMX enabled platform,
   11.20 -	 * there's no need to keep guest linear pg table,
   11.21 -	 * and read only mpt table. MAP cache is not used
   11.22 -	 * in this stage, and later it will be in region 5.
   11.23 -	 * IO remap is in region 6 with identity mapping.
   11.24 -	 */
   11.25 -	/* HV_tlb_init(); */
   11.26 -
   11.27 -#else // CONFIG_VTI
   11.28 -
   11.29 -	/* Allocate and map the machine-to-phys table */
   11.30 -	if ((pg = alloc_domheap_pages(NULL, 10, 0)) == NULL)
   11.31 -		panic("Not enough memory to bootstrap Xen.\n");
   11.32 -	memset(page_to_virt(pg), 0x55, 16UL << 20);
   11.33  #endif // CONFIG_VTI
   11.34  
   11.35  	/* Other mapping setup */
    12.1 --- a/xen/arch/ia64/xensetup.c	Fri Aug 19 20:45:43 2005 -0800
    12.2 +++ b/xen/arch/ia64/xensetup.c	Fri Aug 19 21:19:39 2005 -0800
    12.3 @@ -181,11 +181,6 @@ void start_kernel(void)
    12.4      printk("xen image pstart: 0x%lx, xenheap pend: 0x%lx\n",
    12.5  	    xen_pstart, xenheap_phys_end);
    12.6  
    12.7 -#ifdef CONFIG_VTI
    12.8 -    /* If we want to enable vhpt for all regions, related initialization
    12.9 -     * for HV TLB must be done earlier before first TLB miss
   12.10 -     */
   12.11 -#endif // CONFIG_VTI
   12.12      /* Find next hole */
   12.13      firsthole_start = 0;
   12.14      efi_memmap_walk(xen_find_first_hole, &firsthole_start);
    13.1 --- a/xen/include/asm-ia64/domain.h	Fri Aug 19 20:45:43 2005 -0800
    13.2 +++ b/xen/include/asm-ia64/domain.h	Fri Aug 19 21:19:39 2005 -0800
    13.3 @@ -3,39 +3,28 @@
    13.4  
    13.5  #include <linux/thread_info.h>
    13.6  #include <asm/tlb.h>
    13.7 -#ifdef CONFIG_VTI
    13.8  #include <asm/vmx_vpd.h>
    13.9  #include <asm/vmmu.h>
   13.10  #include <asm/regionreg.h>
   13.11  #include <public/arch-ia64.h>
   13.12  #include <asm/vmx_platform.h>
   13.13 -#endif // CONFIG_VTI
   13.14  #include <xen/list.h>
   13.15  
   13.16  extern void arch_do_createdomain(struct vcpu *);
   13.17  
   13.18  extern void domain_relinquish_resources(struct domain *);
   13.19  
   13.20 -#ifdef CONFIG_VTI
   13.21 -struct trap_bounce {
   13.22 -	// TO add, FIXME Eddie
   13.23 -};
   13.24 -
   13.25 -#define	 PMT_SIZE	(32L*1024*1024)		// 32M for PMT
   13.26 -#endif // CONFIG_VTI
   13.27 -
   13.28  struct arch_domain {
   13.29      struct mm_struct *active_mm;
   13.30      struct mm_struct *mm;
   13.31      int metaphysical_rr0;
   13.32 +    int metaphysical_rr4;
   13.33      int starting_rid;		/* first RID assigned to domain */
   13.34      int ending_rid;		/* one beyond highest RID assigned to domain */
   13.35      int rid_bits;		/* number of virtual rid bits (default: 18) */
   13.36      int breakimm;
   13.37 -#ifdef  CONFIG_VTI
   13.38 +
   13.39      int imp_va_msb;
   13.40 -    ia64_rr emul_phy_rr0;
   13.41 -    ia64_rr emul_phy_rr4;
   13.42      unsigned long *pmt;	/* physical to machine table */
   13.43      /*
   13.44       * max_pfn is the maximum page frame in guest physical space, including
   13.45 @@ -44,7 +33,7 @@ struct arch_domain {
   13.46       */
   13.47      unsigned long max_pfn;
   13.48      struct virutal_platform_def     vmx_platform;
   13.49 -#endif  //CONFIG_VTI
   13.50 +
   13.51      u64 xen_vastart;
   13.52      u64 xen_vaend;
   13.53      u64 shared_info_va;
   13.54 @@ -78,15 +67,15 @@ struct arch_vcpu {
   13.55  #endif
   13.56      void *regs;	/* temporary until find a better way to do privops */
   13.57      int metaphysical_rr0;		// from arch_domain (so is pinned)
   13.58 +    int metaphysical_rr4;		// from arch_domain (so is pinned)
   13.59      int metaphysical_saved_rr0;		// from arch_domain (so is pinned)
   13.60 +    int metaphysical_saved_rr4;		// from arch_domain (so is pinned)
   13.61      int breakimm;			// from arch_domain (so is pinned)
   13.62      int starting_rid;		/* first RID assigned to domain */
   13.63      int ending_rid;		/* one beyond highest RID assigned to domain */
   13.64      struct mm_struct *active_mm;
   13.65      struct thread_struct _thread;	// this must be last
   13.66 -#ifdef CONFIG_VTI
   13.67 -    void (*schedule_tail) (struct vcpu *);
   13.68 -    struct trap_bounce trap_bounce;
   13.69 +
   13.70      thash_cb_t *vtlb;
   13.71      char irq_new_pending;
   13.72      char irq_new_condition;    // vpsr.i/vtpr change, check for pending VHPI
   13.73 @@ -94,9 +83,7 @@ struct arch_vcpu {
    13.74      // for physical emulation
   13.75      unsigned long old_rsc;
   13.76      int mode_flags;
   13.77 -
   13.78      struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
   13.79 -#endif	// CONFIG_VTI
   13.80  };
   13.81  
   13.82  #define active_mm arch.active_mm
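
metaphysical_rr4 joins metaphysical_rr0 so that region 4, like region 0, has a pinned RID for physical-mode emulation. A heavily hedged sketch of the intended use; the function name, and the assumption that the fields hold ready-to-load region register values, are illustrative only:

    /* Sketch: entering metaphysical mode would load both rr0 and rr4
     * from the pinned per-vcpu values via set_one_rr() (regionreg.h). */
    static void enter_metaphysical_sketch(struct vcpu *v)
    {
        set_one_rr(0UL << 61, v->arch.metaphysical_rr0);
        set_one_rr(4UL << 61, v->arch.metaphysical_rr4);
    }
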
    14.1 --- a/xen/include/asm-ia64/linux-xen/asm/pal.h	Fri Aug 19 20:45:43 2005 -0800
    14.2 +++ b/xen/include/asm-ia64/linux-xen/asm/pal.h	Fri Aug 19 21:19:39 2005 -0800
    14.3 @@ -1559,9 +1559,7 @@ ia64_pal_prefetch_visibility (s64 trans_
    14.4  	return iprv.status;
    14.5  }
    14.6  
    14.7 -#ifdef CONFIG_VTI
    14.8  #include <asm/vmx_pal.h>
    14.9 -#endif // CONFIG_VTI
   14.10  #endif /* __ASSEMBLY__ */
   14.11  
   14.12  #endif /* _ASM_IA64_PAL_H */
    15.1 --- a/xen/include/asm-ia64/mmu_context.h	Fri Aug 19 20:45:43 2005 -0800
    15.2 +++ b/xen/include/asm-ia64/mmu_context.h	Fri Aug 19 21:19:39 2005 -0800
    15.3 @@ -2,11 +2,7 @@
    15.4  #define __ASM_MMU_CONTEXT_H
    15.5  //dummy file to resolve non-arch-indep include
    15.6  #ifdef XEN
    15.7 -#ifndef CONFIG_VTI
    15.8  #define IA64_REGION_ID_KERNEL 0
    15.9 -#else // CONFIG_VTI
   15.10 -#define IA64_REGION_ID_KERNEL 0x1e0000	/* Start from all 1 in highest 4 bits */
   15.11 -#endif // CONFIG_VTI
   15.12  #define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))
   15.13  
   15.14  #ifndef __ASSEMBLY__
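
A worked example of ia64_rid() with the now-unified kernel region ID: the context supplies the high bits and the region number the low three, so a region-7 kernel address yields simply 7.

    /* ia64_rid(ctx, addr) = (ctx << 3) | (addr >> 61), so: */
    /* ia64_rid(IA64_REGION_ID_KERNEL, 0xe000000000000000UL)
     *   == (0 << 3) | 7 == 7 */
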
    16.1 --- a/xen/include/asm-ia64/privop.h	Fri Aug 19 20:45:43 2005 -0800
    16.2 +++ b/xen/include/asm-ia64/privop.h	Fri Aug 19 21:19:39 2005 -0800
    16.3 @@ -133,7 +133,6 @@ typedef union U_INST64_M46 {
    16.4      struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6, x3:3, un1:1, major:4; };
    16.5  } INST64_M46;
    16.6  
    16.7 -#ifdef CONFIG_VTI
    16.8  typedef union U_INST64_M47 {
    16.9      IA64_INST inst;
   16.10      struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
   16.11 @@ -169,8 +168,6 @@ typedef union U_INST64_M6 {
   16.12      struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2, x6:6, m:1, major:4; };
   16.13  } INST64_M6;
   16.14  
   16.15 -#endif // CONFIG_VTI
   16.16 -
   16.17  typedef union U_INST64 {
   16.18      IA64_INST inst;
   16.19      struct { unsigned long :37, major:4; } generic;
   16.20 @@ -182,14 +179,12 @@ typedef union U_INST64 {
   16.21      INST64_I26 I26;	// mov register to ar (I unit)
   16.22      INST64_I27 I27;	// mov immediate to ar (I unit)
   16.23      INST64_I28 I28;	// mov from ar (I unit)
   16.24 -#ifdef CONFIG_VTI
   16.25 -    INST64_M1  M1;  // ld integer
   16.26 +    INST64_M1  M1;	// ld integer
   16.27      INST64_M2  M2;
   16.28      INST64_M3  M3;
   16.29 -    INST64_M4  M4;  // st integer
   16.30 +    INST64_M4  M4;	// st integer
   16.31      INST64_M5  M5;
   16.32 -    INST64_M6  M6;  // ldfd floating pointer
   16.33 -#endif // CONFIG_VTI
    16.34 +    INST64_M6  M6;	// ldfd floating point
   16.35      INST64_M28 M28;	// purge translation cache entry
   16.36      INST64_M29 M29;	// mov register to ar (M unit)
   16.37      INST64_M30 M30;	// mov immediate to ar (M unit)
   16.38 @@ -204,9 +199,7 @@ typedef union U_INST64 {
   16.39      INST64_M44 M44;	// set/reset system mask
   16.40      INST64_M45 M45;	// translation purge
   16.41      INST64_M46 M46;	// translation access (tpa,tak)
   16.42 -#ifdef CONFIG_VTI
   16.43      INST64_M47 M47;	// purge translation entry
   16.44 -#endif // CONFIG_VTI
   16.45  } INST64;
   16.46  
   16.47  #define MASK_41 ((UINT64)0x1ffffffffff)
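
With the M-unit views now unconditionally present in INST64, paravirtualized and VMX paths can share one decoder. A decode sketch; MAJOR_M46, X6_TPA and do_tpa() are assumed names, while the M46 field layout is the one defined above:

    /* Dispatch on the major opcode, then read that view's sub-fields. */
    static void decode_sketch(INST64 inst)
    {
        if (inst.generic.major == MAJOR_M46 && inst.M46.x6 == X6_TPA)
            do_tpa(inst.M46.r1, inst.M46.r3);   /* tpa r1 = r3 */
    }
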
    17.1 --- a/xen/include/asm-ia64/regionreg.h	Fri Aug 19 20:45:43 2005 -0800
    17.2 +++ b/xen/include/asm-ia64/regionreg.h	Fri Aug 19 21:19:39 2005 -0800
    17.3 @@ -1,12 +1,6 @@
    17.4  #ifndef		_REGIONREG_H_
    17.5  #define		_REGIONREG_H_
    17.6 -#ifdef  CONFIG_VTI
    17.7 -#define XEN_DEFAULT_RID     0xf00000
    17.8 -#define DOMAIN_RID_SHIFT    20
    17.9 -#define DOMAIN_RID_MASK     (~(1U<<DOMAIN_RID_SHIFT -1))
   17.10 -#else //CONFIG_VTI
   17.11  #define XEN_DEFAULT_RID		7
   17.12 -#endif // CONFIG_VTI
   17.13  #define	IA64_MIN_IMPL_RID_MSB	17
   17.14  #define _REGION_ID(x)   ({ia64_rr _v; _v.rrval = (long) (x); _v.rid;})
   17.15  #define _REGION_PAGE_SIZE(x)    ({ia64_rr _v; _v.rrval = (long) (x); _v.ps;})
   17.16 @@ -42,4 +36,32 @@ typedef union ia64_rr {
   17.17  
   17.18  int set_one_rr(unsigned long rr, unsigned long val);
   17.19  
   17.20 +// This function is purely for performance... apparently scrambling
   17.21 +//  bits in the region id makes for better hashing, which means better
   17.22 +//  use of the VHPT, which means better performance
   17.23 +// Note that the only time a RID should be mangled is when it is stored in
   17.24 +//  a region register; anytime it is "viewable" outside of this module,
   17.25 +//  it should be unmangled
   17.26 +
   17.27 +// NOTE: this function is also implemented in assembly code in hyper_set_rr!!
   17.28 +// Must ensure these two remain consistent!
   17.29 +static inline unsigned long
   17.30 +vmMangleRID(unsigned long RIDVal)
   17.31 +{
   17.32 +	union bits64 { unsigned char bytes[4]; unsigned long uint; };
   17.33 +
   17.34 +	union bits64 t;
   17.35 +	unsigned char tmp;
   17.36 +
   17.37 +	t.uint = RIDVal;
   17.38 +	tmp = t.bytes[1];
   17.39 +	t.bytes[1] = t.bytes[3];
   17.40 +	t.bytes[3] = tmp;
   17.41 +
   17.42 +	return t.uint;
   17.43 +}
   17.44 +
    17.45 +// since vmMangleRID is its own inverse (the byte swap undoes itself), use it for unmangling also
   17.46 +#define vmUnmangleRID(x)	vmMangleRID(x)
   17.47 +
   17.48  #endif		/* !_REGIONREG_H_ */
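
vmMangleRID() swaps bytes 1 and 3 of the low 32 bits (RIDs are at most 24 bits, so the high word never carries information), which makes it self-inverse and licenses the vmUnmangleRID alias. A quick illustrative round-trip check, not part of the changeset:

    static void mangle_roundtrip_check(void)
    {
        unsigned long rid = 0x00123456UL;
        unsigned long m = vmMangleRID(rid);  /* 0x34120056 on little-endian */
        ASSERT(vmUnmangleRID(m) == rid);     /* the same swap undoes itself */
    }
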
    18.1 --- a/xen/include/asm-ia64/tlb.h	Fri Aug 19 20:45:43 2005 -0800
    18.2 +++ b/xen/include/asm-ia64/tlb.h	Fri Aug 19 21:19:39 2005 -0800
    18.3 @@ -35,17 +35,4 @@ typedef struct {
    18.4      unsigned long rid;
    18.5  } TR_ENTRY;
    18.6  
    18.7 -#ifdef CONFIG_VTI
    18.8 -typedef union {
    18.9 -        unsigned long   value;
   18.10 -        struct {
   18.11 -                unsigned long ve : 1;
   18.12 -                unsigned long rv1 : 1;
   18.13 -                unsigned long ps  : 6;
   18.14 -                unsigned long rid : 24;
   18.15 -                unsigned long rv2 : 32;
   18.16 -        };
   18.17 -} rr_t;
   18.18 -#endif // CONFIG_VTI
   18.19 -
   18.20  #endif
    19.1 --- a/xen/include/asm-ia64/vmmu.h	Fri Aug 19 20:45:43 2005 -0800
    19.2 +++ b/xen/include/asm-ia64/vmmu.h	Fri Aug 19 21:19:39 2005 -0800
    19.3 @@ -23,10 +23,11 @@
    19.4  #ifndef XEN_TLBthash_H
    19.5  #define XEN_TLBthash_H
    19.6  
    19.7 -#include "xen/config.h"
    19.8 -#include "xen/types.h"
    19.9 -#include "public/xen.h"
   19.10 -#include "asm/tlb.h"
   19.11 +#include <xen/config.h>
   19.12 +#include <xen/types.h>
   19.13 +#include <public/xen.h>
   19.14 +#include <asm/tlb.h>
   19.15 +#include <asm/regionreg.h>
   19.16  
   19.17  //#define         THASH_TLB_TR            0
   19.18  //#define         THASH_TLB_TC            1
   19.19 @@ -152,7 +153,7 @@ typedef u64 *(TTAG_FN)(PTA pta, u64 va, 
   19.20  typedef u64 *(GET_MFN_FN)(domid_t d, u64 gpfn, u64 pages);
   19.21  typedef void *(REM_NOTIFIER_FN)(struct hash_cb *hcb, thash_data_t *entry);
   19.22  typedef void (RECYCLE_FN)(struct hash_cb *hc, u64 para);
   19.23 -typedef rr_t (GET_RR_FN)(struct vcpu *vcpu, u64 reg);
   19.24 +typedef ia64_rr (GET_RR_FN)(struct vcpu *vcpu, u64 reg);
   19.25  typedef thash_data_t *(FIND_OVERLAP_FN)(struct thash_cb *hcb, 
   19.26          u64 va, u64 ps, int rid, char cl, search_section_t s_sect);
   19.27  typedef thash_data_t *(FIND_NEXT_OVL_FN)(struct thash_cb *hcb);
   19.28 @@ -329,7 +330,7 @@ extern u64 machine_ttag(PTA pta, u64 va,
   19.29  extern u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps);
   19.30  extern void purge_machine_tc_by_domid(domid_t domid);
   19.31  extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb);
   19.32 -extern rr_t vmmu_get_rr(struct vcpu *vcpu, u64 va);
   19.33 +extern ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va);
   19.34  extern thash_cb_t *init_domain_tlb(struct vcpu *d);
   19.35  
   19.36  #define   VTLB_DEBUG
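
Because GET_RR_FN now returns the shared ia64_rr, any callback installed in a thash_cb_t must match the new signature. A minimal conforming callback built on the vmmu_get_rr() declared above; the wiring line is an assumption:

    static ia64_rr my_get_rr(struct vcpu *vcpu, u64 reg)
    {
        return vmmu_get_rr(vcpu, reg);   /* declared above with ia64_rr */
    }
    /* ... hcb->get_rr_fn = my_get_rr; */
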
    20.1 --- a/xen/include/asm-ia64/vmx.h	Fri Aug 19 20:45:43 2005 -0800
    20.2 +++ b/xen/include/asm-ia64/vmx.h	Fri Aug 19 21:19:39 2005 -0800
    20.3 @@ -32,9 +32,12 @@ extern void vmx_final_setup_domain(struc
    20.4  extern void vmx_init_double_mapping_stub(void);
    20.5  extern void vmx_save_state(struct vcpu *v);
    20.6  extern void vmx_load_state(struct vcpu *v);
    20.7 +extern void vmx_setup_platform(struct vcpu *v, struct vcpu_guest_context *c);
    20.8 +#ifdef XEN_DBL_MAPPING
    20.9  extern vmx_insert_double_mapping(u64,u64,u64,u64,u64);
   20.10  extern void vmx_purge_double_mapping(u64, u64, u64);
   20.11  extern void vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7);
   20.12 +#endif
   20.13  
   20.14  extern void vmx_wait_io(void);
   20.15  extern void vmx_io_assist(struct vcpu *v);
    21.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Fri Aug 19 20:45:43 2005 -0800
    21.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Fri Aug 19 21:19:39 2005 -0800
    21.3 @@ -308,7 +308,9 @@ vmx_vcpu_set_itm(VCPU *vcpu, u64 val)
    21.4      
    21.5      vtm=&(vcpu->arch.arch_vmx.vtm);
    21.6      VPD_CR(vcpu,itm)=val;
    21.7 +#ifdef CONFIG_VTI
    21.8      vtm_interruption_update(vcpu, vtm);
    21.9 +#endif
   21.10      return IA64_NO_FAULT;
   21.11  }
   21.12  static inline
   21.13 @@ -414,7 +416,9 @@ static inline
   21.14  IA64FAULT
   21.15  vmx_vcpu_set_eoi(VCPU *vcpu, u64 val)
   21.16  {
   21.17 +#ifdef CONFIG_VTI
   21.18      guest_write_eoi(vcpu);
   21.19 +#endif
   21.20      return IA64_NO_FAULT;
   21.21  }
   21.22  
   21.23 @@ -424,7 +428,9 @@ vmx_vcpu_set_itv(VCPU *vcpu, u64 val)
   21.24  {
   21.25  
   21.26      VPD_CR(vcpu,itv)=val;
   21.27 +#ifdef CONFIG_VTI
   21.28      vtm_set_itv(vcpu);
   21.29 +#endif
   21.30      return IA64_NO_FAULT;
   21.31  }
   21.32  static inline
   21.33 @@ -465,13 +471,17 @@ vmx_vcpu_set_lrr1(VCPU *vcpu, u64 val)
   21.34  static inline
   21.35  IA64FAULT vmx_vcpu_set_itc(VCPU *vcpu, UINT64 val)
   21.36  {
   21.37 +#ifdef CONFIG_VTI
   21.38      vtm_set_itc(vcpu, val);
   21.39 +#endif
   21.40      return  IA64_NO_FAULT;
   21.41  }
   21.42  static inline
   21.43  IA64FAULT vmx_vcpu_get_itc(VCPU *vcpu,UINT64 *val)
   21.44  {
   21.45 +#ifdef CONFIG_VTI
   21.46      *val = vtm_get_itc(vcpu);
   21.47 +#endif
   21.48      return  IA64_NO_FAULT;
   21.49  }
   21.50  static inline
   21.51 @@ -584,15 +594,22 @@ IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
   21.52      return (IA64_NO_FAULT);
   21.53  }
   21.54  
    21.55 +/* An alternative RID-scrambling algorithm for VHPT hash performance */
   21.56  #define redistribute_rid(rid)	(((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
   21.57  static inline unsigned long
   21.58 -vmx_vrrtomrr(VCPU *vcpu,unsigned long val)
   21.59 +vmx_vrrtomrr(VCPU *v, unsigned long val)
   21.60  {
   21.61      ia64_rr rr;
   21.62      u64	  rid;
   21.63 +
   21.64      rr.rrval=val;
   21.65 +    rr.rid = vmMangleRID(v->arch.starting_rid  + rr.rid);
   21.66 +/* Disable this rid allocation algorithm for now */
   21.67 +#if 0
   21.68      rid=(((u64)vcpu->domain->domain_id)<<DOMAIN_RID_SHIFT) + rr.rid;
   21.69      rr.rid = redistribute_rid(rid);
   21.70 +#endif 
   21.71 +
   21.72      rr.ve=1;
   21.73      return rr.rrval;
   21.74  }
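
Under the unified policy, vmx_vrrtomrr() offsets the guest RID into the domain's allocated range and mangles it, rather than using the disabled domain_id-based scheme under #if 0. A worked sketch with illustrative values:

    /* If v->arch.starting_rid == 0x2000 and the guest programs RID 5,
     * the machine RR carries vmMangleRID(0x2005) with ve forced to 1. */
    unsigned long mrr = vmx_vrrtomrr(v, guest_rr_val); /* guest_rr_val assumed */
    set_one_rr(0UL << 61, mrr);                        /* install for region 0 */
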
    22.1 --- a/xen/include/asm-ia64/vmx_vpd.h	Fri Aug 19 20:45:43 2005 -0800
    22.2 +++ b/xen/include/asm-ia64/vmx_vpd.h	Fri Aug 19 21:19:39 2005 -0800
    22.3 @@ -62,12 +62,6 @@ typedef struct {
    22.4  	unsigned long	rsv6[46];
    22.5  } cr_t;
    22.6  
    22.7 -void vmx_enter_scheduler(void);
    22.8 -
    22.9 -//FIXME: Map for LID to vcpu, Eddie
   22.10 -#define	MAX_NUM_LPS		(1UL<<16)
   22.11 -extern struct vcpu	*lid_edt[MAX_NUM_LPS];
   22.12 -
   22.13  struct arch_vmx_struct {
   22.14  //    struct virutal_platform_def     vmx_platform;
   22.15  	vpd_t       *vpd;
    23.1 --- a/xen/include/asm-ia64/xenprocessor.h	Fri Aug 19 20:45:43 2005 -0800
    23.2 +++ b/xen/include/asm-ia64/xenprocessor.h	Fri Aug 19 21:19:39 2005 -0800
    23.3 @@ -50,16 +50,11 @@ struct ia64_psr {
    23.4  	__u64 ri : 2;
    23.5  	__u64 ed : 1;
    23.6  	__u64 bn : 1;
    23.7 -#ifdef CONFIG_VTI
    23.8  	__u64 ia : 1;
    23.9  	__u64 vm : 1;
   23.10  	__u64 reserved5 : 17;
   23.11 -#else // CONFIG_VTI
   23.12 -	__u64 reserved4 : 19;
   23.13 -#endif // CONFIG_VTI
   23.14  };
   23.15  
   23.16 -#ifdef  CONFIG_VTI
   23.17  /* vmx like above but expressed as bitfields for more efficient access: */
   23.18  typedef  union{
   23.19      __u64 val;
   23.20 @@ -218,6 +213,4 @@ enum {
   23.21          ret;                            \
   23.22  })
   23.23  
   23.24 -#endif  //  CONFIG_VTI
   23.25 -
   23.26  #endif // _ASM_IA64_XENPROCESSOR_H
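
With the ia and vm bits now unconditionally part of struct ia64_psr, run-time VTI detection can probe them without CONFIG_VTI guards. A brief sketch, assuming a raw PSR value is at hand:

    static inline int psr_vm_set(unsigned long psr_val)
    {
        return ((struct ia64_psr *)&psr_val)->vm;  /* the newly-exposed bit */
    }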