direct-io.hg

changeset 5508:85fab828d6ff

bitkeeper revision 1.1718.1.4 (42b59a8dca4vxae_1vlE_W95_lDsNg)

Merge firebug.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into firebug.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk
author kaf24@firebug.cl.cam.ac.uk
date Sun Jun 19 16:17:17 2005 +0000 (2005-06-19)
parents 182cb4aae17d f494f01b62a8
children dd6990776fcc
files .rootkeys xen/arch/ia64/Makefile xen/arch/ia64/asm-offsets.c xen/arch/ia64/dom0_ops.c xen/arch/ia64/domain.c xen/arch/ia64/hypercall.c xen/arch/ia64/hyperprivop.S xen/arch/ia64/ivt.S xen/arch/ia64/patch/linux-2.6.11/io.h xen/arch/ia64/patch/linux-2.6.11/ptrace.h xen/arch/ia64/patch/linux-2.6.11/uaccess.h xen/arch/ia64/privop.c xen/arch/ia64/process.c xen/arch/ia64/regionreg.c xen/arch/ia64/tools/mkbuildtree xen/arch/ia64/vcpu.c xen/arch/ia64/vhpt.c xen/arch/ia64/vmmu.c xen/arch/ia64/vmx_ivt.S xen/arch/ia64/vmx_minstate.h xen/arch/ia64/vmx_process.c xen/arch/ia64/vtlb.c xen/arch/ia64/xenmem.c xen/arch/ia64/xenmisc.c xen/common/Makefile xen/include/asm-ia64/config.h xen/include/asm-ia64/domain.h xen/include/asm-ia64/event.h xen/include/asm-ia64/mm.h xen/include/asm-ia64/tlb.h xen/include/asm-ia64/vcpu.h xen/include/asm-ia64/vhpt.h xen/include/asm-ia64/vmmu.h xen/include/asm-ia64/vmx_platform.h xen/include/asm-ia64/vmx_ptrace.h xen/include/asm-ia64/vmx_vpd.h xen/include/asm-x86/event.h xen/include/public/arch-ia64.h xen/include/public/arch-x86_32.h xen/include/public/arch-x86_64.h xen/include/public/xen.h xen/include/xen/event.h
line diff
     1.1 --- a/.rootkeys	Sun Jun 19 08:48:29 2005 +0000
     1.2 +++ b/.rootkeys	Sun Jun 19 16:17:17 2005 +0000
     1.3 @@ -1146,6 +1146,7 @@ 425ae516p4ICTkjqNYEfYFxqULj4dw xen/arch/
     1.4  425ae516juUB257qrwUdsL9AsswrqQ xen/arch/ia64/patch/linux-2.6.11/time.c
     1.5  425ae5167zQn7zYcgKtDUDX2v-e8mw xen/arch/ia64/patch/linux-2.6.11/tlb.c
     1.6  425ae5162bIl2Dgd19x-FceB4L9oGw xen/arch/ia64/patch/linux-2.6.11/types.h
     1.7 +42ae01f01KDfSgVQnscwJ0psRmEaCw xen/arch/ia64/patch/linux-2.6.11/uaccess.h
     1.8  425ae516cFUNY2jHD46bujcF5NJheA xen/arch/ia64/patch/linux-2.6.11/unaligned.c
     1.9  421098b39QFMC-1t1r38CA7NxAYBPA xen/arch/ia64/patch/linux-2.6.7/bootmem.h
    1.10  421098b3SIA1vZX9fFUjo1T3o_jMCQ xen/arch/ia64/patch/linux-2.6.7/current.h
    1.11 @@ -1364,6 +1365,7 @@ 421098b6Y3xqcv873Gvg1rQ5CChfFw xen/inclu
    1.12  421098b6ZcIrn_gdqjUtdJyCE0YkZQ xen/include/asm-ia64/debugger.h
    1.13  421098b6z0zSuW1rcSJK1gR8RUi-fw xen/include/asm-ia64/dom_fw.h
    1.14  421098b6Nn0I7hGB8Mkd1Cis0KMkhA xen/include/asm-ia64/domain.h
    1.15 +42b1d2d0rkNCmG2nFOnL-OfhJG9mDw xen/include/asm-ia64/event.h
    1.16  4241e880hAyo_dk0PPDYj3LsMIvf-Q xen/include/asm-ia64/flushtlb.h
    1.17  421098b6X3Fs2yht42TE2ufgKqt2Fw xen/include/asm-ia64/ia64_int.h
    1.18  421098b7psFAn8kbeR-vcRCdc860Vw xen/include/asm-ia64/init.h
    1.19 @@ -1388,7 +1390,6 @@ 428b9f387tov0OtOEeF8fVWSR2v5Pg xen/inclu
    1.20  428b9f38is0zTsIm96_BKo4MLw0SzQ xen/include/asm-ia64/vmx_pal_vsa.h
    1.21  428b9f38iDqbugHUheJrcTCD7zlb4g xen/include/asm-ia64/vmx_phy_mode.h
    1.22  428b9f38grd_B0AGB1yp0Gi2befHaQ xen/include/asm-ia64/vmx_platform.h
    1.23 -428b9f38lm0ntDBusHggeQXkx1-1HQ xen/include/asm-ia64/vmx_ptrace.h
    1.24  428b9f38XgwHchZEpOzRtWfz0agFNQ xen/include/asm-ia64/vmx_vcpu.h
    1.25  428b9f38tDTTJbkoONcAB9ODP8CiVg xen/include/asm-ia64/vmx_vpd.h
    1.26  428b9f38_o0U5uJqmxZf_bqi6_PqVw xen/include/asm-ia64/vtm.h
    1.27 @@ -1412,6 +1413,7 @@ 3ddb79c34BFiXjBJ_cCKB0aCsV1IDw xen/inclu
    1.28  40715b2dTokMLYGSuD58BnxOqyWVew xen/include/asm-x86/div64.h
    1.29  4204e7acwzqgXyTAPKa1nM-L7Ec0Qw xen/include/asm-x86/domain.h
    1.30  41d3eaaeIBzW621S1oa0c2yk7X43qQ xen/include/asm-x86/e820.h
    1.31 +42b1d2caFkOByU5n4LuMnT05f3kJFg xen/include/asm-x86/event.h
    1.32  3ddb79c3NU8Zy40OTrq3D-i30Y3t4A xen/include/asm-x86/fixmap.h
    1.33  3e2d29944GI24gf7vOP_7x8EyuqxeA xen/include/asm-x86/flushtlb.h
    1.34  4294b5eep4lWuDtYUR74gYwt-_FnHA xen/include/asm-x86/genapic.h
     2.1 --- a/xen/arch/ia64/Makefile	Sun Jun 19 08:48:29 2005 +0000
     2.2 +++ b/xen/arch/ia64/Makefile	Sun Jun 19 16:17:17 2005 +0000
     2.3 @@ -15,7 +15,7 @@ OBJS = xensetup.o setup.o time.o irq.o i
     2.4  ifeq ($(CONFIG_VTI),y)
     2.5  OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \
     2.6  	vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
     2.7 -	vtlb.o mmio.o vlsapic.o
     2.8 +	vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o
     2.9  endif
    2.10  # perfmon.o
    2.11  # unwind.o needed for kernel unwinding (rare)
     3.1 --- a/xen/arch/ia64/asm-offsets.c	Sun Jun 19 08:48:29 2005 +0000
     3.2 +++ b/xen/arch/ia64/asm-offsets.c	Sun Jun 19 16:17:17 2005 +0000
     3.3 @@ -75,6 +75,9 @@ void foo(void)
     3.4  	DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_saved_rr0));
     3.5  	DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct vcpu, arch.breakimm));
     3.6  	DEFINE(IA64_VCPU_IVA_OFFSET, offsetof (struct vcpu, arch.iva));
     3.7 +	DEFINE(IA64_VCPU_IRR0_OFFSET, offsetof (struct vcpu, arch.irr[0]));
     3.8 +	DEFINE(IA64_VCPU_IRR3_OFFSET, offsetof (struct vcpu, arch.irr[3]));
     3.9 +	DEFINE(IA64_VCPU_INSVC3_OFFSET, offsetof (struct vcpu, arch.insvc[3]));
    3.10  
    3.11  	BLANK();
    3.12  
     4.1 --- a/xen/arch/ia64/dom0_ops.c	Sun Jun 19 08:48:29 2005 +0000
     4.2 +++ b/xen/arch/ia64/dom0_ops.c	Sun Jun 19 16:17:17 2005 +0000
     4.3 @@ -18,14 +18,6 @@
     4.4  #include <xen/console.h>
     4.5  #include <public/sched_ctl.h>
     4.6  
     4.7 -#define TRC_DOM0OP_ENTER_BASE  0x00020000
     4.8 -#define TRC_DOM0OP_LEAVE_BASE  0x00030000
     4.9 -
    4.10 -static int msr_cpu_mask;
    4.11 -static unsigned long msr_addr;
    4.12 -static unsigned long msr_lo;
    4.13 -static unsigned long msr_hi;
    4.14 -
    4.15  long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
    4.16  {
    4.17      long ret = 0;
    4.18 @@ -35,6 +27,49 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
    4.19  
    4.20      switch ( op->cmd )
    4.21      {
    4.22 +    /*
    4.23 +     * NOTE: DOM0_GETMEMLIST has somewhat different semantics on IA64 -
    4.24 +     * it actually allocates and maps pages.
    4.25 +     */
    4.26 +    case DOM0_GETMEMLIST:
    4.27 +    {
    4.28 +        unsigned long i;
    4.29 +        struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
    4.30 +        unsigned long start_page = op->u.getmemlist.max_pfns >> 32;
    4.31 +        unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff;
    4.32 +        unsigned long pfn;
    4.33 +        unsigned long *buffer = op->u.getmemlist.buffer;
    4.34 +        struct page *page;
    4.35 +
    4.36 +        ret = -EINVAL;
    4.37 +        if ( d != NULL )
    4.38 +        {
    4.39 +            ret = 0;
    4.40 +
    4.41 +            for ( i = start_page; i < (start_page + nr_pages); i++ )
    4.42 +            {
    4.43 +                page = map_new_domain_page(d, i << PAGE_SHIFT);
    4.44 +                if ( page == NULL )
    4.45 +                {
    4.46 +                    ret = -ENOMEM;
    4.47 +                    break;
    4.48 +                }
    4.49 +                pfn = page_to_pfn(page);
    4.50 +                if ( put_user(pfn, buffer) )
    4.51 +                {
    4.52 +                    ret = -EFAULT;
    4.53 +                    break;
    4.54 +                }
    4.55 +                buffer++;
    4.56 +            }
    4.57 +
    4.58 +            op->u.getmemlist.num_pfns = i - start_page;
    4.59 +            copy_to_user(u_dom0_op, op, sizeof(*op));
    4.60 +            
    4.61 +            put_domain(d);
    4.62 +        }
    4.63 +    }
    4.64 +    break;
    4.65  
    4.66      default:
    4.67          ret = -ENOSYS;
    4.68 @@ -43,10 +78,3 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
    4.69  
    4.70      return ret;
    4.71  }
    4.72 -
    4.73 -void arch_getdomaininfo_ctxt(struct domain *d, struct vcpu_guest_context *c)
    4.74 -{ 
    4.75 -    int i;
    4.76 -
    4.77 -	dummy();
    4.78 -}
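
A note on DOM0_GETMEMLIST above: unlike x86, the ia64 handler overloads the 64-bit max_pfns field, taking the start page from its high 32 bits and the page count from its low 32 bits, and it allocates and maps the pages as it reports them. A hypothetical caller-side sketch (dom0_op_t and domid_t as declared in the public headers, which are not part of this diff; the helper name is ours):

    /* Hypothetical caller-side packing, not part of this changeset. */
    void pack_getmemlist(dom0_op_t *op, domid_t domid,
                         unsigned long start_page, unsigned long nr_pages,
                         unsigned long *buffer)
    {
        op->cmd = DOM0_GETMEMLIST;
        op->u.getmemlist.domain   = domid;
        op->u.getmemlist.max_pfns = (start_page << 32) | (nr_pages & 0xffffffffUL);
        op->u.getmemlist.buffer   = buffer;   /* receives up to nr_pages pfns */
    }
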
     5.1 --- a/xen/arch/ia64/domain.c	Sun Jun 19 08:48:29 2005 +0000
     5.2 +++ b/xen/arch/ia64/domain.c	Sun Jun 19 16:17:17 2005 +0000
     5.3 @@ -76,7 +76,7 @@ extern unsigned long dom_fw_setup(struct
     5.4  /* this belongs in include/asm, but there doesn't seem to be a suitable place */
     5.5  void free_perdomain_pt(struct domain *d)
     5.6  {
     5.7 -	dummy();
     5.8 +	printf("free_perdomain_pt: not implemented\n");
     5.9  	//free_page((unsigned long)d->mm.perdomain_pt);
    5.10  }
    5.11  
    5.12 @@ -166,27 +166,49 @@ void arch_free_vcpu_struct(struct vcpu *
    5.13  	free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
    5.14  }
    5.15  
    5.16 +static void init_switch_stack(struct vcpu *v)
    5.17 +{
    5.18 +	struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
    5.19 +	struct switch_stack *sw = (struct switch_stack *) regs - 1;
    5.20 +	extern void ia64_ret_from_clone;
    5.21 +
    5.22 +	memset(sw, 0, sizeof(struct switch_stack) + sizeof(struct pt_regs));
    5.23 +	sw->ar_bspstore = (unsigned long)v + IA64_RBS_OFFSET;
    5.24 +	sw->b0 = (unsigned long) &ia64_ret_from_clone;
    5.25 +	sw->ar_fpsr = FPSR_DEFAULT;
    5.26 +	v->arch._thread.ksp = (unsigned long) sw - 16;
    5.27 +	// stay on kernel stack because may get interrupts!
    5.28 +	// ia64_ret_from_clone (which b0 gets in new_thread) switches
    5.29 +	// to user stack
    5.30 +	v->arch._thread.on_ustack = 0;
    5.31 +	memset(v->arch._thread.fph,0,sizeof(struct ia64_fpreg)*96);
    5.32 +}
    5.33 +
    5.34  #ifdef CONFIG_VTI
    5.35  void arch_do_createdomain(struct vcpu *v)
    5.36  {
    5.37  	struct domain *d = v->domain;
    5.38  	struct thread_info *ti = alloc_thread_info(v);
    5.39  
    5.40 -	/* If domain is VMX domain, shared info area is created
    5.41 -	 * by domain and then domain notifies HV by specific hypercall.
    5.42 -	 * If domain is xenolinux, shared info area is created by
    5.43 -	 * HV.
    5.44 -	 * Since we have no idea about whether domain is VMX now,
    5.45 -	 * (dom0 when parse and domN when build), postpone possible
    5.46 -	 * allocation.
    5.47 -	 */
    5.48 +	/* Clear thread_info to clear some important fields, like preempt_count */
    5.49 +	memset(ti, 0, sizeof(struct thread_info));
    5.50 +	init_switch_stack(v);
    5.51 +
    5.52 + 	/* Shared info area is required to be allocated at domain
     5.53 + 	 * creation, since the control panel will write I/O info
     5.54 + 	 * between front end and back end to that area. However, for a
     5.55 + 	 * vmx domain, our design is to let the domain itself allocate
     5.56 + 	 * the shared info area, to keep the machine page contiguous. So
     5.57 + 	 * this page will be released later when domainN issues the
     5.58 + 	 * request after it is up.
    5.59 + 	 */
    5.60 + 	d->shared_info = (void *)alloc_xenheap_page();
    5.61  
    5.62  	/* FIXME: Because full virtual cpu info is placed in this area,
    5.63  	 * it's unlikely to put it into one shareinfo page. Later
    5.64  	 * need split vcpu context from vcpu_info and conforms to
    5.65  	 * normal xen convention.
    5.66  	 */
    5.67 -	d->shared_info = NULL;
    5.68  	v->vcpu_info = (void *)alloc_xenheap_page();
    5.69  	if (!v->vcpu_info) {
    5.70     		printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
    5.71 @@ -194,9 +216,6 @@ void arch_do_createdomain(struct vcpu *v
    5.72  	}
    5.73  	memset(v->vcpu_info, 0, PAGE_SIZE);
    5.74  
    5.75 -	/* Clear thread_info to clear some important fields, like preempt_count */
    5.76 -	memset(ti, 0, sizeof(struct thread_info));
    5.77 -
    5.78  	/* Allocate per-domain vTLB and vhpt */
    5.79  	v->arch.vtlb = init_domain_tlb(v);
    5.80  
    5.81 @@ -211,38 +230,25 @@ void arch_do_createdomain(struct vcpu *v
    5.82  	d->xen_vastart = 0xf000000000000000;
    5.83  	d->xen_vaend = 0xf300000000000000;
    5.84  	d->arch.breakimm = 0x1000;
    5.85 -
    5.86 -	// stay on kernel stack because may get interrupts!
    5.87 -	// ia64_ret_from_clone (which b0 gets in new_thread) switches
    5.88 -	// to user stack
    5.89 -	v->arch._thread.on_ustack = 0;
    5.90  }
    5.91  #else // CONFIG_VTI
    5.92  void arch_do_createdomain(struct vcpu *v)
    5.93  {
    5.94  	struct domain *d = v->domain;
    5.95 +	struct thread_info *ti = alloc_thread_info(v);
    5.96 +
    5.97 +	/* Clear thread_info to clear some important fields, like preempt_count */
    5.98 +	memset(ti, 0, sizeof(struct thread_info));
    5.99 +	init_switch_stack(v);
   5.100  
   5.101  	d->shared_info = (void *)alloc_xenheap_page();
   5.102 -	v->vcpu_info = (void *)alloc_xenheap_page();
   5.103 -	if (!v->vcpu_info) {
   5.104 +	if (!d->shared_info) {
   5.105     		printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
   5.106     		while (1);
   5.107  	}
   5.108 -	memset(v->vcpu_info, 0, PAGE_SIZE);
   5.109 -	/* pin mapping */
   5.110 -	// FIXME: Does this belong here?  Or do only at domain switch time?
   5.111 -#if 0
   5.112 -	// this is now done in ia64_new_rr7
   5.113 -	{
   5.114 -		/* WARNING: following must be inlined to avoid nested fault */
   5.115 -		unsigned long psr = ia64_clear_ic();
   5.116 -		ia64_itr(0x2, IA64_TR_SHARED_INFO, SHAREDINFO_ADDR,
   5.117 -		 pte_val(pfn_pte(ia64_tpa(d->shared_info) >> PAGE_SHIFT, PAGE_KERNEL)),
   5.118 -		 PAGE_SHIFT);
   5.119 -		ia64_set_psr(psr);
   5.120 -		ia64_srlz_i();
   5.121 -	}
   5.122 -#endif
   5.123 +	memset(d->shared_info, 0, PAGE_SIZE);
   5.124 +	v->vcpu_info = &(d->shared_info->vcpu_data[0]);
   5.125 +
   5.126  	d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
   5.127  	if ((d->arch.metaphysical_rr0 = allocate_metaphysical_rr0()) == -1UL)
   5.128  		BUG();
   5.129 @@ -258,33 +264,63 @@ void arch_do_createdomain(struct vcpu *v
   5.130  	d->shared_info_va = 0xf100000000000000;
   5.131  	d->arch.breakimm = 0x1000;
   5.132  	v->arch.breakimm = d->arch.breakimm;
   5.133 -	// stay on kernel stack because may get interrupts!
   5.134 -	// ia64_ret_from_clone (which b0 gets in new_thread) switches
   5.135 -	// to user stack
   5.136 -	v->arch._thread.on_ustack = 0;
   5.137 +
   5.138 +	d->arch.mm = xmalloc(struct mm_struct);
   5.139 +	if (unlikely(!d->arch.mm)) {
   5.140 +		printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
    5.141 +		return;	/* void function: cannot propagate -ENOMEM */
   5.142 +	}
   5.143 +	memset(d->arch.mm, 0, sizeof(*d->arch.mm));
   5.144 +	d->arch.mm->pgd = pgd_alloc(d->arch.mm);
   5.145 +	if (unlikely(!d->arch.mm->pgd)) {
   5.146 +		printk("Can't allocate pgd for domain %d\n",d->domain_id);
    5.147 +		return;	/* void function: cannot propagate -ENOMEM */
   5.148 +	}
   5.149  }
   5.150  #endif // CONFIG_VTI
   5.151  
   5.152 -void arch_do_boot_vcpu(struct vcpu *v)
   5.153 +void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c)
   5.154  {
   5.155 -	return;
   5.156 +	struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
   5.157 +
   5.158 +	printf("arch_getdomaininfo_ctxt\n");
   5.159 +	c->regs = *regs;
   5.160 +	c->vcpu = v->vcpu_info->arch;
   5.161 +	c->shared = v->domain->shared_info->arch;
   5.162  }
   5.163  
   5.164  int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c)
   5.165  {
   5.166 -	dummy();
   5.167 -	return 1;
   5.168 +	struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
   5.169 +
   5.170 +	printf("arch_set_info_guest\n");
   5.171 +	*regs = c->regs;
   5.172 +	regs->cr_ipsr = IA64_PSR_IT|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IC|IA64_PSR_I|IA64_PSR_DFH|IA64_PSR_BN|IA64_PSR_SP|IA64_PSR_DI;
   5.173 +	regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
   5.174 +	regs->ar_rsc |= (2 << 2); /* force PL2/3 */
   5.175 +
   5.176 +	v->vcpu_info->arch = c->vcpu;
   5.177 +	init_all_rr(v);
   5.178 +
   5.179 +	// this should be in userspace
   5.180 +	regs->r28 = dom_fw_setup(v->domain,"nomca nosmp xencons=ttyS console=ttyS0",256L);  //FIXME
   5.181 +	v->vcpu_info->arch.banknum = 1;
   5.182 +	v->vcpu_info->arch.metaphysical_mode = 1;
   5.183 +
   5.184 +	v->domain->shared_info->arch = c->shared;
   5.185 +	return 0;
   5.186  }
   5.187  
   5.188 -int arch_final_setup_guest(struct vcpu *v, struct vcpu_guest_context *c)
   5.189 +void arch_do_boot_vcpu(struct vcpu *v)
   5.190  {
   5.191 -	dummy();
   5.192 -	return 1;
   5.193 +	printf("arch_do_boot_vcpu: not implemented\n");
   5.194 +	return;
   5.195  }
   5.196  
   5.197  void domain_relinquish_resources(struct domain *d)
   5.198  {
   5.199 -	dummy();
   5.200 +	/* FIXME */
   5.201 +	printf("domain_relinquish_resources: not implemented\n");
   5.202  }
   5.203  
   5.204  #ifdef CONFIG_VTI
   5.205 @@ -294,10 +330,8 @@ void new_thread(struct vcpu *v,
   5.206                  unsigned long start_info)
   5.207  {
   5.208  	struct domain *d = v->domain;
   5.209 -	struct switch_stack *sw;
   5.210  	struct xen_regs *regs;
   5.211  	struct ia64_boot_param *bp;
   5.212 -	extern char ia64_ret_from_clone;
   5.213  	extern char saved_command_line[];
   5.214  	//char *dom0_cmdline = "BOOT_IMAGE=scsi0:\EFI\redhat\xenlinux nomca root=/dev/sdb1 ro";
   5.215  
   5.216 @@ -305,11 +339,8 @@ void new_thread(struct vcpu *v,
   5.217  #ifdef CONFIG_DOMAIN0_CONTIGUOUS
   5.218  	if (d == dom0) start_pc += dom0_start;
   5.219  #endif
   5.220 -	regs = (struct xen_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
   5.221 -	sw = (struct switch_stack *) regs - 1;
   5.222 -	/* Sanity Clear */
   5.223 -	memset(sw, 0, sizeof(struct xen_regs) + sizeof(struct switch_stack));
   5.224  
   5.225 +	regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
   5.226  	if (VMX_DOMAIN(v)) {
   5.227  		/* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */
   5.228  		regs->cr_ipsr = 0x501008826008; /* Need to be expanded as macro */
   5.229 @@ -320,33 +351,23 @@ void new_thread(struct vcpu *v,
   5.230  		regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2
   5.231  	}
   5.232  	regs->cr_iip = start_pc;
   5.233 -	regs->ar_rsc = 0x0;
   5.234 -	regs->cr_ifs = 0x0;
   5.235 -	regs->ar_fpsr = sw->ar_fpsr = FPSR_DEFAULT;
   5.236 -	sw->ar_bspstore = (unsigned long)v + IA64_RBS_OFFSET;
   5.237 -	printf("new_thread: v=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
   5.238 -		v,regs,sw,sw->ar_bspstore,IA64_STK_OFFSET,&regs->r8);
   5.239 -	printf("iip:0x%lx,ipsr:0x%lx\n", regs->cr_iip, regs->cr_ipsr);
   5.240 -
   5.241 -	sw->b0 = (unsigned long) &ia64_ret_from_clone;
   5.242 -	v->arch._thread.ksp = (unsigned long) sw - 16;
   5.243 -	printk("new_thread, about to call init_all_rr\n");
   5.244 +	regs->cr_ifs = 0; /* why? - matthewc */
   5.245 +	regs->ar_fpsr = FPSR_DEFAULT;
   5.246  	if (VMX_DOMAIN(v)) {
   5.247  		vmx_init_all_rr(v);
   5.248  	} else
   5.249  		init_all_rr(v);
   5.250 -	// set up boot parameters (and fake firmware)
   5.251 -	printk("new_thread, about to call dom_fw_setup\n");
   5.252 -	VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);  //FIXME
   5.253 -	printk("new_thread, done with dom_fw_setup\n");
   5.254  
   5.255  	if (VMX_DOMAIN(v)) {
   5.256 +		VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
   5.257  		/* Virtual processor context setup */
   5.258  		VMX_VPD(v, vpsr) = IA64_PSR_BN;
   5.259  		VPD_CR(v, dcr) = 0;
   5.260  	} else {
   5.261 -		// don't forget to set this!
   5.262 +		regs->r28 = dom_fw_setup(d,saved_command_line,256L);
   5.263  		v->vcpu_info->arch.banknum = 1;
   5.264 +		v->vcpu_info->arch.metaphysical_mode = 1;
   5.265 +		d->shared_info->arch.flags = (d == dom0) ? (SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN) : 0;
   5.266  	}
   5.267  }
   5.268  #else // CONFIG_VTI
   5.269 @@ -359,54 +380,27 @@ void new_thread(struct vcpu *v,
   5.270  	            unsigned long start_info)
   5.271  {
   5.272  	struct domain *d = v->domain;
   5.273 -	struct switch_stack *sw;
   5.274  	struct pt_regs *regs;
   5.275 -	unsigned long new_rbs;
   5.276  	struct ia64_boot_param *bp;
   5.277 -	extern char ia64_ret_from_clone;
   5.278  	extern char saved_command_line[];
   5.279  
   5.280  #ifdef CONFIG_DOMAIN0_CONTIGUOUS
   5.281  	if (d == dom0) start_pc += dom0_start;
   5.282  #endif
   5.283 +
   5.284  	regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
   5.285 -	sw = (struct switch_stack *) regs - 1;
   5.286 -	memset(sw,0,sizeof(struct switch_stack)+sizeof(struct pt_regs));
   5.287 -	new_rbs = (unsigned long) v + IA64_RBS_OFFSET;
   5.288  	regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
   5.289  		| IA64_PSR_BITS_TO_SET | IA64_PSR_BN
   5.290  		& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS);
   5.291  	regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2
   5.292  	regs->cr_iip = start_pc;
   5.293 -	regs->ar_rsc = 0;		/* lazy mode */
   5.294 -	regs->ar_rnat = 0;
   5.295 -	regs->ar_fpsr = sw->ar_fpsr = FPSR_DEFAULT;
   5.296 -	regs->loadrs = 0;
   5.297 -	//regs->r8 = current->mm->dumpable; /* set "don't zap registers" flag */
   5.298 -	//regs->r8 = 0x01234567890abcdef; // FIXME: temp marker
   5.299 -	//regs->r12 = ((unsigned long) regs - 16);	/* 16 byte scratch */
   5.300  	regs->cr_ifs = 1UL << 63;
   5.301 -	regs->pr = 0;
   5.302 -	sw->pr = 0;
   5.303 -	regs->ar_pfs = 0;
   5.304 -	sw->caller_unat = 0;
   5.305 -	sw->ar_pfs = 0;
   5.306 -	sw->ar_bspstore = new_rbs;
   5.307 -	//regs->r13 = (unsigned long) v;
   5.308 -printf("new_thread: v=%p, start_pc=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
   5.309 -v,start_pc,regs,sw,new_rbs,IA64_STK_OFFSET,&regs->r8);
   5.310 -	sw->b0 = (unsigned long) &ia64_ret_from_clone;
   5.311 -	v->arch._thread.ksp = (unsigned long) sw - 16;
   5.312 -	//v->thread_info->flags = 0;
   5.313 -printk("new_thread, about to call init_all_rr\n");
   5.314 +	regs->ar_fpsr = FPSR_DEFAULT;
   5.315  	init_all_rr(v);
   5.316 -	// set up boot parameters (and fake firmware)
   5.317 -printk("new_thread, about to call dom_fw_setup\n");
   5.318  	regs->r28 = dom_fw_setup(d,saved_command_line,256L);  //FIXME
   5.319 -printk("new_thread, done with dom_fw_setup\n");
   5.320 -	// don't forget to set this!
   5.321  	v->vcpu_info->arch.banknum = 1;
   5.322 -	memset(v->arch._thread.fph,0,sizeof(struct ia64_fpreg)*96);
   5.323 +	v->vcpu_info->arch.metaphysical_mode = 1;
   5.324 +	d->shared_info->arch.flags = (d == dom0) ? (SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN) : 0;
   5.325  }
   5.326  #endif // CONFIG_VTI
   5.327  
   5.328 @@ -1037,21 +1031,6 @@ int construct_dom0(struct domain *d,
   5.329  	strcpy(d->name,"Domain0");
   5.330  #endif
   5.331  
   5.332 -	// prepare domain0 pagetable (maps METAphysical to physical)
   5.333 -	// following is roughly mm_init() in linux/kernel/fork.c
   5.334 -	d->arch.mm = xmalloc(struct mm_struct);
   5.335 -	if (unlikely(!d->arch.mm)) {
   5.336 -	    	printk("Can't allocate mm_struct for domain0\n");
   5.337 -	    	return -ENOMEM;
   5.338 -	}
   5.339 -	memset(d->arch.mm, 0, sizeof(*d->arch.mm));
   5.340 -	d->arch.mm->pgd = pgd_alloc(d->arch.mm);
   5.341 -	if (unlikely(!d->arch.mm->pgd)) {
   5.342 -	    	printk("Can't allocate pgd for domain0\n");
   5.343 -	    	return -ENOMEM;
   5.344 -	}
   5.345 -
   5.346 -
   5.347  	/* Mask all upcalls... */
   5.348  	for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   5.349  	    d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
   5.350 @@ -1146,19 +1125,6 @@ int construct_domU(struct domain *d,
   5.351  	printk("parsedomainelfimage returns %d\n",rc);
   5.352  	if ( rc != 0 ) return rc;
   5.353  
   5.354 -	d->arch.mm = xmalloc(struct mm_struct);
   5.355 -	if (unlikely(!d->arch.mm)) {
   5.356 -	    	printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
   5.357 -	    	return -ENOMEM;
   5.358 -	}
   5.359 -	memset(d->arch.mm, 0, sizeof(*d->arch.mm));
   5.360 -	d->arch.mm->pgd = pgd_alloc(d->arch.mm);
   5.361 -	if (unlikely(!d->arch.mm->pgd)) {
   5.362 -	    	printk("Can't allocate pgd for domain %d\n",d->domain_id);
   5.363 -	    	return -ENOMEM;
   5.364 -	}
   5.365 -
   5.366 -
   5.367  	/* Mask all upcalls... */
   5.368  	for ( i = 0; i < MAX_VIRT_CPUS; i++ )
   5.369  		d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
   5.370 @@ -1231,10 +1197,10 @@ void machine_halt(void)
   5.371  	while(1);
   5.372  }
   5.373  
   5.374 -void dummy(void)
   5.375 +void dummy_called(char *function)
   5.376  {
   5.377  	if (platform_is_hp_ski()) asm("break 0;;");
   5.378 -	printf("dummy called: spinning....\n");
   5.379 +	printf("dummy called in %s: spinning....\n", function);
   5.380  	while(1);
   5.381  }
   5.382  
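
For orientation, the pointer arithmetic repeated through this file (init_switch_stack, arch_getdomaininfo_ctxt, arch_set_info_guest, and both new_thread variants) implies the following layout of the combined vcpu/stack allocation; a sketch, assuming the usual ia64 arrangement of a memory stack growing down and a register backing store growing up:

    /*
     *  v + IA64_STK_OFFSET -> top of the allocation
     *    struct pt_regs       regs = (struct pt_regs *)((unsigned long)v + IA64_STK_OFFSET) - 1
     *    struct switch_stack  sw   = (struct switch_stack *)regs - 1
     *    ...                  memory stack grows down from sw - 16 (saved as _thread.ksp)
     *    ...                  register backing store grows up
     *  v + IA64_RBS_OFFSET -> sw->ar_bspstore
     *  v                   -> the struct vcpu itself
     */
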
     6.1 --- a/xen/arch/ia64/hypercall.c	Sun Jun 19 08:48:29 2005 +0000
     6.2 +++ b/xen/arch/ia64/hypercall.c	Sun Jun 19 16:17:17 2005 +0000
     6.3 @@ -19,8 +19,6 @@ extern unsigned long translate_domain_mp
     6.4  extern struct ia64_sal_retval pal_emulator_static(UINT64);
     6.5  extern struct ia64_sal_retval sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64);
     6.6  
     6.7 -void fooefi(void) {}
     6.8 -
     6.9  int
    6.10  ia64_hypercall (struct pt_regs *regs)
    6.11  {
    6.12 @@ -122,6 +120,31 @@ ia64_hypercall (struct pt_regs *regs)
    6.13  	    case 0xfffb: // test dummy hypercall
    6.14  		regs->r8 = domU_staging_read_8(vcpu_get_gr(v,32));
    6.15  		break;
    6.16 +
    6.17 +	    case __HYPERVISOR_dom0_op:
    6.18 +		regs->r8 = do_dom0_op(regs->r14);
    6.19 +		break;
    6.20 +
    6.21 +	    case __HYPERVISOR_dom_mem_op:
    6.22 +#ifdef CONFIG_VTI
    6.23 +		regs->r8 = do_dom_mem_op(regs->r14, regs->r15, regs->r16, regs->r17, regs->r18); 
    6.24 +#else
    6.25 +		/* we don't handle reservations; just return success */
    6.26 +		regs->r8 = regs->r16;
    6.27 +#endif
    6.28 +		break;
    6.29 +
    6.30 +	    case __HYPERVISOR_event_channel_op:
    6.31 +		regs->r8 = do_event_channel_op(regs->r14);
    6.32 +		break;
    6.33 +
    6.34 +	    case __HYPERVISOR_console_io:
    6.35 +		regs->r8 = do_console_io(regs->r14, regs->r15, regs->r16);
    6.36 +		break;
    6.37 +
    6.38 +	    default:
     6.39 +		printf("unknown hypercall %lx\n", regs->r2);
    6.40 +		regs->r8 = (unsigned long)-1;
    6.41  	}
    6.42  	return 1;
    6.43  }
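
The dispatch above pins down the ia64 hypercall convention in this tree: hypercall number in r2, up to five arguments in r14-r18, result returned in r8, with entry via a break whose immediate is d->arch.breakimm (0x1000, set earlier in this changeset). A hedged guest-side sketch, assuming GCC register-asm on ia64; the wrapper name is ours:

    /* Hypothetical one-argument hypercall stub (sketch, not from this tree). */
    static inline long xen_hypercall1(unsigned long nr, unsigned long arg1)
    {
        register unsigned long r2  asm ("r2")  = nr;
        register unsigned long r14 asm ("r14") = arg1;
        register unsigned long r8  asm ("r8");
        asm volatile ("break 0x1000"            /* d->arch.breakimm */
                      : "=r" (r8)
                      : "r" (r2), "r" (r14)
                      : "memory");
        return (long) r8;
    }
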
     7.1 --- a/xen/arch/ia64/hyperprivop.S	Sun Jun 19 08:48:29 2005 +0000
     7.2 +++ b/xen/arch/ia64/hyperprivop.S	Sun Jun 19 16:17:17 2005 +0000
     7.3 @@ -41,40 +41,46 @@
     7.4  //	r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
     7.5  //	r31 == pr
     7.6  GLOBAL_ENTRY(fast_hyperprivop)
     7.7 -#if 1
     7.8  	// HYPERPRIVOP_SSM_I?
     7.9  	// assumes domain interrupts pending, so just do it
    7.10  	cmp.eq p7,p6=XEN_HYPER_SSM_I,r17
    7.11  (p7)	br.sptk.many hyper_ssm_i;;
    7.12 -#endif
    7.13 -#if 1
    7.14 -	// if domain interrupts pending, give up for now and do it the slow way
    7.15 +
    7.16 +	// FIXME. This algorithm gives up (goes to the slow path) if there
    7.17 +	// are ANY interrupts pending, even if they are currently
    7.18 +	// undeliverable.  This should be improved later...
    7.19  	adds r20=XSI_PEND_OFS-XSI_PSR_IC_OFS,r18 ;;
    7.20 -	ld8 r20=[r20] ;;
    7.21 -	cmp.ne p7,p0=r0,r20
    7.22 -(p7)	br.sptk.many dispatch_break_fault ;;
    7.23 +	ld4 r20=[r20] ;;
    7.24 +	cmp.eq p7,p0=r0,r20
    7.25 +(p7)	br.cond.sptk.many 1f
    7.26 +	mov r20=IA64_KR(CURRENT);;
    7.27 +	adds r21=IA64_VCPU_IRR0_OFFSET,r20;
    7.28 +	adds r22=IA64_VCPU_IRR0_OFFSET+8,r20;;
    7.29 +	ld8 r23=[r21],16; ld8 r24=[r22],16;;
    7.30 +	ld8 r21=[r21]; ld8 r22=[r22];;
    7.31 +	or r23=r23,r24; or r21=r21,r22;;
    7.32 +	or r20=r23,r21;;
     7.33 +1:	// when we get to here, r20 ~= interrupts pending
    7.34  
    7.35  	// HYPERPRIVOP_RFI?
    7.36  	cmp.eq p7,p6=XEN_HYPER_RFI,r17
    7.37  (p7)	br.sptk.many hyper_rfi;;
    7.38  
    7.39 +	cmp.ne p7,p0=r20,r0
    7.40 +(p7)	br.spnt.many dispatch_break_fault ;;
    7.41 +
    7.42  // hard to test, because only called from rbs_switch
    7.43  	// HYPERPRIVOP_COVER?
    7.44  	cmp.eq p7,p6=XEN_HYPER_COVER,r17
    7.45  (p7)	br.sptk.many hyper_cover;;
    7.46 -#endif
    7.47  
    7.48 -#if 1
    7.49  	// HYPERPRIVOP_SSM_DT?
    7.50  	cmp.eq p7,p6=XEN_HYPER_SSM_DT,r17
    7.51  (p7)	br.sptk.many hyper_ssm_dt;;
    7.52 -#endif
    7.53  
    7.54 -#if 1
    7.55  	// HYPERPRIVOP_RSM_DT?
    7.56  	cmp.eq p7,p6=XEN_HYPER_RSM_DT,r17
    7.57  (p7)	br.sptk.many hyper_rsm_dt;;
    7.58 -#endif
    7.59  
    7.60  	// if not one of the above, give up for now and do it the slow way
    7.61  	br.sptk.many dispatch_break_fault ;;
    7.62 @@ -336,12 +342,16 @@ GLOBAL_ENTRY(fast_break_reflect)
    7.63  
    7.64  // ensure that, if giving up, registers at entry to fast_hyperprivop unchanged
    7.65  ENTRY(hyper_rfi)
    7.66 -#ifdef FAST_HYPERPRIVOP_CNT
    7.67 -	movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_RFI);;
    7.68 -	ld8 r21=[r20];;
    7.69 -	adds r21=1,r21;;
    7.70 -	st8 [r20]=r21;;
    7.71 -#endif
    7.72 +	// if no interrupts pending, proceed
    7.73 +	cmp.eq p7,p0=r20,r0
    7.74 +(p7)	br.sptk.many 1f
    7.75 +	// interrupts pending, if rfi'ing to interrupts on, go slow way
    7.76 +	adds r20=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
    7.77 +	ld8 r21=[r20];;		// r21 = vcr.ipsr
    7.78 +	extr.u r22=r21,IA64_PSR_I_BIT,1 ;;
    7.79 +	cmp.ne p7,p0=r22,r0 ;;
    7.80 +(p7)	br.spnt.many dispatch_break_fault ;;
    7.81 +1:
    7.82  	adds r20=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
    7.83  	ld8 r21=[r20];;		// r21 = vcr.ipsr
    7.84  	extr.u r22=r21,IA64_PSR_BE_BIT,1 ;;
    7.85 @@ -375,7 +385,13 @@ ENTRY(hyper_rfi)
    7.86  (p7)	br.sptk.many dispatch_break_fault ;;
    7.87  
    7.88  	// OK now, let's do an rfi.
    7.89 -	// r18=&vpsr.i|vpsr.ic, r21==vpsr, r20==&vcr.iip, r22=vcr.iip
    7.90 +#ifdef FAST_HYPERPRIVOP_CNT
    7.91 +	movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_RFI);;
    7.92 +	ld8 r23=[r20];;
    7.93 +	adds r23=1,r23;;
    7.94 +	st8 [r20]=r23;;
    7.95 +#endif
    7.96 +	// r18=&vpsr.i|vpsr.ic, r21==vpsr, r22=vcr.iip
    7.97  	mov cr.iip=r22;;
    7.98  	adds r20=XSI_INCOMPL_REG_OFS-XSI_PSR_IC_OFS,r18 ;;
    7.99  	st4 [r20]=r0 ;;
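
The new pending-interrupt probe in fast_hyperprivop loads the vcpu's four 64-bit irr words (via the IA64_VCPU_IRR0_OFFSET added to asm-offsets.c above) and ORs them together, so r20 is nonzero iff anything at all is pending, deliverable or not, as the FIXME notes. The C equivalent is just:

    /* C restatement of the irr scan in fast_hyperprivop (sketch). */
    static inline int any_interrupt_pending(const unsigned long irr[4])
    {
        return (irr[0] | irr[1] | irr[2] | irr[3]) != 0;
    }
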
     8.1 --- a/xen/arch/ia64/ivt.S	Sun Jun 19 08:48:29 2005 +0000
     8.2 +++ b/xen/arch/ia64/ivt.S	Sun Jun 19 16:17:17 2005 +0000
     8.3 @@ -348,12 +348,23 @@ ENTRY(alt_itlb_miss)
     8.4  //	;;
     8.5  //#endif
     8.6  #endif
     8.7 +#ifdef XEN
     8.8 +	mov r31=pr
     8.9 +	mov r16=cr.ifa		// get address that caused the TLB miss
    8.10 +	;;
    8.11 +late_alt_itlb_miss:
    8.12 +	movl r17=PAGE_KERNEL
    8.13 +	mov r21=cr.ipsr
    8.14 +	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
    8.15 +	;;
    8.16 +#else
    8.17  	mov r16=cr.ifa		// get address that caused the TLB miss
    8.18  	movl r17=PAGE_KERNEL
    8.19  	mov r21=cr.ipsr
    8.20  	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
    8.21  	mov r31=pr
    8.22  	;;
    8.23 +#endif
    8.24  #ifdef CONFIG_DISABLE_VHPT
    8.25  	shr.u r22=r16,61			// get the region number into r21
    8.26  	;;
    8.27 @@ -367,9 +378,15 @@ ENTRY(alt_itlb_miss)
    8.28  #endif
    8.29  	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
    8.30  	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
    8.31 +#ifdef XEN
    8.32 +	shr.u r18=r16,55	// move address bit 59 to bit 4
    8.33 +	;;
    8.34 +	and r18=0x10,r18	// bit 4=address-bit(59)
    8.35 +#else
    8.36  	shr.u r18=r16,57	// move address bit 61 to bit 4
    8.37  	;;
    8.38  	andcm r18=0x10,r18	// bit 4=~address-bit(61)
    8.39 +#endif
    8.40  	cmp.ne p8,p0=r0,r23	// psr.cpl != 0?
    8.41  	or r19=r17,r19		// insert PTE control bits into r19
    8.42  	;;
    8.43 @@ -393,13 +410,18 @@ ENTRY(alt_dtlb_miss)
    8.44  //	;;
    8.45  //#endif
    8.46  #endif
    8.47 +#ifdef XEN
    8.48 +	mov r31=pr
    8.49  	mov r16=cr.ifa		// get address that caused the TLB miss
    8.50 +	;;
    8.51 +late_alt_dtlb_miss:
    8.52  	movl r17=PAGE_KERNEL
    8.53  	mov r20=cr.isr
    8.54  	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
    8.55  	mov r21=cr.ipsr
    8.56 -	mov r31=pr
    8.57  	;;
    8.58 +#else
    8.59 +#endif
    8.60  #ifdef CONFIG_DISABLE_VHPT
    8.61  	shr.u r22=r16,61			// get the region number into r21
    8.62  	;;
    8.63 @@ -414,24 +436,33 @@ ENTRY(alt_dtlb_miss)
    8.64  	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
    8.65  	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
    8.66  	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
    8.67 +#ifdef XEN
    8.68 +	shr.u r18=r16,55			// move address bit 59 to bit 4
    8.69 +	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
    8.70 +	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
    8.71 +	;;
    8.72 +	and r18=0x10,r18	// bit 4=address-bit(59)
    8.73 +#else
    8.74  	shr.u r18=r16,57			// move address bit 61 to bit 4
    8.75  	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
    8.76  	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
    8.77  	;;
    8.78  	andcm r18=0x10,r18	// bit 4=~address-bit(61)
    8.79 +#endif
    8.80  	cmp.ne p8,p0=r0,r23
    8.81  (p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
    8.82  (p8)	br.cond.spnt page_fault
    8.83  #ifdef XEN
    8.84  	;;
    8.85 -	// FIXME: inadequate test, this is where we test for Xen address
    8.86 -	// note that 0xf000 (cached) and 0xd000 (uncached) addresses
    8.87 -	// should be OK.  (Though no I/O is done in Xen, EFI needs uncached
    8.88 -	// addresses and some domain EFI calls are passed through)
    8.89 -	tbit.nz p0,p8=r16,60
    8.90 -(p8)	br.cond.spnt page_fault
    8.91 -//(p8)	br.cond.spnt 0
    8.92 -	;;
     8.93 +	// Test for a Xen address; if it is not one, handle via page_fault
    8.94 +	// note that 0xf000 (cached) and 0xe800 (uncached) addresses
    8.95 +	// should be OK.
    8.96 +	extr.u r22=r16,59,5;;
    8.97 +	cmp.eq p8,p0=0x1e,r22
    8.98 +(p8)	br.cond.spnt 1f;;
    8.99 +	cmp.ne p8,p0=0x1d,r22
   8.100 +(p8)	br.cond.sptk page_fault ;;
   8.101 +1:
   8.102  #endif
   8.103  
   8.104  	dep r21=-1,r21,IA64_PSR_ED_BIT,1
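
The shift change above (57 to 55) is the point of the region rework: shifting the faulting address right by 55 lands address bit 59 in bit 4, which is masked and OR'd into the identity-mapped PTE, so addresses with bit 59 set (the new 0xe800... uncached range) pick up the uncacheable memory attribute. As a sketch:

    /* Sketch of the bit-4 trick in late_alt_itlb_miss/late_alt_dtlb_miss. */
    static inline unsigned long uncached_pte_bit(unsigned long ifa)
    {
        return (ifa >> 55) & 0x10;   /* PTE bit 4 = address bit 59 */
    }
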
     9.1 --- a/xen/arch/ia64/patch/linux-2.6.11/io.h	Sun Jun 19 08:48:29 2005 +0000
     9.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/io.h	Sun Jun 19 16:17:17 2005 +0000
     9.3 @@ -5,7 +5,7 @@
     9.4   #define SLOW_DOWN_IO	do { } while (0)
     9.5   
     9.6  +#ifdef XEN
     9.7 -+#define __IA64_UNCACHED_OFFSET	0xd000000000000000UL	/* region 6 */
     9.8 ++#define __IA64_UNCACHED_OFFSET	0xe800000000000000UL
     9.9  +#else
    9.10   #define __IA64_UNCACHED_OFFSET	0xc000000000000000UL	/* region 6 */
    9.11  +#endif
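
This __IA64_UNCACHED_OFFSET move from 0xd000... to 0xe800... is what makes the new alt_dtlb_miss test above work: extr.u r22=r16,59,5 extracts address bits 63:59, accepting 0x1e (cached Xen at 0xf000...) and 0x1d (the uncached range). A standalone check of that arithmetic:

    #include <stdio.h>

    /* Sketch: the 5-bit field taken by "extr.u r22=r16,59,5" for the two
     * Xen ranges referenced in alt_dtlb_miss. */
    int main(void)
    {
        unsigned long cached   = 0xf000000000000000UL;
        unsigned long uncached = 0xe800000000000000UL;
        printf("%#lx %#lx\n",
               (cached   >> 59) & 0x1f,    /* prints 0x1e */
               (uncached >> 59) & 0x1f);   /* prints 0x1d */
        return 0;
    }
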
    10.1 --- a/xen/arch/ia64/patch/linux-2.6.11/ptrace.h	Sun Jun 19 08:48:29 2005 +0000
    10.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/ptrace.h	Sun Jun 19 16:17:17 2005 +0000
    10.3 @@ -4,9 +4,9 @@
    10.4    * (because the memory stack pointer MUST ALWAYS be aligned this way)
    10.5    *
    10.6    */
    10.7 -+#ifdef CONFIG_VTI
    10.8 -+#include "vmx_ptrace.h"
    10.9 -+#else  //CONFIG_VTI
   10.10 ++#ifdef XEN
   10.11 ++#include <public/arch-ia64.h>
   10.12 ++#else
   10.13   struct pt_regs {
   10.14   	/* The following registers are saved by SAVE_MIN: */
   10.15   	unsigned long b6;		/* scratch */
   10.16 @@ -14,7 +14,7 @@
   10.17   	struct ia64_fpreg f10;		/* scratch */
   10.18   	struct ia64_fpreg f11;		/* scratch */
   10.19   };
   10.20 -+#endif // CONFIG_VTI
   10.21 ++#endif
   10.22   
   10.23   /*
   10.24    * This structure contains the addition registers that need to
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/uaccess.h	Sun Jun 19 16:17:17 2005 +0000
    11.3 @@ -0,0 +1,22 @@
    11.4 +--- ../../linux-2.6.11/include/asm-ia64/uaccess.h	2005-06-06 10:36:23.000000000 -0600
    11.5 ++++ include/asm-ia64/uaccess.h	2005-06-10 18:08:06.000000000 -0600
    11.6 +@@ -60,6 +60,11 @@
    11.7 +  * address TASK_SIZE is never valid.  We also need to make sure that the address doesn't
    11.8 +  * point inside the virtually mapped linear page table.
    11.9 +  */
   11.10 ++#ifdef XEN
   11.11 ++/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */
   11.12 ++#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
   11.13 ++#define __access_ok(addr, size, segment) (!IS_VMM_ADDRESS((unsigned long)(addr)))
   11.14 ++#else
   11.15 + #define __access_ok(addr, size, segment)						\
   11.16 + ({											\
   11.17 + 	__chk_user_ptr(addr);								\
   11.18 +@@ -67,6 +72,7 @@
   11.19 + 	 && ((segment).seg == KERNEL_DS.seg						\
   11.20 + 	     || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));	\
   11.21 + })
   11.22 ++#endif
   11.23 + #define access_ok(type, addr, size)	__access_ok((addr), (size), get_fs())
   11.24 + 
   11.25 + static inline int
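
The IS_VMM_ADDRESS test added above encodes the VT-i convention cited in the patch comment: bit 60 is reserved for the VMM, so a valid guest address has bit 60 equal to bit 59, and both Xen ranges fail the check. A small self-contained demonstration:

    #include <stdio.h>

    #define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)

    int main(void)
    {
        printf("%d %d %d\n",
               (int) IS_VMM_ADDRESS(0xe000000000000000UL),   /* 0: guest region 7 */
               (int) IS_VMM_ADDRESS(0xf000000000000000UL),   /* 1: Xen, cached */
               (int) IS_VMM_ADDRESS(0xe800000000000000UL));  /* 1: Xen, uncached */
        return 0;
    }
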
    12.1 --- a/xen/arch/ia64/privop.c	Sun Jun 19 08:48:29 2005 +0000
    12.2 +++ b/xen/arch/ia64/privop.c	Sun Jun 19 16:17:17 2005 +0000
    12.3 @@ -748,10 +748,22 @@ priv_emulate(VCPU *vcpu, REGS *regs, UIN
    12.4  #define HYPERPRIVOP_ITC_D		0x5
    12.5  #define HYPERPRIVOP_ITC_I		0x6
    12.6  #define HYPERPRIVOP_SSM_I		0x7
    12.7 -#define HYPERPRIVOP_MAX			0x7
    12.8 +#define HYPERPRIVOP_GET_IVR		0x8
    12.9 +#define HYPERPRIVOP_GET_TPR		0x9
   12.10 +#define HYPERPRIVOP_SET_TPR		0xa
   12.11 +#define HYPERPRIVOP_EOI			0xb
   12.12 +#define HYPERPRIVOP_SET_ITM		0xc
   12.13 +#define HYPERPRIVOP_THASH		0xd
   12.14 +#define HYPERPRIVOP_PTC_GA		0xe
   12.15 +#define HYPERPRIVOP_ITR_D		0xf
   12.16 +#define HYPERPRIVOP_GET_RR		0x10
   12.17 +#define HYPERPRIVOP_SET_RR		0x11
   12.18 +#define HYPERPRIVOP_MAX			0x11
   12.19  
   12.20  char *hyperpriv_str[HYPERPRIVOP_MAX+1] = {
   12.21  	0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i", "ssm.i",
   12.22 +	"=ivr", "=tpr", "tpr=", "eoi", "itm=", "thash", "ptc.ga", "itr.d",
   12.23 +	"=rr", "rr=",
   12.24  	0
   12.25  };
   12.26  
   12.27 @@ -766,6 +778,7 @@ ia64_hyperprivop(unsigned long iim, REGS
   12.28  	struct vcpu *v = (struct domain *) current;
   12.29  	INST64 inst;
   12.30  	UINT64 val;
   12.31 +	UINT64 itir, ifa;
   12.32  
   12.33  // FIXME: Handle faults appropriately for these
   12.34  	if (!iim || iim > HYPERPRIVOP_MAX) {
   12.35 @@ -797,6 +810,44 @@ ia64_hyperprivop(unsigned long iim, REGS
   12.36  	    case HYPERPRIVOP_SSM_I:
   12.37  		(void)vcpu_set_psr_i(v);
   12.38  		return 1;
   12.39 +	    case HYPERPRIVOP_GET_IVR:
   12.40 +		(void)vcpu_get_ivr(v,&val);
   12.41 +		regs->r8 = val;
   12.42 +		return 1;
   12.43 +	    case HYPERPRIVOP_GET_TPR:
   12.44 +		(void)vcpu_get_tpr(v,&val);
   12.45 +		regs->r8 = val;
   12.46 +		return 1;
   12.47 +	    case HYPERPRIVOP_SET_TPR:
   12.48 +		(void)vcpu_set_tpr(v,regs->r8);
   12.49 +		return 1;
   12.50 +	    case HYPERPRIVOP_EOI:
   12.51 +		(void)vcpu_set_eoi(v,0L);
   12.52 +		return 1;
   12.53 +	    case HYPERPRIVOP_SET_ITM:
   12.54 +		(void)vcpu_set_itm(v,regs->r8);
   12.55 +		return 1;
   12.56 +	    case HYPERPRIVOP_THASH:
   12.57 +		(void)vcpu_thash(v,regs->r8,&val);
   12.58 +		regs->r8 = val;
   12.59 +		return 1;
   12.60 +	    case HYPERPRIVOP_PTC_GA:
   12.61 +		// FIXME: this doesn't seem to work yet, turned off
   12.62 +		//(void)vcpu_ptc_ga(v,regs->r8,regs->r9);
   12.63 +		//return 1;
   12.64 +		break;
   12.65 +	    case HYPERPRIVOP_ITR_D:
   12.66 +		(void)vcpu_get_itir(v,&itir);
   12.67 +		(void)vcpu_get_ifa(v,&ifa);
   12.68 +		(void)vcpu_itr_d(v,regs->r8,regs->r9,itir,ifa);
   12.69 +		return 1;
   12.70 +	    case HYPERPRIVOP_GET_RR:
   12.71 +		(void)vcpu_get_rr(v,regs->r8,&val);
   12.72 +		regs->r8 = val;
   12.73 +		return 1;
   12.74 +	    case HYPERPRIVOP_SET_RR:
   12.75 +		(void)vcpu_set_rr(v,regs->r8,regs->r9);
   12.76 +		return 1;
   12.77  	}
   12.78  	return 0;
   12.79  }
    13.1 --- a/xen/arch/ia64/process.c	Sun Jun 19 08:48:29 2005 +0000
    13.2 +++ b/xen/arch/ia64/process.c	Sun Jun 19 16:17:17 2005 +0000
    13.3 @@ -313,45 +313,31 @@ void xen_handle_domain_access(unsigned l
    13.4  	}
    13.5  if (address < 0x4000) printf("WARNING: page_fault @%p, iip=%p\n",address,iip);
    13.6  		
     13.7 +	if ((trp = match_tr(current,address))) {
    13.8 +		// FIXME address had better be pre-validated on insert
    13.9 +		pteval = translate_domain_pte(trp->page_flags,address,trp->itir);
   13.10 +		vcpu_itc_no_srlz(current,6,address,pteval,-1UL,(trp->itir>>2)&0x3f);
   13.11 +		return;
   13.12 +	}
   13.13  	// if we are fortunate enough to have it in the 1-entry TLB...
   13.14  	if (pteval = match_dtlb(ed,address,&ps,NULL)) {
   13.15  		vcpu_itc_no_srlz(ed,6,address,pteval,-1UL,ps);
   13.16  		return;
   13.17  	}
   13.18 -	// look in the TRs
   13.19 -	fault = vcpu_tpa(ed,address,&mpaddr);
   13.20 -	if (fault != IA64_NO_FAULT) {
   13.21 -		static int uacnt = 0;
   13.22 -		// can't translate it, just fail (poor man's exception)
   13.23 -		// which results in retrying execution
   13.24 -//printk("*** xen_handle_domain_access: poor man's exception cnt=%i iip=%p, addr=%p...\n",uacnt++,iip,address);
   13.25 -		if (ia64_done_with_exception(regs)) {
   13.26 +	if (ia64_done_with_exception(regs)) {
   13.27  //if (!(uacnt++ & 0x3ff)) printk("*** xen_handle_domain_access: successfully handled cnt=%d iip=%p, addr=%p...\n",uacnt,iip,address);
   13.28  			return;
   13.29 -		}
   13.30 -		else {
   13.31 -			// should never happen.  If it does, region 0 addr may
   13.32 -			// indicate a bad xen pointer
   13.33 -			printk("*** xen_handle_domain_access: exception table"
   13.34 -                               " lookup failed, iip=%p, addr=%p, spinning...\n",
   13.35 -				iip,address);
   13.36 -			panic_domain(regs,"*** xen_handle_domain_access: exception table"
   13.37 -                               " lookup failed, iip=%p, addr=%p, spinning...\n",
   13.38 -				iip,address);
   13.39 -		}
   13.40  	}
   13.41 -	if (d == dom0) {
   13.42 -		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
   13.43 -			printk("xen_handle_domain_access: vcpu_tpa returned out-of-bounds dom0 mpaddr %p! continuing...\n",mpaddr);
   13.44 -			tdpfoo();
   13.45 -		}
   13.46 +	else {
   13.47 +		// should never happen.  If it does, region 0 addr may
   13.48 +		// indicate a bad xen pointer
   13.49 +		printk("*** xen_handle_domain_access: exception table"
   13.50 +                       " lookup failed, iip=%p, addr=%p, spinning...\n",
   13.51 +			iip,address);
   13.52 +		panic_domain(regs,"*** xen_handle_domain_access: exception table"
   13.53 +                       " lookup failed, iip=%p, addr=%p, spinning...\n",
   13.54 +			iip,address);
   13.55  	}
   13.56 -//printk("*** xen_handle_domain_access: tpa resolved miss @%p...\n",address);
   13.57 -	pteval = lookup_domain_mpa(d,mpaddr);
   13.58 -	// would be nice to have a counter here
   13.59 -	//printf("Handling privop data TLB miss\n");
   13.60 -	// FIXME, must be inlined or potential for nested fault here!
   13.61 -	vcpu_itc_no_srlz(ed,2,address,pteval,-1UL,PAGE_SHIFT);
   13.62  }
   13.63  
   13.64  void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
   13.65 @@ -441,7 +427,7 @@ panic_domain(0,"ia64_do_page_fault: @%p?
   13.66  				if (pteval & _PAGE_P)
   13.67  				{
   13.68  					pteval = translate_domain_pte(pteval,address,itir);
   13.69 -					vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,(itir>>2)&0x3f);
   13.70 +					vcpu_itc_no_srlz(current,is_data?6:1,address,pteval,-1UL,(itir>>2)&0x3f);
   13.71  					return;
   13.72  				}
   13.73  				else vector = is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
   13.74 @@ -768,7 +754,7 @@ if (!running_on_sim) { printf("SSC_OPEN,
   13.75  		vcpu_set_gr(current,8,-1L);
   13.76  		break;
   13.77  	    default:
   13.78 -		printf("ia64_handle_break: bad ssc code %lx, iip=%p\n",ssc,regs->cr_iip);
   13.79 +		printf("ia64_handle_break: bad ssc code %lx, iip=%p, b0=%p\n",ssc,regs->cr_iip,regs->b0);
   13.80  		break;
   13.81  	}
   13.82  	vcpu_increment_iip(current);
    14.1 --- a/xen/arch/ia64/regionreg.c	Sun Jun 19 08:48:29 2005 +0000
    14.2 +++ b/xen/arch/ia64/regionreg.c	Sun Jun 19 16:17:17 2005 +0000
    14.3 @@ -274,6 +274,7 @@ int set_one_rr(unsigned long rr, unsigne
    14.4  		return 0;
    14.5  	}
    14.6  
    14.7 +#ifdef CONFIG_VTI
    14.8  	memrrv.rrval = rrv.rrval;
    14.9  	if (rreg == 7) {
   14.10  		newrrv.rid = newrid;
   14.11 @@ -290,6 +291,15 @@ int set_one_rr(unsigned long rr, unsigne
   14.12  		if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
   14.13  		set_rr(rr,newrrv.rrval);
   14.14  	}
   14.15 +#else
   14.16 +	memrrv.rrval = rrv.rrval;
   14.17 +	newrrv.rid = newrid;
   14.18 +	newrrv.ve = 1;  // VHPT now enabled for region 7!!
   14.19 +	newrrv.ps = PAGE_SHIFT;
   14.20 +	if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
   14.21 +	if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
   14.22 +	else set_rr(rr,newrrv.rrval);
   14.23 +#endif
   14.24  	return 1;
   14.25  }
   14.26  
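
The newrrv.{rid,ve,ps} stores above follow the ia64 region-register format; for reference, a sketch of the layout these fields assume (widths per the architected encoding; regionreg.h itself is not in this diff):

    /* Sketch of the ia64 region register layout used by set_one_rr. */
    typedef union {
        unsigned long rrval;
        struct {
            unsigned long ve  :  1;   /* enable VHPT walker for this region */
            unsigned long rv0 :  1;   /* reserved */
            unsigned long ps  :  6;   /* preferred page size: 2^ps bytes */
            unsigned long rid : 24;   /* region identifier */
        };
    } ia64_rr_t;
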
    15.1 --- a/xen/arch/ia64/tools/mkbuildtree	Sun Jun 19 08:48:29 2005 +0000
    15.2 +++ b/xen/arch/ia64/tools/mkbuildtree	Sun Jun 19 16:17:17 2005 +0000
    15.3 @@ -259,7 +259,7 @@ softlink include/asm-ia64/string.h inclu
    15.4  softlink include/asm-ia64/thread_info.h include/asm-ia64/thread_info.h
    15.5  softlink include/asm-ia64/timex.h include/asm-ia64/timex.h
    15.6  softlink include/asm-ia64/topology.h include/asm-ia64/topology.h
    15.7 -softlink include/asm-ia64/uaccess.h include/asm-ia64/uaccess.h
    15.8 +cp_patch include/asm-ia64/uaccess.h include/asm-ia64/uaccess.h uaccess.h
    15.9  softlink include/asm-ia64/unaligned.h include/asm-ia64/unaligned.h
   15.10  softlink include/asm-ia64/unistd.h include/asm-ia64/unistd.h
   15.11  softlink include/asm-ia64/unwind.h include/asm-ia64/unwind.h
    16.1 --- a/xen/arch/ia64/vcpu.c	Sun Jun 19 08:48:29 2005 +0000
    16.2 +++ b/xen/arch/ia64/vcpu.c	Sun Jun 19 16:17:17 2005 +0000
    16.3 @@ -43,8 +43,9 @@ typedef	union {
    16.4  
    16.5  #ifdef PRIVOP_ADDR_COUNT
    16.6  struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS] = {
    16.7 -	{ "rsm", { 0 }, { 0 }, 0 },
    16.8 -	{ "ssm", { 0 }, { 0 }, 0 }
    16.9 +	{ "=ifa", { 0 }, { 0 }, 0 },
   16.10 +	{ "thash", { 0 }, { 0 }, 0 },
   16.11 +	0
   16.12  };
   16.13  extern void privop_count_addr(unsigned long addr, int inst);
   16.14  #define	PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst)
   16.15 @@ -135,7 +136,7 @@ IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, 
   16.16  	struct ia64_psr psr, imm, *ipsr;
   16.17  	REGS *regs = vcpu_regs(vcpu);
   16.18  
   16.19 -	PRIVOP_COUNT_ADDR(regs,_RSM);
   16.20 +	//PRIVOP_COUNT_ADDR(regs,_RSM);
   16.21  	// TODO: All of these bits need to be virtualized
   16.22  	// TODO: Only allowed for current vcpu
   16.23  	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
   16.24 @@ -183,7 +184,7 @@ IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UI
   16.25  	REGS *regs = vcpu_regs(vcpu);
   16.26  	UINT64 mask, enabling_interrupts = 0;
   16.27  
   16.28 -	PRIVOP_COUNT_ADDR(regs,_SSM);
   16.29 +	//PRIVOP_COUNT_ADDR(regs,_SSM);
   16.30  	// TODO: All of these bits need to be virtualized
   16.31  	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
   16.32  	imm = *(struct ia64_psr *)&imm24;
   16.33 @@ -369,6 +370,8 @@ IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT6
   16.34  IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
   16.35  {
   16.36  	UINT64 val = PSCB(vcpu,ifa);
   16.37 +	REGS *regs = vcpu_regs(vcpu);
   16.38 +	PRIVOP_COUNT_ADDR(regs,_GET_IFA);
   16.39  	*pval = val;
   16.40  	return (IA64_NO_FAULT);
   16.41  }
   16.42 @@ -422,6 +425,8 @@ IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT6
   16.43  {
   16.44  	//return vcpu_thash(vcpu,PSCB(vcpu,ifa),pval);
   16.45  	UINT64 val = PSCB(vcpu,iha);
   16.46 +	REGS *regs = vcpu_regs(vcpu);
   16.47 +	PRIVOP_COUNT_ADDR(regs,_THASH);
   16.48  	*pval = val;
   16.49  	return (IA64_NO_FAULT);
   16.50  }
   16.51 @@ -539,7 +544,7 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
   16.52      } else
   16.53  #endif // CONFIG_VTI
   16.54      {
   16.55 -	if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return;
   16.56 +	/* if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; */
   16.57  	if (test_bit(vector,PSCBX(vcpu,irr))) {
   16.58  //printf("vcpu_pend_interrupt: overrun\n");
   16.59  	}
   16.60 @@ -569,10 +574,10 @@ UINT64 vcpu_check_pending_interrupts(VCP
   16.61  	UINT64 *p, *q, *r, bits, bitnum, mask, i, vector;
   16.62  
   16.63  	p = &PSCBX(vcpu,irr[3]);
   16.64 -	q = &PSCB(vcpu,delivery_mask[3]);
   16.65 +	/* q = &PSCB(vcpu,delivery_mask[3]); */
   16.66  	r = &PSCBX(vcpu,insvc[3]);
   16.67  	for (i = 3; ; p--, q--, r--, i--) {
   16.68 -		bits = *p & *q;
   16.69 +		bits = *p /* & *q */;
   16.70  		if (bits) break; // got a potential interrupt
   16.71  		if (*r) {
   16.72  			// nothing in this word which is pending+inservice
   16.73 @@ -1589,7 +1594,8 @@ void vcpu_itc_no_srlz(VCPU *vcpu, UINT64
   16.74  		// addresses never get flushed.  More work needed if this
   16.75  		// ever happens.
   16.76  //printf("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
   16.77 -		vhpt_insert(vaddr,pte,logps<<2);
   16.78 +		if (logps > PAGE_SHIFT) vhpt_multiple_insert(vaddr,pte,logps);
   16.79 +		else vhpt_insert(vaddr,pte,logps<<2);
   16.80  	}
   16.81  	// even if domain pagesize is larger than PAGE_SIZE, just put
   16.82  	// PAGE_SIZE mapping in the vhpt for now, else purging is complicated
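
vcpu_check_pending_interrupts above scans irr[3] down to irr[0] for the highest pending vector, with the delivery_mask term now commented out so only the in-service screening remains. Ignoring insvc, the scan reduces to this sketch:

    /* Sketch: highest pending vector across four 64-bit irr words,
     * without the insvc (in-service) screening the real loop performs. */
    static int highest_pending_vector(const unsigned long irr[4])
    {
        int i;
        for (i = 3; i >= 0; i--)
            if (irr[i])
                return i * 64 + 63 - __builtin_clzl(irr[i]);
        return -1;   /* nothing pending */
    }
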
    17.1 --- a/xen/arch/ia64/vhpt.c	Sun Jun 19 08:48:29 2005 +0000
    17.2 +++ b/xen/arch/ia64/vhpt.c	Sun Jun 19 16:17:17 2005 +0000
    17.3 @@ -87,6 +87,37 @@ void vhpt_map(void)
    17.4  	ia64_srlz_i();
    17.5  }
    17.6  
    17.7 +void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte, unsigned long logps)
    17.8 +{
    17.9 +	unsigned long mask = (1L << logps) - 1;
   17.10 +	int i;
   17.11 +
   17.12 +	if (logps-PAGE_SHIFT > 10) {
   17.13 +		// if this happens, we may want to revisit this algorithm
   17.14 +		printf("vhpt_multiple_insert:logps-PAGE_SHIFT>10,spinning..\n");
   17.15 +		while(1);
   17.16 +	}
   17.17 +	if (logps-PAGE_SHIFT > 2) {
   17.18 +		// FIXME: Should add counter here to see how often this
   17.19 +		//  happens (e.g. for 16MB pages!) and determine if it
   17.20 +		//  is a performance problem.  On a quick look, it takes
   17.21 +		//  about 39000 instrs for a 16MB page and it seems to occur
   17.22 +		//  only a few times/second, so OK for now.
   17.23 +		//  An alternate solution would be to just insert the one
   17.24 +		//  16KB in the vhpt (but with the full mapping)?
   17.25 +		//printf("vhpt_multiple_insert: logps-PAGE_SHIFT==%d,"
   17.26 +			//"va=%p, pa=%p, pa-masked=%p\n",
   17.27 +			//logps-PAGE_SHIFT,vaddr,pte&_PFN_MASK,
   17.28 +			//(pte&_PFN_MASK)&~mask);
   17.29 +	}
   17.30 +	vaddr &= ~mask;
   17.31 +	pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);
   17.32 +	for (i = 1L << (logps-PAGE_SHIFT); i > 0; i--) {
   17.33 +		vhpt_insert(vaddr,pte,logps<<2);
   17.34 +		vaddr += PAGE_SIZE;
   17.35 +	}
   17.36 +}
   17.37 +
   17.38  void vhpt_init(void)
   17.39  {
   17.40  	unsigned long vhpt_total_size, vhpt_alignment, vhpt_imva;
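
vhpt_multiple_insert fans one large mapping out into 2^(logps - PAGE_SHIFT) base-page entries, which explains both guards above: assuming 16KB base pages (PAGE_SHIFT = 14, consistent with the 16MB example in the comment), a 16MB page (logps = 24) is 2^10 = 1024 insertions, right at the spin threshold. The count and stepping as a standalone sketch:

    #include <stdio.h>

    #define PAGE_SHIFT 14                   /* assumed: 16KB base pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        unsigned long logps = 24;                        /* a 16MB mapping */
        unsigned long n = 1UL << (logps - PAGE_SHIFT);   /* entries needed */
        printf("%lu entries, stepping %lu bytes\n", n, PAGE_SIZE);  /* 1024 */
        return 0;
    }
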
    18.1 --- a/xen/arch/ia64/vmmu.c	Sun Jun 19 08:48:29 2005 +0000
    18.2 +++ b/xen/arch/ia64/vmmu.c	Sun Jun 19 16:17:17 2005 +0000
    18.3 @@ -454,12 +454,13 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
    18.4      data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
    18.5      data.itir=itir;
    18.6      data.vadr=PAGEALIGN(ifa,data.ps);
    18.7 -    data.section=THASH_TLB_TC;
    18.8 +    data.tc = 1;
    18.9      data.cl=ISIDE_TLB;
   18.10      vmx_vcpu_get_rr(vcpu, ifa, &vrr);
   18.11      data.rid = vrr.rid;
   18.12      
   18.13 -    sections.v = THASH_SECTION_TR;
   18.14 +    sections.tr = 1;
   18.15 +    sections.tc = 0;
   18.16  
   18.17      ovl = thash_find_overlap(hcb, &data, sections);
   18.18      while (ovl) {
   18.19 @@ -467,9 +468,7 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
   18.20          panic("Tlb conflict!!");
   18.21          return;
   18.22      }
   18.23 -    sections.v = THASH_SECTION_TC;
   18.24 -    thash_purge_entries(hcb, &data, sections);
   18.25 -    thash_insert(hcb, &data, ifa);
   18.26 +    thash_purge_and_insert(hcb, &data);
   18.27      return IA64_NO_FAULT;
   18.28  }
   18.29  
   18.30 @@ -488,11 +487,12 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
   18.31      data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
   18.32      data.itir=itir;
   18.33      data.vadr=PAGEALIGN(ifa,data.ps);
   18.34 -    data.section=THASH_TLB_TC;
   18.35 +    data.tc = 1;
   18.36      data.cl=DSIDE_TLB;
   18.37      vmx_vcpu_get_rr(vcpu, ifa, &vrr);
   18.38      data.rid = vrr.rid;
   18.39 -    sections.v = THASH_SECTION_TR;
   18.40 +    sections.tr = 1;
   18.41 +    sections.tc = 0;
   18.42  
   18.43      ovl = thash_find_overlap(hcb, &data, sections);
   18.44      if (ovl) {
   18.45 @@ -500,42 +500,27 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
   18.46          panic("Tlb conflict!!");
   18.47          return;
   18.48      }
   18.49 -    sections.v = THASH_SECTION_TC;
   18.50 -    thash_purge_entries(hcb, &data, sections);
   18.51 -    thash_insert(hcb, &data, ifa);
   18.52 +    thash_purge_and_insert(hcb, &data);
   18.53      return IA64_NO_FAULT;
   18.54  }
   18.55  
   18.56 -IA64FAULT insert_foreignmap(VCPU *vcpu, UINT64 pte, UINT64 ps, UINT64 va)
   18.57 +/*
   18.58 + * Return TRUE/FALSE for success of lock operation
   18.59 + */
   18.60 +int vmx_lock_guest_dtc (VCPU *vcpu, UINT64 va, int lock)
   18.61  {
   18.62  
   18.63 -    thash_data_t data, *ovl;
   18.64      thash_cb_t  *hcb;
   18.65 -    search_section_t sections;
   18.66 -    rr_t    vrr;
   18.67 +    rr_t  vrr;
   18.68 +    u64	  preferred_size;
   18.69  
   18.70 -    hcb = vmx_vcpu_get_vtlb(vcpu);
   18.71 -    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
   18.72 -    data.itir=0;
   18.73 -    data.ps = ps;
   18.74 -    data.vadr=PAGEALIGN(va,ps);
   18.75 -    data.section=THASH_TLB_FM;
   18.76 -    data.cl=DSIDE_TLB;
   18.77      vmx_vcpu_get_rr(vcpu, va, &vrr);
   18.78 -    data.rid = vrr.rid;
   18.79 -    sections.v = THASH_SECTION_TR|THASH_SECTION_TC|THASH_SECTION_FM;
   18.80 -
   18.81 -    ovl = thash_find_overlap(hcb, &data, sections);
   18.82 -    if (ovl) {
   18.83 -          // generate MCA.
   18.84 -        panic("Foreignmap Tlb conflict!!");
   18.85 -        return;
   18.86 -    }
   18.87 -    thash_insert(hcb, &data, va);
   18.88 -    return IA64_NO_FAULT;
   18.89 +    hcb = vmx_vcpu_get_vtlb(vcpu);
   18.90 +    va = PAGEALIGN(va,vrr.ps);
   18.91 +    preferred_size = PSIZE(vrr.ps);
   18.92 +    return thash_lock_tc(hcb, va, preferred_size, vrr.rid, DSIDE_TLB, lock);
   18.93  }
   18.94  
   18.95 -
   18.96  IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
   18.97  {
   18.98  
   18.99 @@ -548,11 +533,12 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UIN
  18.100      data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
  18.101      data.itir=itir;
  18.102      data.vadr=PAGEALIGN(ifa,data.ps);
  18.103 -    data.section=THASH_TLB_TR;
  18.104 +    data.tc = 0;
  18.105      data.cl=ISIDE_TLB;
  18.106      vmx_vcpu_get_rr(vcpu, ifa, &vrr);
  18.107      data.rid = vrr.rid;
  18.108 -    sections.v = THASH_SECTION_TR;
  18.109 +    sections.tr = 1;
  18.110 +    sections.tc = 0;
  18.111  
  18.112      ovl = thash_find_overlap(hcb, &data, sections);
  18.113      if (ovl) {
  18.114 @@ -560,7 +546,8 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UIN
  18.115          panic("Tlb conflict!!");
  18.116          return;
  18.117      }
  18.118 -    sections.v=THASH_SECTION_TC;
  18.119 +    sections.tr = 0;
  18.120 +    sections.tc = 1;
  18.121      thash_purge_entries(hcb, &data, sections);
  18.122      thash_tr_insert(hcb, &data, ifa, idx);
  18.123      return IA64_NO_FAULT;
  18.124 @@ -579,11 +566,12 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UIN
  18.125      data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
  18.126      data.itir=itir;
  18.127      data.vadr=PAGEALIGN(ifa,data.ps);
  18.128 -    data.section=THASH_TLB_TR;
  18.129 +    data.tc = 0;
  18.130      data.cl=DSIDE_TLB;
  18.131      vmx_vcpu_get_rr(vcpu, ifa, &vrr);
  18.132      data.rid = vrr.rid;
  18.133 -    sections.v = THASH_SECTION_TR;
  18.134 +    sections.tr = 1;
  18.135 +    sections.tc = 0;
  18.136  
  18.137      ovl = thash_find_overlap(hcb, &data, sections);
  18.138      while (ovl) {
  18.139 @@ -591,7 +579,8 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UIN
  18.140          panic("Tlb conflict!!");
  18.141          return;
  18.142      }
  18.143 -    sections.v=THASH_SECTION_TC;
  18.144 +    sections.tr = 0;
  18.145 +    sections.tc = 1;
  18.146      thash_purge_entries(hcb, &data, sections);
  18.147      thash_tr_insert(hcb, &data, ifa, idx);
  18.148      return IA64_NO_FAULT;
  18.149 @@ -607,7 +596,8 @@ IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT
  18.150  
  18.151      hcb = vmx_vcpu_get_vtlb(vcpu);
  18.152      rr=vmx_vcpu_rr(vcpu,vadr);
  18.153 -    sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
  18.154 +    sections.tr = 1;
  18.155 +    sections.tc = 1;
  18.156      thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,DSIDE_TLB);
  18.157      return IA64_NO_FAULT;
  18.158  }
  18.159 @@ -619,7 +609,8 @@ IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT
  18.160      search_section_t sections;
  18.161      hcb = vmx_vcpu_get_vtlb(vcpu);
  18.162      rr=vmx_vcpu_rr(vcpu,vadr);
  18.163 -    sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
  18.164 +    sections.tr = 1;
  18.165 +    sections.tc = 1;
  18.166      thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,ISIDE_TLB);
  18.167      return IA64_NO_FAULT;
  18.168  }
  18.169 @@ -632,7 +623,8 @@ IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UIN
  18.170      thash_data_t data, *ovl;
  18.171      hcb = vmx_vcpu_get_vtlb(vcpu);
  18.172      vrr=vmx_vcpu_rr(vcpu,vadr);
  18.173 -    sections.v = THASH_SECTION_TC;
  18.174 +    sections.tr = 0;
  18.175 +    sections.tc = 1;
  18.176      vadr = PAGEALIGN(vadr, ps);
  18.177  
  18.178      thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,DSIDE_TLB);
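
The vmx_vcpu.c hunks above all follow one pattern: the THASH_SECTION_*
flag constants are retired, and callers now set the search_section_t
bitfields directly. A minimal sketch of the two idioms side by side
(the union itself is redefined in the vmmu.h hunk later in this
changeset):

    search_section_t sections;

    /* old idiom, removed by this changeset: */
    /* sections.v = THASH_SECTION_TR | THASH_SECTION_TC; */

    /* new idiom: name each section bit explicitly */
    sections.tr = 1;    /* match TR (translation register) entries */
    sections.tc = 1;    /* match TC (translation cache) entries */
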
    19.1 --- a/xen/arch/ia64/vmx_ivt.S	Sun Jun 19 08:48:29 2005 +0000
    19.2 +++ b/xen/arch/ia64/vmx_ivt.S	Sun Jun 19 16:17:17 2005 +0000
    19.3 @@ -180,7 +180,7 @@ ENTRY(vmx_dtlb_miss)
    19.4      mov r29=cr.ipsr;
    19.5      ;;
    19.6      tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
    19.7 -(p6)br.sptk vmx_fault_1
    19.8 +(p6)br.sptk vmx_fault_2
    19.9      mov r16 = cr.ifa
   19.10      ;;
   19.11      thash r17 = r16
   19.12 @@ -249,9 +249,9 @@ ENTRY(vmx_alt_itlb_miss)
   19.13  	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
   19.14  	;;
   19.15  	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
   19.16 -	shr.u r18=r16,57	// move address bit 61 to bit 4
   19.17 +	shr.u r18=r16,55	// move address bit 59 to bit 4
   19.18  	;;
   19.19 -	andcm r18=0x10,r18	// bit 4=~address-bit(61)
    19.20 +	and r18=0x10,r18	// bit 4=address-bit(59)
   19.21  	or r19=r17,r19		// insert PTE control bits into r19
   19.22  	;;
   19.23  	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
   19.24 @@ -280,11 +280,11 @@ ENTRY(vmx_alt_dtlb_miss)
   19.25  	;;
   19.26  	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
   19.27  	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
   19.28 -	shr.u r18=r16,57			// move address bit 61 to bit 4
   19.29 +	shr.u r18=r16,55			// move address bit 59 to bit 4
   19.30  	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
   19.31  	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
   19.32  	;;
   19.33 -	andcm r18=0x10,r18	// bit 4=~address-bit(61)
    19.34 +	and r18=0x10,r18	// bit 4=address-bit(59)
   19.35  (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
   19.36  	dep r24=-1,r24,IA64_PSR_ED_BIT,1
   19.37  	or r19=r19,r17		// insert PTE control bits into r19
   19.38 @@ -346,7 +346,12 @@ END(vmx_daccess_bit)
   19.39  ENTRY(vmx_break_fault)
   19.40  	mov r31=pr
   19.41      mov r19=11
   19.42 -    br.sptk.many vmx_dispatch_break_fault
   19.43 +    mov r30=cr.iim
   19.44 +    mov r29=0x1100
   19.45 +    ;;
   19.46 +    cmp4.eq  p6,p7=r29,r30
   19.47 +    (p6) br.dptk.few vmx_hypercall_dispatch
   19.48 +    (p7) br.sptk.many vmx_dispatch_break_fault
   19.49  END(vmx_break_fault)
   19.50  
   19.51  	.org vmx_ia64_ivt+0x3000
   19.52 @@ -929,9 +934,8 @@ END(vmx_dispatch_tlb_miss)
   19.53  
   19.54  
   19.55  ENTRY(vmx_dispatch_break_fault)
   19.56 -    cmp.ne pEml,pNonEml=r0,r0       /* force pNonEml =1, don't save r4 ~ r7 */
   19.57 +    VMX_SAVE_MIN_WITH_COVER_R19
   19.58      ;;
   19.59 -    VMX_SAVE_MIN_WITH_COVER_R19
   19.60      ;;
   19.61      alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
   19.62      mov out0=cr.ifa
   19.63 @@ -951,9 +955,37 @@ ENTRY(vmx_dispatch_break_fault)
   19.64      ;;
   19.65      mov rp=r14
   19.66      br.call.sptk.many b6=vmx_ia64_handle_break
   19.67 +    ;;
   19.68  END(vmx_dispatch_break_fault)
   19.69  
   19.70  
   19.71 +ENTRY(vmx_hypercall_dispatch)
   19.72 +    VMX_SAVE_MIN_WITH_COVER
   19.73 +    ssm psr.ic
   19.74 +    ;;
   19.75 +    srlz.i                  // guarantee that interruption collection is on
   19.76 +    ;;
   19.77 +    ssm psr.i               // restore psr.i
   19.78 +    adds r3=16,r2                // set up second base pointer
   19.79 +    ;;
   19.80 +    VMX_SAVE_REST
   19.81 +    ;;
   19.82 +    movl r14=ia64_leave_hypervisor
   19.83 +    movl r2=hyper_call_table
   19.84 +    ;;
   19.85 +    mov rp=r14
   19.86 +    shladd r2=r15,3,r2
   19.87 +    ;;
   19.88 +    ld8 r2=[r2]
   19.89 +    ;;
   19.90 +    mov b6=r2
   19.91 +    ;;
   19.92 +    br.call.sptk.many b6=b6
   19.93 +    ;;
   19.94 +END(vmx_hypercall_dispatch)
   19.95 +
   19.96 +
   19.97 +
   19.98  ENTRY(vmx_dispatch_interrupt)
   19.99      cmp.ne pEml,pNonEml=r0,r0       /* force pNonEml =1, don't save r4 ~ r7 */
  19.100      ;;
  19.101 @@ -976,3 +1008,39 @@ ENTRY(vmx_dispatch_interrupt)
  19.102  	mov rp=r14
  19.103  	br.call.sptk.many b6=vmx_ia64_handle_irq
  19.104  END(vmx_dispatch_interrupt)
  19.105 +
  19.106 +
  19.107 +
  19.108 +    .rodata
  19.109 +    .align 8
  19.110 +    .globl hyper_call_table
  19.111 +hyper_call_table:
  19.112 +    data8 hyper_not_support     //hyper_set_trap_table     /*  0 */
  19.113 +    data8 hyper_mmu_update
  19.114 +    data8 hyper_not_support     //hyper_set_gdt
  19.115 +    data8 hyper_not_support     //hyper_stack_switch
  19.116 +    data8 hyper_not_support     //hyper_set_callbacks
  19.117 +    data8 hyper_not_support     //hyper_fpu_taskswitch     /*  5 */
  19.118 +    data8 hyper_sched_op
  19.119 +    data8 hyper_dom0_op
  19.120 +    data8 hyper_not_support     //hyper_set_debugreg
  19.121 +    data8 hyper_not_support     //hyper_get_debugreg
  19.122 +    data8 hyper_not_support     //hyper_update_descriptor  /* 10 */
  19.123 +    data8 hyper_not_support     //hyper_set_fast_trap
  19.124 +    data8 hyper_dom_mem_op
  19.125 +    data8 hyper_not_support     //hyper_multicall
  19.126 +    data8 hyper_not_support     //hyper_update_va_mapping
  19.127 +    data8 hyper_not_support     //hyper_set_timer_op       /* 15 */
  19.128 +    data8 hyper_event_channel_op
  19.129 +    data8 hyper_xen_version
  19.130 +    data8 hyper_not_support     //hyper_console_io
  19.131 +    data8 hyper_not_support     //hyper_physdev_op
  19.132 +    data8 hyper_not_support     //hyper_grant_table_op     /* 20 */
  19.133 +    data8 hyper_not_support     //hyper_vm_assist
  19.134 +    data8 hyper_not_support     //hyper_update_va_mapping_otherdomain
  19.135 +    data8 hyper_not_support     //hyper_switch_vm86
  19.136 +    data8 hyper_not_support     //hyper_boot_vcpu
  19.137 +    data8 hyper_not_support     //hyper_ni_hypercall       /* 25 */
  19.138 +    data8 hyper_not_support     //hyper_mmuext_op
  19.139 +    data8 hyper_lock_page
  19.140 +    data8 hyper_set_shared_page
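
In C terms, the new break-fault path in vmx_ivt.S behaves roughly as
sketched below. This is an illustrative rendering of the assembly, not
code from the changeset, and the function names here are hypothetical.
A break whose cr.iim immediate equals 0x1100 is treated as a hypercall:
the handler is loaded from hyper_call_table indexed by the hypercall
number in r15 (shladd r2=r15,3,r2 scales the index by 8, the size of a
data8 slot), and any other break falls through to the existing
dispatcher:

    extern unsigned long (*hyper_call_table[])(void);

    void vmx_break_fault_c(unsigned long iim, unsigned long r15)
    {
        if (iim == 0x1100)              /* hypercall break immediate */
            hyper_call_table[r15]();    /* ld8 r2=[r2]; mov b6=r2; br.call b6 */
        else
            dispatch_break_fault();     /* unchanged break handling */
    }
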
    20.1 --- a/xen/arch/ia64/vmx_minstate.h	Sun Jun 19 08:48:29 2005 +0000
    20.2 +++ b/xen/arch/ia64/vmx_minstate.h	Sun Jun 19 16:17:17 2005 +0000
    20.3 @@ -282,11 +282,9 @@
    20.4      ;;                  \
    20.5  .mem.offset 0,0; st8.spill [r4]=r20,16;     \
    20.6  .mem.offset 8,0; st8.spill [r5]=r21,16;     \
    20.7 -    mov r18=b6;         \
    20.8      ;;                  \
    20.9  .mem.offset 0,0; st8.spill [r4]=r22,16;     \
   20.10  .mem.offset 8,0; st8.spill [r5]=r23,16;     \
   20.11 -    mov r19=b7;     \
   20.12      ;;                  \
   20.13  .mem.offset 0,0; st8.spill [r4]=r24,16;     \
   20.14  .mem.offset 8,0; st8.spill [r5]=r25,16;     \
   20.15 @@ -296,9 +294,11 @@
   20.16      ;;                  \
   20.17  .mem.offset 0,0; st8.spill [r4]=r28,16;     \
   20.18  .mem.offset 8,0; st8.spill [r5]=r29,16;     \
   20.19 +    mov r26=b6;         \
   20.20      ;;                  \
   20.21  .mem.offset 0,0; st8.spill [r4]=r30,16;     \
   20.22  .mem.offset 8,0; st8.spill [r5]=r31,16;     \
   20.23 +    mov r27=b7;     \
   20.24      ;;                  \
   20.25      mov r30=ar.unat;    \
   20.26      ;;      \
   20.27 @@ -317,8 +317,8 @@
   20.28      adds r2=PT(B6)-PT(F10),r2;      \
   20.29      adds r3=PT(B7)-PT(F11),r3;      \
   20.30      ;;          \
   20.31 -    st8 [r2]=r18,16;       /* b6 */    \
   20.32 -    st8 [r3]=r19,16;       /* b7 */    \
   20.33 +    st8 [r2]=r26,16;       /* b6 */    \
   20.34 +    st8 [r3]=r27,16;       /* b7 */    \
   20.35      ;;                  \
   20.36      st8 [r2]=r9;           /* ar.csd */    \
   20.37      st8 [r3]=r10;          /* ar.ssd */    \
    21.1 --- a/xen/arch/ia64/vmx_process.c	Sun Jun 19 08:48:29 2005 +0000
    21.2 +++ b/xen/arch/ia64/vmx_process.c	Sun Jun 19 16:17:17 2005 +0000
    21.3 @@ -116,7 +116,6 @@ vmx_ia64_handle_break (unsigned long ifa
    21.4  		    case FW_HYPERCALL_EFI_GET_TIME:
    21.5  			{
    21.6  			unsigned long *tv, *tc;
    21.7 -			fooefi();
    21.8  			vmx_vcpu_get_gr(v, 32, &tv);
    21.9  			vmx_vcpu_get_gr(v, 33, &tc);
   21.10  			printf("efi_get_time(%p,%p) called...",tv,tc);
    22.1 --- a/xen/arch/ia64/vtlb.c	Sun Jun 19 08:48:29 2005 +0000
    22.2 +++ b/xen/arch/ia64/vtlb.c	Sun Jun 19 16:17:17 2005 +0000
    22.3 @@ -252,7 +252,7 @@ static thash_data_t *_vtlb_next_overlap_
    22.4  
    22.5      /* Find overlap TLB entry */
    22.6      for (cch=priv->cur_cch; cch; cch = cch->next) {
    22.7 -        if ( ((1UL<<cch->section) & priv->s_sect.v) &&
    22.8 +        if ( ( cch->tc ? priv->s_sect.tc : priv->s_sect.tr )  &&
    22.9              __is_tlb_overlap(hcb, cch, priv->rid, priv->cl,
   22.10                  priv->_curva, priv->_eva) ) {
   22.11              return cch;
   22.12 @@ -322,7 +322,7 @@ int __tlb_to_vhpt(thash_cb_t *hcb,
   22.13  
   22.14  void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx)
   22.15  {
   22.16 -    if ( hcb->ht != THASH_TLB || entry->section != THASH_TLB_TR ) {
   22.17 +    if ( hcb->ht != THASH_TLB || entry->tc ) {
   22.18          panic("wrong parameter\n");
   22.19      }
   22.20      entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
   22.21 @@ -356,7 +356,7 @@ thash_data_t *__alloc_chain(thash_cb_t *
    22.22   *  3: The caller needs to make sure the new entry will not overlap
    22.23   *     with any existing entry.
   22.24   */
   22.25 -static void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
   22.26 +void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
   22.27  {
   22.28      thash_data_t    *hash_table, *cch;
   22.29      rr_t  vrr;
   22.30 @@ -411,7 +411,7 @@ void thash_insert(thash_cb_t *hcb, thash
   22.31      rr_t  vrr;
   22.32      
   22.33      vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr);
   22.34 -    if ( entry->ps != vrr.ps && entry->section==THASH_TLB_TC) {
   22.35 +    if ( entry->ps != vrr.ps && entry->tc ) {
    22.36          panic("Multiple page sizes not supported yet\n");
   22.37      }
   22.38      entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
   22.39 @@ -450,7 +450,7 @@ static void rem_vtlb(thash_cb_t *hcb, th
   22.40      thash_internal_t *priv = &hcb->priv;
   22.41      int idx;
   22.42      
   22.43 -    if ( entry->section == THASH_TLB_TR ) {
   22.44 +    if ( !entry->tc ) {
   22.45          return rem_tr(hcb, entry->cl, entry->tr_idx);
   22.46      }
   22.47      rem_thash(hcb, entry);
   22.48 @@ -525,19 +525,19 @@ thash_data_t *thash_find_overlap(thash_c
   22.49              thash_data_t *in, search_section_t s_sect)
   22.50  {
   22.51      return (hcb->find_overlap)(hcb, in->vadr, 
   22.52 -            in->ps, in->rid, in->cl, s_sect);
   22.53 +            PSIZE(in->ps), in->rid, in->cl, s_sect);
   22.54  }
   22.55  
   22.56  static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb, 
   22.57 -        u64 va, u64 ps, int rid, char cl, search_section_t s_sect)
   22.58 +        u64 va, u64 size, int rid, char cl, search_section_t s_sect)
   22.59  {
   22.60      thash_data_t    *hash_table;
   22.61      thash_internal_t *priv = &hcb->priv;
   22.62      u64     tag;
   22.63      rr_t    vrr;
   22.64  
   22.65 -    priv->_curva = PAGEALIGN(va,ps);
   22.66 -    priv->_eva = priv->_curva + PSIZE(ps);
   22.67 +    priv->_curva = va & ~(size-1);
   22.68 +    priv->_eva = priv->_curva + size;
   22.69      priv->rid = rid;
   22.70      vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
   22.71      priv->ps = vrr.ps;
   22.72 @@ -553,15 +553,15 @@ static thash_data_t *vtlb_find_overlap(t
   22.73  }
   22.74  
   22.75  static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb, 
   22.76 -        u64 va, u64 ps, int rid, char cl, search_section_t s_sect)
   22.77 +        u64 va, u64 size, int rid, char cl, search_section_t s_sect)
   22.78  {
   22.79      thash_data_t    *hash_table;
   22.80      thash_internal_t *priv = &hcb->priv;
   22.81      u64     tag;
   22.82      rr_t    vrr;
   22.83  
   22.84 -    priv->_curva = PAGEALIGN(va,ps);
   22.85 -    priv->_eva = priv->_curva + PSIZE(ps);
   22.86 +    priv->_curva = va & ~(size-1);
   22.87 +    priv->_eva = priv->_curva + size;
   22.88      priv->rid = rid;
   22.89      vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
   22.90      priv->ps = vrr.ps;
   22.91 @@ -691,13 +691,46 @@ void thash_purge_entries_ex(thash_cb_t *
   22.92  {
   22.93      thash_data_t    *ovl;
   22.94  
   22.95 -    ovl = (hcb->find_overlap)(hcb, va, ps, rid, cl, p_sect);
   22.96 +    ovl = (hcb->find_overlap)(hcb, va, PSIZE(ps), rid, cl, p_sect);
   22.97      while ( ovl != NULL ) {
   22.98          (hcb->rem_hash)(hcb, ovl);
   22.99          ovl = (hcb->next_overlap)(hcb);
  22.100      };
  22.101  }
  22.102  
  22.103 +/*
   22.104 + * Purge overlapping TCs and then insert the new entry, to emulate itc ops.
   22.105 + *    Note: only TC entries can be purged and inserted this way.
  22.106 + */
  22.107 +void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in)
  22.108 +{
  22.109 +    thash_data_t    *ovl;
  22.110 +    search_section_t sections;
  22.111 +
  22.112 +#ifdef   XEN_DEBUGGER
   22.113 +    rr_t vrr = (hcb->get_rr_fn)(hcb->vcpu,in->vadr);
  22.114 +	if ( in->ps != vrr.ps || hcb->ht != THASH_TLB || !in->tc ) {
  22.115 +		panic ("Oops, wrong call for purge_and_insert\n");
  22.116 +		return;
  22.117 +	}
  22.118 +#endif
  22.119 +    in->vadr = PAGEALIGN(in->vadr,in->ps);
  22.120 +    in->ppn = PAGEALIGN(in->ppn, in->ps-12);
  22.121 +    sections.tr = 0;
  22.122 +    sections.tc = 1;
  22.123 +    ovl = (hcb->find_overlap)(hcb, in->vadr, PSIZE(in->ps),
  22.124 +    				 in->rid, in->cl, sections);
  22.125 +    if(ovl)
  22.126 +        (hcb->rem_hash)(hcb, ovl);
  22.127 +#ifdef   XEN_DEBUGGER
  22.128 +    ovl = (hcb->next_overlap)(hcb);
  22.129 +    if ( ovl ) {
  22.130 +		panic ("Oops, 2+ overlaps for purge_and_insert\n");
  22.131 +		return;
  22.132 +    }
  22.133 +#endif
  22.134 +    (hcb->ins_hash)(hcb, in, in->vadr);
  22.135 +}
  22.136  
  22.137  /*
  22.138   * Purge all TCs or VHPT entries including those in Hash table.
  22.139 @@ -766,6 +799,42 @@ thash_data_t *vtlb_lookup_ex(thash_cb_t 
  22.140      return NULL;
  22.141  }
  22.142  
  22.143 +/*
  22.144 + * Lock/Unlock TC if found.
   22.145 + *     NOTE: only pages of the preferred size can be handled.
  22.146 + *   return:
  22.147 + *          1: failure
  22.148 + *          0: success
  22.149 + */
  22.150 +int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock)
  22.151 +{
  22.152 +	thash_data_t	*ovl;
  22.153 +	search_section_t	sections;
  22.154 +
   22.155 +	sections.tr = 1;
   22.156 +	sections.tc = 1;
  22.157 +	ovl = (hcb->find_overlap)(hcb, va, size, rid, cl, sections);
  22.158 +	if ( ovl ) {
  22.159 +		if ( !ovl->tc ) {
  22.160 +//			panic("Oops, TR for lock\n");
  22.161 +			return 0;
  22.162 +		}
  22.163 +		else if ( lock ) {
  22.164 +			if ( ovl->locked ) {
  22.165 +				DPRINTK("Oops, already locked entry\n");
  22.166 +			}
  22.167 +			ovl->locked = 1;
  22.168 +		}
  22.169 +		else if ( !lock ) {
  22.170 +			if ( !ovl->locked ) {
  22.171 +				DPRINTK("Oops, already unlocked entry\n");
  22.172 +			}
  22.173 +			ovl->locked = 0;
  22.174 +		}
  22.175 +		return 0;
  22.176 +	}
  22.177 +	return 1;
  22.178 +}
  22.179  
  22.180  /*
  22.181   * Notifier when TLB is deleted from hash table and its collision chain.
  22.182 @@ -824,7 +893,6 @@ void thash_init(thash_cb_t *hcb, u64 sz)
  22.183      }
  22.184  }
  22.185  
  22.186 -
  22.187  #ifdef  VTLB_DEBUG
  22.188  static  u64 cch_length_statistics[MAX_CCH_LENGTH+1];
  22.189  u64  sanity_check=0;
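
Putting the vtlb.c additions together: vmx_vcpu_itc_d/itc_i now go
through thash_purge_and_insert (purge any overlapping TC, then insert),
while vmx_lock_guest_dtc pins a TC entry via thash_lock_tc. A minimal
sketch of the lock path, following the 0-success/1-failure convention
documented above (vcpu and va are assumed to be in scope):

    /* pin the guest DTC entry covering va (preferred page size only) */
    if (vmx_lock_guest_dtc(vcpu, va, 1))
        ;   /* 1: no matching TC entry was found, nothing was locked */

    /* ... window during which the entry must stay in the vTLB ... */

    vmx_lock_guest_dtc(vcpu, va, 0);    /* unlock it again */
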
    23.1 --- a/xen/arch/ia64/xenmem.c	Sun Jun 19 08:48:29 2005 +0000
    23.2 +++ b/xen/arch/ia64/xenmem.c	Sun Jun 19 16:17:17 2005 +0000
    23.3 @@ -52,7 +52,7 @@ paging_init (void)
    23.4  		panic("Not enough memory to bootstrap Xen.\n");
    23.5  
    23.6  	printk("machine to physical table: 0x%lx\n", (u64)mpt_table);
    23.7 -	memset(mpt_table, 0x55, mpt_table_size);
    23.8 +	memset(mpt_table, INVALID_M2P_ENTRY, mpt_table_size);
    23.9  
   23.10  	/* Any more setup here? On VMX enabled platform,
   23.11  	 * there's no need to keep guest linear pg table,
    24.1 --- a/xen/arch/ia64/xenmisc.c	Sun Jun 19 08:48:29 2005 +0000
    24.2 +++ b/xen/arch/ia64/xenmisc.c	Sun Jun 19 16:17:17 2005 +0000
    24.3 @@ -63,13 +63,7 @@ void sync_lazy_execstate_mask(cpumask_t 
    24.4  void sync_lazy_execstate_all(void) {}
    24.5  
    24.6  int grant_table_create(struct domain *d) { return 0; }
    24.7 -void grant_table_destroy(struct domain *d)
    24.8 -{
    24.9 -	printf("grant_table_destroy: domain_destruct not tested!!!\n");
   24.10 -	printf("grant_table_destroy: ensure atomic_* calls work in domain_destruct!!\n");
   24.11 -	dummy();
   24.12 -	return;
   24.13 -}
   24.14 +void grant_table_destroy(struct domain *d) { return; }
   24.15  
   24.16  struct pt_regs *guest_cpu_user_regs(void) { return ia64_task_regs(current); }
   24.17  
    25.1 --- a/xen/common/Makefile	Sun Jun 19 08:48:29 2005 +0000
    25.2 +++ b/xen/common/Makefile	Sun Jun 19 16:17:17 2005 +0000
    25.3 @@ -1,8 +1,8 @@
    25.4  
    25.5  include $(BASEDIR)/Rules.mk
    25.6  
    25.7 -ifeq ($(TARGET_ARCH),ia64) 
    25.8 -OBJS := $(subst dom_mem_ops.o,,$(OBJS))
    25.9 +ifeq ($(TARGET_ARCH),ia64)
   25.10 +#OBJS := $(subst dom_mem_ops.o,,$(OBJS))
   25.11  OBJS := $(subst grant_table.o,,$(OBJS))
   25.12  endif
   25.13  
    26.1 --- a/xen/include/asm-ia64/config.h	Sun Jun 19 08:48:29 2005 +0000
    26.2 +++ b/xen/include/asm-ia64/config.h	Sun Jun 19 16:17:17 2005 +0000
    26.3 @@ -177,8 +177,7 @@ void sort_main_extable(void);
    26.4  // see include/asm-x86/atomic.h (different from standard linux)
    26.5  #define _atomic_set(v,i) (((v).counter) = (i))
    26.6  #define _atomic_read(v) ((v).counter)
    26.7 -// FIXME following needs work
    26.8 -#define atomic_compareandswap(old, new, v) old
    26.9 +#define atomic_compareandswap(old, new, v) ((atomic_t){ cmpxchg(v, _atomic_read(old), _atomic_read(new)) })
   26.10  
   26.11  // see include/asm-ia64/mm.h, handle remaining pfn_info uses until gone
   26.12  #define pfn_info page
   26.13 @@ -227,6 +226,8 @@ struct screen_info { };
   26.14  
   26.15  #define FORCE_CRASH()	asm("break 0;;");
   26.16  
   26.17 +#define dummy()	dummy_called(__FUNCTION__)
   26.18 +
   26.19  // these declarations got moved at some point, find a better place for them
   26.20  extern int ht_per_core;
   26.21  
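
The rewritten atomic_compareandswap now has real compare-and-swap
semantics instead of unconditionally returning 'old': *v is replaced
by 'new' only if it currently equals 'old', and the previous value is
returned wrapped in an atomic_t. A minimal usage sketch (variable
names are illustrative; v is the atomic_t being updated, passed by
address):

    atomic_t v, old, new, prev;

    _atomic_set(v, 0);
    _atomic_set(old, 0);
    _atomic_set(new, 1);
    prev = atomic_compareandswap(old, new, &v);
    if (_atomic_read(prev) == _atomic_read(old))
        ;   /* the swap happened: v now holds 1 */
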
    27.1 --- a/xen/include/asm-ia64/domain.h	Sun Jun 19 08:48:29 2005 +0000
    27.2 +++ b/xen/include/asm-ia64/domain.h	Sun Jun 19 16:17:17 2005 +0000
    27.3 @@ -2,18 +2,17 @@
    27.4  #define __ASM_DOMAIN_H__
    27.5  
    27.6  #include <linux/thread_info.h>
    27.7 +#include <asm/tlb.h>
    27.8  #ifdef CONFIG_VTI
    27.9  #include <asm/vmx_vpd.h>
   27.10  #include <asm/vmmu.h>
   27.11  #include <asm/regionreg.h>
   27.12 +#include <public/arch-ia64.h>
   27.13  #endif // CONFIG_VTI
   27.14  #include <xen/list.h>
   27.15  
   27.16  extern void arch_do_createdomain(struct vcpu *);
   27.17  
   27.18 -extern int arch_final_setup_guestos(
   27.19 -    struct vcpu *, struct vcpu_guest_context *);
   27.20 -
   27.21  extern void domain_relinquish_resources(struct domain *);
   27.22  
   27.23  #ifdef CONFIG_VTI
   27.24 @@ -36,7 +35,15 @@ struct arch_domain {
   27.25      int imp_va_msb;
   27.26      ia64_rr emul_phy_rr0;
   27.27      ia64_rr emul_phy_rr4;
   27.28 -    u64 *pmt;	/* physical to machine table */
   27.29 +    unsigned long *pmt;	/* physical to machine table */
   27.30 +    /*
    27.31 +     * max_pfn is the maximum page frame number in guest physical space,
    27.32 +     * including intervening I/O ranges and memory holes. This differs from
    27.33 +     * max_pages in the domain struct, which indicates the maximum memory size.
   27.34 +     */
   27.35 +    unsigned long max_pfn;
   27.36 +    unsigned int section_nr;
    27.37 +    mm_section_t *sections;	/* Describes memory holes (not used for Dom0) */
   27.38  #endif  //CONFIG_VTI
   27.39      u64 xen_vastart;
   27.40      u64 xen_vaend;
    28.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.2 +++ b/xen/include/asm-ia64/event.h	Sun Jun 19 16:17:17 2005 +0000
    28.3 @@ -0,0 +1,16 @@
    28.4 +/******************************************************************************
    28.5 + * event.h
    28.6 + *
    28.7 + * A nice interface for passing asynchronous events to guest OSes.
    28.8 + * (architecture-dependent part)
    28.9 + *
   28.10 + */
   28.11 +
   28.12 +#ifndef __ASM_EVENT_H__
   28.13 +#define __ASM_EVENT_H__
   28.14 +
   28.15 +static inline void evtchn_notify(struct vcpu *v)
   28.16 +{
   28.17 +}
   28.18 +
   28.19 +#endif
    29.1 --- a/xen/include/asm-ia64/mm.h	Sun Jun 19 08:48:29 2005 +0000
    29.2 +++ b/xen/include/asm-ia64/mm.h	Sun Jun 19 16:17:17 2005 +0000
    29.3 @@ -27,43 +27,12 @@ typedef unsigned long page_flags_t;
    29.4  
    29.5  /*
    29.6   * Per-page-frame information.
    29.7 + * 
    29.8 + * Every architecture must ensure the following:
    29.9 + *  1. 'struct pfn_info' contains a 'struct list_head list'.
   29.10 + *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
   29.11   */
   29.12 -
   29.13 -//FIXME: This can go away when common/dom0_ops.c is fully arch-independent
   29.14 -#if 0
   29.15 -struct pfn_info
   29.16 -{
   29.17 -    /* Each frame can be threaded onto a doubly-linked list. */
   29.18 -    struct list_head list;
   29.19 -    /* Context-dependent fields follow... */
   29.20 -    union {
   29.21 -
   29.22 -        /* Page is in use by a domain. */
   29.23 -        struct {
   29.24 -            /* Owner of this page. */
   29.25 -            struct domain *domain;
   29.26 -            /* Reference count and various PGC_xxx flags and fields. */
   29.27 -            u32 count_info;
   29.28 -            /* Type reference count and various PGT_xxx flags and fields. */
   29.29 -            u32 type_info;
   29.30 -        } inuse;
   29.31 -
   29.32 -        /* Page is on a free list. */
   29.33 -        struct {
   29.34 -            /* Mask of possibly-tainted TLBs. */
   29.35 -            unsigned long cpu_mask;
   29.36 -            /* Must be at same offset as 'u.inuse.count_flags'. */
   29.37 -            u32 __unavailable;
   29.38 -            /* Order-size of the free chunk this page is the head of. */
   29.39 -            u8 order;
   29.40 -        } free;
   29.41 -
   29.42 -    } u;
   29.43 -
   29.44 -    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
   29.45 -    u32 tlbflush_timestamp;
   29.46 -};
   29.47 -#endif
   29.48 +#define PFN_ORDER(_pfn)	((_pfn)->u.free.order)
   29.49  
   29.50  struct page
   29.51  {
   29.52 @@ -82,7 +51,7 @@ struct page
   29.53          /* Page is in use by a domain. */
   29.54          struct {
   29.55              /* Owner of this page. */
   29.56 -            u64	_domain;
   29.57 +            u32	_domain;
   29.58              /* Type reference count and various PGT_xxx flags and fields. */
   29.59              u32 type_info;
   29.60          } inuse;
   29.61 @@ -104,37 +73,49 @@ struct page
   29.62  
   29.63  #define set_page_count(p,v) 	atomic_set(&(p)->_count, v - 1)
   29.64  
   29.65 -//FIXME: These can go away when common/dom0_ops.c is fully arch-independent
   29.66 - /* The following page types are MUTUALLY EXCLUSIVE. */
    29.67 +/* Only a small set of flags is defined so far on IA-64 */
   29.68 +/* The following page types are MUTUALLY EXCLUSIVE. */
   29.69  #define PGT_none            (0<<29) /* no special uses of this page */
   29.70  #define PGT_l1_page_table   (1<<29) /* using this page as an L1 page table? */
   29.71  #define PGT_l2_page_table   (2<<29) /* using this page as an L2 page table? */
   29.72  #define PGT_l3_page_table   (3<<29) /* using this page as an L3 page table? */
   29.73  #define PGT_l4_page_table   (4<<29) /* using this page as an L4 page table? */
   29.74 -#define PGT_gdt_page        (5<<29) /* using this page in a GDT? */
   29.75 -#define PGT_ldt_page        (6<<29) /* using this page in an LDT? */
   29.76 -#define PGT_writeable_page  (7<<29) /* has writable mappings of this page? */
   29.77 -#define PGT_type_mask       (7<<29) /* Bits 29-31. */
   29.78 +#define PGT_writeable_page  (5<<29) /* has writable mappings of this page? */
    29.79 +#define PGT_type_mask       (7<<29) /* Bits 29-31. */
   29.80 +
   29.81   /* Has this page been validated for use as its current type? */
   29.82  #define _PGT_validated      28
   29.83  #define PGT_validated       (1<<_PGT_validated)
   29.84 - /* 28-bit count of uses of this frame as its current type. */
   29.85 -#define PGT_count_mask      ((1<<28)-1)
   29.86 +/* Owning guest has pinned this page to its current type? */
   29.87 +#define _PGT_pinned         27
   29.88 +#define PGT_pinned          (1U<<_PGT_pinned)
   29.89 +
   29.90 +/* 27-bit count of uses of this frame as its current type. */
   29.91 +#define PGT_count_mask      ((1U<<27)-1)
   29.92  
   29.93  /* Cleared when the owning guest 'frees' this page. */
   29.94  #define _PGC_allocated      31
   29.95  #define PGC_allocated       (1U<<_PGC_allocated)
   29.96 -#define PFN_ORDER(_pfn)	((_pfn)->u.free.order)
   29.97 +/* Set when the page is used as a page table */
   29.98 +#define _PGC_page_table     30
   29.99 +#define PGC_page_table      (1U<<_PGC_page_table)
  29.100 +/* 30-bit count of references to this frame. */
  29.101 +#define PGC_count_mask      ((1U<<30)-1)
  29.102  
  29.103  #define IS_XEN_HEAP_FRAME(_pfn) ((page_to_phys(_pfn) < xenheap_phys_end) \
  29.104  				 && (page_to_phys(_pfn) >= xen_pstart))
  29.105  
  29.106 -#define pickle_domptr(_d)	((u64)(_d))
  29.107 -#define unpickle_domptr(_d)	((struct domain*)(_d))
  29.108 +static inline struct domain *unpickle_domptr(u32 _d)
  29.109 +{ return (_d == 0) ? NULL : __va(_d); }
  29.110 +static inline u32 pickle_domptr(struct domain *_d)
  29.111 +{ return (_d == NULL) ? 0 : (u32)__pa(_d); }
  29.112  
  29.113  #define page_get_owner(_p)	(unpickle_domptr((_p)->u.inuse._domain))
  29.114  #define page_set_owner(_p, _d)	((_p)->u.inuse._domain = pickle_domptr(_d))
  29.115  
  29.116 +/* Dummy now */
  29.117 +#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom) do { } while (0)
  29.118 +
  29.119  extern struct pfn_info *frame_table;
  29.120  extern unsigned long frame_table_size;
  29.121  extern struct list_head free_list;
  29.122 @@ -151,16 +132,46 @@ void add_to_domain_alloc_list(unsigned l
  29.123  
  29.124  static inline void put_page(struct pfn_info *page)
  29.125  {
  29.126 -	dummy();
  29.127 +    u32 nx, x, y = page->count_info;
  29.128 +
  29.129 +    do {
  29.130 +	x = y;
  29.131 +	nx = x - 1;
  29.132 +    }
  29.133 +    while (unlikely((y = cmpxchg(&page->count_info, x, nx)) != x));
  29.134 +
  29.135 +    if (unlikely((nx & PGC_count_mask) == 0))
  29.136 +	free_domheap_page(page);
  29.137  }
  29.138  
  29.139 -
  29.140 +/* count_info and ownership are checked atomically. */
  29.141  static inline int get_page(struct pfn_info *page,
  29.142                             struct domain *domain)
  29.143  {
  29.144 -	dummy();
  29.145 +    u64 x, nx, y = *((u64*)&page->count_info);
  29.146 +    u32 _domain = pickle_domptr(domain);
  29.147 +
  29.148 +    do {
  29.149 +	x = y;
  29.150 +	nx = x + 1;
  29.151 +	if (unlikely((x & PGC_count_mask) == 0) ||	/* Not allocated? */
  29.152 +	    unlikely((nx & PGC_count_mask) == 0) ||	/* Count overflow? */
  29.153 +	    unlikely((x >> 32) != _domain)) {		/* Wrong owner? */
  29.154 +	    DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%08x\n",
   29.155 +		page_to_pfn(page), domain, unpickle_domptr(_domain),
   29.156 +		x, page->u.inuse.type_info);
  29.157 +	    return 0;
  29.158 +	}
  29.159 +    }
   29.160 +    while (unlikely((y = cmpxchg((u64 *)&page->count_info, x, nx)) != x));
  29.161 +
  29.162 +    return 1;
  29.163  }
  29.164  
  29.165 +/* No type info now */
  29.166 +#define put_page_and_type(page) put_page((page))
   29.167 +#define get_page_and_type(page, domain, type) get_page((page), (domain))
  29.168 +
  29.169  #define	set_machinetophys(_mfn, _pfn) do { } while(0);
  29.170  
  29.171  #ifdef MEMORY_GUARD
  29.172 @@ -364,17 +375,40 @@ extern unsigned long *mpt_table;
  29.173  #undef machine_to_phys_mapping
  29.174  #define machine_to_phys_mapping	mpt_table
  29.175  
   29.176 +#define INVALID_M2P_ENTRY        (~0UL)
   29.177 +#define VALID_M2P(_e)            (!((_e) & (1UL<<63)))
  29.178 +#define IS_INVALID_M2P_ENTRY(_e) (!VALID_M2P(_e))
   29.179  /* If the pmt table is provided by the control panel later, we need __get_user
   29.180  * here. However, if it's allocated by the HV, we can access it directly.
  29.181  */
  29.182 -#define phys_to_machine_mapping(d, gpfn)	\
  29.183 -    ((d) == dom0 ? gpfn : (d)->arch.pmt[(gpfn)])
  29.184 +#define phys_to_machine_mapping(d, gpfn)			\
   29.185 +    ((d) == dom0 ? (gpfn) :					\
   29.186 +	((gpfn) <= (d)->arch.max_pfn ? (d)->arch.pmt[(gpfn)] :	\
  29.187 +		INVALID_MFN))
  29.188  
  29.189  #define __mfn_to_gpfn(_d, mfn)			\
  29.190      machine_to_phys_mapping[(mfn)]
  29.191  
  29.192  #define __gpfn_to_mfn(_d, gpfn)			\
  29.193      phys_to_machine_mapping((_d), (gpfn))
  29.194 +
  29.195 +#define __gpfn_invalid(_d, gpfn)			\
  29.196 +	(__gpfn_to_mfn((_d), (gpfn)) & GPFN_INV_MASK)
  29.197 +
  29.198 +#define __gpfn_valid(_d, gpfn)	!__gpfn_invalid(_d, gpfn)
  29.199 +
   29.200 +/* Return the I/O type bits if the gpfn is a valid I/O page, else 0 */
  29.201 +#define __gpfn_is_io(_d, gpfn)				\
  29.202 +	(__gpfn_valid(_d, gpfn) ? 			\
  29.203 +	(__gpfn_to_mfn((_d), (gpfn)) & GPFN_IO_MASK) : 0)
  29.204 +
  29.205 +#define __gpfn_is_mem(_d, gpfn)				\
  29.206 +	(__gpfn_valid(_d, gpfn) ?			\
  29.207 +	((__gpfn_to_mfn((_d), (gpfn)) & GPFN_IO_MASK) == GPFN_MEM) : 0)
  29.208 +
  29.209 +
  29.210 +#define __gpa_to_mpa(_d, gpa)   \
  29.211 +    ((__gpfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
  29.212  #endif // CONFIG_VTI
  29.213  
  29.214  #endif /* __ASM_IA64_MM_H__ */
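
With the CONFIG_VTI helpers above, a guest-physical lookup is expected
to check the pmt entry's type bits before trusting the mfn. A minimal
sketch of the intended use (d and gpfn assumed in scope; the GPFN_*
encodings appear in the public/arch-ia64.h hunk later in this
changeset):

    unsigned long mfn = __gpfn_to_mfn(d, gpfn);

    if (__gpfn_invalid(d, gpfn)) {
        /* hole in guest physical space: GPFN_INV_MASK bits are set */
    } else if (__gpfn_is_io(d, gpfn)) {
        /* I/O range: bits 56-58 of the entry carry the GPFN_* type */
    } else {
        /* __gpfn_is_mem() case: mfn is a real machine frame number */
    }
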
    30.1 --- a/xen/include/asm-ia64/tlb.h	Sun Jun 19 08:48:29 2005 +0000
    30.2 +++ b/xen/include/asm-ia64/tlb.h	Sun Jun 19 16:17:17 2005 +0000
    30.3 @@ -39,11 +39,11 @@ typedef struct {
    30.4  typedef union {
    30.5          unsigned long   value;
    30.6          struct {
    30.7 -                uint64_t ve : 1;
    30.8 -                uint64_t rv1 : 1;
    30.9 -                uint64_t ps  : 6;
   30.10 -                uint64_t rid : 24;
   30.11 -                uint64_t rv2 : 32;
   30.12 +                unsigned long ve : 1;
   30.13 +                unsigned long rv1 : 1;
   30.14 +                unsigned long ps  : 6;
   30.15 +                unsigned long rid : 24;
   30.16 +                unsigned long rv2 : 32;
   30.17          };
   30.18  } rr_t;
   30.19  #endif // CONFIG_VTI
    31.1 --- a/xen/include/asm-ia64/vcpu.h	Sun Jun 19 08:48:29 2005 +0000
    31.2 +++ b/xen/include/asm-ia64/vcpu.h	Sun Jun 19 16:17:17 2005 +0000
    31.3 @@ -23,8 +23,8 @@ typedef struct pt_regs REGS;
    31.4  
    31.5  #define PRIVOP_ADDR_COUNT
    31.6  #ifdef PRIVOP_ADDR_COUNT
    31.7 -#define _RSM 0
    31.8 -#define _SSM 1
    31.9 +#define _GET_IFA 0
   31.10 +#define _THASH 1
   31.11  #define PRIVOP_COUNT_NINSTS 2
   31.12  #define PRIVOP_COUNT_NADDRS 30
   31.13  
    32.1 --- a/xen/include/asm-ia64/vhpt.h	Sun Jun 19 08:48:29 2005 +0000
    32.2 +++ b/xen/include/asm-ia64/vhpt.h	Sun Jun 19 16:17:17 2005 +0000
    32.3 @@ -140,12 +140,20 @@ CC_##Name:;							\
    32.4  	mov r16 = cr.ifa;					\
    32.5  	movl r30 = int_counts;					\
    32.6  	;;							\
    32.7 +	extr.u r17=r16,59,5					\
    32.8 +	;;							\
    32.9 +	cmp.eq p6,p0=0x1e,r17;					\
   32.10 +(p6)	br.cond.spnt	.Alt_##Name				\
   32.11 +	;;							\
   32.12 +	cmp.eq p6,p0=0x1d,r17;					\
   32.13 +(p6)	br.cond.spnt	.Alt_##Name				\
   32.14 +	;;							\
   32.15  	thash r28 = r16;					\
   32.16  	adds  r30 = CAUSE_VHPT_CC_HANDLED << 3, r30;		\
   32.17  	;;							\
   32.18  	ttag r19 = r16;						\
   32.19 -	ld8 r27 = [r30];					\
   32.20 -	adds r17 = VLE_CCHAIN_OFFSET, r28;			\
    32.21 +	ld8 r27 = [r30];					\
    32.22 +	adds r17 = VLE_CCHAIN_OFFSET, r28;			\
   32.23  	;;							\
   32.24  	ld8 r17 = [r17];					\
   32.25  	;;							\
   32.26 @@ -192,6 +200,11 @@ CC_##Name:;							\
   32.27  	rfi;							\
   32.28  	;;							\
   32.29  								\
   32.30 +.Alt_##Name:;							\
   32.31 +	mov pr = r31, 0x1ffff;					\
   32.32 +	;;							\
   32.33 +	br.cond.sptk late_alt_##Name				\
   32.34 +	;;							\
   32.35  .Out_##Name:;							\
   32.36  	mov pr = r31, 0x1ffff;					\
   32.37  	;;							\
    33.1 --- a/xen/include/asm-ia64/vmmu.h	Sun Jun 19 08:48:29 2005 +0000
    33.2 +++ b/xen/include/asm-ia64/vmmu.h	Sun Jun 19 16:17:17 2005 +0000
    33.3 @@ -28,13 +28,13 @@
    33.4  #include "public/xen.h"
    33.5  #include "asm/tlb.h"
    33.6  
    33.7 -#define         THASH_TLB_TR            0
    33.8 -#define         THASH_TLB_TC            1
    33.9 -#define         THASH_TLB_FM            2       // foreign map
   33.10 +//#define         THASH_TLB_TR            0
   33.11 +//#define         THASH_TLB_TC            1
   33.12 +
   33.13  
   33.14 -#define         THASH_SECTION_TR        (1<<0)
   33.15 -#define         THASH_SECTION_TC        (1<<1)
   33.16 -#define         THASH_SECTION_FM        (1<<2)
    33.17 +// bit definitions of the TR/TC search combination
   33.18 +//#define         THASH_SECTION_TR        (1<<0)
   33.19 +//#define         THASH_SECTION_TC        (1<<1)
   33.20  
   33.21  /*
    33.22   * Next bit definitions must be the same as THASH_TLB_XX
   33.23 @@ -43,8 +43,7 @@ typedef union search_section {
   33.24          struct {
   33.25                  u32 tr : 1;
   33.26                  u32 tc : 1;
   33.27 -                u32 fm : 1;
   33.28 -                u32 rsv: 29;
   33.29 +                u32 rsv: 30;
   33.30          };
   33.31          u32     v;
   33.32  } search_section_t;
   33.33 @@ -80,12 +79,10 @@ typedef struct thash_data {
   33.34              u64 ig1  :  11; //53-63
   33.35          };
   33.36          struct {
   33.37 -            u64 __rv1 : 12;
   33.38 -            // sizeof(domid_t) must be less than 38!!! Refer to its definition
   33.39 -            u64 fm_dom : 38; // 12-49 foreign map domain ID
   33.40 -            u64 __rv2 : 3;   // 50-52
   33.41 +            u64 __rv1 : 53;	// 0-52
   33.42              // next extension to ig1, only for TLB instance
   33.43 -            u64 section : 2;     // 53-54 TR, TC or FM (thash_TLB_XX)
    33.44 +            u64 tc : 1;     // 53: 1 = TC, 0 = TR
    33.45 +            u64 locked  : 1;	// 54: entry locked or not
   33.46              CACHE_LINE_TYPE cl : 1; // I side or D side cache line
    33.47              u64 nomap : 1;   // entry can't be inserted into machine TLB.
   33.48              u64 __ig1  :  5; // 56-61
   33.49 @@ -227,8 +224,8 @@ typedef struct thash_cb {
   33.50             INVALID_ENTRY(hcb, hash) = 1;        \
   33.51             hash->next = NULL; }
   33.52  
   33.53 -#define PURGABLE_ENTRY(hcb,en)          \
   33.54 -                ((hcb)->ht == THASH_VHPT || (en)->section == THASH_TLB_TC)
   33.55 +#define PURGABLE_ENTRY(hcb,en)  \
   33.56 +		((hcb)->ht == THASH_VHPT || ( (en)->tc && !(en->locked)) )
   33.57  
   33.58  
   33.59  /*
   33.60 @@ -306,7 +303,7 @@ extern void thash_purge_entries_ex(thash
   33.61                          u64 rid, u64 va, u64 sz, 
   33.62                          search_section_t p_sect, 
   33.63                          CACHE_LINE_TYPE cl);
   33.64 -extern thash_cb_t *init_domain_tlb(struct vcpu *d);
   33.65 +extern void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in);
   33.66  
   33.67  /*
   33.68   * Purge all TCs or VHPT entries including those in Hash table.
   33.69 @@ -323,6 +320,7 @@ extern thash_data_t *vtlb_lookup(thash_c
   33.70                          thash_data_t *in);
   33.71  extern thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb, 
   33.72                          u64 rid, u64 va,CACHE_LINE_TYPE cl);
   33.73 +extern int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock);
   33.74  
   33.75  
   33.76  #define   ITIR_RV_MASK      (((1UL<<32)-1)<<32 | 0x3)
   33.77 @@ -332,6 +330,7 @@ extern u64 machine_thash(PTA pta, u64 va
   33.78  extern void purge_machine_tc_by_domid(domid_t domid);
   33.79  extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb);
   33.80  extern rr_t vmmu_get_rr(struct vcpu *vcpu, u64 va);
   33.81 +extern thash_cb_t *init_domain_tlb(struct vcpu *d);
   33.82  
   33.83  #define   VTLB_DEBUG
   33.84  #ifdef   VTLB_DEBUG
    34.1 --- a/xen/include/asm-ia64/vmx_platform.h	Sun Jun 19 08:48:29 2005 +0000
    34.2 +++ b/xen/include/asm-ia64/vmx_platform.h	Sun Jun 19 16:17:17 2005 +0000
    34.3 @@ -25,7 +25,7 @@
    34.4  struct mmio_list;
    34.5  typedef struct virutal_platform_def {
    34.6      //unsigned long          *real_mode_data; /* E820, etc. */
    34.7 -    //unsigned long          shared_page_va;
    34.8 +    unsigned long          shared_page_va;
    34.9      //struct vmx_virpit_t    vmx_pit;
   34.10      //struct vmx_handler_t   vmx_handler;
   34.11      //struct mi_per_cpu_info mpci;            /* MMIO */
    35.1 --- a/xen/include/asm-ia64/vmx_ptrace.h	Sun Jun 19 08:48:29 2005 +0000
    35.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    35.3 @@ -1,97 +0,0 @@
    35.4 -/*
    35.5 - * Copyright (C) 1998-2003 Hewlett-Packard Co
    35.6 - *  David Mosberger-Tang <davidm@hpl.hp.com>
    35.7 - *  Stephane Eranian <eranian@hpl.hp.com>
    35.8 - * Copyright (C) 2003 Intel Co
    35.9 - *  Suresh Siddha <suresh.b.siddha@intel.com>
   35.10 - *  Fenghua Yu <fenghua.yu@intel.com>
   35.11 - *  Arun Sharma <arun.sharma@intel.com>
   35.12 - *
   35.13 - * 12/07/98 S. Eranian  added pt_regs & switch_stack
   35.14 - * 12/21/98 D. Mosberger    updated to match latest code
   35.15 - *  6/17/99 D. Mosberger    added second unat member to "struct switch_stack"
   35.16 - *  4/28/05 Anthony Xu	  ported to Xen
   35.17 - *
   35.18 - */
   35.19 -
   35.20 -struct pt_regs {
   35.21 -	/* The following registers are saved by SAVE_MIN: */
   35.22 -	unsigned long b6;		/* scratch */
   35.23 -	unsigned long b7;		/* scratch */
   35.24 -
   35.25 -	unsigned long ar_csd;           /* used by cmp8xchg16 (scratch) */
   35.26 -	unsigned long ar_ssd;           /* reserved for future use (scratch) */
   35.27 -
   35.28 -	unsigned long r8;		/* scratch (return value register 0) */
   35.29 -	unsigned long r9;		/* scratch (return value register 1) */
   35.30 -	unsigned long r10;		/* scratch (return value register 2) */
   35.31 -	unsigned long r11;		/* scratch (return value register 3) */
   35.32 -
   35.33 -	unsigned long cr_ipsr;		/* interrupted task's psr */
   35.34 -	unsigned long cr_iip;		/* interrupted task's instruction pointer */
   35.35 -	unsigned long cr_ifs;		/* interrupted task's function state */
   35.36 -
   35.37 -	unsigned long ar_unat;		/* interrupted task's NaT register (preserved) */
   35.38 -	unsigned long ar_pfs;		/* prev function state  */
   35.39 -	unsigned long ar_rsc;		/* RSE configuration */
   35.40 -	/* The following two are valid only if cr_ipsr.cpl > 0: */
   35.41 -	unsigned long ar_rnat;		/* RSE NaT */
   35.42 -	unsigned long ar_bspstore;	/* RSE bspstore */
   35.43 -
   35.44 -	unsigned long pr;		/* 64 predicate registers (1 bit each) */
   35.45 -	unsigned long b0;		/* return pointer (bp) */
   35.46 -	unsigned long loadrs;		/* size of dirty partition << 16 */
   35.47 -
   35.48 -	unsigned long r1;		/* the gp pointer */
   35.49 -	unsigned long r12;		/* interrupted task's memory stack pointer */
   35.50 -	unsigned long r13;		/* thread pointer */
   35.51 -
   35.52 -	unsigned long ar_fpsr;		/* floating point status (preserved) */
   35.53 -	unsigned long r15;		/* scratch */
   35.54 -
   35.55 -	/* The remaining registers are NOT saved for system calls.  */
   35.56 -
   35.57 -	unsigned long r14;		/* scratch */
   35.58 -	unsigned long r2;		/* scratch */
   35.59 -	unsigned long r3;		/* scratch */
   35.60 -	unsigned long r4;		/* preserved */
   35.61 -	unsigned long r5;		/* preserved */
   35.62 -	unsigned long r6;		/* preserved */
   35.63 -	unsigned long r7;		/* preserved */
   35.64 -    unsigned long cr_iipa;   /* for emulation */
   35.65 -    unsigned long cr_isr;    /* for emulation */
   35.66 -    unsigned long eml_unat;    /* used for emulating instruction */
   35.67 -    unsigned long rfi_pfs;     /* used for elulating rfi */
   35.68 -
   35.69 -	/* The following registers are saved by SAVE_REST: */
   35.70 -	unsigned long r16;		/* scratch */
   35.71 -	unsigned long r17;		/* scratch */
   35.72 -	unsigned long r18;		/* scratch */
   35.73 -	unsigned long r19;		/* scratch */
   35.74 -	unsigned long r20;		/* scratch */
   35.75 -	unsigned long r21;		/* scratch */
   35.76 -	unsigned long r22;		/* scratch */
   35.77 -	unsigned long r23;		/* scratch */
   35.78 -	unsigned long r24;		/* scratch */
   35.79 -	unsigned long r25;		/* scratch */
   35.80 -	unsigned long r26;		/* scratch */
   35.81 -	unsigned long r27;		/* scratch */
   35.82 -	unsigned long r28;		/* scratch */
   35.83 -	unsigned long r29;		/* scratch */
   35.84 -	unsigned long r30;		/* scratch */
   35.85 -	unsigned long r31;		/* scratch */
   35.86 -
   35.87 -	unsigned long ar_ccv;		/* compare/exchange value (scratch) */
   35.88 -
   35.89 -	/*
   35.90 -	 * Floating point registers that the kernel considers scratch:
   35.91 -	 */
   35.92 -	struct ia64_fpreg f6;		/* scratch */
   35.93 -	struct ia64_fpreg f7;		/* scratch */
   35.94 -	struct ia64_fpreg f8;		/* scratch */
   35.95 -	struct ia64_fpreg f9;		/* scratch */
   35.96 -	struct ia64_fpreg f10;		/* scratch */
   35.97 -	struct ia64_fpreg f11;		/* scratch */
   35.98 -};
   35.99 -
  35.100 -
    36.1 --- a/xen/include/asm-ia64/vmx_vpd.h	Sun Jun 19 08:48:29 2005 +0000
    36.2 +++ b/xen/include/asm-ia64/vmx_vpd.h	Sun Jun 19 16:17:17 2005 +0000
    36.3 @@ -26,6 +26,7 @@
    36.4  
    36.5  #include <asm/vtm.h>
    36.6  #include <asm/vmx_platform.h>
    36.7 +#include <public/arch-ia64.h>
    36.8  
    36.9  #define VPD_SHIFT	17	/* 128K requirement */
   36.10  #define VPD_SIZE	(1 << VPD_SHIFT)
    37.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    37.2 +++ b/xen/include/asm-x86/event.h	Sun Jun 19 16:17:17 2005 +0000
    37.3 @@ -0,0 +1,16 @@
    37.4 +/******************************************************************************
    37.5 + * event.h
    37.6 + *
    37.7 + * A nice interface for passing asynchronous events to guest OSes.
    37.8 + * (architecture-dependent part)
    37.9 + *
   37.10 + */
   37.11 +
   37.12 +#ifndef __ASM_EVENT_H__
   37.13 +#define __ASM_EVENT_H__
   37.14 +
   37.15 +static inline void evtchn_notify(struct vcpu *v)
   37.16 +{
   37.17 +}
   37.18 +
   37.19 +#endif
    38.1 --- a/xen/include/public/arch-ia64.h	Sun Jun 19 08:48:29 2005 +0000
    38.2 +++ b/xen/include/public/arch-ia64.h	Sun Jun 19 16:17:17 2005 +0000
    38.3 @@ -14,11 +14,41 @@
    38.4  #define _MEMORY_PADDING(_X)
    38.5  #define MEMORY_PADDING 
    38.6  
    38.7 +/* Maximum number of virtual CPUs in multi-processor guests. */
    38.8 +/* WARNING: before changing this, check that shared_info fits on a page */
    38.9 +#define MAX_VIRT_CPUS 1
   38.10 +
   38.11  #ifndef __ASSEMBLY__
   38.12  
   38.13  /* NB. Both the following are 64 bits each. */
   38.14  typedef unsigned long memory_t;   /* Full-sized pointer/address/memory-size. */
   38.15  
   38.16 +#define MAX_NR_SECTION  32  // at most 32 memory holes
   38.17 +typedef struct {
   38.18 +    unsigned long	start; 	/* start of memory hole */
   38.19 +    unsigned long	end;	/* end of memory hole */
   38.20 +} mm_section_t;
   38.21 +
   38.22 +typedef struct {
   38.23 +    unsigned long	mfn : 56;
   38.24 +    unsigned long	type: 8;
   38.25 +} pmt_entry_t;
   38.26 +
   38.27 +#define GPFN_MEM		(0UL << 56)	/* Guest pfn is normal mem */
   38.28 +#define GPFN_FRAME_BUFFER	(1UL << 56)	/* VGA framebuffer */
   38.29 +#define GPFN_LOW_MMIO		(2UL << 56)	/* Low MMIO range */
   38.30 +#define GPFN_PIB		(3UL << 56)	/* PIB base */
   38.31 +#define GPFN_IOSAPIC		(4UL << 56)	/* IOSAPIC base */
   38.32 +#define GPFN_LEGACY_IO		(5UL << 56)	/* Legacy I/O base */
   38.33 +#define GPFN_GFW		(6UL << 56)	/* Guest Firmware */
   38.34 +#define GPFN_HIGH_MMIO		(7UL << 56)	/* High MMIO range */
   38.35 +
   38.36 +#define GPFN_IO_MASK		(7UL << 56)	/* Guest pfn is I/O type */
   38.37 +#define GPFN_INV_MASK		(31UL << 59)	/* Guest pfn is invalid */
   38.38 +
   38.39 +#define INVALID_MFN              (~0UL)
   38.40 +
   38.41 +
   38.42  typedef struct
   38.43  {
   38.44  } PACKED cpu_user_regs;
   38.45 @@ -28,11 +58,99 @@ typedef struct
   38.46   * structure size will still be 8 bytes, so no other alignments will change.
   38.47   */
   38.48  typedef struct {
   38.49 -    u32  tsc_bits;      /* 0: 32 bits read from the CPU's TSC. */
   38.50 -    u32  tsc_bitshift;  /* 4: 'tsc_bits' uses N:N+31 of TSC.   */
   38.51 +    unsigned int  tsc_bits;      /* 0: 32 bits read from the CPU's TSC. */
   38.52 +    unsigned int  tsc_bitshift;  /* 4: 'tsc_bits' uses N:N+31 of TSC.   */
   38.53  } PACKED tsc_timestamp_t; /* 8 bytes */
   38.54  
   38.55 -#include <asm/tlb.h>	/* TR_ENTRY */
   38.56 +struct pt_fpreg {
   38.57 +        union {
   38.58 +                unsigned long bits[2];
   38.59 +                long double __dummy;    /* force 16-byte alignment */
   38.60 +        } u;
   38.61 +};
   38.62 +
   38.63 +struct pt_regs {
   38.64 +	/* The following registers are saved by SAVE_MIN: */
   38.65 +	unsigned long b6;		/* scratch */
   38.66 +	unsigned long b7;		/* scratch */
   38.67 +
   38.68 +	unsigned long ar_csd;           /* used by cmp8xchg16 (scratch) */
   38.69 +	unsigned long ar_ssd;           /* reserved for future use (scratch) */
   38.70 +
   38.71 +	unsigned long r8;		/* scratch (return value register 0) */
   38.72 +	unsigned long r9;		/* scratch (return value register 1) */
   38.73 +	unsigned long r10;		/* scratch (return value register 2) */
   38.74 +	unsigned long r11;		/* scratch (return value register 3) */
   38.75 +
   38.76 +	unsigned long cr_ipsr;		/* interrupted task's psr */
   38.77 +	unsigned long cr_iip;		/* interrupted task's instruction pointer */
   38.78 +	unsigned long cr_ifs;		/* interrupted task's function state */
   38.79 +
   38.80 +	unsigned long ar_unat;		/* interrupted task's NaT register (preserved) */
   38.81 +	unsigned long ar_pfs;		/* prev function state  */
   38.82 +	unsigned long ar_rsc;		/* RSE configuration */
   38.83 +	/* The following two are valid only if cr_ipsr.cpl > 0: */
   38.84 +	unsigned long ar_rnat;		/* RSE NaT */
   38.85 +	unsigned long ar_bspstore;	/* RSE bspstore */
   38.86 +
   38.87 +	unsigned long pr;		/* 64 predicate registers (1 bit each) */
   38.88 +	unsigned long b0;		/* return pointer (bp) */
   38.89 +	unsigned long loadrs;		/* size of dirty partition << 16 */
   38.90 +
   38.91 +	unsigned long r1;		/* the gp pointer */
   38.92 +	unsigned long r12;		/* interrupted task's memory stack pointer */
   38.93 +	unsigned long r13;		/* thread pointer */
   38.94 +
   38.95 +	unsigned long ar_fpsr;		/* floating point status (preserved) */
   38.96 +	unsigned long r15;		/* scratch */
   38.97 +
   38.98 +	/* The remaining registers are NOT saved for system calls.  */
   38.99 +
  38.100 +	unsigned long r14;		/* scratch */
  38.101 +	unsigned long r2;		/* scratch */
  38.102 +	unsigned long r3;		/* scratch */
  38.103 +
  38.104 +#ifdef CONFIG_VTI
  38.105 +	unsigned long r4;		/* preserved */
  38.106 +	unsigned long r5;		/* preserved */
  38.107 +	unsigned long r6;		/* preserved */
  38.108 +	unsigned long r7;		/* preserved */
  38.109 +	unsigned long cr_iipa;   /* for emulation */
  38.110 +	unsigned long cr_isr;    /* for emulation */
  38.111 +	unsigned long eml_unat;    /* used for emulating instruction */
   38.112 +	unsigned long rfi_pfs;     /* used for emulating rfi */
  38.113 +#endif
  38.114 +
  38.115 +	/* The following registers are saved by SAVE_REST: */
  38.116 +	unsigned long r16;		/* scratch */
  38.117 +	unsigned long r17;		/* scratch */
  38.118 +	unsigned long r18;		/* scratch */
  38.119 +	unsigned long r19;		/* scratch */
  38.120 +	unsigned long r20;		/* scratch */
  38.121 +	unsigned long r21;		/* scratch */
  38.122 +	unsigned long r22;		/* scratch */
  38.123 +	unsigned long r23;		/* scratch */
  38.124 +	unsigned long r24;		/* scratch */
  38.125 +	unsigned long r25;		/* scratch */
  38.126 +	unsigned long r26;		/* scratch */
  38.127 +	unsigned long r27;		/* scratch */
  38.128 +	unsigned long r28;		/* scratch */
  38.129 +	unsigned long r29;		/* scratch */
  38.130 +	unsigned long r30;		/* scratch */
  38.131 +	unsigned long r31;		/* scratch */
  38.132 +
  38.133 +	unsigned long ar_ccv;		/* compare/exchange value (scratch) */
  38.134 +
  38.135 +	/*
  38.136 +	 * Floating point registers that the kernel considers scratch:
  38.137 +	 */
  38.138 +	struct pt_fpreg f6;		/* scratch */
  38.139 +	struct pt_fpreg f7;		/* scratch */
  38.140 +	struct pt_fpreg f8;		/* scratch */
  38.141 +	struct pt_fpreg f9;		/* scratch */
  38.142 +	struct pt_fpreg f10;		/* scratch */
  38.143 +	struct pt_fpreg f11;		/* scratch */
  38.144 +};
  38.145  
  38.146  typedef struct {
  38.147  	unsigned long ipsr;
  38.148 @@ -64,18 +182,20 @@ typedef struct {
  38.149  	unsigned long krs[8];	// kernel registers
  38.150  	unsigned long pkrs[8];	// protection key registers
  38.151  	unsigned long tmp[8];	// temp registers (e.g. for hyperprivops)
  38.152 -//} PACKED arch_shared_info_t;
  38.153 +//} PACKED arch_vcpu_info_t;
  38.154  } arch_vcpu_info_t;		// DON'T PACK 
  38.155  
  38.156  typedef struct {
  38.157 +	int evtchn_vector;
  38.158 +	int domain_controller_evtchn;
  38.159 +	unsigned int flags;
  38.160 +//} PACKED arch_shared_info_t;
  38.161  } arch_shared_info_t;		// DON'T PACK 
  38.162  
  38.163 -/*
  38.164 - * The following is all CPU context. Note that the i387_ctxt block is filled 
  38.165 - * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
  38.166 - */
  38.167  typedef struct vcpu_guest_context {
  38.168 -    //unsigned long flags;
  38.169 +	struct pt_regs regs;
  38.170 +	arch_vcpu_info_t vcpu;
  38.171 +	arch_shared_info_t shared;
  38.172  } PACKED vcpu_guest_context_t;
  38.173  
  38.174  #endif /* !__ASSEMBLY__ */
    39.1 --- a/xen/include/public/arch-x86_32.h	Sun Jun 19 08:48:29 2005 +0000
    39.2 +++ b/xen/include/public/arch-x86_32.h	Sun Jun 19 16:17:17 2005 +0000
    39.3 @@ -73,6 +73,9 @@
    39.4  #define machine_to_phys_mapping ((u32 *)HYPERVISOR_VIRT_START)
    39.5  #endif
    39.6  
    39.7 +/* Maximum number of virtual CPUs in multi-processor guests. */
    39.8 +#define MAX_VIRT_CPUS 32
    39.9 +
   39.10  #ifndef __ASSEMBLY__
   39.11  
   39.12  /* NB. Both the following are 32 bits each. */
    40.1 --- a/xen/include/public/arch-x86_64.h	Sun Jun 19 08:48:29 2005 +0000
    40.2 +++ b/xen/include/public/arch-x86_64.h	Sun Jun 19 16:17:17 2005 +0000
    40.3 @@ -73,6 +73,9 @@
    40.4  #define HYPERVISOR_VIRT_END   (0xFFFF880000000000UL)
    40.5  #endif
    40.6  
    40.7 +/* Maximum number of virtual CPUs in multi-processor guests. */
    40.8 +#define MAX_VIRT_CPUS 32
    40.9 +
   40.10  #ifndef __ASSEMBLY__
   40.11  
   40.12  /* The machine->physical mapping table starts at this address, read-only. */
    41.1 --- a/xen/include/public/xen.h	Sun Jun 19 08:48:29 2005 +0000
    41.2 +++ b/xen/include/public/xen.h	Sun Jun 19 16:17:17 2005 +0000
    41.3 @@ -287,9 +287,6 @@ typedef struct
    41.4  /* Event channel endpoints per domain. */
    41.5  #define NR_EVENT_CHANNELS 1024
    41.6  
    41.7 -/* Support for multi-processor guests. */
    41.8 -#define MAX_VIRT_CPUS 32
    41.9 -
   41.10  /*
   41.11   * Per-VCPU information goes here. This will be cleaned up more when Xen 
   41.12   * actually supports multi-VCPU guests.
    42.1 --- a/xen/include/xen/event.h	Sun Jun 19 08:48:29 2005 +0000
    42.2 +++ b/xen/include/xen/event.h	Sun Jun 19 16:17:17 2005 +0000
    42.3 @@ -13,6 +13,7 @@
    42.4  #include <xen/sched.h>
    42.5  #include <xen/smp.h>
    42.6  #include <asm/bitops.h>
    42.7 +#include <asm/event.h>
    42.8  
    42.9  /*
   42.10   * EVENT-CHANNEL NOTIFICATIONS
   42.11 @@ -34,6 +35,7 @@ static inline void evtchn_set_pending(st
   42.12      {
   42.13          /* The VCPU pending flag must be set /after/ update to evtchn-pend. */
   42.14          set_bit(0, &v->vcpu_info->evtchn_upcall_pending);
   42.15 +        evtchn_notify(v);
   42.16  
   42.17          /*
   42.18           * NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of