ia64/xen-unstable

changeset 4841:f71cef640151

bitkeeper revision 1.1389.15.7 (42813c93SmrfHJlQgDNIRy72aAVeoA)

Merge bk://xen.bkbits.net/xeno-unstable.bk
into bkbits.net:/repos/x/xen-ia64/xeno-unstable-ia64.bk
author xen-ia64.adm@bkbits.net
date Tue May 10 22:58:27 2005 +0000 (2005-05-10)
parents a5b33d336f17 4bc6f1b1425e
children 1f84d0497a59 0fadb891522c
files xen/arch/ia64/asm-offsets.c xen/arch/ia64/domain.c xen/arch/ia64/irq.c xen/arch/ia64/ivt.S xen/arch/ia64/mm_init.c xen/arch/ia64/patch/linux-2.6.11/unaligned.c xen/arch/ia64/privop.c xen/arch/ia64/process.c xen/arch/ia64/vcpu.c xen/arch/ia64/xenmisc.c xen/arch/ia64/xentime.c xen/include/asm-ia64/config.h xen/include/asm-ia64/domain.h xen/include/asm-ia64/vcpu.h xen/include/public/arch-ia64.h
line diff
     1.1 --- a/xen/arch/ia64/asm-offsets.c	Tue May 10 22:28:46 2005 +0000
     1.2 +++ b/xen/arch/ia64/asm-offsets.c	Tue May 10 22:58:27 2005 +0000
     1.3 @@ -8,6 +8,7 @@
     1.4  #include <xen/sched.h>
     1.5  #include <asm/processor.h>
     1.6  #include <asm/ptrace.h>
     1.7 +#include <public/xen.h>
     1.8  
     1.9  #define task_struct exec_domain
    1.10  
    1.11 @@ -37,6 +38,9 @@ void foo(void)
    1.12  
    1.13  	BLANK();
    1.14  
    1.15 +	DEFINE(XSI_PSR_IC_OFS, offsetof(vcpu_info_t, arch.interrupt_collection_enabled));
    1.16 +	DEFINE(XSI_PSR_IC, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.interrupt_collection_enabled)));
    1.17 +	DEFINE(XSI_PSR_I_OFS, offsetof(vcpu_info_t, arch.interrupt_delivery_enabled));
    1.18  	//DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
    1.19  	//DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
    1.20  	//DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
    1.21 @@ -46,8 +50,8 @@ void foo(void)
    1.22  	//DEFINE(IA64_TASK_SIGHAND_OFFSET,offsetof (struct task_struct, sighand));
    1.23  	//DEFINE(IA64_TASK_SIGNAL_OFFSET,offsetof (struct task_struct, signal));
    1.24  	//DEFINE(IA64_TASK_TGID_OFFSET, offsetof (struct task_struct, tgid));
    1.25 -	DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct task_struct, thread.ksp));
    1.26 -	DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct task_struct, thread.on_ustack));
    1.27 +	DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct exec_domain, arch._thread.ksp));
    1.28 +	DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct exec_domain, arch._thread.on_ustack));
    1.29  
    1.30  	BLANK();
    1.31  
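
The new XSI_PSR_IC constants reach ivt.S through the usual asm-offsets mechanism: DEFINE() emits each offsetof() value as a magic token in the compiler's assembly output, and a build step scrapes those tokens into a header of #defines. A minimal sketch of that mechanism, assuming the standard Linux-style DEFINE macro (struct and symbol names here are illustrative):

	#include <stddef.h>

	/* The "i" constraint forces the compiler to print the constant into
	 * the generated .s file, where a sed pass turns each "->SYM value"
	 * line into "#define SYM value" in asm-offsets.h. */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	struct demo_vcpu_info {			/* illustrative stand-in */
		long pad;
		unsigned char interrupt_collection_enabled;
	};

	void foo(void)
	{
		DEFINE(DEMO_PSR_IC_OFS,
		       offsetof(struct demo_vcpu_info, interrupt_collection_enabled));
	}
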
     2.1 --- a/xen/arch/ia64/domain.c	Tue May 10 22:28:46 2005 +0000
     2.2 +++ b/xen/arch/ia64/domain.c	Tue May 10 22:58:27 2005 +0000
     2.3 @@ -191,7 +191,7 @@ void arch_do_createdomain(struct exec_do
     2.4  	// stay on kernel stack because may get interrupts!
     2.5  	// ia64_ret_from_clone (which b0 gets in new_thread) switches
     2.6  	// to user stack
     2.7 -	ed->thread.on_ustack = 0;
     2.8 +	ed->arch._thread.on_ustack = 0;
     2.9  }
    2.10  
    2.11  void arch_do_boot_vcpu(struct exec_domain *p)
    2.12 @@ -261,7 +261,7 @@ void new_thread(struct exec_domain *ed,
    2.13  printf("new_thread: ed=%p, start_pc=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
    2.14  ed,start_pc,regs,sw,new_rbs,IA64_STK_OFFSET,&regs->r8);
    2.15  	sw->b0 = (unsigned long) &ia64_ret_from_clone;
    2.16 -	ed->thread.ksp = (unsigned long) sw - 16;
    2.17 +	ed->arch._thread.ksp = (unsigned long) sw - 16;
    2.18  	//ed->thread_info->flags = 0;
    2.19  printk("new_thread, about to call init_all_rr\n");
    2.20  	init_all_rr(ed);
     3.1 --- a/xen/arch/ia64/irq.c	Tue May 10 22:28:46 2005 +0000
     3.2 +++ b/xen/arch/ia64/irq.c	Tue May 10 22:58:27 2005 +0000
     3.3 @@ -1406,9 +1406,11 @@ int pirq_guest_bind(struct exec_domain *
     3.4          desc->handler->startup(irq);
     3.5  
     3.6          /* Attempt to bind the interrupt target to the correct CPU. */
     3.7 +#if 0 /* FIXME CONFIG_SMP ??? */
     3.8          if ( desc->handler->set_affinity != NULL )
     3.9              desc->handler->set_affinity(
    3.10                  irq, apicid_to_phys_cpu_present(d->processor));
    3.11 +#endif
    3.12      }
    3.13      else if ( !will_share || !action->shareable )
    3.14      {
     4.1 --- a/xen/arch/ia64/ivt.S	Tue May 10 22:28:46 2005 +0000
     4.2 +++ b/xen/arch/ia64/ivt.S	Tue May 10 22:58:27 2005 +0000
     4.3 @@ -778,10 +778,22 @@ ENTRY(break_fault)
     4.4  	mov r17=cr.iim
     4.5  	mov r31=pr
     4.6  	;;
     4.7 +	movl r18=XSI_PSR_IC
     4.8 +	;;
     4.9 +	ld8 r19=[r18]
    4.10 +	;;
     4.11  	cmp.eq p7,p0=r0,r17			// is this a pseudo-cover?
    4.12 -	// FIXME: may also need to check slot==2?
    4.13  (p7)	br.sptk.many dispatch_privop_fault
    4.14 +	;;
    4.15 +	cmp.ne p7,p0=r0,r19
    4.16 +(p7)	br.sptk.many dispatch_break_fault
    4.17 +	// If we get to here, we have a hyperprivop
    4.18 +	// For now, hyperprivops are handled through the break mechanism
    4.19 +	// Later, they will be fast hand-coded assembly with psr.ic off
    4.20 +	// which means no calls, no use of r1-r15 and no memory accesses
    4.21 +	// except to pinned addresses!
    4.22  	br.sptk.many dispatch_break_fault
    4.23 +	;;
    4.24  #endif
    4.25  	mov r16=IA64_KR(CURRENT)		// r16 = current task; 12 cycle read lat.
    4.26  	mov r17=cr.iim
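
In C terms, the reworked break_fault prologue now dispatches as follows (an illustrative rendering of the assembly above, not actual source):

	if (cr_iim == 0)			/* pseudo-cover              */
		dispatch_privop_fault();
	else if (xsi_psr_ic != 0)		/* guest psr.ic on:          */
		dispatch_break_fault();		/*   normal break/hypercall  */
	else					/* guest psr.ic off:         */
		dispatch_break_fault();		/*   hyperprivop, same path  */

Both non-cover cases currently funnel into dispatch_break_fault; the comment block reserves the psr.ic-off case for a future hand-coded fast path that must avoid r1-r15 and unpinned memory.
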
     5.1 --- a/xen/arch/ia64/mm_init.c	Tue May 10 22:28:46 2005 +0000
     5.2 +++ b/xen/arch/ia64/mm_init.c	Tue May 10 22:58:27 2005 +0000
     5.3 @@ -227,7 +227,7 @@ ia64_set_rbs_bot (void)
     5.4  
     5.5  	if (stack_size > MAX_USER_STACK_SIZE)
     5.6  		stack_size = MAX_USER_STACK_SIZE;
     5.7 -	current->thread.rbs_bot = STACK_TOP - stack_size;
     5.8 +	current->arch._thread.rbs_bot = STACK_TOP - stack_size;
     5.9  }
    5.10  
    5.11  /*
    5.12 @@ -255,7 +255,7 @@ printf("ia64_init_addr_space: called, no
    5.13  	if (vma) {
    5.14  		memset(vma, 0, sizeof(*vma));
    5.15  		vma->vm_mm = current->mm;
    5.16 -		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
    5.17 +		vma->vm_start = current->arch._thread.rbs_bot & PAGE_MASK;
    5.18  		vma->vm_end = vma->vm_start + PAGE_SIZE;
    5.19  		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
    5.20  		vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
     6.1 --- a/xen/arch/ia64/patch/linux-2.6.11/unaligned.c	Tue May 10 22:28:46 2005 +0000
     6.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/unaligned.c	Tue May 10 22:58:27 2005 +0000
     6.3 @@ -1,5 +1,5 @@
     6.4  --- ../../linux-2.6.11/arch/ia64/kernel/unaligned.c	2005-03-02 00:38:25.000000000 -0700
     6.5 -+++ arch/ia64/unaligned.c	2005-04-28 15:40:13.000000000 -0600
     6.6 ++++ arch/ia64/unaligned.c	2005-05-10 15:46:09.000000000 -0600
     6.7  @@ -437,7 +437,11 @@
     6.8   }
     6.9   
    6.10 @@ -12,7 +12,31 @@
    6.11   setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
    6.12   {
    6.13   	struct switch_stack *sw = (struct switch_stack *) regs - 1;
    6.14 -@@ -611,7 +615,11 @@
    6.15 +@@ -522,7 +526,11 @@
    6.16 + 	 */
    6.17 + 	if (regnum >= IA64_FIRST_ROTATING_FR) {
    6.18 + 		ia64_sync_fph(current);
    6.19 ++#ifdef XEN
    6.20 ++		current->arch._thread.fph[fph_index(regs, regnum)] = *fpval;
    6.21 ++#else
    6.22 + 		current->thread.fph[fph_index(regs, regnum)] = *fpval;
    6.23 ++#endif
    6.24 + 	} else {
    6.25 + 		/*
    6.26 + 		 * pt_regs or switch_stack ?
    6.27 +@@ -581,7 +589,11 @@
    6.28 + 	 */
    6.29 + 	if (regnum >= IA64_FIRST_ROTATING_FR) {
    6.30 + 		ia64_flush_fph(current);
    6.31 ++#ifdef XEN
    6.32 ++		*fpval = current->arch._thread.fph[fph_index(regs, regnum)];
    6.33 ++#else
    6.34 + 		*fpval = current->thread.fph[fph_index(regs, regnum)];
    6.35 ++#endif
    6.36 + 	} else {
    6.37 + 		/*
    6.38 + 		 * f0 = 0.0, f1= 1.0. Those registers are constant and are thus
    6.39 +@@ -611,7 +623,11 @@
    6.40   }
    6.41   
    6.42   
    6.43 @@ -24,7 +48,7 @@
    6.44   getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
    6.45   {
    6.46   	struct switch_stack *sw = (struct switch_stack *) regs - 1;
    6.47 -@@ -1294,6 +1302,9 @@
    6.48 +@@ -1294,6 +1310,9 @@
    6.49   void
    6.50   ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
    6.51   {
    6.52 @@ -34,7 +58,7 @@
    6.53   	struct ia64_psr *ipsr = ia64_psr(regs);
    6.54   	mm_segment_t old_fs = get_fs();
    6.55   	unsigned long bundle[2];
    6.56 -@@ -1502,4 +1513,5 @@
    6.57 +@@ -1502,4 +1521,5 @@
    6.58   	si.si_imm = 0;
    6.59   	force_sig_info(SIGBUS, &si, current);
    6.60   	goto done;
     7.1 --- a/xen/arch/ia64/privop.c	Tue May 10 22:28:46 2005 +0000
     7.2 +++ b/xen/arch/ia64/privop.c	Tue May 10 22:58:27 2005 +0000
     7.3 @@ -205,7 +205,8 @@ IA64FAULT priv_itc_d(VCPU *vcpu, INST64 
     7.4  		return(IA64_ILLOP_FAULT);
     7.5  	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
     7.6  		return(IA64_ILLOP_FAULT);
     7.7 -	pte = vcpu_get_gr(vcpu,inst.M41.r2);
     7.8 +	if (!inst.inst) pte = vcpu_get_tmp(vcpu,0);
     7.9 +	else pte = vcpu_get_gr(vcpu,inst.M41.r2);
    7.10  
    7.11  	return (vcpu_itc_d(vcpu,pte,itir,ifa));
    7.12  }
    7.13 @@ -219,7 +220,8 @@ IA64FAULT priv_itc_i(VCPU *vcpu, INST64 
    7.14  		return(IA64_ILLOP_FAULT);
    7.15  	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
    7.16  		return(IA64_ILLOP_FAULT);
    7.17 -	pte = vcpu_get_gr(vcpu,inst.M41.r2);
    7.18 +	if (!inst.inst) pte = vcpu_get_tmp(vcpu,0);
    7.19 +	else pte = vcpu_get_gr(vcpu,inst.M41.r2);
    7.20  
    7.21  	return (vcpu_itc_i(vcpu,pte,itir,ifa));
    7.22  }
    7.23 @@ -417,10 +419,17 @@ IA64FAULT priv_mov_from_pmc(VCPU *vcpu, 
    7.24  	UINT64 val;
    7.25  	IA64FAULT fault;
    7.26  	
    7.27 -	fault = vcpu_get_pmc(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
    7.28 -	if (fault == IA64_NO_FAULT)
    7.29 -		return vcpu_set_gr(vcpu, inst.M43.r1, val);
    7.30 -	else return fault;
    7.31 +	if (inst.M43.r1 > 63) { // privified mov from pmd
    7.32 +		fault = vcpu_get_pmd(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
    7.33 +		if (fault == IA64_NO_FAULT)
    7.34 +			return vcpu_set_gr(vcpu, inst.M43.r1-64, val);
    7.35 +	}
    7.36 +	else {
    7.37 +		fault = vcpu_get_pmc(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
    7.38 +		if (fault == IA64_NO_FAULT)
    7.39 +			return vcpu_set_gr(vcpu, inst.M43.r1, val);
    7.40 +	}
    7.41 +	return fault;
    7.42  }
    7.43  
    7.44  unsigned long from_cr_cnt[128] = { 0 };
    7.45 @@ -531,6 +540,8 @@ struct {
    7.46  	unsigned long bsw0;
    7.47  	unsigned long bsw1;
    7.48  	unsigned long cover;
    7.49 +	unsigned long fc;
    7.50 +	unsigned long cpuid;
    7.51  	unsigned long Mpriv_cnt[64];
    7.52  } privcnt = { 0 };
    7.53  
    7.54 @@ -631,7 +642,11 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
    7.55  				else x6 = 0x1a;
    7.56  			}
    7.57  		}
    7.58 -		privcnt.Mpriv_cnt[x6]++;
    7.59 +		if (x6 == 52 && inst.M28.r3 > 63)
    7.60 +			privcnt.fc++;
    7.61 +		else if (x6 == 16 && inst.M43.r3 > 63)
    7.62 +			privcnt.cpuid++;
    7.63 +		else privcnt.Mpriv_cnt[x6]++;
    7.64  		return (*pfunc)(vcpu,inst);
    7.65  		break;
    7.66  	    case B:
    7.67 @@ -682,7 +697,7 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
    7.68          //printf("We who are about to die salute you\n");
    7.69  	printf("handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
    7.70  		 iip, (UINT64)inst.inst, slot, slot_type);
    7.71 -        //printf("vtop(0x%lx)==0x%lx\r\n", iip, tr_vtop(iip));
    7.72 +        //printf("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
    7.73          //thread_mozambique("privop fault\n");
    7.74  	return (IA64_ILLOP_FAULT);
    7.75  }
    7.76 @@ -745,6 +760,64 @@ priv_emulate(VCPU *vcpu, REGS *regs, UIN
    7.77  }
    7.78  
    7.79  
    7.80 +// FIXME: Move these to include/public/arch-ia64?
    7.81 +#define HYPERPRIVOP_RFI			0x1
    7.82 +#define HYPERPRIVOP_RSM_DT		0x2
    7.83 +#define HYPERPRIVOP_SSM_DT		0x3
    7.84 +#define HYPERPRIVOP_COVER		0x4
    7.85 +#define HYPERPRIVOP_ITC_D		0x5
    7.86 +#define HYPERPRIVOP_ITC_I		0x6
    7.87 +#define HYPERPRIVOP_MAX			0x6
    7.88 +
    7.89 +char *hyperpriv_str[HYPERPRIVOP_MAX+1] = {
    7.90 +	0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i",
    7.91 +	0
    7.92 +};
    7.93 +
    7.94 +unsigned long hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
    7.95 +
    7.96 +/* hyperprivops are generally executed in assembly (with physical psr.ic off)
    7.97 + * so this code is primarily used for debugging them */
    7.98 +int
    7.99 +ia64_hyperprivop(unsigned long iim, REGS *regs)
   7.100 +{
   7.101 +	struct exec_domain *ed = (struct exec_domain *) current;
   7.102 +	INST64 inst;
   7.103 +	UINT64 val;
   7.104 +
   7.105 +// FIXME: Add instrumentation for these
   7.106 +// FIXME: Handle faults appropriately for these
   7.107 +	if (!iim || iim > HYPERPRIVOP_MAX) {
   7.108 +		printf("bad hyperprivop; ignored\n");
   7.109 +		return 1;
   7.110 +	}
   7.111 +	hyperpriv_cnt[iim]++;
   7.112 +	switch(iim) {
   7.113 +	    case HYPERPRIVOP_RFI:
   7.114 +		(void)vcpu_rfi(ed);
   7.115 +		return 0;	// don't update iip
   7.116 +	    case HYPERPRIVOP_RSM_DT:
   7.117 +		(void)vcpu_reset_psr_dt(ed);
   7.118 +		return 1;
   7.119 +	    case HYPERPRIVOP_SSM_DT:
   7.120 +		(void)vcpu_set_psr_dt(ed);
   7.121 +		return 1;
   7.122 +	    case HYPERPRIVOP_COVER:
   7.123 +		(void)vcpu_cover(ed);
   7.124 +		return 1;
   7.125 +	    case HYPERPRIVOP_ITC_D:
   7.126 +		inst.inst = 0;
   7.127 +		(void)priv_itc_d(ed,inst);
   7.128 +		return 1;
   7.129 +	    case HYPERPRIVOP_ITC_I:
   7.130 +		inst.inst = 0;
   7.131 +		(void)priv_itc_i(ed,inst);
   7.132 +		return 1;
   7.133 +	}
   7.134 +	return 0;
   7.135 +}
   7.136 +
   7.137 +
   7.138  /**************************************************************************
   7.139  Privileged operation instrumentation routines
   7.140  **************************************************************************/
   7.141 @@ -798,55 +871,61 @@ int dump_privop_counts(char *buf)
   7.142  	sum += privcnt.rfi; sum += privcnt.bsw0;
   7.143  	sum += privcnt.bsw1; sum += privcnt.cover;
   7.144  	for (i=0; i < 64; i++) sum += privcnt.Mpriv_cnt[i];
   7.145 -	s += sprintf(s,"Privop statistics: (Total privops: %ld)\r\n",sum);
   7.146 +	s += sprintf(s,"Privop statistics: (Total privops: %ld)\n",sum);
   7.147  	if (privcnt.mov_to_ar_imm)
   7.148 -		s += sprintf(s,"%10d  %s [%d%%]\r\n", privcnt.mov_to_ar_imm,
   7.149 +		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.mov_to_ar_imm,
   7.150  			"mov_to_ar_imm", (privcnt.mov_to_ar_imm*100L)/sum);
   7.151  	if (privcnt.mov_to_ar_reg)
   7.152 -		s += sprintf(s,"%10d  %s [%d%%]\r\n", privcnt.mov_to_ar_reg,
   7.153 +		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.mov_to_ar_reg,
   7.154  			"mov_to_ar_reg", (privcnt.mov_to_ar_reg*100L)/sum);
   7.155  	if (privcnt.mov_from_ar)
   7.156 -		s += sprintf(s,"%10d  %s [%d%%]\r\n", privcnt.mov_from_ar,
   7.157 +		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.mov_from_ar,
   7.158  			"privified-mov_from_ar", (privcnt.mov_from_ar*100L)/sum);
   7.159  	if (privcnt.ssm)
   7.160 -		s += sprintf(s,"%10d  %s [%d%%]\r\n", privcnt.ssm,
   7.161 +		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.ssm,
   7.162  			"ssm", (privcnt.ssm*100L)/sum);
   7.163  	if (privcnt.rsm)
   7.164 -		s += sprintf(s,"%10d  %s [%d%%]\r\n", privcnt.rsm,
   7.165 +		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.rsm,
   7.166  			"rsm", (privcnt.rsm*100L)/sum);
   7.167  	if (privcnt.rfi)
   7.168 -		s += sprintf(s,"%10d  %s [%d%%]\r\n", privcnt.rfi,
   7.169 +		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.rfi,
   7.170  			"rfi", (privcnt.rfi*100L)/sum);
   7.171  	if (privcnt.bsw0)
   7.172 -		s += sprintf(s,"%10d  %s [%d%%]\r\n", privcnt.bsw0,
   7.173 +		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.bsw0,
   7.174  			"bsw0", (privcnt.bsw0*100L)/sum);
   7.175  	if (privcnt.bsw1)
   7.176 -		s += sprintf(s,"%10d  %s [%d%%]\r\n", privcnt.bsw1,
   7.177 +		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.bsw1,
   7.178  			"bsw1", (privcnt.bsw1*100L)/sum);
   7.179  	if (privcnt.cover)
   7.180 -		s += sprintf(s,"%10d  %s [%d%%]\r\n", privcnt.cover,
   7.181 +		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.cover,
   7.182  			"cover", (privcnt.cover*100L)/sum);
   7.183 +	if (privcnt.fc)
   7.184 +		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.fc,
   7.185 +			"privified-fc", (privcnt.fc*100L)/sum);
   7.186 +	if (privcnt.cpuid)
   7.187 +		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.cpuid,
   7.188 +			"privified-getcpuid", (privcnt.cpuid*100L)/sum);
   7.189  	for (i=0; i < 64; i++) if (privcnt.Mpriv_cnt[i]) {
   7.190 -		if (!Mpriv_str[i]) s += sprintf(s,"PRIVSTRING NULL!!\r\n");
   7.191 -		else s += sprintf(s,"%10d  %s [%d%%]\r\n", privcnt.Mpriv_cnt[i],
   7.192 +		if (!Mpriv_str[i]) s += sprintf(s,"PRIVSTRING NULL!!\n");
   7.193 +		else s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.Mpriv_cnt[i],
   7.194  			Mpriv_str[i], (privcnt.Mpriv_cnt[i]*100L)/sum);
   7.195  		if (i == 0x24) { // mov from CR
   7.196  			s += sprintf(s,"            [");
   7.197  			for (j=0; j < 128; j++) if (from_cr_cnt[j]) {
   7.198  				if (!cr_str[j])
   7.199 -					s += sprintf(s,"PRIVSTRING NULL!!\r\n");
   7.200 +					s += sprintf(s,"PRIVSTRING NULL!!\n");
   7.201  				s += sprintf(s,"%s(%d),",cr_str[j],from_cr_cnt[j]);
   7.202  			}
   7.203 -			s += sprintf(s,"]\r\n");
   7.204 +			s += sprintf(s,"]\n");
   7.205  		}
   7.206  		else if (i == 0x2c) { // mov to CR
   7.207  			s += sprintf(s,"            [");
   7.208  			for (j=0; j < 128; j++) if (to_cr_cnt[j]) {
   7.209  				if (!cr_str[j])
   7.210 -					s += sprintf(s,"PRIVSTRING NULL!!\r\n");
   7.211 +					s += sprintf(s,"PRIVSTRING NULL!!\n");
   7.212  				s += sprintf(s,"%s(%d),",cr_str[j],to_cr_cnt[j]);
   7.213  			}
   7.214 -			s += sprintf(s,"]\r\n");
   7.215 +			s += sprintf(s,"]\n");
   7.216  		}
   7.217  	}
   7.218  	return s - buf;
   7.219 @@ -864,19 +943,88 @@ int zero_privop_counts(char *buf)
   7.220  	privcnt.ssm = 0; privcnt.rsm = 0;
   7.221  	privcnt.rfi = 0; privcnt.bsw0 = 0;
   7.222  	privcnt.bsw1 = 0; privcnt.cover = 0;
   7.223 +	privcnt.fc = 0; privcnt.cpuid = 0;
   7.224  	for (i=0; i < 64; i++) privcnt.Mpriv_cnt[i] = 0;
   7.225  	for (j=0; j < 128; j++) from_cr_cnt[j] = 0;
   7.226  	for (j=0; j < 128; j++) to_cr_cnt[j] = 0;
   7.227 -	s += sprintf(s,"All privop statistics zeroed\r\n");
   7.228 +	s += sprintf(s,"All privop statistics zeroed\n");
   7.229  	return s - buf;
   7.230  }
   7.231  
   7.232 +#ifdef PRIVOP_ADDR_COUNT
   7.233 +
   7.234 +extern struct privop_addr_count privop_addr_counter[];
   7.235 +
   7.236 +void privop_count_addr(unsigned long iip, int inst)
   7.237 +{
   7.238 +	struct privop_addr_count *v = &privop_addr_counter[inst];
   7.239 +	int i;
   7.240 +
   7.241 +	for (i = 0; i < PRIVOP_COUNT_NADDRS; i++) {
   7.242 +		if (!v->addr[i]) { v->addr[i] = iip; v->count[i]++; return; }
   7.243 +		else if (v->addr[i] == iip)  { v->count[i]++; return; }
   7.244 +	}
   7.245 +	v->overflow++;;
   7.246 +}
   7.247 +
   7.248 +int dump_privop_addrs(char *buf)
   7.249 +{
   7.250 +	int i,j;
   7.251 +	char *s = buf;
   7.252 +	s += sprintf(s,"Privop addresses:\n");
   7.253 +	for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) {
   7.254 +		struct privop_addr_count *v = &privop_addr_counter[i];
   7.255 +		s += sprintf(s,"%s:\n",v->instname);
   7.256 +		for (j = 0; j < PRIVOP_COUNT_NADDRS; j++) {
   7.257 +			if (!v->addr[j]) break;
   7.258 +			s += sprintf(s," @%p #%ld\n",v->addr[j],v->count[j]);
   7.259 +		}
   7.260 +		if (v->overflow) 
   7.261 +			s += sprintf(s," other #%ld\n",v->overflow);
   7.262 +	}
   7.263 +	return s - buf;
   7.264 +}
   7.265 +
   7.266 +void zero_privop_addrs(void)
   7.267 +{
   7.268 +	int i,j;
   7.269 +	for (i = 0; i < PRIVOP_COUNT_NINSTS; i++) {
   7.270 +		struct privop_addr_count *v = &privop_addr_counter[i];
   7.271 +		for (j = 0; j < PRIVOP_COUNT_NADDRS; j++)
   7.272 +			v->addr[j] = v->count[j] = 0;
   7.273 +		v->overflow = 0;
   7.274 +	}
   7.275 +}
   7.276 +#endif
   7.277 +
   7.278 +int dump_hyperprivop_counts(char *buf)
   7.279 +{
   7.280 +	int i;
   7.281 +	char *s = buf;
   7.282 +	s += sprintf(s,"Hyperprivops:\n");
   7.283 +	for (i = 1; i <= HYPERPRIVOP_MAX; i++)
   7.284 +		if (hyperpriv_cnt[i])
   7.285 +			s += sprintf(s,"%10d %s\n",
   7.286 +				hyperpriv_cnt[i], hyperpriv_str[i]);
   7.287 +	return s - buf;
   7.288 +}
   7.289 +
   7.290 +void zero_hyperprivop_counts(void)
   7.291 +{
   7.292 +	int i;
   7.293 +	for (i = 0; i <= HYPERPRIVOP_MAX; i++) hyperpriv_cnt[i] = 0;
   7.294 +}
   7.295 +
   7.296  #define TMPBUFLEN 8*1024
   7.297  int dump_privop_counts_to_user(char __user *ubuf, int len)
   7.298  {
   7.299  	char buf[TMPBUFLEN];
   7.300  	int n = dump_privop_counts(buf);
   7.301  
   7.302 +	n += dump_hyperprivop_counts(buf + n);
   7.303 +#ifdef PRIVOP_ADDR_COUNT
   7.304 +	n += dump_privop_addrs(buf + n);
   7.305 +#endif
   7.306  	if (len < TMPBUFLEN) return -1;
   7.307  	if (__copy_to_user(ubuf,buf,n)) return -1;
   7.308  	return n;
   7.309 @@ -887,6 +1035,10 @@ int zero_privop_counts_to_user(char __us
   7.310  	char buf[TMPBUFLEN];
   7.311  	int n = zero_privop_counts(buf);
   7.312  
   7.313 +	zero_hyperprivop_counts();
   7.314 +#ifdef PRIVOP_ADDR_COUNT
   7.315 +	zero_privop_addrs();
   7.316 +#endif
   7.317  	if (len < TMPBUFLEN) return -1;
   7.318  	if (__copy_to_user(ubuf,buf,n)) return -1;
   7.319  	return n;
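
Two of the hunks above depend on the same "privified instruction" convention: the step that privifies a guest kernel apparently marks a rewritten instruction by adding 64 to a register field, so a field value above 63 acts as an in-band flag. The emulator then counts such instructions separately (privified-fc, privified-getcpuid) and undoes the bias when decoding, as in this paraphrase of the new priv_mov_from_pmc logic:

	if (inst.M43.r1 > 63) {		/* bias => really a pmd read */
		fault = vcpu_get_pmd(vcpu, vcpu_get_gr(vcpu, inst.M43.r3), &val);
		if (fault == IA64_NO_FAULT)
			return vcpu_set_gr(vcpu, inst.M43.r1 - 64, val);
	}
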
     8.1 --- a/xen/arch/ia64/process.c	Tue May 10 22:28:46 2005 +0000
     8.2 +++ b/xen/arch/ia64/process.c	Tue May 10 22:58:27 2005 +0000
     8.3 @@ -51,6 +51,7 @@ extern unsigned long dom0_start, dom0_si
     8.4  			IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
     8.5  
     8.6  #define PSCB(x,y)	x->vcpu_info->arch.y
     8.7 +#define PSCBX(x,y)	x->arch.y
     8.8  
     8.9  extern unsigned long vcpu_verbose;
    8.10  
    8.11 @@ -154,7 +155,7 @@ panic_domain(regs,"psr.ic off, deliverin
    8.12  		}
    8.13  //printf("Delivering NESTED DATA TLB fault\n");
    8.14  		vector = IA64_DATA_NESTED_TLB_VECTOR;
    8.15 -		regs->cr_iip = ((unsigned long) PSCB(ed,iva) + vector) & ~0xffUL;
    8.16 +		regs->cr_iip = ((unsigned long) PSCBX(ed,iva) + vector) & ~0xffUL;
    8.17  		regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    8.18  // NOTE: nested trap must NOT pass PSCB address
    8.19  		//regs->r31 = (unsigned long) &PSCB(ed);
    8.20 @@ -187,7 +188,7 @@ panic_domain(regs,"psr.ic off, deliverin
    8.21  	PSCB(ed,ifs) = 0;
    8.22  	PSCB(ed,incomplete_regframe) = 0;
    8.23  
    8.24 -	regs->cr_iip = ((unsigned long) PSCB(ed,iva) + vector) & ~0xffUL;
    8.25 +	regs->cr_iip = ((unsigned long) PSCBX(ed,iva) + vector) & ~0xffUL;
    8.26  	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    8.27  #ifdef CONFIG_SMP
    8.28  #error "sharedinfo doesn't handle smp yet"
    8.29 @@ -516,7 +517,7 @@ printf("ia64_fault, vector=0x%p, ifa=%p,
    8.30  	      case 32: /* fp fault */
    8.31  	      case 33: /* fp trap */
    8.32  		//result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, isr);
    8.33 -		if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
    8.34 +		//if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
    8.35  			//siginfo.si_signo = SIGFPE;
    8.36  			//siginfo.si_errno = 0;
    8.37  			//siginfo.si_code = FPE_FLTINV;
    8.38 @@ -525,7 +526,7 @@ printf("ia64_fault, vector=0x%p, ifa=%p,
    8.39  			//siginfo.si_isr = isr;
    8.40  			//siginfo.si_imm = 0;
    8.41  			//force_sig_info(SIGFPE, &siginfo, current);
    8.42 -		}
    8.43 +		//}
    8.44  		//return;
    8.45  		sprintf(buf, "FP fault/trap");
    8.46  		break;
    8.47 @@ -722,6 +723,10 @@ ia64_handle_break (unsigned long ifa, st
    8.48  		if (ia64_hypercall(regs))
    8.49  			vcpu_increment_iip(current);
    8.50  	}
    8.51 +	else if (!PSCB(ed,interrupt_collection_enabled)) {
    8.52 +		if (ia64_hyperprivop(iim,regs))
    8.53 +			vcpu_increment_iip(current);
    8.54 +	}
    8.55  	else reflect_interruption(ifa,isr,iim,regs,IA64_BREAK_VECTOR);
    8.56  }
    8.57  
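
The new branch considers hyperprivops only while the guest's virtual psr.ic is off, matching the ivt.S fast path. The return-value convention follows ia64_hyperprivop: nonzero means "handled, advance iip past the break", while rfi returns 0 because it has already rewritten iip itself. A hypothetical guest-side stub, to illustrate the calling side (the break immediate carries the hyperprivop number, per the HYPERPRIVOP_* definitions in privop.c):

	/* Illustrative only: replace "cover" with a break carrying
	 * HYPERPRIVOP_COVER while running with psr.ic off. */
	static inline void xen_cover(void)
	{
		asm volatile ("break %0"
			      : : "i" (0x4 /* HYPERPRIVOP_COVER */) : "memory");
	}
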
     9.1 --- a/xen/arch/ia64/vcpu.c	Tue May 10 22:28:46 2005 +0000
     9.2 +++ b/xen/arch/ia64/vcpu.c	Tue May 10 22:58:27 2005 +0000
     9.3 @@ -1,6 +1,6 @@
     9.4  /*
     9.5   * Virtualized CPU functions
     9.6 - * 
     9.7 + *
     9.8   * Copyright (C) 2004 Hewlett-Packard Co.
     9.9   *	Dan Magenheimer (dan.magenheimer@hp.com)
    9.10   *
    9.11 @@ -26,6 +26,7 @@ typedef	union {
    9.12  // this def for vcpu_regs won't work if kernel stack is present
    9.13  #define	vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
    9.14  #define	PSCB(x,y)	x->vcpu_info->arch.y
    9.15 +#define	PSCBX(x,y)	x->arch.y
    9.16  
    9.17  #define	TRUE	1
    9.18  #define	FALSE	0
    9.19 @@ -37,6 +38,17 @@ typedef	union {
    9.20  
    9.21  #define STATIC
    9.22  
    9.23 +#ifdef PRIVOP_ADDR_COUNT
    9.24 +struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS] = {
    9.25 +	{ "rsm", { 0 }, { 0 }, 0 },
    9.26 +	{ "ssm", { 0 }, { 0 }, 0 }
    9.27 +};
    9.28 +extern void privop_count_addr(unsigned long addr, int inst);
    9.29 +#define	PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst)
    9.30 +#else
    9.31 +#define	PRIVOP_COUNT_ADDR(x,y) do {} while (0)
    9.32 +#endif
    9.33 +
    9.34  unsigned long vcpu_verbose = 0;
    9.35  #define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
    9.36  
    9.37 @@ -77,30 +89,20 @@ vcpu_set_gr(VCPU *vcpu, unsigned reg, UI
    9.38  IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
    9.39  {
    9.40  	if (reg == 44) return (vcpu_set_itc(vcpu,val));
    9.41 -	if (reg == 27) return (IA64_ILLOP_FAULT);
    9.42 -	if (reg > 7) return (IA64_ILLOP_FAULT);
    9.43 -	PSCB(vcpu,krs[reg]) = val;
    9.44 -#if 0
    9.45 -// for now, privify kr read's so all kr accesses are privileged
    9.46 -	switch (reg) {
    9.47 -	      case 0: asm volatile ("mov ar.k0=%0" :: "r"(val)); break;
    9.48 -	      case 1: asm volatile ("mov ar.k1=%0" :: "r"(val)); break;
    9.49 -	      case 2: asm volatile ("mov ar.k2=%0" :: "r"(val)); break;
    9.50 -	      case 3: asm volatile ("mov ar.k3=%0" :: "r"(val)); break;
    9.51 -	      case 4: asm volatile ("mov ar.k4=%0" :: "r"(val)); break;
    9.52 -	      case 5: asm volatile ("mov ar.k5=%0" :: "r"(val)); break;
    9.53 -	      case 6: asm volatile ("mov ar.k6=%0" :: "r"(val)); break;
    9.54 -	      case 7: asm volatile ("mov ar.k7=%0" :: "r"(val)); break;
    9.55 -	      case 27: asm volatile ("mov ar.cflg=%0" :: "r"(val)); break;
    9.56 -	}
    9.57 -#endif
    9.58 +	else if (reg == 27) return (IA64_ILLOP_FAULT);
    9.59 +	else if (reg == 24)
    9.60 +	    printf("warning: setting ar.eflg is a no-op; no IA-32 support\n");
    9.61 +	else if (reg > 7) return (IA64_ILLOP_FAULT);
    9.62 +	else PSCB(vcpu,krs[reg]) = val;
    9.63  	return IA64_NO_FAULT;
    9.64  }
    9.65  
    9.66  IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
    9.67  {
    9.68 -	if (reg > 7) return (IA64_ILLOP_FAULT);
    9.69 -	*val = PSCB(vcpu,krs[reg]);
    9.70 +	if (reg == 24)
    9.71 +	    printf("warning: getting ar.eflg is a no-op; no IA-32 support\n");
    9.72 +	else if (reg > 7) return (IA64_ILLOP_FAULT);
    9.73 +	else *val = PSCB(vcpu,krs[reg]);
    9.74  	return IA64_NO_FAULT;
    9.75  }
    9.76  
    9.77 @@ -119,11 +121,18 @@ void vcpu_set_metaphysical_mode(VCPU *vc
    9.78  	}
    9.79  }
    9.80  
    9.81 +IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu)
    9.82 +{
    9.83 +	vcpu_set_metaphysical_mode(vcpu,TRUE);
    9.84 +	return IA64_NO_FAULT;
    9.85 +}
    9.86 +
    9.87  IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
    9.88  {
    9.89  	struct ia64_psr psr, imm, *ipsr;
    9.90  	REGS *regs = vcpu_regs(vcpu);
    9.91  
    9.92 +	PRIVOP_COUNT_ADDR(regs,_RSM);
    9.93  	// TODO: All of these bits need to be virtualized
    9.94  	// TODO: Only allowed for current vcpu
    9.95  	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
    9.96 @@ -152,12 +161,19 @@ IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, 
    9.97  extern UINT64 vcpu_check_pending_interrupts(VCPU *vcpu);
    9.98  #define SPURIOUS_VECTOR 0xf
    9.99  
   9.100 +IA64FAULT vcpu_set_psr_dt(VCPU *vcpu)
   9.101 +{
   9.102 +	vcpu_set_metaphysical_mode(vcpu,FALSE);
   9.103 +	return IA64_NO_FAULT;
   9.104 +}
   9.105 +
   9.106  IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
   9.107  {
   9.108  	struct ia64_psr psr, imm, *ipsr;
   9.109  	REGS *regs = vcpu_regs(vcpu);
   9.110  	UINT64 mask, enabling_interrupts = 0;
   9.111  
   9.112 +	PRIVOP_COUNT_ADDR(regs,_SSM);
   9.113  	// TODO: All of these bits need to be virtualized
   9.114  	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
   9.115  	imm = *(struct ia64_psr *)&imm24;
   9.116 @@ -274,9 +290,9 @@ BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
   9.117  
   9.118  UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
   9.119  {
   9.120 -	UINT64 dcr = PSCB(vcpu,dcr);
   9.121 +	UINT64 dcr = PSCBX(vcpu,dcr);
   9.122  	PSR psr = {0};
   9.123 -	
   9.124 +
   9.125  	//printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
   9.126  	psr.i64 = prevpsr;
   9.127  	psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
   9.128 @@ -302,13 +318,13 @@ extern unsigned long privop_trace;
   9.129  //verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
   9.130  	// Reads of cr.dcr on Xen always have the sign bit set, so
   9.131  	// a domain can differentiate whether it is running on SP or not
   9.132 -	*pval = PSCB(vcpu,dcr) | 0x8000000000000000L;
   9.133 +	*pval = PSCBX(vcpu,dcr) | 0x8000000000000000L;
   9.134  	return (IA64_NO_FAULT);
   9.135  }
   9.136  
   9.137  IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
   9.138  {
   9.139 -	*pval = PSCB(vcpu,iva) & ~0x7fffL;
   9.140 +	*pval = PSCBX(vcpu,iva) & ~0x7fffL;
   9.141  	return (IA64_NO_FAULT);
   9.142  }
   9.143  
   9.144 @@ -408,13 +424,13 @@ extern unsigned long privop_trace;
   9.145  	// a domain can differentiate whether it is running on SP or not
   9.146  	// Thus, writes of DCR should ignore the sign bit
   9.147  //verbose("vcpu_set_dcr: called\n");
   9.148 -	PSCB(vcpu,dcr) = val & ~0x8000000000000000L;
   9.149 +	PSCBX(vcpu,dcr) = val & ~0x8000000000000000L;
   9.150  	return (IA64_NO_FAULT);
   9.151  }
   9.152  
   9.153  IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
   9.154  {
   9.155 -	PSCB(vcpu,iva) = val & ~0x7fffL;
   9.156 +	PSCBX(vcpu,iva) = val & ~0x7fffL;
   9.157  	return (IA64_NO_FAULT);
   9.158  }
   9.159  
   9.160 @@ -508,16 +524,16 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
   9.161  		return;
   9.162  	}
   9.163  	if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return;
   9.164 -	if (test_bit(vector,PSCB(vcpu,irr))) {
   9.165 +	if (test_bit(vector,PSCBX(vcpu,irr))) {
   9.166  //printf("vcpu_pend_interrupt: overrun\n");
   9.167  	}
   9.168 -	set_bit(vector,PSCB(vcpu,irr));
   9.169 +	set_bit(vector,PSCBX(vcpu,irr));
   9.170  	PSCB(vcpu,pending_interruption) = 1;
   9.171  }
   9.172  
   9.173  void early_tick(VCPU *vcpu)
   9.174  {
   9.175 -	UINT64 *p = &PSCB(vcpu,irr[3]);
   9.176 +	UINT64 *p = &PSCBX(vcpu,irr[3]);
   9.177  	printf("vcpu_check_pending: about to deliver early tick\n");
   9.178  	printf("&irr[0]=%p, irr[0]=0x%lx\n",p,*p);
   9.179  }
   9.180 @@ -535,9 +551,9 @@ UINT64 vcpu_check_pending_interrupts(VCP
   9.181  {
   9.182  	UINT64 *p, *q, *r, bits, bitnum, mask, i, vector;
   9.183  
   9.184 -	p = &PSCB(vcpu,irr[3]);
   9.185 +	p = &PSCBX(vcpu,irr[3]);
   9.186  	q = &PSCB(vcpu,delivery_mask[3]);
   9.187 -	r = &PSCB(vcpu,insvc[3]);
   9.188 +	r = &PSCBX(vcpu,insvc[3]);
   9.189  	for (i = 3; ; p--, q--, r--, i--) {
   9.190  		bits = *p & *q;
   9.191  		if (bits) break; // got a potential interrupt
   9.192 @@ -577,9 +593,9 @@ UINT64 vcpu_check_pending_interrupts(VCP
   9.193  #if 0
   9.194  if (vector == (PSCB(vcpu,itv) & 0xff)) {
   9.195  	UINT64 now = ia64_get_itc();
   9.196 -	UINT64 itm = PSCB(vcpu,domain_itm);
   9.197 +	UINT64 itm = PSCBX(vcpu,domain_itm);
   9.198  	if (now < itm) early_tick(vcpu);
   9.199 -	
   9.200 +
   9.201  }
   9.202  #endif
   9.203  	return vector;
   9.204 @@ -639,13 +655,13 @@ IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT6
   9.205  	i = vector >> 6;
   9.206  	mask = 1L << (vector & 0x3f);
   9.207  //printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %ld\n",vector);
   9.208 -	PSCB(vcpu,insvc[i]) |= mask;
   9.209 -	PSCB(vcpu,irr[i]) &= ~mask;
   9.210 +	PSCBX(vcpu,insvc[i]) |= mask;
   9.211 +	PSCBX(vcpu,irr[i]) &= ~mask;
   9.212  	//PSCB(vcpu,pending_interruption)--;
   9.213  	*pval = vector;
   9.214  	// if delivering a timer interrupt, remember domain_itm
   9.215  	if (vector == (PSCB(vcpu,itv) & 0xff)) {
   9.216 -		PSCB(vcpu,domain_itm_last) = PSCB(vcpu,domain_itm);
   9.217 +		PSCBX(vcpu,domain_itm_last) = PSCBX(vcpu,domain_itm);
   9.218  	}
   9.219  	return IA64_NO_FAULT;
   9.220  }
   9.221 @@ -760,7 +776,7 @@ IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT6
   9.222  	UINT64 *p, bits, vec, bitnum;
   9.223  	int i;
   9.224  
   9.225 -	p = &PSCB(vcpu,insvc[3]);
   9.226 +	p = &PSCBX(vcpu,insvc[3]);
   9.227  	for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
   9.228  	if (i < 0) {
   9.229  		printf("Trying to EOI interrupt when none are in-service.\r\n");
   9.230 @@ -811,8 +827,8 @@ extern unsigned long privop_trace;
   9.231  	if (val & 0xef00) return (IA64_ILLOP_FAULT);
   9.232  	PSCB(vcpu,itv) = val;
   9.233  	if (val & 0x10000) {
   9.234 -printf("**** vcpu_set_itv(%d): vitm=%lx, setting to 0\n",val,PSCB(vcpu,domain_itm));
   9.235 -		PSCB(vcpu,domain_itm) = 0;
   9.236 +printf("**** vcpu_set_itv(%d): vitm=%lx, setting to 0\n",val,PSCBX(vcpu,domain_itm));
   9.237 +		PSCBX(vcpu,domain_itm) = 0;
   9.238  	}
   9.239  	else vcpu_enable_timer(vcpu,1000000L);
   9.240  	return (IA64_NO_FAULT);
   9.241 @@ -833,6 +849,20 @@ IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT
   9.242  }
   9.243  
   9.244  /**************************************************************************
   9.245 + VCPU temporary register access routines
   9.246 +**************************************************************************/
   9.247 +UINT64 vcpu_get_tmp(VCPU *vcpu, UINT64 index)
   9.248 +{
   9.249 +	if (index > 7) return 0;
   9.250 +	return PSCB(vcpu,tmp[index]);
   9.251 +}
   9.252 +
   9.253 +void vcpu_set_tmp(VCPU *vcpu, UINT64 index, UINT64 val)
   9.254 +{
   9.255 +	if (index <= 7) PSCB(vcpu,tmp[index]) = val;
   9.256 +}
   9.257 +
   9.258 +/**************************************************************************
   9.259  Interval timer routines
   9.260  **************************************************************************/
   9.261  
   9.262 @@ -845,14 +875,14 @@ BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
   9.263  BOOLEAN vcpu_timer_inservice(VCPU *vcpu)
   9.264  {
   9.265  	UINT64 itv = PSCB(vcpu,itv);
   9.266 -	return (test_bit(itv, PSCB(vcpu,insvc)));
   9.267 +	return (test_bit(itv, PSCBX(vcpu,insvc)));
   9.268  }
   9.269  
   9.270  BOOLEAN vcpu_timer_expired(VCPU *vcpu)
   9.271  {
   9.272 -	unsigned long domain_itm = PSCB(vcpu,domain_itm);
   9.273 +	unsigned long domain_itm = PSCBX(vcpu,domain_itm);
   9.274  	unsigned long now = ia64_get_itc();
   9.275 - 
   9.276 +
   9.277  	if (!domain_itm) return FALSE;
   9.278  	if (now < domain_itm) return FALSE;
   9.279  	if (vcpu_timer_disabled(vcpu)) return FALSE;
   9.280 @@ -877,36 +907,36 @@ void vcpu_safe_set_itm(unsigned long val
   9.281  
   9.282  void vcpu_set_next_timer(VCPU *vcpu)
   9.283  {
   9.284 -	UINT64 d = PSCB(vcpu,domain_itm);
   9.285 -	//UINT64 s = PSCB(vcpu,xen_itm);
   9.286 +	UINT64 d = PSCBX(vcpu,domain_itm);
   9.287 +	//UINT64 s = PSCBX(vcpu,xen_itm);
   9.288  	UINT64 s = local_cpu_data->itm_next;
   9.289  	UINT64 now = ia64_get_itc();
   9.290 -	//UINT64 interval = PSCB(vcpu,xen_timer_interval);
   9.291 +	//UINT64 interval = PSCBX(vcpu,xen_timer_interval);
   9.292  
   9.293  	/* gloss over the wraparound problem for now... we know it exists
   9.294  	 * but it doesn't matter right now */
   9.295  
   9.296  #if 0
   9.297  	/* ensure at least next SP tick is in the future */
   9.298 -	if (!interval) PSCB(vcpu,xen_itm) = now +
   9.299 +	if (!interval) PSCBX(vcpu,xen_itm) = now +
   9.300  #if 0
   9.301  		(running_on_sim() ? SIM_DEFAULT_CLOCK_RATE :
   9.302 -		 			DEFAULT_CLOCK_RATE);
   9.303 +					DEFAULT_CLOCK_RATE);
   9.304  #else
   9.305  	3000000;
   9.306  //printf("vcpu_set_next_timer: HACK!\n");
   9.307  #endif
   9.308  #if 0
   9.309 -	if (PSCB(vcpu,xen_itm) < now)
   9.310 -		while (PSCB(vcpu,xen_itm) < now + (interval>>1))
   9.311 -			PSCB(vcpu,xen_itm) += interval;
   9.312 +	if (PSCBX(vcpu,xen_itm) < now)
   9.313 +		while (PSCBX(vcpu,xen_itm) < now + (interval>>1))
   9.314 +			PSCBX(vcpu,xen_itm) += interval;
   9.315  #endif
   9.316  #endif
   9.317  
   9.318  	if (is_idle_task(vcpu->domain)) {
   9.319  		printf("****** vcpu_set_next_timer called during idle!!\n");
   9.320  	}
   9.321 -	//s = PSCB(vcpu,xen_itm);
   9.322 +	//s = PSCBX(vcpu,xen_itm);
   9.323  	if (d && (d > now) && (d < s)) {
   9.324  		vcpu_safe_set_itm(d);
   9.325  		//using_domain_as_itm++;
   9.326 @@ -920,10 +950,10 @@ void vcpu_set_next_timer(VCPU *vcpu)
   9.327  // parameter is a time interval specified in cycles
   9.328  void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
   9.329  {
   9.330 -    PSCB(vcpu,xen_timer_interval) = cycles;
   9.331 +    PSCBX(vcpu,xen_timer_interval) = cycles;
   9.332      vcpu_set_next_timer(vcpu);
   9.333      printf("vcpu_enable_timer(%d): interval set to %d cycles\n",
   9.334 -             PSCB(vcpu,xen_timer_interval));
   9.335 +             PSCBX(vcpu,xen_timer_interval));
   9.336      __set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
   9.337  }
   9.338  
   9.339 @@ -933,30 +963,30 @@ IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT6
   9.340  
   9.341  	//if (val < now) val = now + 1000;
   9.342  //printf("*** vcpu_set_itm: called with %lx\n",val);
   9.343 -	PSCB(vcpu,domain_itm) = val;
   9.344 +	PSCBX(vcpu,domain_itm) = val;
   9.345  	vcpu_set_next_timer(vcpu);
   9.346  	return (IA64_NO_FAULT);
   9.347  }
   9.348  
   9.349  IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
   9.350  {
   9.351 -	
   9.352 +
   9.353  	UINT64 oldnow = ia64_get_itc();
   9.354 -	UINT64 olditm = PSCB(vcpu,domain_itm);
   9.355 +	UINT64 olditm = PSCBX(vcpu,domain_itm);
   9.356  	unsigned long d = olditm - oldnow;
   9.357  	unsigned long x = local_cpu_data->itm_next - oldnow;
   9.358 -	
   9.359 +
   9.360  	UINT64 newnow = val, min_delta;
   9.361  
   9.362  	local_irq_disable();
   9.363  	if (olditm) {
   9.364  printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
   9.365 -		PSCB(vcpu,domain_itm) = newnow + d;
   9.366 +		PSCBX(vcpu,domain_itm) = newnow + d;
   9.367  	}
   9.368  	local_cpu_data->itm_next = newnow + x;
   9.369 -	d = PSCB(vcpu,domain_itm);
   9.370 +	d = PSCBX(vcpu,domain_itm);
   9.371  	x = local_cpu_data->itm_next;
   9.372 -	
   9.373 +
   9.374  	ia64_set_itc(newnow);
   9.375  	if (d && (d > newnow) && (d < x)) {
   9.376  		vcpu_safe_set_itm(d);
   9.377 @@ -991,7 +1021,7 @@ void vcpu_pend_timer(VCPU *vcpu)
   9.378  
   9.379  	if (vcpu_timer_disabled(vcpu)) return;
   9.380  	//if (vcpu_timer_inservice(vcpu)) return;
   9.381 -	if (PSCB(vcpu,domain_itm_last) == PSCB(vcpu,domain_itm)) {
   9.382 +	if (PSCBX(vcpu,domain_itm_last) == PSCBX(vcpu,domain_itm)) {
   9.383  		// already delivered an interrupt for this so
   9.384  		// don't deliver another
   9.385  		return;
   9.386 @@ -999,7 +1029,7 @@ void vcpu_pend_timer(VCPU *vcpu)
   9.387  #if 0
   9.388  	// attempt to flag "timer tick before its due" source
   9.389  	{
   9.390 -	UINT64 itm = PSCB(vcpu,domain_itm);
   9.391 +	UINT64 itm = PSCBX(vcpu,domain_itm);
   9.392  	UINT64 now = ia64_get_itc();
   9.393  	if (now < itm) printf("******* vcpu_pend_timer: pending before due!\n");
   9.394  	}
   9.395 @@ -1011,7 +1041,7 @@ void vcpu_pend_timer(VCPU *vcpu)
   9.396  UINT64 vcpu_timer_pending_early(VCPU *vcpu)
   9.397  {
   9.398  	UINT64 now = ia64_get_itc();
   9.399 -	UINT64 itm = PSCB(vcpu,domain_itm);
   9.400 +	UINT64 itm = PSCBX(vcpu,domain_itm);
   9.401  
   9.402  	if (vcpu_timer_disabled(vcpu)) return 0;
   9.403  	if (!itm) return 0;
   9.404 @@ -1023,7 +1053,7 @@ void vcpu_poke_timer(VCPU *vcpu)
   9.405  {
   9.406  	UINT64 itv = PSCB(vcpu,itv) & 0xff;
   9.407  	UINT64 now = ia64_get_itc();
   9.408 -	UINT64 itm = PSCB(vcpu,domain_itm);
   9.409 +	UINT64 itm = PSCBX(vcpu,domain_itm);
   9.410  	UINT64 irr;
   9.411  
   9.412  	if (vcpu_timer_disabled(vcpu)) return;
   9.413 @@ -1033,8 +1063,8 @@ void vcpu_poke_timer(VCPU *vcpu)
   9.414  		while(1);
   9.415  	}
   9.416  	// using 0xef instead of itv so can get real irr
   9.417 -	if (now > itm && !test_bit(0xefL, PSCB(vcpu,insvc))) {
   9.418 -		if (!test_bit(0xefL,PSCB(vcpu,irr))) {
   9.419 +	if (now > itm && !test_bit(0xefL, PSCBX(vcpu,insvc))) {
   9.420 +		if (!test_bit(0xefL,PSCBX(vcpu,irr))) {
   9.421  			irr = ia64_getreg(_IA64_REG_CR_IRR3);
   9.422  			if (irr & (1L<<(0xef-0xc0))) return;
   9.423  if (now-itm>0x800000)
   9.424 @@ -1091,7 +1121,7 @@ printf("SI_CR_IIP/IPSR/IFS_OFFSET CHANGE
   9.425  printf("SI_CR_IIP=0x%x,IPSR=0x%x,IFS_OFFSET=0x%x\n",SI_OFS(iip),SI_OFS(ipsr),SI_OFS(ifs));
   9.426  while(1);
   9.427  }
   9.428 -		// TODO: validate PSCB(vcpu,iip) 
   9.429 +		// TODO: validate PSCB(vcpu,iip)
   9.430  		// TODO: PSCB(vcpu,ipsr) = psr;
   9.431  		PSCB(vcpu,ipsr) = psr.i64;
   9.432  		// now set up the trampoline
   9.433 @@ -1338,7 +1368,6 @@ IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UIN
   9.434  
   9.435  unsigned long vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr)
   9.436  {
   9.437 -	
   9.438  	ia64_rr rr;
   9.439  
   9.440  	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
   9.441 @@ -1348,7 +1377,6 @@ unsigned long vcpu_get_rr_ve(VCPU *vcpu,
   9.442  
   9.443  unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
   9.444  {
   9.445 -	
   9.446  	ia64_rr rr;
   9.447  
   9.448  	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
   9.449 @@ -1358,7 +1386,6 @@ unsigned long vcpu_get_rr_ps(VCPU *vcpu,
   9.450  
   9.451  unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
   9.452  {
   9.453 -	
   9.454  	ia64_rr rr;
   9.455  
   9.456  	rr.rrval = PSCB(vcpu,rrs)[vadr>>61];
   9.457 @@ -1445,8 +1472,8 @@ TR_ENTRY *vcpu_match_tr_entry(VCPU *vcpu
   9.458  	for (i = 0; i < count; i++, trp++) {
   9.459  		if (!trp->p) continue;
   9.460  		if (physicalize_rid(vcpu,trp->rid) != rid) continue;
   9.461 -        	if (ifa < trp->vadr) continue;
   9.462 -        	if (ifa >= (trp->vadr + (1L << trp->ps)) - 1) continue;
   9.463 +		if (ifa < trp->vadr) continue;
   9.464 +		if (ifa >= (trp->vadr + (1L << trp->ps)) - 1) continue;
   9.465  		//if (trp->key && !match_pkr(vcpu,trp->key)) continue;
   9.466  		return trp;
   9.467  	}
   9.468 @@ -1457,9 +1484,9 @@ TR_ENTRY *match_tr(VCPU *vcpu, unsigned 
   9.469  {
   9.470  	TR_ENTRY *trp;
   9.471  
   9.472 -	trp = vcpu_match_tr_entry(vcpu,vcpu->vcpu_info->arch.dtrs,ifa,NDTRS);
   9.473 +	trp = vcpu_match_tr_entry(vcpu,vcpu->arch.dtrs,ifa,NDTRS);
   9.474  	if (trp) return trp;
   9.475 -	trp = vcpu_match_tr_entry(vcpu,vcpu->vcpu_info->arch.itrs,ifa,NITRS);
   9.476 +	trp = vcpu_match_tr_entry(vcpu,vcpu->arch.itrs,ifa,NITRS);
   9.477  	if (trp) return trp;
   9.478  	return 0;
   9.479  }
   9.480 @@ -1470,7 +1497,8 @@ IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 
   9.481  	TR_ENTRY *trp;
   9.482  
   9.483  	if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
   9.484 -	trp = &PSCB(vcpu,dtrs[slot]);
   9.485 +	trp = &PSCBX(vcpu,dtrs[slot]);
   9.486 +//printf("***** itr.d: setting slot %d: ifa=%p\n",slot,ifa);
   9.487  	vcpu_set_tr_entry(trp,pte,itir,ifa);
   9.488  	return IA64_NO_FAULT;
   9.489  }
   9.490 @@ -1481,7 +1509,8 @@ IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 
   9.491  	TR_ENTRY *trp;
   9.492  
   9.493  	if (slot >= NITRS) return IA64_RSVDREG_FAULT;
   9.494 -	trp = &PSCB(vcpu,itrs[slot]);
   9.495 +	trp = &PSCBX(vcpu,itrs[slot]);
   9.496 +//printf("***** itr.i: setting slot %d: ifa=%p\n",slot,ifa);
   9.497  	vcpu_set_tr_entry(trp,pte,itir,ifa);
   9.498  	return IA64_NO_FAULT;
   9.499  }
   9.500 @@ -1524,12 +1553,12 @@ void vcpu_itc_no_srlz(VCPU *vcpu, UINT64
   9.501  #endif
   9.502  	if (IorD & 0x4) return;  // don't place in 1-entry TLB
   9.503  	if (IorD & 0x1) {
   9.504 -		vcpu_set_tr_entry(&PSCB(vcpu,itlb),pte,ps<<2,vaddr);
   9.505 -		PSCB(vcpu,itlb_pte) = mp_pte;
   9.506 +		vcpu_set_tr_entry(&PSCBX(vcpu,itlb),pte,ps<<2,vaddr);
   9.507 +		PSCBX(vcpu,itlb_pte) = mp_pte;
   9.508  	}
   9.509  	if (IorD & 0x2) {
   9.510 -		vcpu_set_tr_entry(&PSCB(vcpu,dtlb),pte,ps<<2,vaddr);
   9.511 -		PSCB(vcpu,dtlb_pte) = mp_pte;
   9.512 +		vcpu_set_tr_entry(&PSCBX(vcpu,dtlb),pte,ps<<2,vaddr);
   9.513 +		PSCBX(vcpu,dtlb_pte) = mp_pte;
   9.514  	}
   9.515  }
   9.516  
   9.517 @@ -1539,9 +1568,9 @@ unsigned long match_dtlb(VCPU *vcpu, uns
   9.518  {
   9.519  	TR_ENTRY *trp;
   9.520  
   9.521 -	if (trp = vcpu_match_tr_entry(vcpu,&vcpu->vcpu_info->arch.dtlb,ifa,1)) {
   9.522 +	if (trp = vcpu_match_tr_entry(vcpu,&vcpu->arch.dtlb,ifa,1)) {
   9.523  		if (ps) *ps = trp->ps;
   9.524 -		if (mp_pte) *mp_pte = vcpu->vcpu_info->arch.dtlb_pte;
   9.525 +		if (mp_pte) *mp_pte = vcpu->arch.dtlb_pte;
   9.526  		return (trp->page_flags);
   9.527  	}
   9.528  	return 0UL;
   9.529 @@ -1645,8 +1674,8 @@ IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 
   9.530  #endif
   9.531  	local_flush_tlb_all();
   9.532  	// just invalidate the "whole" tlb
   9.533 -	vcpu_purge_tr_entry(&PSCB(vcpu,dtlb));
   9.534 -	vcpu_purge_tr_entry(&PSCB(vcpu,itlb));
   9.535 +	vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
   9.536 +	vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
   9.537  	return IA64_NO_FAULT;
   9.538  }
   9.539  
   9.540 @@ -1666,8 +1695,8 @@ IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 
   9.541  	vhpt_flush_address(vadr,addr_range);
   9.542  #endif
   9.543  	ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
   9.544 -	vcpu_purge_tr_entry(&PSCB(vcpu,dtlb));
   9.545 -	vcpu_purge_tr_entry(&PSCB(vcpu,itlb));
   9.546 +	vcpu_purge_tr_entry(&PSCBX(vcpu,dtlb));
   9.547 +	vcpu_purge_tr_entry(&PSCBX(vcpu,itlb));
   9.548  	return IA64_NO_FAULT;
   9.549  }
   9.550  
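
The mechanical PSCB-to-PSCBX rewrites through this file carry the real point of the changeset: state a guest could forge (the TR/TLB shadows, irr, insvc, iva, dcr, and the timer bookkeeping) moves out of the shared, guest-writable vcpu_info page into Xen-private arch_exec_domain storage, exactly as the FIXME removed from arch-ia64.h below warned. The two accessors side by side, with an example taken from vcpu_pend_interrupt above:

	#define PSCB(x,y)	x->vcpu_info->arch.y	/* shared page, guest-visible */
	#define PSCBX(x,y)	x->arch.y		/* Xen-private per-vcpu state */

	set_bit(vector, PSCBX(vcpu,irr));	/* guest can no longer forge irr */
	PSCB(vcpu,pending_interruption) = 1;	/* but may still poll this flag  */
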
    10.1 --- a/xen/arch/ia64/xenmisc.c	Tue May 10 22:28:46 2005 +0000
    10.2 +++ b/xen/arch/ia64/xenmisc.c	Tue May 10 22:58:27 2005 +0000
    10.3 @@ -79,6 +79,14 @@ void raise_actimer_softirq(void)
    10.4  	raise_softirq(AC_TIMER_SOFTIRQ);
    10.5  }
    10.6  
    10.7 +unsigned long __hypercall_create_continuation(
    10.8 +	unsigned int op, unsigned int nr_args, ...)
    10.9 +{
   10.10 +	printf("__hypercall_create_continuation: not implemented!!!\n");
   10.11 +}
   10.12 +
   10.13 +///////////////////////////////
   10.14 +
   10.15  ///////////////////////////////
   10.16  // from arch/x86/apic.c
   10.17  ///////////////////////////////
   10.18 @@ -139,7 +147,7 @@ void free_page_type(struct pfn_info *pag
   10.19  void show_registers(struct pt_regs *regs)
   10.20  {
   10.21  	printf("*** ADD REGISTER DUMP HERE FOR DEBUGGING\n");
   10.22 -}	
   10.23 +}
   10.24  
   10.25  ///////////////////////////////
   10.26  // from common/keyhandler.c
    11.1 --- a/xen/arch/ia64/xentime.c	Tue May 10 22:28:46 2005 +0000
    11.2 +++ b/xen/arch/ia64/xentime.c	Tue May 10 22:58:27 2005 +0000
    11.3 @@ -84,6 +84,17 @@ xen_timer_interrupt (int irq, void *dev_
    11.4  {
    11.5  	unsigned long new_itm;
    11.6  
    11.7 +#define HEARTBEAT_FREQ 16	// period in seconds
    11.8 +#ifdef HEARTBEAT_FREQ
    11.9 +	static long count = 0;
   11.10 +	if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
   11.11 +		printf("Heartbeat... iip=%p,psr.i=%d,pend=%d\n",
   11.12 +			regs->cr_iip,
   11.13 +			current->vcpu_info->arch.interrupt_delivery_enabled,
   11.14 +			current->vcpu_info->arch.pending_interruption);
   11.15 +		count = 0;
   11.16 +	}
   11.17 +#endif
   11.18  #ifndef XEN
   11.19  	if (unlikely(cpu_is_offline(smp_processor_id()))) {
   11.20  		return IRQ_HANDLED;
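
The heartbeat throttle relies on the mask trick dividing evenly only because HEARTBEAT_FREQ*1024 is a power of two (16*1024 = 2^14); the explicit count = 0 reset is then redundant but harmless. An equivalent form without the power-of-two assumption, for comparison:

	if (++count >= HEARTBEAT_FREQ * 1024) {	/* works for any frequency */
		printf("Heartbeat...\n");
		count = 0;
	}
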
    12.1 --- a/xen/include/asm-ia64/config.h	Tue May 10 22:28:46 2005 +0000
    12.2 +++ b/xen/include/asm-ia64/config.h	Tue May 10 22:58:27 2005 +0000
    12.3 @@ -166,12 +166,6 @@ struct device {
    12.4  #endif
    12.5  };
    12.6  
    12.7 -// from linux/include/linux/pci.h
    12.8 -struct pci_bus_region {
    12.9 -	unsigned long start;
   12.10 -	unsigned long end;
   12.11 -};
   12.12 -
   12.13  // warning: unless search_extable is declared, the return value gets
   12.14  // truncated to 32-bits, causing a very strange error in privop handling
   12.15  struct exception_table_entry;
   12.16 @@ -256,6 +250,14 @@ struct screen_info { };
   12.17  #define seq_printf(a,b...) printf(b)
   12.18  #define CONFIG_BLK_DEV_INITRD // needed to reserve memory for domain0
   12.19  
   12.20 +//
   12.21 +#define __smp_processor_id() (current->processor)
   12.22 +
   12.23 +// needed for newer ACPI code
   12.24 +#define asmlinkage
   12.25 +
   12.26 +#define FORCE_CRASH()	asm("break 0;;");
   12.27 +
   12.28  // these declarations got moved at some point, find a better place for them
   12.29  extern int opt_noht;
   12.30  extern int ht_per_core;
    13.1 --- a/xen/include/asm-ia64/domain.h	Tue May 10 22:28:46 2005 +0000
    13.2 +++ b/xen/include/asm-ia64/domain.h	Tue May 10 22:58:27 2005 +0000
    13.3 @@ -37,12 +37,30 @@ struct arch_domain {
    13.4  #define shared_info_va arch.shared_info_va
    13.5  
    13.6  struct arch_exec_domain {
    13.7 +#if 1
    13.8 +	TR_ENTRY itrs[NITRS];
    13.9 +	TR_ENTRY dtrs[NDTRS];
   13.10 +	TR_ENTRY itlb;
   13.11 +	TR_ENTRY dtlb;
   13.12 +	unsigned long itlb_pte;
   13.13 +	unsigned long dtlb_pte;
   13.14 +	unsigned long irr[4];
   13.15 +	unsigned long insvc[4];
   13.16 +	unsigned long iva;
   13.17 +	unsigned long dcr;
   13.18 +	unsigned long itc;
   13.19 +	unsigned long domain_itm;
   13.20 +	unsigned long domain_itm_last;
   13.21 +	unsigned long xen_itm;
   13.22 +	unsigned long xen_timer_interval;
   13.23 +#endif
   13.24      void *regs;	/* temporary until find a better way to do privops */
   13.25 -    struct thread_struct _thread;
   13.26      struct mm_struct *active_mm;
   13.27 +    struct thread_struct _thread;	// this must be last
   13.28  };
   13.29 +
   13.30  #define active_mm arch.active_mm
   13.31 -#define thread arch._thread
   13.32 +//#define thread arch._thread
   13.33  
   13.34  // FOLLOWING FROM linux-2.6.7/include/sched.h
   13.35  
    14.1 --- a/xen/include/asm-ia64/vcpu.h	Tue May 10 22:28:46 2005 +0000
    14.2 +++ b/xen/include/asm-ia64/vcpu.h	Tue May 10 22:58:27 2005 +0000
    14.3 @@ -21,6 +21,21 @@ typedef struct pt_regs REGS;
    14.4  //#define vcpu_regs(vcpu)		&((struct spk_thread_t *)vcpu)->thread_regs
    14.5  //#define vcpu_thread(vcpu)	((struct spk_thread_t *)vcpu)
    14.6  
    14.7 +#define PRIVOP_ADDR_COUNT
    14.8 +#ifdef PRIVOP_ADDR_COUNT
    14.9 +#define _RSM 0
   14.10 +#define _SSM 1
   14.11 +#define PRIVOP_COUNT_NINSTS 2
   14.12 +#define PRIVOP_COUNT_NADDRS 30
   14.13 +
   14.14 +struct privop_addr_count {
   14.15 +	char *instname;
   14.16 +	unsigned long addr[PRIVOP_COUNT_NADDRS];
   14.17 +	unsigned long count[PRIVOP_COUNT_NADDRS];
   14.18 +	unsigned long overflow;
   14.19 +};
   14.20 +#endif
   14.21 +
   14.22  /* general registers */
   14.23  extern UINT64 vcpu_get_gr(VCPU *vcpu, unsigned reg);
   14.24  extern IA64FAULT vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value);
   14.25 @@ -132,6 +147,8 @@ extern void vcpu_set_next_timer(VCPU *vc
   14.26  extern BOOLEAN vcpu_timer_expired(VCPU *vcpu);
   14.27  extern UINT64 vcpu_deliverable_interrupts(VCPU *vcpu);
   14.28  extern void vcpu_itc_no_srlz(VCPU *vcpu, UINT64, UINT64, UINT64, UINT64, UINT64);
   14.29 +extern UINT64 vcpu_get_tmp(VCPU *, UINT64);
   14.30 +extern void vcpu_set_tmp(VCPU *, UINT64, UINT64);
   14.31  
   14.32  
   14.33  #endif
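
PRIVOP_ADDR_COUNT wires a small fixed-size profiler into the emulator: each instrumented privop records up to PRIVOP_COUNT_NADDRS distinct faulting iip values with per-address hit counts, and anything beyond that falls into a single overflow counter. Instrumenting a further instruction follows the _RSM/_SSM pattern; a sketch, where _NEWOP and its table entry are hypothetical:

	#define _NEWOP 2			/* next free index, in vcpu.h    */
	#define PRIVOP_COUNT_NINSTS 3		/* bump the table size to match  */

	/* add to privop_addr_counter[] in vcpu.c:
	 *	{ "newop", { 0 }, { 0 }, 0 }
	 * then call from the instruction's emulation path: */
	PRIVOP_COUNT_ADDR(regs, _NEWOP);
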
    15.1 --- a/xen/include/public/arch-ia64.h	Tue May 10 22:28:46 2005 +0000
    15.2 +++ b/xen/include/public/arch-ia64.h	Tue May 10 22:58:27 2005 +0000
    15.3 @@ -63,24 +63,8 @@ typedef struct {
    15.4  	unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
    15.5  	unsigned long rrs[8];	// region registers
    15.6  	unsigned long krs[8];	// kernel registers
    15.7 -	unsigned long pkrs[8]; // protection key registers
    15.8 -	// FIXME:  These shouldn't be here as they can be overwritten by guests
    15.9 -	// and validation at TLB miss time would be too expensive.
   15.10 -	TR_ENTRY itrs[NITRS];
   15.11 -	TR_ENTRY dtrs[NDTRS];
   15.12 -	TR_ENTRY itlb;
   15.13 -	TR_ENTRY dtlb;
   15.14 -	unsigned long itlb_pte;
   15.15 -	unsigned long dtlb_pte;
   15.16 -	unsigned long irr[4];
   15.17 -	unsigned long insvc[4];
   15.18 -	unsigned long iva;
   15.19 -	unsigned long dcr;
   15.20 -	unsigned long itc;
   15.21 -	unsigned long domain_itm;
   15.22 -	unsigned long domain_itm_last;
   15.23 -	unsigned long xen_itm;
   15.24 -	unsigned long xen_timer_interval;
   15.25 +	unsigned long pkrs[8];	// protection key registers
   15.26 +	unsigned long tmp[8];	// temp registers (e.g. for hyperprivops)
   15.27  //} PACKED arch_shared_info_t;
   15.28  } arch_vcpu_info_t;		// DON'T PACK 
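
The new tmp[] slots close the loop with the priv_itc_d/priv_itc_i changes in privop.c: a break-based hyperprivop carries no instruction word to decode (ia64_hyperprivop passes inst.inst == 0), so the pte operand is fetched from tmp[0] via vcpu_get_tmp rather than from a general register. Presumably the guest, or a future fast trap path, stashes the operand there before raising the break; an illustrative guest-side sketch under that assumption (xsi standing for a pointer to this vcpu's shared arch_vcpu_info):

	xsi->tmp[0] = pte;		/* operand for the emulator */
	asm volatile ("break %0"
		      : : "i" (0x5 /* HYPERPRIVOP_ITC_D */) : "memory");
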
   15.29