direct-io.hg

changeset 10380:f662f98d594b

[IA64] more cleanup

Clean-up: fw_emul.c created.
More definitions moved to mm.c
process.c is lighter and renamed to faults.c

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Mon Jun 05 14:28:39 2006 -0600 (2006-06-05)
parents 279628dc2d6f
children 0fff4c07af18
files xen/arch/ia64/xen/Makefile xen/arch/ia64/xen/dom_fw.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/faults.c xen/arch/ia64/xen/fw_emul.c xen/arch/ia64/xen/hypercall.c xen/arch/ia64/xen/mm.c xen/arch/ia64/xen/process.c xen/arch/ia64/xen/xenmisc.c xen/include/asm-ia64/domain.h
line diff
     1.1 --- a/xen/arch/ia64/xen/Makefile	Mon Jun 05 14:23:57 2006 -0600
     1.2 +++ b/xen/arch/ia64/xen/Makefile	Mon Jun 05 14:28:39 2006 -0600
     1.3 @@ -3,6 +3,7 @@ obj-y += dom0_ops.o
     1.4  obj-y += domain.o
     1.5  obj-y += dom_fw.o
     1.6  obj-y += efi_emul.o
     1.7 +obj-y += fw_emul.o
     1.8  obj-y += hpsimserial.o
     1.9  obj-y += hypercall.o
    1.10  obj-y += hyperprivop.o
    1.11 @@ -13,7 +14,7 @@ obj-y += mm.o
    1.12  obj-y += mm_init.o
    1.13  obj-y += pcdp.o
    1.14  obj-y += privop.o
    1.15 -obj-y += process.o
    1.16 +obj-y += faults.o
    1.17  obj-y += regionreg.o
    1.18  obj-y += sn_console.o
    1.19  obj-y += vcpu.o
     2.1 --- a/xen/arch/ia64/xen/dom_fw.c	Mon Jun 05 14:23:57 2006 -0600
     2.2 +++ b/xen/arch/ia64/xen/dom_fw.c	Mon Jun 05 14:28:39 2006 -0600
     2.3 @@ -23,7 +23,6 @@
     2.4  #include <xen/acpi.h>
     2.5  
     2.6  #include <asm/dom_fw.h>
     2.7 -#include <public/sched.h>
     2.8  
     2.9  static struct ia64_boot_param *dom_fw_init(struct domain *, const char *,int,char *,int);
    2.10  extern unsigned long domain_mpa_to_imva(struct domain *,unsigned long mpaddr);
    2.11 @@ -139,334 +138,6 @@ unsigned long dom_fw_setup(struct domain
    2.12  
    2.13  /* the following heavily leveraged from linux/arch/ia64/hp/sim/fw-emu.c */
    2.14  
    2.15 -struct sal_ret_values
    2.16 -sal_emulator (long index, unsigned long in1, unsigned long in2,
    2.17 -	      unsigned long in3, unsigned long in4, unsigned long in5,
    2.18 -	      unsigned long in6, unsigned long in7)
    2.19 -{
    2.20 -	unsigned long r9  = 0;
    2.21 -	unsigned long r10 = 0;
    2.22 -	long r11 = 0;
    2.23 -	long status;
    2.24 -
    2.25 -	status = 0;
    2.26 -	switch (index) {
    2.27 -	    case SAL_FREQ_BASE:
    2.28 -		if (!running_on_sim)
    2.29 -			status = ia64_sal_freq_base(in1,&r9,&r10);
    2.30 -		else switch (in1) {
    2.31 -		      case SAL_FREQ_BASE_PLATFORM:
    2.32 -			r9 = 200000000;
    2.33 -			break;
    2.34 -
    2.35 -		      case SAL_FREQ_BASE_INTERVAL_TIMER:
    2.36 -			r9 = 700000000;
    2.37 -			break;
    2.38 -
    2.39 -		      case SAL_FREQ_BASE_REALTIME_CLOCK:
    2.40 -			r9 = 1;
    2.41 -			break;
    2.42 -
    2.43 -		      default:
    2.44 -			status = -1;
    2.45 -			break;
    2.46 -		}
    2.47 -		break;
    2.48 -	    case SAL_PCI_CONFIG_READ:
    2.49 -		if (current->domain == dom0) {
    2.50 -			u64 value;
    2.51 -			// note that args 2&3 are swapped!!
    2.52 -			status = ia64_sal_pci_config_read(in1,in3,in2,&value);
    2.53 -			r9 = value;
    2.54 -		}
    2.55 -		else
    2.56 -		     printf("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_READ\n");
    2.57 -		break;
    2.58 -	    case SAL_PCI_CONFIG_WRITE:
    2.59 -		if (current->domain == dom0) {
    2.60 -			if (((in1 & ~0xffffffffUL) && (in4 == 0)) ||
    2.61 -			    (in4 > 1) ||
    2.62 -			    (in2 > 8) || (in2 & (in2-1)))
    2.63 -				printf("*** SAL_PCI_CONF_WRITE?!?(adr=0x%lx,typ=0x%lx,sz=0x%lx,val=0x%lx)\n",
    2.64 -					in1,in4,in2,in3);
    2.65 -			// note that args are in a different order!!
    2.66 -			status = ia64_sal_pci_config_write(in1,in4,in2,in3);
    2.67 -		}
    2.68 -		else
    2.69 -		     printf("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_WRITE\n");
    2.70 -		break;
    2.71 -	    case SAL_SET_VECTORS:
    2.72 - 		if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
    2.73 - 			if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
    2.74 - 				/* Sanity check: cs_length1 must be 0,
    2.75 - 				   second vector is reserved.  */
    2.76 - 				status = -2;
    2.77 - 			}
    2.78 - 			else {
    2.79 -				struct domain *d = current->domain;
    2.80 -				d->arch.boot_rdv_ip = in2;
    2.81 -				d->arch.boot_rdv_r1 = in3;
    2.82 -			}
    2.83 - 		}
    2.84 - 		else
    2.85 - 			printf("*** CALLED SAL_SET_VECTORS %lu.  IGNORED...\n",
    2.86 - 			       in1);
    2.87 -		break;
    2.88 -	    case SAL_GET_STATE_INFO:
    2.89 -		/* No more info.  */
    2.90 -		status = -5;
    2.91 -		r9 = 0;
    2.92 -		break;
    2.93 -	    case SAL_GET_STATE_INFO_SIZE:
    2.94 -		/* Return a dummy size.  */
    2.95 -		status = 0;
    2.96 -		r9 = 128;
    2.97 -		break;
    2.98 -	    case SAL_CLEAR_STATE_INFO:
    2.99 -		/* Noop.  */
   2.100 -		break;
   2.101 -	    case SAL_MC_RENDEZ:
   2.102 -		printf("*** CALLED SAL_MC_RENDEZ.  IGNORED...\n");
   2.103 -		break;
   2.104 -	    case SAL_MC_SET_PARAMS:
   2.105 -		printf("*** CALLED SAL_MC_SET_PARAMS.  IGNORED...\n");
   2.106 -		break;
   2.107 -	    case SAL_CACHE_FLUSH:
   2.108 -		if (1) {
   2.109 -			/*  Flush using SAL.
   2.110 -			    This method is faster but has a side effect on
   2.111 -			    other vcpu running on this cpu.  */
   2.112 -			status = ia64_sal_cache_flush (in1);
   2.113 -		}
   2.114 -		else {
   2.115 -			/*  Flush with fc all the domain.
   2.116 -			    This method is slower but has no side effects.  */
   2.117 -			domain_cache_flush (current->domain, in1 == 4 ? 1 : 0);
   2.118 -			status = 0;
   2.119 -		}
   2.120 -		break;
   2.121 -	    case SAL_CACHE_INIT:
   2.122 -		printf("*** CALLED SAL_CACHE_INIT.  IGNORED...\n");
   2.123 -		break;
   2.124 -	    case SAL_UPDATE_PAL:
   2.125 -		printf("*** CALLED SAL_UPDATE_PAL.  IGNORED...\n");
   2.126 -		break;
   2.127 -	    default:
   2.128 -		printf("*** CALLED SAL_ WITH UNKNOWN INDEX.  IGNORED...\n");
   2.129 -		status = -1;
   2.130 -		break;
   2.131 -	}
   2.132 -	return ((struct sal_ret_values) {status, r9, r10, r11});
   2.133 -}
   2.134 -
   2.135 -struct ia64_pal_retval
   2.136 -xen_pal_emulator(unsigned long index, u64 in1, u64 in2, u64 in3)
   2.137 -{
   2.138 -	unsigned long r9  = 0;
   2.139 -	unsigned long r10 = 0;
   2.140 -	unsigned long r11 = 0;
   2.141 -	long status = PAL_STATUS_UNIMPLEMENTED;
   2.142 -
   2.143 -	if (running_on_sim)
   2.144 -		return pal_emulator_static(index);
   2.145 -
   2.146 -	// pal code must be mapped by a TR when pal is called, however
   2.147 -	// calls are rare enough that we will map it lazily rather than
   2.148 -	// at every context switch
   2.149 -	//efi_map_pal_code();
   2.150 -	switch (index) {
   2.151 -	    case PAL_MEM_ATTRIB:
   2.152 -		status = ia64_pal_mem_attrib(&r9);
   2.153 -		break;
   2.154 -	    case PAL_FREQ_BASE:
   2.155 -		status = ia64_pal_freq_base(&r9);
   2.156 -		break;
   2.157 -	    case PAL_PROC_GET_FEATURES:
   2.158 -		status = ia64_pal_proc_get_features(&r9,&r10,&r11);
   2.159 -		break;
   2.160 -	    case PAL_BUS_GET_FEATURES:
   2.161 -		status = ia64_pal_bus_get_features(
   2.162 -				(pal_bus_features_u_t *) &r9,
   2.163 -				(pal_bus_features_u_t *) &r10,
   2.164 -				(pal_bus_features_u_t *) &r11);
   2.165 -		break;
   2.166 -	    case PAL_FREQ_RATIOS:
   2.167 -		status = ia64_pal_freq_ratios(
   2.168 -				(struct pal_freq_ratio *) &r9,
   2.169 -				(struct pal_freq_ratio *) &r10,
   2.170 -				(struct pal_freq_ratio *) &r11);
   2.171 -		break;
   2.172 -	    case PAL_PTCE_INFO:
   2.173 -		{
   2.174 -			// return hard-coded xen-specific values because ptc.e
   2.175 -			// is emulated on xen to always flush everything
   2.176 -			// these values result in only one ptc.e instruction
   2.177 -			status = 0; r9 = 0; r10 = (1L << 32) | 1L; r11 = 0;
   2.178 -		}
   2.179 -		break;
   2.180 -	    case PAL_VERSION:
   2.181 -		status = ia64_pal_version(
   2.182 -				(pal_version_u_t *) &r9,
   2.183 -				(pal_version_u_t *) &r10);
   2.184 -		break;
   2.185 -	    case PAL_VM_PAGE_SIZE:
   2.186 -		status = ia64_pal_vm_page_size(&r9,&r10);
   2.187 -		break;
   2.188 -	    case PAL_DEBUG_INFO:
   2.189 -		status = ia64_pal_debug_info(&r9,&r10);
   2.190 -		break;
   2.191 -	    case PAL_CACHE_SUMMARY:
   2.192 -		status = ia64_pal_cache_summary(&r9,&r10);
   2.193 -		break;
   2.194 -	    case PAL_VM_SUMMARY:
   2.195 -	        {
   2.196 -			/* Use xen-specific values.
   2.197 -			   hash_tag_id is somewhat random! */
   2.198 -			const pal_vm_info_1_u_t v1 =
   2.199 -				{.pal_vm_info_1_s =
   2.200 -				 { .vw = 1,
   2.201 -				   .phys_add_size = 44,
   2.202 -				   .key_size = 16,
   2.203 -				   .max_pkr = 15,
   2.204 -				   .hash_tag_id = 0x30,
   2.205 -				   .max_dtr_entry = NDTRS - 1,
   2.206 -				   .max_itr_entry = NITRS - 1,
   2.207 -#ifdef VHPT_GLOBAL
   2.208 -				   .max_unique_tcs = 3,
   2.209 -				   .num_tc_levels = 2
   2.210 -#else
   2.211 -				   .max_unique_tcs = 2,
   2.212 -				   .num_tc_levels = 1
   2.213 -#endif
   2.214 -				 }};
   2.215 -			const pal_vm_info_2_u_t v2 =
   2.216 -				{ .pal_vm_info_2_s =
   2.217 -				  { .impl_va_msb = 50,
   2.218 -				    .rid_size = current->domain->arch.rid_bits,
   2.219 -				    .reserved = 0 }};
   2.220 -			r9 = v1.pvi1_val;
   2.221 -			r10 = v2.pvi2_val;
   2.222 -			status = PAL_STATUS_SUCCESS;
   2.223 -		}
   2.224 -		break;
   2.225 -	    case PAL_VM_INFO:
   2.226 -#ifdef VHPT_GLOBAL
   2.227 -		if (in1 == 0 && in2 == 2) {
   2.228 -			/* Level 1: VHPT  */
   2.229 -			const pal_tc_info_u_t v =
   2.230 -				{ .pal_tc_info_s = {.num_sets = 128,
   2.231 -						    .associativity = 1,
   2.232 -						    .num_entries = 128,
   2.233 -						    .pf = 1,
   2.234 -						    .unified = 1,
   2.235 -						    .reduce_tr = 0,
   2.236 -						    .reserved = 0}};
   2.237 -			r9 = v.pti_val;
   2.238 -			/* Only support PAGE_SIZE tc.  */
   2.239 -			r10 = PAGE_SIZE;
   2.240 -			status = PAL_STATUS_SUCCESS;
   2.241 -		}
   2.242 -#endif
   2.243 -	        else if (
   2.244 -#ifdef VHPT_GLOBAL 
   2.245 -	                in1 == 1 /* Level 2. */
   2.246 -#else
   2.247 -			in1 == 0 /* Level 1. */
   2.248 -#endif
   2.249 -			 && (in2 == 1 || in2 == 2))
   2.250 -		{
   2.251 -			/* itlb/dtlb, 1 entry.  */
   2.252 -			const pal_tc_info_u_t v =
   2.253 -				{ .pal_tc_info_s = {.num_sets = 1,
   2.254 -						    .associativity = 1,
   2.255 -						    .num_entries = 1,
   2.256 -						    .pf = 1,
   2.257 -						    .unified = 0,
   2.258 -						    .reduce_tr = 0,
   2.259 -						    .reserved = 0}};
   2.260 -			r9 = v.pti_val;
   2.261 -			/* Only support PAGE_SIZE tc.  */
   2.262 -			r10 = PAGE_SIZE;
   2.263 -			status = PAL_STATUS_SUCCESS;
   2.264 -		}
   2.265 -	        else
   2.266 -			status = PAL_STATUS_EINVAL;
   2.267 -		break;
   2.268 -	    case PAL_RSE_INFO:
   2.269 -		status = ia64_pal_rse_info(
   2.270 -				&r9,
   2.271 -				(pal_hints_u_t *) &r10);
   2.272 -		break;
   2.273 -	    case PAL_REGISTER_INFO:
   2.274 -		status = ia64_pal_register_info(in1, &r9, &r10);
   2.275 -		break;
   2.276 -	    case PAL_CACHE_FLUSH:
   2.277 -		/* FIXME */
   2.278 -		printk("PAL_CACHE_FLUSH NOT IMPLEMENTED!\n");
   2.279 -		BUG();
   2.280 -		break;
   2.281 -	    case PAL_PERF_MON_INFO:
   2.282 -		{
   2.283 -			unsigned long pm_buffer[16];
   2.284 -			status = ia64_pal_perf_mon_info(
   2.285 -					pm_buffer,
   2.286 -					(pal_perf_mon_info_u_t *) &r9);
   2.287 -			if (status != 0) {
   2.288 -				while(1)
   2.289 -				printk("PAL_PERF_MON_INFO fails ret=%ld\n", status);
   2.290 -				break;
   2.291 -			}
   2.292 -			if (copy_to_user((void __user *)in1,pm_buffer,128)) {
   2.293 -				while(1)
   2.294 -				printk("xen_pal_emulator: PAL_PERF_MON_INFO "
   2.295 -					"can't copy to user!!!!\n");
   2.296 -				status = PAL_STATUS_UNIMPLEMENTED;
   2.297 -				break;
   2.298 -			}
   2.299 -		}
   2.300 -		break;
   2.301 -	    case PAL_CACHE_INFO:
   2.302 -		{
   2.303 -			pal_cache_config_info_t ci;
   2.304 -			status = ia64_pal_cache_config_info(in1,in2,&ci);
   2.305 -			if (status != 0) break;
   2.306 -			r9 = ci.pcci_info_1.pcci1_data;
   2.307 -			r10 = ci.pcci_info_2.pcci2_data;
   2.308 -		}
   2.309 -		break;
   2.310 -	    case PAL_VM_TR_READ:	/* FIXME: vcpu_get_tr?? */
   2.311 -		printk("PAL_VM_TR_READ NOT IMPLEMENTED, IGNORED!\n");
   2.312 -		break;
   2.313 -	    case PAL_HALT_INFO:
   2.314 -	        {
   2.315 -		    /* 1000 cycles to enter/leave low power state,
   2.316 -		       consumes 10 mW, implemented and cache/TLB coherent.  */
   2.317 -		    unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
   2.318 -			    | (1UL << 61) | (1UL << 60);
   2.319 -		    if (copy_to_user ((void *)in1, &res, sizeof (res)))
   2.320 -			    status = PAL_STATUS_EINVAL;    
   2.321 -		    else
   2.322 -			    status = PAL_STATUS_SUCCESS;
   2.323 -	        }
   2.324 -		break;
   2.325 -	    case PAL_HALT:
   2.326 -		    if (current->domain == dom0) {
   2.327 -			    printf ("Domain0 halts the machine\n");
   2.328 -			    (*efi.reset_system)(EFI_RESET_SHUTDOWN,0,0,NULL);
   2.329 -		    }
   2.330 -		    else
   2.331 -			    domain_shutdown (current->domain,
   2.332 -					     SHUTDOWN_poweroff);
   2.333 -		    break;
   2.334 -	    default:
   2.335 -		printk("xen_pal_emulator: UNIMPLEMENTED PAL CALL %lu!!!!\n",
   2.336 -				index);
   2.337 -		break;
   2.338 -	}
   2.339 -	return ((struct ia64_pal_retval) {status, r9, r10, r11});
   2.340 -}
   2.341 -
   2.342 -
   2.343  #define NFUNCPTRS 20
   2.344  
   2.345  static void print_md(efi_memory_desc_t *md)
   2.346 @@ -479,7 +150,6 @@ static void print_md(efi_memory_desc_t *
   2.347  #endif
   2.348  }
   2.349  
   2.350 -
   2.351  static u32 lsapic_nbr;
   2.352  
   2.353  /* Modify lsapic table.  Provides LPs.  */
     3.1 --- a/xen/arch/ia64/xen/domain.c	Mon Jun 05 14:23:57 2006 -0600
     3.2 +++ b/xen/arch/ia64/xen/domain.c	Mon Jun 05 14:28:39 2006 -0600
     3.3 @@ -78,21 +78,96 @@ extern char dom0_command_line[];
     3.4  #define IS_XEN_ADDRESS(d,a) ((a >= d->xen_vastart) && (a <= d->xen_vaend))
     3.5  
     3.6  /* FIXME: where these declarations should be there ? */
     3.7 -extern long platform_is_hp_ski(void);
     3.8  extern void serial_input_init(void);
     3.9  static void init_switch_stack(struct vcpu *v);
    3.10 +extern void vmx_do_launch(struct vcpu *);
    3.11  void build_physmap_table(struct domain *d);
    3.12  
    3.13  /* this belongs in include/asm, but there doesn't seem to be a suitable place */
    3.14 -void arch_domain_destroy(struct domain *d)
    3.15 +unsigned long context_switch_count = 0;
    3.16 +
    3.17 +extern struct vcpu *ia64_switch_to (struct vcpu *next_task);
    3.18 +
    3.19 +#include <xen/sched-if.h>
    3.20 +
    3.21 +void schedule_tail(struct vcpu *prev)
    3.22 +{
    3.23 +	extern char ia64_ivt;
    3.24 +	context_saved(prev);
    3.25 +
    3.26 +	if (VMX_DOMAIN(current)) {
    3.27 +		vmx_do_launch(current);
    3.28 +	} else {
    3.29 +		ia64_set_iva(&ia64_ivt);
    3.30 +        	ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
    3.31 +		        VHPT_ENABLED);
    3.32 +		load_region_regs(current);
    3.33 +		vcpu_load_kernel_regs(current);
    3.34 +	}
    3.35 +}
    3.36 +
    3.37 +void context_switch(struct vcpu *prev, struct vcpu *next)
    3.38  {
    3.39 -	BUG_ON(d->arch.mm.pgd != NULL);
    3.40 -	if (d->shared_info != NULL)
    3.41 -		free_xenheap_page(d->shared_info);
    3.42 +    uint64_t spsr;
    3.43 +    uint64_t pta;
    3.44 +
    3.45 +    local_irq_save(spsr);
    3.46 +    context_switch_count++;
    3.47 +
    3.48 +    __ia64_save_fpu(prev->arch._thread.fph);
    3.49 +    __ia64_load_fpu(next->arch._thread.fph);
    3.50 +    if (VMX_DOMAIN(prev))
    3.51 +	    vmx_save_state(prev);
    3.52 +    if (VMX_DOMAIN(next))
    3.53 +	    vmx_load_state(next);
    3.54 +    /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
    3.55 +    prev = ia64_switch_to(next);
    3.56 +
    3.57 +    //cpu_set(smp_processor_id(), current->domain->domain_dirty_cpumask);
    3.58 +
    3.59 +    if (!VMX_DOMAIN(current)){
    3.60 +	    vcpu_set_next_timer(current);
    3.61 +    }
    3.62 +
    3.63  
    3.64 -	domain_flush_destroy (d);
    3.65 +// leave this debug for now: it acts as a heartbeat when more than
    3.66 +// one domain is active
    3.67 +{
    3.68 +static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50};
    3.69 +static int i = 100;
    3.70 +int id = ((struct vcpu *)current)->domain->domain_id & 0xf;
    3.71 +if (!cnt[id]--) { cnt[id] = 500000; printk("%x",id); }
    3.72 +if (!i--) { i = 1000000; printk("+"); }
    3.73 +}
    3.74  
    3.75 -	deallocate_rid_range(d);
    3.76 +    if (VMX_DOMAIN(current)){
    3.77 +		vmx_load_all_rr(current);
    3.78 +    }else{
    3.79 +    	extern char ia64_ivt;
    3.80 +    	ia64_set_iva(&ia64_ivt);
    3.81 +    	if (!is_idle_domain(current->domain)) {
    3.82 +        	ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
    3.83 +			     VHPT_ENABLED);
    3.84 +	    	load_region_regs(current);
    3.85 +	    	vcpu_load_kernel_regs(current);
    3.86 +		if (vcpu_timer_expired(current))
    3.87 +			vcpu_pend_timer(current);
    3.88 +    	}else {
    3.89 +		/* When switching to idle domain, only need to disable vhpt
    3.90 +		 * walker. Then all accesses happen within idle context will
    3.91 +		 * be handled by TR mapping and identity mapping.
    3.92 +		 */
    3.93 +		pta = ia64_get_pta();
    3.94 +		ia64_set_pta(pta & ~VHPT_ENABLED);
    3.95 +        }
    3.96 +    }
    3.97 +    local_irq_restore(spsr);
    3.98 +    context_saved(prev);
    3.99 +}
   3.100 +
   3.101 +void continue_running(struct vcpu *same)
   3.102 +{
   3.103 +	/* nothing to do */
   3.104  }
   3.105  
   3.106  static void default_idle(void)
   3.107 @@ -259,6 +334,17 @@ fail_nomem:
   3.108  	return -ENOMEM;
   3.109  }
   3.110  
   3.111 +void arch_domain_destroy(struct domain *d)
   3.112 +{
   3.113 +	BUG_ON(d->arch.mm.pgd != NULL);
   3.114 +	if (d->shared_info != NULL)
   3.115 +		free_xenheap_page(d->shared_info);
   3.116 +
   3.117 +	domain_flush_destroy (d);
   3.118 +
   3.119 +	deallocate_rid_range(d);
   3.120 +}
   3.121 +
   3.122  void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c)
   3.123  {
   3.124  	c->regs = *vcpu_regs (v);
   3.125 @@ -543,7 +629,7 @@ static void loaddomainelfimage(struct do
   3.126  
   3.127  void alloc_dom0(void)
   3.128  {
   3.129 -	if (platform_is_hp_ski()) {
   3.130 +	if (running_on_sim) {
   3.131  		dom0_size = 128*1024*1024; //FIXME: Should be configurable
   3.132  	}
   3.133  #ifdef CONFIG_DOMAIN0_CONTIGUOUS
   3.134 @@ -798,21 +884,21 @@ int construct_dom0(struct domain *d,
   3.135  
   3.136  void machine_restart(char * __unused)
   3.137  {
   3.138 -	if (platform_is_hp_ski()) dummy();
   3.139 +	if (running_on_sim) dummy();
   3.140  	printf("machine_restart called: spinning....\n");
   3.141  	while(1);
   3.142  }
   3.143  
   3.144  void machine_halt(void)
   3.145  {
   3.146 -	if (platform_is_hp_ski()) dummy();
   3.147 +	if (running_on_sim) dummy();
   3.148  	printf("machine_halt called: spinning....\n");
   3.149  	while(1);
   3.150  }
   3.151  
   3.152  void dummy_called(char *function)
   3.153  {
   3.154 -	if (platform_is_hp_ski()) asm("break 0;;");
   3.155 +	if (running_on_sim) asm("break 0;;");
   3.156  	printf("dummy called in %s: spinning....\n", function);
   3.157  	while(1);
   3.158  }
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/arch/ia64/xen/faults.c	Mon Jun 05 14:28:39 2006 -0600
     4.3 @@ -0,0 +1,662 @@
     4.4 +
     4.5 +/*
     4.6 + * Miscellaneous process/domain related routines
     4.7 + * 
     4.8 + * Copyright (C) 2004 Hewlett-Packard Co.
     4.9 + *	Dan Magenheimer (dan.magenheimer@hp.com)
    4.10 + *
    4.11 + */
    4.12 +
    4.13 +#include <xen/config.h>
    4.14 +#include <xen/lib.h>
    4.15 +#include <xen/errno.h>
    4.16 +#include <xen/sched.h>
    4.17 +#include <xen/smp.h>
    4.18 +#include <asm/ptrace.h>
    4.19 +#include <xen/delay.h>
    4.20 +
    4.21 +#include <asm/system.h>
    4.22 +#include <asm/processor.h>
    4.23 +#include <xen/irq.h>
    4.24 +#include <xen/event.h>
    4.25 +#include <asm/privop.h>
    4.26 +#include <asm/vcpu.h>
    4.27 +#include <asm/ia64_int.h>
    4.28 +#include <asm/dom_fw.h>
    4.29 +#include <asm/vhpt.h>
    4.30 +#include <asm/debugger.h>
    4.31 +#include <asm/fpswa.h>
    4.32 +
    4.33 +extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
     4.34 +/* FIXME: where these declarations should be there ? */
    4.35 +extern int ia64_hyperprivop(unsigned long, REGS *);
    4.36 +extern IA64FAULT ia64_hypercall(struct pt_regs *regs);
    4.37 +
    4.38 +#define IA64_PSR_CPL1	(__IA64_UL(1) << IA64_PSR_CPL1_BIT)
    4.39 +// note IA64_PSR_PK removed from following, why is this necessary?
    4.40 +#define	DELIVER_PSR_SET	(IA64_PSR_IC | IA64_PSR_I | \
    4.41 +			IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
    4.42 +			IA64_PSR_IT | IA64_PSR_BN)
    4.43 +
    4.44 +#define	DELIVER_PSR_CLR	(IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
    4.45 +			IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI |	\
    4.46 +			IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
    4.47 +			IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
    4.48 +			IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
    4.49 +			IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
    4.50 +
    4.51 +
    4.52 +extern void do_ssc(unsigned long ssc, struct pt_regs *regs);
    4.53 +
    4.54 +unsigned long slow_reflect_count[0x80] = { 0 };
    4.55 +unsigned long fast_reflect_count[0x80] = { 0 };
    4.56 +
    4.57 +#define inc_slow_reflect_count(vec) slow_reflect_count[vec>>8]++;
    4.58 +
    4.59 +void zero_reflect_counts(void)
    4.60 +{
    4.61 +	int i;
    4.62 +	for (i=0; i<0x80; i++) slow_reflect_count[i] = 0;
    4.63 +	for (i=0; i<0x80; i++) fast_reflect_count[i] = 0;
    4.64 +}
    4.65 +
    4.66 +int dump_reflect_counts(char *buf)
    4.67 +{
    4.68 +	int i,j,cnt;
    4.69 +	char *s = buf;
    4.70 +
    4.71 +	s += sprintf(s,"Slow reflections by vector:\n");
    4.72 +	for (i = 0, j = 0; i < 0x80; i++) {
    4.73 +		if ( (cnt = slow_reflect_count[i]) != 0 ) {
    4.74 +			s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
    4.75 +			if ((j++ & 3) == 3) s += sprintf(s,"\n");
    4.76 +		}
    4.77 +	}
    4.78 +	if (j & 3) s += sprintf(s,"\n");
    4.79 +	s += sprintf(s,"Fast reflections by vector:\n");
    4.80 +	for (i = 0, j = 0; i < 0x80; i++) {
    4.81 +		if ( (cnt = fast_reflect_count[i]) != 0 ) {
    4.82 +			s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
    4.83 +			if ((j++ & 3) == 3) s += sprintf(s,"\n");
    4.84 +		}
    4.85 +	}
    4.86 +	if (j & 3) s += sprintf(s,"\n");
    4.87 +	return s - buf;
    4.88 +}
    4.89 +
    4.90 +// should never panic domain... if it does, stack may have been overrun
    4.91 +void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
    4.92 +{
    4.93 +	struct vcpu *v = current;
    4.94 +
    4.95 +	if (!(PSCB(v,ipsr) & IA64_PSR_DT)) {
    4.96 +		panic_domain(regs,"psr.dt off, trying to deliver nested dtlb!\n");
    4.97 +	}
    4.98 +	vector &= ~0xf;
    4.99 +	if (vector != IA64_DATA_TLB_VECTOR &&
   4.100 +	    vector != IA64_ALT_DATA_TLB_VECTOR &&
   4.101 +	    vector != IA64_VHPT_TRANS_VECTOR) {
   4.102 +		panic_domain(regs,"psr.ic off, delivering fault=%lx,ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
   4.103 +		             vector,regs->cr_ipsr,regs->cr_iip,PSCB(v,ifa),isr,PSCB(v,iip));
   4.104 +	}
   4.105 +}
   4.106 +
   4.107 +void reflect_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
   4.108 +{
   4.109 +	struct vcpu *v = current;
   4.110 +
   4.111 +	if (!PSCB(v,interrupt_collection_enabled))
   4.112 +		check_bad_nested_interruption(isr,regs,vector);
   4.113 +	PSCB(v,unat) = regs->ar_unat;  // not sure if this is really needed?
   4.114 +	PSCB(v,precover_ifs) = regs->cr_ifs;
   4.115 +	vcpu_bsw0(v);
   4.116 +	PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
   4.117 +	PSCB(v,isr) = isr;
   4.118 +	PSCB(v,iip) = regs->cr_iip;
   4.119 +	PSCB(v,ifs) = 0;
   4.120 +	PSCB(v,incomplete_regframe) = 0;
   4.121 +
   4.122 +	regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
   4.123 +	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
   4.124 +	regs->r31 = XSI_IPSR;
   4.125 +
   4.126 +	v->vcpu_info->evtchn_upcall_mask = 1;
   4.127 +	PSCB(v,interrupt_collection_enabled) = 0;
   4.128 +
   4.129 +	inc_slow_reflect_count(vector);
   4.130 +}
   4.131 +
   4.132 +static unsigned long pending_false_positive = 0;
   4.133 +
   4.134 +void reflect_extint(struct pt_regs *regs)
   4.135 +{
   4.136 +	unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
   4.137 +	struct vcpu *v = current;
   4.138 +	static int first_extint = 1;
   4.139 +
   4.140 +	if (first_extint) {
   4.141 +		printf("Delivering first extint to domain: isr=0x%lx, iip=0x%lx\n", isr, regs->cr_iip);
   4.142 +		first_extint = 0;
   4.143 +	}
   4.144 +	if (vcpu_timer_pending_early(v))
   4.145 +printf("*#*#*#* about to deliver early timer to domain %d!!!\n",v->domain->domain_id);
   4.146 +	PSCB(current,itir) = 0;
   4.147 +	reflect_interruption(isr,regs,IA64_EXTINT_VECTOR);
   4.148 +}
   4.149 +
   4.150 +void reflect_event(struct pt_regs *regs)
   4.151 +{
   4.152 +	unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
   4.153 +	struct vcpu *v = current;
   4.154 +
   4.155 +	/* Sanity check */
   4.156 +	if (is_idle_vcpu(v) || !user_mode(regs)) {
   4.157 +		//printk("WARN: invocation to reflect_event in nested xen\n");
   4.158 +		return;
   4.159 +	}
   4.160 +
   4.161 +	if (!event_pending(v))
   4.162 +		return;
   4.163 +
   4.164 +	if (!PSCB(v,interrupt_collection_enabled))
   4.165 +		printf("psr.ic off, delivering event, ipsr=%lx,iip=%lx,isr=%lx,viip=0x%lx\n",
   4.166 +		       regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
   4.167 +	PSCB(v,unat) = regs->ar_unat;  // not sure if this is really needed?
   4.168 +	PSCB(v,precover_ifs) = regs->cr_ifs;
   4.169 +	vcpu_bsw0(v);
   4.170 +	PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
   4.171 +	PSCB(v,isr) = isr;
   4.172 +	PSCB(v,iip) = regs->cr_iip;
   4.173 +	PSCB(v,ifs) = 0;
   4.174 +	PSCB(v,incomplete_regframe) = 0;
   4.175 +
   4.176 +	regs->cr_iip = v->arch.event_callback_ip;
   4.177 +	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
   4.178 +	regs->r31 = XSI_IPSR;
   4.179 +
   4.180 +	v->vcpu_info->evtchn_upcall_mask = 1;
   4.181 +	PSCB(v,interrupt_collection_enabled) = 0;
   4.182 +}
   4.183 +
   4.184 +// ONLY gets called from ia64_leave_kernel
   4.185 +// ONLY call with interrupts disabled?? (else might miss one?)
   4.186 +// NEVER successful if already reflecting a trap/fault because psr.i==0
   4.187 +void deliver_pending_interrupt(struct pt_regs *regs)
   4.188 +{
   4.189 +	struct domain *d = current->domain;
   4.190 +	struct vcpu *v = current;
   4.191 +	// FIXME: Will this work properly if doing an RFI???
   4.192 +	if (!is_idle_domain(d) && user_mode(regs)) {
   4.193 +		if (vcpu_deliverable_interrupts(v))
   4.194 +			reflect_extint(regs);
   4.195 +		else if (PSCB(v,pending_interruption))
   4.196 +			++pending_false_positive;
   4.197 +	}
   4.198 +}
   4.199 +unsigned long lazy_cover_count = 0;
   4.200 +
   4.201 +static int
   4.202 +handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
   4.203 +{
   4.204 +	if (!PSCB(v,interrupt_collection_enabled)) {
   4.205 +		PSCB(v,ifs) = regs->cr_ifs;
   4.206 +		PSCB(v,incomplete_regframe) = 1;
   4.207 +		regs->cr_ifs = 0;
   4.208 +		lazy_cover_count++;
   4.209 +		return(1); // retry same instruction with cr.ifs off
   4.210 +	}
   4.211 +	return(0);
   4.212 +}
   4.213 +
   4.214 +void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
   4.215 +{
   4.216 +	unsigned long iip = regs->cr_iip, iha;
   4.217 +	// FIXME should validate address here
   4.218 +	unsigned long pteval;
   4.219 +	unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
   4.220 +	IA64FAULT fault;
   4.221 +
   4.222 +	if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs)) return;
   4.223 +	if ((isr & IA64_ISR_SP)
   4.224 +	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
   4.225 +	{
   4.226 +		/*
   4.227 +		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
   4.228 +		 * bit in the psr to ensure forward progress.  (Target register will get a
   4.229 +		 * NaT for ld.s, lfetch will be canceled.)
   4.230 +		 */
   4.231 +		ia64_psr(regs)->ed = 1;
   4.232 +		return;
   4.233 +	}
   4.234 +
   4.235 + again:
   4.236 +	fault = vcpu_translate(current,address,is_data,&pteval,&itir,&iha);
   4.237 +	if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
   4.238 +		u64 logps;
   4.239 +		pteval = translate_domain_pte(pteval, address, itir, &logps);
   4.240 +		vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,logps);
   4.241 +		if (fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) {
   4.242 +			/* dtlb has been purged in-between.  This dtlb was
   4.243 +			   matching.  Undo the work.  */
   4.244 +			vcpu_flush_tlb_vhpt_range (address, 1);
   4.245 +			goto again;
   4.246 +		}
   4.247 +		return;
   4.248 +	}
   4.249 +
   4.250 +	if (!user_mode (regs)) {
   4.251 +		/* The fault occurs inside Xen.  */
   4.252 +		if (!ia64_done_with_exception(regs)) {
   4.253 +			// should never happen.  If it does, region 0 addr may
   4.254 +			// indicate a bad xen pointer
   4.255 +			printk("*** xen_handle_domain_access: exception table"
   4.256 +			       " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
   4.257 +				iip, address);
   4.258 +			panic_domain(regs,"*** xen_handle_domain_access: exception table"
   4.259 +			       " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
   4.260 +				iip, address);
   4.261 +		}
   4.262 +		return;
   4.263 +	}
   4.264 +	if (!PSCB(current,interrupt_collection_enabled)) {
   4.265 +		check_bad_nested_interruption(isr,regs,fault);
   4.266 +		//printf("Delivering NESTED DATA TLB fault\n");
   4.267 +		fault = IA64_DATA_NESTED_TLB_VECTOR;
   4.268 +		regs->cr_iip = ((unsigned long) PSCBX(current,iva) + fault) & ~0xffUL;
   4.269 +		regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
   4.270 +		// NOTE: nested trap must NOT pass PSCB address
   4.271 +		//regs->r31 = (unsigned long) &PSCB(current);
   4.272 +		inc_slow_reflect_count(fault);
   4.273 +		return;
   4.274 +	}
   4.275 +
   4.276 +	PSCB(current,itir) = itir;
   4.277 +	PSCB(current,iha) = iha;
   4.278 +	PSCB(current,ifa) = address;
   4.279 +	reflect_interruption(isr, regs, fault);
   4.280 +}
   4.281 +
   4.282 +fpswa_interface_t *fpswa_interface = 0;
   4.283 +
   4.284 +void trap_init (void)
   4.285 +{
   4.286 +	if (ia64_boot_param->fpswa)
   4.287 +		/* FPSWA fixup: make the interface pointer a virtual address: */
   4.288 +		fpswa_interface = __va(ia64_boot_param->fpswa);
   4.289 +	else
   4.290 +		printk("No FPSWA supported.\n");
   4.291 +}
   4.292 +
   4.293 +static fpswa_ret_t
   4.294 +fp_emulate (int fp_fault, void *bundle, unsigned long *ipsr,
   4.295 +	    unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
   4.296 +	    unsigned long *ifs, struct pt_regs *regs)
   4.297 +{
   4.298 +	fp_state_t fp_state;
   4.299 +	fpswa_ret_t ret;
   4.300 +
   4.301 +	if (!fpswa_interface)
   4.302 +		return ((fpswa_ret_t) {-1, 0, 0, 0});
   4.303 +
   4.304 +	memset(&fp_state, 0, sizeof(fp_state_t));
   4.305 +
   4.306 +	/*
   4.307 +	 * compute fp_state.  only FP registers f6 - f11 are used by the
   4.308 +	 * kernel, so set those bits in the mask and set the low volatile
   4.309 +	 * pointer to point to these registers.
   4.310 +	 */
   4.311 +	fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */
   4.312 +
   4.313 +	fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
   4.314 +	/*
   4.315 +	 * unsigned long (*EFI_FPSWA) (
   4.316 +	 *      unsigned long    trap_type,
   4.317 +	 *      void             *Bundle,
   4.318 +	 *      unsigned long    *pipsr,
   4.319 +	 *      unsigned long    *pfsr,
   4.320 +	 *      unsigned long    *pisr,
   4.321 +	 *      unsigned long    *ppreds,
   4.322 +	 *      unsigned long    *pifs,
   4.323 +	 *      void             *fp_state);
   4.324 +	 */
   4.325 +	ret = (*fpswa_interface->fpswa)(fp_fault, bundle,
   4.326 +					ipsr, fpsr, isr, pr, ifs, &fp_state);
   4.327 +
   4.328 +	return ret;
   4.329 +}
   4.330 +
   4.331 +/*
   4.332 + * Handle floating-point assist faults and traps for domain.
   4.333 + */
   4.334 +unsigned long
   4.335 +handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
   4.336 +{
   4.337 +	struct vcpu *v = current;
   4.338 +	IA64_BUNDLE bundle;
   4.339 +	IA64_BUNDLE __get_domain_bundle(UINT64);
   4.340 +	unsigned long fault_ip;
   4.341 +	fpswa_ret_t ret;
   4.342 +
   4.343 +	fault_ip = regs->cr_iip;
   4.344 +	/*
   4.345 +	 * When the FP trap occurs, the trapping instruction is completed.
   4.346 +	 * If ipsr.ri == 0, there is the trapping instruction in previous bundle.
   4.347 +	 */
   4.348 +	if (!fp_fault && (ia64_psr(regs)->ri == 0))
   4.349 +		fault_ip -= 16;
   4.350 +	bundle = __get_domain_bundle(fault_ip);
   4.351 +	if (!bundle.i64[0] && !bundle.i64[1]) {
   4.352 +		printk("%s: floating-point bundle at 0x%lx not mapped\n",
   4.353 +		       __FUNCTION__, fault_ip);
   4.354 +		return -1;
   4.355 +	}
   4.356 +
   4.357 +	ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
   4.358 +	                 &isr, &regs->pr, &regs->cr_ifs, regs);
   4.359 +
   4.360 +	if (ret.status) {
   4.361 +		PSCBX(v, fpswa_ret) = ret;
   4.362 +		printk("%s(%s): fp_emulate() returned %ld\n",
   4.363 +		       __FUNCTION__, fp_fault?"fault":"trap", ret.status);
   4.364 +	}
   4.365 +
   4.366 +	return ret.status;
   4.367 +}
   4.368 +
   4.369 +void
   4.370 +ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
   4.371 +	    unsigned long iim, unsigned long itir, unsigned long arg5,
   4.372 +	    unsigned long arg6, unsigned long arg7, unsigned long stack)
   4.373 +{
   4.374 +	struct pt_regs *regs = (struct pt_regs *) &stack;
   4.375 +	unsigned long code;
   4.376 +	static const char *reason[] = {
   4.377 +		"IA-64 Illegal Operation fault",
   4.378 +		"IA-64 Privileged Operation fault",
   4.379 +		"IA-64 Privileged Register fault",
   4.380 +		"IA-64 Reserved Register/Field fault",
   4.381 +		"Disabled Instruction Set Transition fault",
   4.382 +		"Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
   4.383 +		"Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
   4.384 +		"Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
   4.385 +	};
   4.386 +
   4.387 +	printf("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, ipsr=0x%016lx, isr=0x%016lx\n",
   4.388 +	       vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);
   4.389 +
   4.390 +
   4.391 +	if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
   4.392 +		/*
   4.393 +		 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
   4.394 +		 * the lfetch.
   4.395 +		 */
   4.396 +		ia64_psr(regs)->ed = 1;
   4.397 +		printf("ia64_fault: handled lfetch.fault\n");
   4.398 +		return;
   4.399 +	}
   4.400 +
   4.401 +	switch (vector) {
   4.402 +	    case 0:
   4.403 +		printk("VHPT Translation.\n");
   4.404 +		break;
   4.405 +	  
   4.406 +	    case 4:
   4.407 +		printk("Alt DTLB.\n");
   4.408 +		break;
   4.409 +	  
   4.410 +	    case 6:
   4.411 +		printk("Instruction Key Miss.\n");
   4.412 +		break;
   4.413 +
   4.414 +	    case 7: 
   4.415 +		printk("Data Key Miss.\n");
   4.416 +		break;
   4.417 +
   4.418 +	    case 8: 
   4.419 +		printk("Dirty-bit.\n");
   4.420 +		break;
   4.421 +
   4.422 +	    case 20:
   4.423 +		printk("Page Not Found.\n");
   4.424 +		break;
   4.425 +
   4.426 +	    case 21:
   4.427 +		printk("Key Permission.\n");
   4.428 +		break;
   4.429 +
   4.430 +	    case 22:
   4.431 +		printk("Instruction Access Rights.\n");
   4.432 +		break;
   4.433 +
   4.434 +	    case 24: /* General Exception */
   4.435 +		code = (isr >> 4) & 0xf;
   4.436 +		printk("General Exception: %s%s.\n", reason[code],
   4.437 +		        (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
   4.438 +		                       " (data access)") : "");
   4.439 +		if (code == 8) {
   4.440 +# ifdef CONFIG_IA64_PRINT_HAZARDS
   4.441 +			printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
   4.442 +			       current->comm, current->pid,
   4.443 +			       regs->cr_iip + ia64_psr(regs)->ri,
   4.444 +			       regs->pr);
   4.445 +# endif
   4.446 +			printf("ia64_fault: returning on hazard\n");
   4.447 +			return;
   4.448 +		}
   4.449 +		break;
   4.450 +
   4.451 +	    case 25:
   4.452 +		printk("Disabled FP-Register.\n");
   4.453 +		break;
   4.454 +
   4.455 +	    case 26:
   4.456 +		printk("NaT consumption.\n");
   4.457 +		break;
   4.458 +
   4.459 +	    case 29:
   4.460 +		printk("Debug.\n");
   4.461 +		break;
   4.462 +
   4.463 +	    case 30:
   4.464 +		printk("Unaligned Reference.\n");
   4.465 +		break;
   4.466 +
   4.467 +	    case 31:
   4.468 +		printk("Unsupported data reference.\n");
   4.469 +		break;
   4.470 +
   4.471 +	    case 32:
   4.472 +		printk("Floating-Point Fault.\n");
   4.473 +		break;
   4.474 +
   4.475 +	    case 33:
   4.476 +		printk("Floating-Point Trap.\n");
   4.477 +		break;
   4.478 +
   4.479 +	    case 34:
   4.480 +		printk("Lower Privilege Transfer Trap.\n");
   4.481 +		break;
   4.482 +
   4.483 +	    case 35:
   4.484 +		printk("Taken Branch Trap.\n");
   4.485 +		break;
   4.486 +
   4.487 +	    case 36:
   4.488 +		printk("Single Step Trap.\n");
   4.489 +		break;
   4.490 +    
   4.491 +	    case 45:
   4.492 +		printk("IA-32 Exception.\n");
   4.493 +		break;
   4.494 +
   4.495 +	    case 46:
   4.496 +		printk("IA-32 Intercept.\n");
   4.497 +		break;
   4.498 +
   4.499 +	    case 47:
   4.500 +		printk("IA-32 Interrupt.\n");
   4.501 +		break;
   4.502 +
   4.503 +	    default:
   4.504 +		printk("Fault %lu\n", vector);
   4.505 +		break;
   4.506 +	}
   4.507 +
   4.508 +	show_registers(regs);
   4.509 +	panic("Fault in Xen.\n");
   4.510 +}
   4.511 +
   4.512 +unsigned long running_on_sim = 0;
   4.513 +
   4.514 +
   4.515 +/* Also read in hyperprivop.S  */
   4.516 +int first_break = 0;
   4.517 +
   4.518 +void
   4.519 +ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
   4.520 +{
   4.521 +	struct domain *d = current->domain;
   4.522 +	struct vcpu *v = current;
   4.523 +	IA64FAULT vector;
   4.524 +
   4.525 +	if (iim == 0x80001 || iim == 0x80002) {	//FIXME: don't hardcode constant
   4.526 +		do_ssc(vcpu_get_gr(current,36), regs);
   4.527 +	} 
   4.528 +#ifdef CRASH_DEBUG
   4.529 +	else if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs)) {
   4.530 +		if (iim == 0)
   4.531 +			show_registers(regs);
   4.532 +		debugger_trap_fatal(0 /* don't care */, regs);
   4.533 +	} 
   4.534 +#endif
   4.535 +	else if (iim == d->arch.breakimm) {
   4.536 +		/* by default, do not continue */
   4.537 +		v->arch.hypercall_continuation = 0;
   4.538 +
   4.539 +		if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
   4.540 +			if (!PSCBX(v, hypercall_continuation))
   4.541 +				vcpu_increment_iip(current);
   4.542 +		}
   4.543 +		else reflect_interruption(isr, regs, vector);
   4.544 +	}
   4.545 +	else if (!PSCB(v,interrupt_collection_enabled)) {
   4.546 +		if (ia64_hyperprivop(iim,regs))
   4.547 +			vcpu_increment_iip(current);
   4.548 +	}
   4.549 +	else {
   4.550 +		if (iim == 0) 
   4.551 +			die_if_kernel("bug check", regs, iim);
   4.552 +		PSCB(v,iim) = iim;
   4.553 +		reflect_interruption(isr,regs,IA64_BREAK_VECTOR);
   4.554 +	}
   4.555 +}
   4.556 +
   4.557 +void
   4.558 +ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
   4.559 +{
   4.560 +	IA64FAULT vector;
   4.561 +
   4.562 +	vector = priv_emulate(current,regs,isr);
   4.563 +	if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
   4.564 +		// Note: if a path results in a vector to reflect that requires
   4.565 +		// iha/itir (e.g. vcpu_force_data_miss), they must be set there
   4.566 +		reflect_interruption(isr,regs,vector);
   4.567 +	}
   4.568 +}
   4.569 +
   4.570 +/* Used in vhpt.h.  */
   4.571 +#define INTR_TYPE_MAX	10
   4.572 +UINT64 int_counts[INTR_TYPE_MAX];
   4.573 +
   4.574 +void
   4.575 +ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
   4.576 +{
   4.577 +	struct vcpu *v = current;
   4.578 +	unsigned long check_lazy_cover = 0;
   4.579 +	unsigned long psr = regs->cr_ipsr;
   4.580 +
    4.581 +	/* Following faults shouldn't be seen from Xen itself */
   4.582 +	if (!(psr & IA64_PSR_CPL)) BUG();
   4.583 +
   4.584 +	switch(vector) {
   4.585 +	    case 8:
   4.586 +		vector = IA64_DIRTY_BIT_VECTOR; break;
   4.587 +	    case 9:
   4.588 +		vector = IA64_INST_ACCESS_BIT_VECTOR; break;
   4.589 +	    case 10:
   4.590 +		check_lazy_cover = 1;
   4.591 +		vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
   4.592 +	    case 20:
   4.593 +		check_lazy_cover = 1;
   4.594 +		vector = IA64_PAGE_NOT_PRESENT_VECTOR; break;
   4.595 +	    case 22:
   4.596 +		vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
   4.597 +	    case 23:
   4.598 +		check_lazy_cover = 1;
   4.599 +		vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
   4.600 +	    case 25:
   4.601 +		vector = IA64_DISABLED_FPREG_VECTOR;
   4.602 +		break;
   4.603 +	    case 26:
   4.604 +		if (((isr >> 4L) & 0xfL) == 1) {
   4.605 +			//regs->eml_unat = 0;  FIXME: DO WE NEED THIS??
   4.606 +			printf("ia64_handle_reflection: handling regNaT fault\n");
   4.607 +			vector = IA64_NAT_CONSUMPTION_VECTOR; break;
   4.608 +		}
   4.609 +#if 1
   4.610 +		// pass null pointer dereferences through with no error
   4.611 +		// but retain debug output for non-zero ifa
   4.612 +		if (!ifa) {
   4.613 +			vector = IA64_NAT_CONSUMPTION_VECTOR; break;
   4.614 +		}
   4.615 +#endif
   4.616 +		printf("*** NaT fault... attempting to handle as privop\n");
   4.617 +		printf("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
   4.618 +		       isr, ifa, regs->cr_iip, psr);
   4.619 +		//regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
   4.620 +		// certain NaT faults are higher priority than privop faults
   4.621 +		vector = priv_emulate(v,regs,isr);
   4.622 +		if (vector == IA64_NO_FAULT) {
   4.623 +			printf("*** Handled privop masquerading as NaT fault\n");
   4.624 +			return;
   4.625 +		}
   4.626 +		vector = IA64_NAT_CONSUMPTION_VECTOR; break;
   4.627 +	    case 27:
   4.628 +		//printf("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
   4.629 +		PSCB(current,iim) = iim;
   4.630 +		vector = IA64_SPECULATION_VECTOR; break;
   4.631 +	    case 30:
   4.632 +		// FIXME: Should we handle unaligned refs in Xen??
   4.633 +		vector = IA64_UNALIGNED_REF_VECTOR; break;
   4.634 +	    case 32:
   4.635 +		if (!(handle_fpu_swa(1, regs, isr))) {
   4.636 +		    vcpu_increment_iip(v);
   4.637 +		    return;
   4.638 +		}
   4.639 +		printf("ia64_handle_reflection: handling FP fault\n");
   4.640 +		vector = IA64_FP_FAULT_VECTOR; break;
   4.641 +	    case 33:
   4.642 +		if (!(handle_fpu_swa(0, regs, isr))) return;
   4.643 +		printf("ia64_handle_reflection: handling FP trap\n");
   4.644 +		vector = IA64_FP_TRAP_VECTOR; break;
   4.645 +	    case 34:
   4.646 +		printf("ia64_handle_reflection: handling lowerpriv trap\n");
   4.647 +		vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR; break;
   4.648 +	    case 35:
   4.649 +		printf("ia64_handle_reflection: handling taken branch trap\n");
   4.650 +		vector = IA64_TAKEN_BRANCH_TRAP_VECTOR; break;
   4.651 +	    case 36:
   4.652 +		printf("ia64_handle_reflection: handling single step trap\n");
   4.653 +		vector = IA64_SINGLE_STEP_TRAP_VECTOR; break;
   4.654 +
   4.655 +	    default:
   4.656 +		printf("ia64_handle_reflection: unhandled vector=0x%lx\n",vector);
   4.657 +		while(vector);
   4.658 +		return;
   4.659 +	}
   4.660 +	if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, regs)) return;
   4.661 +	PSCB(current,ifa) = ifa;
   4.662 +	PSCB(current,itir) = vcpu_get_itir_on_fault(v,ifa);
   4.663 +	reflect_interruption(isr,regs,vector);
   4.664 +}
   4.665 +
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen/arch/ia64/xen/fw_emul.c	Mon Jun 05 14:28:39 2006 -0600
     5.3 @@ -0,0 +1,453 @@
     5.4 +#include <xen/config.h>
     5.5 +#include <asm/system.h>
     5.6 +#include <asm/pgalloc.h>
     5.7 +
     5.8 +#include <linux/efi.h>
     5.9 +#include <asm/pal.h>
    5.10 +#include <asm/sal.h>
    5.11 +
    5.12 +#include <public/sched.h>
    5.13 +#include "hpsim_ssc.h"
    5.14 +#include <asm/vcpu.h>
    5.15 +#include <asm/dom_fw.h>
    5.16 +
    5.17 +extern unsigned long running_on_sim;
    5.18 +
    5.19 +struct sal_ret_values
    5.20 +sal_emulator (long index, unsigned long in1, unsigned long in2,
    5.21 +	      unsigned long in3, unsigned long in4, unsigned long in5,
    5.22 +	      unsigned long in6, unsigned long in7)
    5.23 +{
    5.24 +	unsigned long r9  = 0;
    5.25 +	unsigned long r10 = 0;
    5.26 +	long r11 = 0;
    5.27 +	long status;
    5.28 +
    5.29 +	status = 0;
    5.30 +	switch (index) {
    5.31 +	    case SAL_FREQ_BASE:
    5.32 +		if (!running_on_sim)
    5.33 +			status = ia64_sal_freq_base(in1,&r9,&r10);
    5.34 +		else switch (in1) {
    5.35 +		      case SAL_FREQ_BASE_PLATFORM:
    5.36 +			r9 = 200000000;
    5.37 +			break;
    5.38 +
    5.39 +		      case SAL_FREQ_BASE_INTERVAL_TIMER:
    5.40 +			r9 = 700000000;
    5.41 +			break;
    5.42 +
    5.43 +		      case SAL_FREQ_BASE_REALTIME_CLOCK:
    5.44 +			r9 = 1;
    5.45 +			break;
    5.46 +
    5.47 +		      default:
    5.48 +			status = -1;
    5.49 +			break;
    5.50 +		}
    5.51 +		break;
    5.52 +	    case SAL_PCI_CONFIG_READ:
    5.53 +		if (current->domain == dom0) {
    5.54 +			u64 value;
    5.55 +			// note that args 2&3 are swapped!!
    5.56 +			status = ia64_sal_pci_config_read(in1,in3,in2,&value);
    5.57 +			r9 = value;
    5.58 +		}
    5.59 +		else
    5.60 +		     printf("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_READ\n");
    5.61 +		break;
    5.62 +	    case SAL_PCI_CONFIG_WRITE:
    5.63 +		if (current->domain == dom0) {
    5.64 +			if (((in1 & ~0xffffffffUL) && (in4 == 0)) ||
    5.65 +			    (in4 > 1) ||
    5.66 +			    (in2 > 8) || (in2 & (in2-1)))
    5.67 +				printf("*** SAL_PCI_CONF_WRITE?!?(adr=0x%lx,typ=0x%lx,sz=0x%lx,val=0x%lx)\n",
    5.68 +					in1,in4,in2,in3);
    5.69 +			// note that args are in a different order!!
    5.70 +			status = ia64_sal_pci_config_write(in1,in4,in2,in3);
    5.71 +		}
    5.72 +		else
    5.73 +		     printf("NON-PRIV DOMAIN CALLED SAL_PCI_CONFIG_WRITE\n");
    5.74 +		break;
    5.75 +	    case SAL_SET_VECTORS:
    5.76 + 		if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
    5.77 + 			if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
    5.78 + 				/* Sanity check: cs_length1 must be 0,
    5.79 + 				   second vector is reserved.  */
    5.80 + 				status = -2;
    5.81 + 			}
    5.82 + 			else {
    5.83 +				struct domain *d = current->domain;
    5.84 +				d->arch.boot_rdv_ip = in2;
    5.85 +				d->arch.boot_rdv_r1 = in3;
    5.86 +			}
    5.87 + 		}
    5.88 + 		else
    5.89 + 			printf("*** CALLED SAL_SET_VECTORS %lu.  IGNORED...\n",
    5.90 + 			       in1);
    5.91 +		break;
    5.92 +	    case SAL_GET_STATE_INFO:
    5.93 +		/* No more info.  */
    5.94 +		status = -5;
    5.95 +		r9 = 0;
    5.96 +		break;
    5.97 +	    case SAL_GET_STATE_INFO_SIZE:
    5.98 +		/* Return a dummy size.  */
    5.99 +		status = 0;
   5.100 +		r9 = 128;
   5.101 +		break;
   5.102 +	    case SAL_CLEAR_STATE_INFO:
   5.103 +		/* Noop.  */
   5.104 +		break;
   5.105 +	    case SAL_MC_RENDEZ:
   5.106 +		printf("*** CALLED SAL_MC_RENDEZ.  IGNORED...\n");
   5.107 +		break;
   5.108 +	    case SAL_MC_SET_PARAMS:
   5.109 +		printf("*** CALLED SAL_MC_SET_PARAMS.  IGNORED...\n");
   5.110 +		break;
   5.111 +	    case SAL_CACHE_FLUSH:
   5.112 +		if (1) {
   5.113 +			/*  Flush using SAL.
   5.114 +			    This method is faster but has a side effect on
   5.115 +			    other vcpu running on this cpu.  */
   5.116 +			status = ia64_sal_cache_flush (in1);
   5.117 +		}
   5.118 +		else {
   5.119 +			/*  Flush with fc all the domain.
   5.120 +			    This method is slower but has no side effects.  */
   5.121 +			domain_cache_flush (current->domain, in1 == 4 ? 1 : 0);
   5.122 +			status = 0;
   5.123 +		}
   5.124 +		break;
   5.125 +	    case SAL_CACHE_INIT:
   5.126 +		printf("*** CALLED SAL_CACHE_INIT.  IGNORED...\n");
   5.127 +		break;
   5.128 +	    case SAL_UPDATE_PAL:
   5.129 +		printf("*** CALLED SAL_UPDATE_PAL.  IGNORED...\n");
   5.130 +		break;
   5.131 +	    default:
   5.132 +		printf("*** CALLED SAL_ WITH UNKNOWN INDEX.  IGNORED...\n");
   5.133 +		status = -1;
   5.134 +		break;
   5.135 +	}
   5.136 +	return ((struct sal_ret_values) {status, r9, r10, r11});
   5.137 +}
   5.138 +
   5.139 +struct ia64_pal_retval
   5.140 +xen_pal_emulator(unsigned long index, u64 in1, u64 in2, u64 in3)
   5.141 +{
   5.142 +	unsigned long r9  = 0;
   5.143 +	unsigned long r10 = 0;
   5.144 +	unsigned long r11 = 0;
   5.145 +	long status = PAL_STATUS_UNIMPLEMENTED;
   5.146 +
   5.147 +	if (running_on_sim)
   5.148 +		return pal_emulator_static(index);
   5.149 +
   5.150 +	// pal code must be mapped by a TR when pal is called, however
   5.151 +	// calls are rare enough that we will map it lazily rather than
   5.152 +	// at every context switch
   5.153 +	//efi_map_pal_code();
   5.154 +	switch (index) {
   5.155 +	    case PAL_MEM_ATTRIB:
   5.156 +		status = ia64_pal_mem_attrib(&r9);
   5.157 +		break;
   5.158 +	    case PAL_FREQ_BASE:
   5.159 +		status = ia64_pal_freq_base(&r9);
   5.160 +		break;
   5.161 +	    case PAL_PROC_GET_FEATURES:
   5.162 +		status = ia64_pal_proc_get_features(&r9,&r10,&r11);
   5.163 +		break;
   5.164 +	    case PAL_BUS_GET_FEATURES:
   5.165 +		status = ia64_pal_bus_get_features(
   5.166 +				(pal_bus_features_u_t *) &r9,
   5.167 +				(pal_bus_features_u_t *) &r10,
   5.168 +				(pal_bus_features_u_t *) &r11);
   5.169 +		break;
   5.170 +	    case PAL_FREQ_RATIOS:
   5.171 +		status = ia64_pal_freq_ratios(
   5.172 +				(struct pal_freq_ratio *) &r9,
   5.173 +				(struct pal_freq_ratio *) &r10,
   5.174 +				(struct pal_freq_ratio *) &r11);
   5.175 +		break;
   5.176 +	    case PAL_PTCE_INFO:
   5.177 +		{
   5.178 +			// return hard-coded xen-specific values because ptc.e
   5.179 +			// is emulated on xen to always flush everything
   5.180 +			// these values result in only one ptc.e instruction
   5.181 +			status = 0; r9 = 0; r10 = (1L << 32) | 1L; r11 = 0;
   5.182 +		}
   5.183 +		break;
   5.184 +	    case PAL_VERSION:
   5.185 +		status = ia64_pal_version(
   5.186 +				(pal_version_u_t *) &r9,
   5.187 +				(pal_version_u_t *) &r10);
   5.188 +		break;
   5.189 +	    case PAL_VM_PAGE_SIZE:
   5.190 +		status = ia64_pal_vm_page_size(&r9,&r10);
   5.191 +		break;
   5.192 +	    case PAL_DEBUG_INFO:
   5.193 +		status = ia64_pal_debug_info(&r9,&r10);
   5.194 +		break;
   5.195 +	    case PAL_CACHE_SUMMARY:
   5.196 +		status = ia64_pal_cache_summary(&r9,&r10);
   5.197 +		break;
   5.198 +	    case PAL_VM_SUMMARY:
   5.199 +	        {
   5.200 +			/* Use xen-specific values.
   5.201 +			   hash_tag_id is somewhat random! */
   5.202 +			const pal_vm_info_1_u_t v1 =
   5.203 +				{.pal_vm_info_1_s =
   5.204 +				 { .vw = 1,
   5.205 +				   .phys_add_size = 44,
   5.206 +				   .key_size = 16,
   5.207 +				   .max_pkr = 15,
   5.208 +				   .hash_tag_id = 0x30,
   5.209 +				   .max_dtr_entry = NDTRS - 1,
   5.210 +				   .max_itr_entry = NITRS - 1,
   5.211 +#ifdef VHPT_GLOBAL
   5.212 +				   .max_unique_tcs = 3,
   5.213 +				   .num_tc_levels = 2
   5.214 +#else
   5.215 +				   .max_unique_tcs = 2,
   5.216 +				   .num_tc_levels = 1
   5.217 +#endif
   5.218 +				 }};
   5.219 +			const pal_vm_info_2_u_t v2 =
   5.220 +				{ .pal_vm_info_2_s =
   5.221 +				  { .impl_va_msb = 50,
   5.222 +				    .rid_size = current->domain->arch.rid_bits,
   5.223 +				    .reserved = 0 }};
   5.224 +			r9 = v1.pvi1_val;
   5.225 +			r10 = v2.pvi2_val;
   5.226 +			status = PAL_STATUS_SUCCESS;
   5.227 +		}
   5.228 +		break;
   5.229 +	    case PAL_VM_INFO:
   5.230 +#ifdef VHPT_GLOBAL
   5.231 +		if (in1 == 0 && in2 == 2) {
   5.232 +			/* Level 1: VHPT  */
   5.233 +			const pal_tc_info_u_t v =
   5.234 +				{ .pal_tc_info_s = {.num_sets = 128,
   5.235 +						    .associativity = 1,
   5.236 +						    .num_entries = 128,
   5.237 +						    .pf = 1,
   5.238 +						    .unified = 1,
   5.239 +						    .reduce_tr = 0,
   5.240 +						    .reserved = 0}};
   5.241 +			r9 = v.pti_val;
   5.242 +			/* Only support PAGE_SIZE tc.  */
   5.243 +			r10 = PAGE_SIZE;
   5.244 +			status = PAL_STATUS_SUCCESS;
   5.245 +		}
   5.246 +#endif
   5.247 +	        else if (
   5.248 +#ifdef VHPT_GLOBAL 
   5.249 +	                in1 == 1 /* Level 2. */
   5.250 +#else
   5.251 +			in1 == 0 /* Level 1. */
   5.252 +#endif
   5.253 +			 && (in2 == 1 || in2 == 2))
   5.254 +		{
   5.255 +			/* itlb/dtlb, 1 entry.  */
   5.256 +			const pal_tc_info_u_t v =
   5.257 +				{ .pal_tc_info_s = {.num_sets = 1,
   5.258 +						    .associativity = 1,
   5.259 +						    .num_entries = 1,
   5.260 +						    .pf = 1,
   5.261 +						    .unified = 0,
   5.262 +						    .reduce_tr = 0,
   5.263 +						    .reserved = 0}};
   5.264 +			r9 = v.pti_val;
   5.265 +			/* Only support PAGE_SIZE tc.  */
   5.266 +			r10 = PAGE_SIZE;
   5.267 +			status = PAL_STATUS_SUCCESS;
   5.268 +		}
   5.269 +	        else
   5.270 +			status = PAL_STATUS_EINVAL;
   5.271 +		break;
   5.272 +	    case PAL_RSE_INFO:
   5.273 +		status = ia64_pal_rse_info(
   5.274 +				&r9,
   5.275 +				(pal_hints_u_t *) &r10);
   5.276 +		break;
   5.277 +	    case PAL_REGISTER_INFO:
   5.278 +		status = ia64_pal_register_info(in1, &r9, &r10);
   5.279 +		break;
   5.280 +	    case PAL_CACHE_FLUSH:
   5.281 +		/* FIXME */
   5.282 +		printk("PAL_CACHE_FLUSH NOT IMPLEMENTED!\n");
   5.283 +		BUG();
   5.284 +		break;
   5.285 +	    case PAL_PERF_MON_INFO:
   5.286 +		{
   5.287 +			unsigned long pm_buffer[16];
   5.288 +			status = ia64_pal_perf_mon_info(
   5.289 +					pm_buffer,
   5.290 +					(pal_perf_mon_info_u_t *) &r9);
   5.291 +			if (status != 0) {
   5.292 +				while(1)
   5.293 +				printk("PAL_PERF_MON_INFO fails ret=%ld\n", status);
   5.294 +				break;
   5.295 +			}
   5.296 +			if (copy_to_user((void __user *)in1,pm_buffer,128)) {
   5.297 +				while(1)
   5.298 +				printk("xen_pal_emulator: PAL_PERF_MON_INFO "
   5.299 +					"can't copy to user!!!!\n");
   5.300 +				status = PAL_STATUS_UNIMPLEMENTED;
   5.301 +				break;
   5.302 +			}
   5.303 +		}
   5.304 +		break;
   5.305 +	    case PAL_CACHE_INFO:
   5.306 +		{
   5.307 +			pal_cache_config_info_t ci;
   5.308 +			status = ia64_pal_cache_config_info(in1,in2,&ci);
   5.309 +			if (status != 0) break;
   5.310 +			r9 = ci.pcci_info_1.pcci1_data;
   5.311 +			r10 = ci.pcci_info_2.pcci2_data;
   5.312 +		}
   5.313 +		break;
   5.314 +	    case PAL_VM_TR_READ:	/* FIXME: vcpu_get_tr?? */
   5.315 +		printk("PAL_VM_TR_READ NOT IMPLEMENTED, IGNORED!\n");
   5.316 +		break;
   5.317 +	    case PAL_HALT_INFO:
   5.318 +	        {
   5.319 +		    /* 1000 cycles to enter/leave low power state,
   5.320 +		       consumes 10 mW, implemented and cache/TLB coherent.  */
   5.321 +		    unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
   5.322 +			    | (1UL << 61) | (1UL << 60);
   5.323 +		    if (copy_to_user ((void *)in1, &res, sizeof (res)))
   5.324 +			    status = PAL_STATUS_EINVAL;    
   5.325 +		    else
   5.326 +			    status = PAL_STATUS_SUCCESS;
   5.327 +	        }
   5.328 +		break;
   5.329 +	    case PAL_HALT:
   5.330 +		    if (current->domain == dom0) {
   5.331 +			    printf ("Domain0 halts the machine\n");
   5.332 +			    (*efi.reset_system)(EFI_RESET_SHUTDOWN,0,0,NULL);
   5.333 +		    }
   5.334 +		    else
   5.335 +			    domain_shutdown (current->domain,
   5.336 +					     SHUTDOWN_poweroff);
   5.337 +		    break;
   5.338 +	    default:
   5.339 +		printk("xen_pal_emulator: UNIMPLEMENTED PAL CALL %lu!!!!\n",
   5.340 +				index);
   5.341 +		break;
   5.342 +	}
   5.343 +	return ((struct ia64_pal_retval) {status, r9, r10, r11});
   5.344 +}
   5.345 +
   5.346 +void
   5.347 +do_ssc(unsigned long ssc, struct pt_regs *regs)
   5.348 +{
   5.349 +	unsigned long arg0, arg1, arg2, arg3, retval;
   5.350 +	char buf[2];
   5.351 +/**/	static int last_fd, last_count;	// FIXME FIXME FIXME
   5.352 +/**/					// BROKEN FOR MULTIPLE DOMAINS & SMP
   5.353 +/**/	struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
   5.354 +
   5.355 +	arg0 = vcpu_get_gr(current,32);
   5.356 +	switch(ssc) {
   5.357 +	    case SSC_PUTCHAR:
   5.358 +		buf[0] = arg0;
   5.359 +		buf[1] = '\0';
   5.360 +		printf(buf);
   5.361 +		break;
   5.362 +	    case SSC_GETCHAR:
   5.363 +		retval = ia64_ssc(0,0,0,0,ssc);
   5.364 +		vcpu_set_gr(current,8,retval,0);
   5.365 +		break;
   5.366 +	    case SSC_WAIT_COMPLETION:
   5.367 +		if (arg0) {	// metaphysical address
   5.368 +
   5.369 +			arg0 = translate_domain_mpaddr(arg0);
   5.370 +/**/			stat = (struct ssc_disk_stat *)__va(arg0);
   5.371 +///**/			if (stat->fd == last_fd) stat->count = last_count;
   5.372 +/**/			stat->count = last_count;
   5.373 +//if (last_count >= PAGE_SIZE) printf("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
   5.374 +///**/			retval = ia64_ssc(arg0,0,0,0,ssc);
   5.375 +/**/			retval = 0;
   5.376 +		}
   5.377 +		else retval = -1L;
   5.378 +		vcpu_set_gr(current,8,retval,0);
   5.379 +		break;
   5.380 +	    case SSC_OPEN:
   5.381 +		arg1 = vcpu_get_gr(current,33);	// access rights
   5.382 +if (!running_on_sim) { printf("SSC_OPEN, not implemented on hardware.  (ignoring...)\n"); arg0 = 0; }
   5.383 +		if (arg0) {	// metaphysical address
   5.384 +			arg0 = translate_domain_mpaddr(arg0);
   5.385 +			retval = ia64_ssc(arg0,arg1,0,0,ssc);
   5.386 +		}
   5.387 +		else retval = -1L;
   5.388 +		vcpu_set_gr(current,8,retval,0);
   5.389 +		break;
   5.390 +	    case SSC_WRITE:
   5.391 +	    case SSC_READ:
   5.392 +//if (ssc == SSC_WRITE) printf("DOING AN SSC_WRITE\n");
   5.393 +		arg1 = vcpu_get_gr(current,33);
   5.394 +		arg2 = vcpu_get_gr(current,34);
   5.395 +		arg3 = vcpu_get_gr(current,35);
   5.396 +		if (arg2) {	// metaphysical address of descriptor
   5.397 +			struct ssc_disk_req *req;
   5.398 +			unsigned long mpaddr;
   5.399 +			long len;
   5.400 +
   5.401 +			arg2 = translate_domain_mpaddr(arg2);
   5.402 +			req = (struct ssc_disk_req *) __va(arg2);
   5.403 +			req->len &= 0xffffffffL;	// avoid strange bug
   5.404 +			len = req->len;
   5.405 +/**/			last_fd = arg1;
   5.406 +/**/			last_count = len;
   5.407 +			mpaddr = req->addr;
   5.408 +//if (last_count >= PAGE_SIZE) printf("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
   5.409 +			retval = 0;
   5.410 +			if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
   5.411 +				// do partial page first
   5.412 +				req->addr = translate_domain_mpaddr(mpaddr);
   5.413 +				req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
   5.414 +				len -= req->len; mpaddr += req->len;
   5.415 +				retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
   5.416 +				arg3 += req->len; // file offset
   5.417 +/**/				last_stat.fd = last_fd;
   5.418 +/**/				(void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
   5.419 +//if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
   5.420 +			}
   5.421 +			if (retval >= 0) while (len > 0) {
   5.422 +				req->addr = translate_domain_mpaddr(mpaddr);
   5.423 +				req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
   5.424 +				len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
   5.425 +				retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
   5.426 +				arg3 += req->len; // file offset
   5.427 +// TEMP REMOVED AGAIN				arg3 += req->len; // file offset
   5.428 +/**/				last_stat.fd = last_fd;
   5.429 +/**/				(void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
   5.430 +//if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
   5.431 +			}
   5.432 +			// set it back to the original value
   5.433 +			req->len = last_count;
   5.434 +		}
   5.435 +		else retval = -1L;
   5.436 +		vcpu_set_gr(current,8,retval,0);
   5.437 +//if (last_count >= PAGE_SIZE) printf("retval=%x\n",retval);
   5.438 +		break;
   5.439 +	    case SSC_CONNECT_INTERRUPT:
   5.440 +		arg1 = vcpu_get_gr(current,33);
   5.441 +		arg2 = vcpu_get_gr(current,34);
   5.442 +		arg3 = vcpu_get_gr(current,35);
   5.443 +		if (!running_on_sim) { printf("SSC_CONNECT_INTERRUPT, not implemented on hardware.  (ignoring...)\n"); break; }
   5.444 +		(void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
   5.445 +		break;
   5.446 +	    case SSC_NETDEV_PROBE:
   5.447 +		vcpu_set_gr(current,8,-1L,0);
   5.448 +		break;
   5.449 +	    default:
   5.450 +		printf("ia64_handle_break: bad ssc code %lx, iip=0x%lx, b0=0x%lx... spinning\n",
   5.451 +			ssc, regs->cr_iip, regs->b0);
   5.452 +		while(1);
   5.453 +		break;
   5.454 +	}
   5.455 +	vcpu_increment_iip(current);
   5.456 +}
     6.1 --- a/xen/arch/ia64/xen/hypercall.c	Mon Jun 05 14:23:57 2006 -0600
     6.2 +++ b/xen/arch/ia64/xen/hypercall.c	Mon Jun 05 14:28:39 2006 -0600
     6.3 @@ -334,6 +334,58 @@ ia64_hypercall (struct pt_regs *regs)
     6.4  	    return xen_hypercall (regs);
     6.5  }
     6.6  
     6.7 +unsigned long hypercall_create_continuation(
     6.8 +	unsigned int op, const char *format, ...)
     6.9 +{
    6.10 +    struct mc_state *mcs = &mc_state[smp_processor_id()];
    6.11 +    struct vcpu *v = current;
    6.12 +    const char *p = format;
    6.13 +    unsigned long arg;
    6.14 +    unsigned int i;
    6.15 +    va_list args;
    6.16 +
    6.17 +    va_start(args, format);
    6.18 +    if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
    6.19 +	panic("PREEMPT happen in multicall\n");	// Not support yet
    6.20 +    } else {
    6.21 +	vcpu_set_gr(v, 2, op, 0);
    6.22 +	for ( i = 0; *p != '\0'; i++) {
    6.23 +            switch ( *p++ )
    6.24 +            {
    6.25 +            case 'i':
    6.26 +                arg = (unsigned long)va_arg(args, unsigned int);
    6.27 +                break;
    6.28 +            case 'l':
    6.29 +                arg = (unsigned long)va_arg(args, unsigned long);
    6.30 +                break;
    6.31 +            case 'h':
    6.32 +                arg = (unsigned long)va_arg(args, void *);
    6.33 +                break;
    6.34 +            default:
    6.35 +                arg = 0;
    6.36 +                BUG();
    6.37 +            }
    6.38 +	    switch (i) {
    6.39 +	    case 0: vcpu_set_gr(v, 14, arg, 0);
    6.40 +		    break;
    6.41 +	    case 1: vcpu_set_gr(v, 15, arg, 0);
    6.42 +		    break;
    6.43 +	    case 2: vcpu_set_gr(v, 16, arg, 0);
    6.44 +		    break;
    6.45 +	    case 3: vcpu_set_gr(v, 17, arg, 0);
    6.46 +		    break;
    6.47 +	    case 4: vcpu_set_gr(v, 18, arg, 0);
    6.48 +		    break;
    6.49 +	    default: panic("Too many args for hypercall continuation\n");
    6.50 +		    break;
    6.51 +	    }
    6.52 +	}
    6.53 +    }
    6.54 +    v->arch.hypercall_continuation = 1;
    6.55 +    va_end(args);
    6.56 +    return op;
    6.57 +}
    6.58 +
    6.59  /* Need make this function common */
    6.60  extern int
    6.61  iosapic_guest_read(
     7.1 --- a/xen/arch/ia64/xen/mm.c	Mon Jun 05 14:23:57 2006 -0600
     7.2 +++ b/xen/arch/ia64/xen/mm.c	Mon Jun 05 14:28:39 2006 -0600
     7.3 @@ -14,6 +14,7 @@
     7.4  #include <asm/mm.h>
     7.5  #include <asm/pgalloc.h>
     7.6  #include <asm/vhpt.h>
     7.7 +#include <asm/vcpu.h>
     7.8  #include <linux/efi.h>
     7.9  
    7.10  #ifndef CONFIG_XEN_IA64_DOM0_VP
    7.11 @@ -248,6 +249,110 @@ share_xen_page_with_privileged_guests(st
    7.12      share_xen_page_with_guest(page, dom_xen, readonly);
    7.13  }
    7.14  
    7.15 +unsigned long
    7.16 +gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
    7.17 +{
    7.18 +	unsigned long pte;
    7.19 +
    7.20 +#ifndef CONFIG_XEN_IA64_DOM0_VP
    7.21 +	if (d == dom0)
    7.22 +		return(gpfn);
    7.23 +#endif
    7.24 +	pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT);
    7.25 +	if (!pte) {
    7.26 +		panic("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
    7.27 +	}
    7.28 +	return ((pte & _PFN_MASK) >> PAGE_SHIFT);
    7.29 +}
    7.30 +
    7.31 +// given a domain virtual address, pte and pagesize, extract the metaphysical
    7.32 +// address, convert the pte for a physical address for (possibly different)
    7.33 +// Xen PAGE_SIZE and return modified pte.  (NOTE: TLB insert should use
    7.34 +// PAGE_SIZE!)
    7.35 +u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps)
    7.36 +{
    7.37 +	struct domain *d = current->domain;
    7.38 +	ia64_itir_t itir = {.itir = itir__};
    7.39 +	u64 mask, mpaddr, pteval2;
    7.40 +	u64 arflags;
    7.41 +	u64 arflags2;
    7.42 +
    7.43 +	pteval &= ((1UL << 53) - 1);// ignore [63:53] bits
    7.44 +
    7.45 +	// FIXME address had better be pre-validated on insert
    7.46 +	mask = ~itir_mask(itir.itir);
    7.47 +	mpaddr = (((pteval & ~_PAGE_ED) & _PAGE_PPN_MASK) & ~mask) |
    7.48 +	         (address & mask);
    7.49 +#ifdef CONFIG_XEN_IA64_DOM0_VP
    7.50 +	if (itir.ps > PAGE_SHIFT) {
    7.51 +		itir.ps = PAGE_SHIFT;
    7.52 +	}
    7.53 +#endif
    7.54 +	*logps = itir.ps;
    7.55 +#ifndef CONFIG_XEN_IA64_DOM0_VP
    7.56 +	if (d == dom0) {
    7.57 +		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
    7.58 +			/*
    7.59 +			printk("translate_domain_pte: out-of-bounds dom0 mpaddr 0x%lx! itc=%lx...\n",
    7.60 +				mpaddr, ia64_get_itc());
    7.61 +			*/
    7.62 +			tdpfoo();
    7.63 +		}
    7.64 +	}
    7.65 +	else if ((mpaddr >> PAGE_SHIFT) > d->max_pages) {
    7.66 +		/* Address beyond the limit.  However the grant table is
    7.67 +		   also beyond the limit.  Display a message if not in the
    7.68 +		   grant table.  */
    7.69 +		if (mpaddr >= IA64_GRANT_TABLE_PADDR
    7.70 +		    && mpaddr < (IA64_GRANT_TABLE_PADDR 
    7.71 +				 + (ORDER_GRANT_FRAMES << PAGE_SHIFT)))
    7.72 +			printf("translate_domain_pte: bad mpa=0x%lx (> 0x%lx),"
    7.73 +			       "vadr=0x%lx,pteval=0x%lx,itir=0x%lx\n",
    7.74 +			       mpaddr, (unsigned long)d->max_pages<<PAGE_SHIFT,
    7.75 +			       address, pteval, itir.itir);
    7.76 +		tdpfoo();
    7.77 +	}
    7.78 +#endif
    7.79 +	pteval2 = lookup_domain_mpa(d,mpaddr);
    7.80 +	arflags  = pteval  & _PAGE_AR_MASK;
    7.81 +	arflags2 = pteval2 & _PAGE_AR_MASK;
    7.82 +	if (arflags != _PAGE_AR_R && arflags2 == _PAGE_AR_R) {
    7.83 +#if 0
    7.84 +		DPRINTK("%s:%d "
    7.85 +		        "pteval 0x%lx arflag 0x%lx address 0x%lx itir 0x%lx "
    7.86 +		        "pteval2 0x%lx arflags2 0x%lx mpaddr 0x%lx\n",
    7.87 +		        __func__, __LINE__,
    7.88 +		        pteval, arflags, address, itir__,
    7.89 +		        pteval2, arflags2, mpaddr);
    7.90 +#endif
    7.91 +		pteval = (pteval & ~_PAGE_AR_MASK) | _PAGE_AR_R;
    7.92 +}
    7.93 +
    7.94 +	pteval2 &= _PAGE_PPN_MASK; // ignore non-addr bits
    7.95 +	pteval2 |= (pteval & _PAGE_ED);
    7.96 +	pteval2 |= _PAGE_PL_2; // force PL0->2 (PL3 is unaffected)
    7.97 +	pteval2 = (pteval & ~_PAGE_PPN_MASK) | pteval2;
    7.98 +	return pteval2;
    7.99 +}
   7.100 +
   7.101 +// given a current domain metaphysical address, return the physical address
   7.102 +unsigned long translate_domain_mpaddr(unsigned long mpaddr)
   7.103 +{
   7.104 +	unsigned long pteval;
   7.105 +
   7.106 +#ifndef CONFIG_XEN_IA64_DOM0_VP
   7.107 +	if (current->domain == dom0) {
   7.108 +		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
   7.109 +			printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr 0x%lx! continuing...\n",
   7.110 +				mpaddr);
   7.111 +			tdpfoo();
   7.112 +		}
   7.113 +	}
   7.114 +#endif
   7.115 +	pteval = lookup_domain_mpa(current->domain,mpaddr);
   7.116 +	return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
   7.117 +}
   7.118 +
   7.119  //XXX !xxx_present() should be used instread of !xxx_none()?
   7.120  static pte_t*
   7.121  lookup_alloc_domain_pte(struct domain* d, unsigned long mpaddr)
   7.122 @@ -1036,6 +1141,238 @@ void domain_cache_flush (struct domain *
   7.123      //printf ("domain_cache_flush: %d %d pages\n", d->domain_id, nbr_page);
   7.124  }
   7.125  
   7.126 +#ifdef VERBOSE
   7.127 +#define MEM_LOG(_f, _a...)                           \
   7.128 +  printk("DOM%u: (file=mm.c, line=%d) " _f "\n", \
   7.129 +         current->domain->domain_id , __LINE__ , ## _a )
   7.130 +#else
   7.131 +#define MEM_LOG(_f, _a...) ((void)0)
   7.132 +#endif
   7.133 +
   7.134 +static void free_page_type(struct page_info *page, u32 type)
   7.135 +{
   7.136 +}
   7.137 +
   7.138 +static int alloc_page_type(struct page_info *page, u32 type)
   7.139 +{
   7.140 +	return 1;
   7.141 +}
   7.142 +
   7.143 +unsigned long __get_free_pages(unsigned int mask, unsigned int order)
   7.144 +{
   7.145 +	void *p = alloc_xenheap_pages(order);
   7.146 +
   7.147 +	memset(p,0,PAGE_SIZE<<order);
   7.148 +	return (unsigned long)p;
   7.149 +}
   7.150 +
   7.151 +void __free_pages(struct page_info *page, unsigned int order)
   7.152 +{
   7.153 +	if (order) BUG();
   7.154 +	free_xenheap_page(page);
   7.155 +}
   7.156 +
   7.157 +void *pgtable_quicklist_alloc(void)
   7.158 +{
   7.159 +    void *p;
   7.160 +    p = alloc_xenheap_pages(0);
   7.161 +    if (p)
   7.162 +        clear_page(p);
   7.163 +    return p;
   7.164 +}
   7.165 +
   7.166 +void pgtable_quicklist_free(void *pgtable_entry)
   7.167 +{
   7.168 +	free_xenheap_page(pgtable_entry);
   7.169 +}
   7.170 +
   7.171 +void cleanup_writable_pagetable(struct domain *d)
   7.172 +{
   7.173 +  return;
   7.174 +}
   7.175 +
   7.176 +void put_page_type(struct page_info *page)
   7.177 +{
   7.178 +    u32 nx, x, y = page->u.inuse.type_info;
   7.179 +
   7.180 + again:
   7.181 +    do {
   7.182 +        x  = y;
   7.183 +        nx = x - 1;
   7.184 +
   7.185 +        ASSERT((x & PGT_count_mask) != 0);
   7.186 +
   7.187 +        /*
   7.188 +         * The page should always be validated while a reference is held. The
   7.189 +         * exception is during domain destruction, when we forcibly invalidate
   7.190 +         * page-table pages if we detect a referential loop.
   7.191 +         * See domain.c:relinquish_list().
   7.192 +         */
   7.193 +        ASSERT((x & PGT_validated) ||
   7.194 +               test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags));
   7.195 +
   7.196 +        if ( unlikely((nx & PGT_count_mask) == 0) )
   7.197 +        {
   7.198 +            /* Record TLB information for flush later. Races are harmless. */
   7.199 +            page->tlbflush_timestamp = tlbflush_current_time();
   7.200 +
   7.201 +            if ( unlikely((nx & PGT_type_mask) <= PGT_l4_page_table) &&
   7.202 +                 likely(nx & PGT_validated) )
   7.203 +            {
   7.204 +                /*
   7.205 +                 * Page-table pages must be unvalidated when count is zero. The
   7.206 +                 * 'free' is safe because the refcnt is non-zero and validated
   7.207 +                 * bit is clear => other ops will spin or fail.
   7.208 +                 */
   7.209 +                if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x,
   7.210 +                                           x & ~PGT_validated)) != x) )
   7.211 +                    goto again;
   7.212 +                /* We cleared the 'valid bit' so we do the clean up. */
   7.213 +                free_page_type(page, x);
   7.214 +                /* Carry on, but with the 'valid bit' now clear. */
   7.215 +                x  &= ~PGT_validated;
   7.216 +                nx &= ~PGT_validated;
   7.217 +            }
   7.218 +        }
   7.219 +        else if ( unlikely(((nx & (PGT_pinned | PGT_count_mask)) ==
   7.220 +                            (PGT_pinned | 1)) &&
   7.221 +                           ((nx & PGT_type_mask) != PGT_writable_page)) )
   7.222 +        {
   7.223 +            /* Page is now only pinned. Make the back pointer mutable again. */
   7.224 +            nx |= PGT_va_mutable;
   7.225 +        }
   7.226 +    }
   7.227 +    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
   7.228 +}
   7.229 +
   7.230 +
   7.231 +int get_page_type(struct page_info *page, u32 type)
   7.232 +{
   7.233 +    u32 nx, x, y = page->u.inuse.type_info;
   7.234 +
   7.235 + again:
   7.236 +    do {
   7.237 +        x  = y;
   7.238 +        nx = x + 1;
   7.239 +        if ( unlikely((nx & PGT_count_mask) == 0) )
   7.240 +        {
   7.241 +            MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
   7.242 +            return 0;
   7.243 +        }
   7.244 +        else if ( unlikely((x & PGT_count_mask) == 0) )
   7.245 +        {
   7.246 +            if ( (x & (PGT_type_mask|PGT_va_mask)) != type )
   7.247 +            {
   7.248 +                if ( (x & PGT_type_mask) != (type & PGT_type_mask) )
   7.249 +                {
   7.250 +                    /*
   7.251 +                     * On type change we check to flush stale TLB
   7.252 +                     * entries. This may be unnecessary (e.g., page
   7.253 +                     * was GDT/LDT) but those circumstances should be
   7.254 +                     * very rare.
   7.255 +                     */
   7.256 +                    cpumask_t mask =
   7.257 +                        page_get_owner(page)->domain_dirty_cpumask;
   7.258 +                    tlbflush_filter(mask, page->tlbflush_timestamp);
   7.259 +
   7.260 +                    if ( unlikely(!cpus_empty(mask)) )
   7.261 +                    {
   7.262 +                        perfc_incrc(need_flush_tlb_flush);
   7.263 +                        flush_tlb_mask(mask);
   7.264 +                    }
   7.265 +                }
   7.266 +
   7.267 +                /* We lose existing type, back pointer, and validity. */
   7.268 +                nx &= ~(PGT_type_mask | PGT_va_mask | PGT_validated);
   7.269 +                nx |= type;
   7.270 +
   7.271 +                /* No special validation needed for writable pages. */
   7.272 +                /* Page tables and GDT/LDT need to be scanned for validity. */
   7.273 +                if ( type == PGT_writable_page )
   7.274 +                    nx |= PGT_validated;
   7.275 +            }
   7.276 +        }
   7.277 +        else
   7.278 +        {
   7.279 +            if ( unlikely((x & (PGT_type_mask|PGT_va_mask)) != type) )
   7.280 +            {
   7.281 +                if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask) ) )
   7.282 +                {
   7.283 +                    if ( current->domain == page_get_owner(page) )
   7.284 +                    {
   7.285 +                        /*
   7.286 +                         * This ensures functions like set_gdt() see up-to-date
   7.287 +                         * type info without needing to clean up writable p.t.
   7.288 +                         * state on the fast path.
   7.289 +                         */
   7.290 +                        LOCK_BIGLOCK(current->domain);
   7.291 +                        cleanup_writable_pagetable(current->domain);
   7.292 +                        y = page->u.inuse.type_info;
   7.293 +                        UNLOCK_BIGLOCK(current->domain);
   7.294 +                        /* Can we make progress now? */
   7.295 +                        if ( ((y & PGT_type_mask) == (type & PGT_type_mask)) ||
   7.296 +                             ((y & PGT_count_mask) == 0) )
   7.297 +                            goto again;
   7.298 +                    }
   7.299 +                    if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
   7.300 +                         ((type & PGT_type_mask) != PGT_l1_page_table) )
   7.301 +                        MEM_LOG("Bad type (saw %08x != exp %08x) "
   7.302 +                                "for mfn %016lx (pfn %016lx)",
   7.303 +                                x, type, page_to_mfn(page),
   7.304 +                                get_gpfn_from_mfn(page_to_mfn(page)));
   7.305 +                    return 0;
   7.306 +                }
   7.307 +                else if ( (x & PGT_va_mask) == PGT_va_mutable )
   7.308 +                {
   7.309 +                    /* The va backpointer is mutable, hence we update it. */
   7.310 +                    nx &= ~PGT_va_mask;
   7.311 +                    nx |= type; /* we know the actual type is correct */
   7.312 +                }
   7.313 +                else if ( ((type & PGT_va_mask) != PGT_va_mutable) &&
   7.314 +                          ((type & PGT_va_mask) != (x & PGT_va_mask)) )
   7.315 +                {
   7.316 +#ifdef CONFIG_X86_PAE
   7.317 +                    /* We use backptr as extra typing. Cannot be unknown. */
   7.318 +                    if ( (type & PGT_type_mask) == PGT_l2_page_table )
   7.319 +                        return 0;
   7.320 +#endif
   7.321 +                    /* This table is possibly mapped at multiple locations. */
   7.322 +                    nx &= ~PGT_va_mask;
   7.323 +                    nx |= PGT_va_unknown;
   7.324 +                }
   7.325 +            }
   7.326 +            if ( unlikely(!(x & PGT_validated)) )
   7.327 +            {
   7.328 +                /* Someone else is updating validation of this page. Wait... */
   7.329 +                while ( (y = page->u.inuse.type_info) == x )
   7.330 +                    cpu_relax();
   7.331 +                goto again;
   7.332 +            }
   7.333 +        }
   7.334 +    }
   7.335 +    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
   7.336 +
   7.337 +    if ( unlikely(!(nx & PGT_validated)) )
   7.338 +    {
   7.339 +        /* Try to validate page type; drop the new reference on failure. */
   7.340 +        if ( unlikely(!alloc_page_type(page, type)) )
   7.341 +        {
   7.342 +            MEM_LOG("Error while validating mfn %lx (pfn %lx) for type %08x"
   7.343 +                    ": caf=%08x taf=%" PRtype_info,
   7.344 +                    page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)),
   7.345 +                    type, page->count_info, page->u.inuse.type_info);
    7.346 +            /* No one else can get a reference. We hold the only ref. */
   7.347 +            page->u.inuse.type_info = 0;
   7.348 +            return 0;
   7.349 +        }
   7.350 +
    7.351 +        /* No one else is updating simultaneously. */
   7.352 +        __set_bit(_PGT_validated, &page->u.inuse.type_info);
   7.353 +    }
   7.354 +
   7.355 +    return 1;
   7.356 +}
   7.357 +
   7.358  /*
   7.359   * Local variables:
   7.360   * mode: C
     8.1 --- a/xen/arch/ia64/xen/process.c	Mon Jun 05 14:23:57 2006 -0600
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,949 +0,0 @@
     8.4 -
     8.5 -/*
     8.6 - * Miscellaneous process/domain related routines
     8.7 - * 
     8.8 - * Copyright (C) 2004 Hewlett-Packard Co.
     8.9 - *	Dan Magenheimer (dan.magenheimer@hp.com)
    8.10 - *
    8.11 - */
    8.12 -
    8.13 -#include <xen/config.h>
    8.14 -#include <xen/lib.h>
    8.15 -#include <xen/errno.h>
    8.16 -#include <xen/sched.h>
    8.17 -#include <xen/smp.h>
    8.18 -#include <asm/ptrace.h>
    8.19 -#include <xen/delay.h>
    8.20 -
    8.21 -#include <asm/sal.h>	/* FOR struct ia64_sal_retval */
    8.22 -
    8.23 -#include <asm/system.h>
    8.24 -#include <asm/io.h>
    8.25 -#include <asm/processor.h>
    8.26 -#include <asm/desc.h>
    8.27 -//#include <asm/ldt.h>
    8.28 -#include <xen/irq.h>
    8.29 -#include <xen/event.h>
    8.30 -#include <asm/regionreg.h>
    8.31 -#include <asm/privop.h>
    8.32 -#include <asm/vcpu.h>
    8.33 -#include <asm/ia64_int.h>
    8.34 -#include <asm/dom_fw.h>
    8.35 -#include <asm/vhpt.h>
    8.36 -#include "hpsim_ssc.h"
    8.37 -#include <xen/multicall.h>
    8.38 -#include <asm/debugger.h>
    8.39 -#include <asm/fpswa.h>
    8.40 -
    8.41 -extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
    8.42 -/* FIXME: where these declarations shold be there ? */
    8.43 -extern void panic_domain(struct pt_regs *, const char *, ...);
    8.44 -extern long platform_is_hp_ski(void);
    8.45 -extern int ia64_hyperprivop(unsigned long, REGS *);
    8.46 -extern IA64FAULT ia64_hypercall(struct pt_regs *regs);
    8.47 -extern void vmx_do_launch(struct vcpu *);
    8.48 -extern unsigned long lookup_domain_mpa(struct domain *,unsigned long);
    8.49 -
    8.50 -#define IA64_PSR_CPL1	(__IA64_UL(1) << IA64_PSR_CPL1_BIT)
    8.51 -// note IA64_PSR_PK removed from following, why is this necessary?
    8.52 -#define	DELIVER_PSR_SET	(IA64_PSR_IC | IA64_PSR_I | \
    8.53 -			IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
    8.54 -			IA64_PSR_IT | IA64_PSR_BN)
    8.55 -
    8.56 -#define	DELIVER_PSR_CLR	(IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
    8.57 -			IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI |	\
    8.58 -			IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
    8.59 -			IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
    8.60 -			IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
    8.61 -			IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
    8.62 -
    8.63 -#include <xen/sched-if.h>
    8.64 -
    8.65 -void schedule_tail(struct vcpu *prev)
    8.66 -{
    8.67 -	extern char ia64_ivt;
    8.68 -	context_saved(prev);
    8.69 -
    8.70 -	if (VMX_DOMAIN(current)) {
    8.71 -		vmx_do_launch(current);
    8.72 -	} else {
    8.73 -		ia64_set_iva(&ia64_ivt);
    8.74 -        	ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
    8.75 -		        VHPT_ENABLED);
    8.76 -		load_region_regs(current);
    8.77 -		vcpu_load_kernel_regs(current);
    8.78 -	}
    8.79 -}
    8.80 -
    8.81 -void tdpfoo(void) { }
    8.82 -
    8.83 -// given a domain virtual address, pte and pagesize, extract the metaphysical
    8.84 -// address, convert the pte for a physical address for (possibly different)
    8.85 -// Xen PAGE_SIZE and return modified pte.  (NOTE: TLB insert should use
    8.86 -// PAGE_SIZE!)
    8.87 -u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps)
    8.88 -{
    8.89 -	struct domain *d = current->domain;
    8.90 -	ia64_itir_t itir = {.itir = itir__};
    8.91 -	u64 mask, mpaddr, pteval2;
    8.92 -	u64 arflags;
    8.93 -	u64 arflags2;
    8.94 -
    8.95 -	pteval &= ((1UL << 53) - 1);// ignore [63:53] bits
    8.96 -
    8.97 -	// FIXME address had better be pre-validated on insert
    8.98 -	mask = ~itir_mask(itir.itir);
    8.99 -	mpaddr = (((pteval & ~_PAGE_ED) & _PAGE_PPN_MASK) & ~mask) |
   8.100 -	         (address & mask);
   8.101 -#ifdef CONFIG_XEN_IA64_DOM0_VP
   8.102 -	if (itir.ps > PAGE_SHIFT) {
   8.103 -		itir.ps = PAGE_SHIFT;
   8.104 -	}
   8.105 -#endif
   8.106 -	*logps = itir.ps;
   8.107 -#ifndef CONFIG_XEN_IA64_DOM0_VP
   8.108 -	if (d == dom0) {
   8.109 -		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
   8.110 -			/*
   8.111 -			printk("translate_domain_pte: out-of-bounds dom0 mpaddr 0x%lx! itc=%lx...\n",
   8.112 -				mpaddr, ia64_get_itc());
   8.113 -			*/
   8.114 -			tdpfoo();
   8.115 -		}
   8.116 -	}
   8.117 -	else if ((mpaddr >> PAGE_SHIFT) > d->max_pages) {
   8.118 -		/* Address beyond the limit.  However the grant table is
   8.119 -		   also beyond the limit.  Display a message if not in the
   8.120 -		   grant table.  */
   8.121 -		if (mpaddr >= IA64_GRANT_TABLE_PADDR
   8.122 -		    && mpaddr < (IA64_GRANT_TABLE_PADDR 
   8.123 -				 + (ORDER_GRANT_FRAMES << PAGE_SHIFT)))
   8.124 -			printf("translate_domain_pte: bad mpa=0x%lx (> 0x%lx),"
   8.125 -			       "vadr=0x%lx,pteval=0x%lx,itir=0x%lx\n",
   8.126 -			       mpaddr, (unsigned long)d->max_pages<<PAGE_SHIFT,
   8.127 -			       address, pteval, itir.itir);
   8.128 -		tdpfoo();
   8.129 -	}
   8.130 -#endif
   8.131 -	pteval2 = lookup_domain_mpa(d,mpaddr);
   8.132 -	arflags  = pteval  & _PAGE_AR_MASK;
   8.133 -	arflags2 = pteval2 & _PAGE_AR_MASK;
   8.134 -	if (arflags != _PAGE_AR_R && arflags2 == _PAGE_AR_R) {
   8.135 -#if 0
   8.136 -		DPRINTK("%s:%d "
   8.137 -		        "pteval 0x%lx arflag 0x%lx address 0x%lx itir 0x%lx "
   8.138 -		        "pteval2 0x%lx arflags2 0x%lx mpaddr 0x%lx\n",
   8.139 -		        __func__, __LINE__,
   8.140 -		        pteval, arflags, address, itir__,
   8.141 -		        pteval2, arflags2, mpaddr);
   8.142 -#endif
   8.143 -		pteval = (pteval & ~_PAGE_AR_MASK) | _PAGE_AR_R;
   8.144 -}
   8.145 -
   8.146 -	pteval2 &= _PAGE_PPN_MASK; // ignore non-addr bits
   8.147 -	pteval2 |= (pteval & _PAGE_ED);
   8.148 -	pteval2 |= _PAGE_PL_2; // force PL0->2 (PL3 is unaffected)
   8.149 -	pteval2 = (pteval & ~_PAGE_PPN_MASK) | pteval2;
   8.150 -	return pteval2;
   8.151 -}
   8.152 -
   8.153 -// given a current domain metaphysical address, return the physical address
   8.154 -unsigned long translate_domain_mpaddr(unsigned long mpaddr)
   8.155 -{
   8.156 -	unsigned long pteval;
   8.157 -
   8.158 -#ifndef CONFIG_XEN_IA64_DOM0_VP
   8.159 -	if (current->domain == dom0) {
   8.160 -		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
   8.161 -			printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr 0x%lx! continuing...\n",
   8.162 -				mpaddr);
   8.163 -			tdpfoo();
   8.164 -		}
   8.165 -	}
   8.166 -#endif
   8.167 -	pteval = lookup_domain_mpa(current->domain,mpaddr);
   8.168 -	return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
   8.169 -}
   8.170 -
   8.171 -unsigned long slow_reflect_count[0x80] = { 0 };
   8.172 -unsigned long fast_reflect_count[0x80] = { 0 };
   8.173 -
   8.174 -#define inc_slow_reflect_count(vec) slow_reflect_count[vec>>8]++;
   8.175 -
   8.176 -void zero_reflect_counts(void)
   8.177 -{
   8.178 -	int i;
   8.179 -	for (i=0; i<0x80; i++) slow_reflect_count[i] = 0;
   8.180 -	for (i=0; i<0x80; i++) fast_reflect_count[i] = 0;
   8.181 -}
   8.182 -
   8.183 -int dump_reflect_counts(char *buf)
   8.184 -{
   8.185 -	int i,j,cnt;
   8.186 -	char *s = buf;
   8.187 -
   8.188 -	s += sprintf(s,"Slow reflections by vector:\n");
   8.189 -	for (i = 0, j = 0; i < 0x80; i++) {
   8.190 -		if ( (cnt = slow_reflect_count[i]) != 0 ) {
   8.191 -			s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
   8.192 -			if ((j++ & 3) == 3) s += sprintf(s,"\n");
   8.193 -		}
   8.194 -	}
   8.195 -	if (j & 3) s += sprintf(s,"\n");
   8.196 -	s += sprintf(s,"Fast reflections by vector:\n");
   8.197 -	for (i = 0, j = 0; i < 0x80; i++) {
   8.198 -		if ( (cnt = fast_reflect_count[i]) != 0 ) {
   8.199 -			s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
   8.200 -			if ((j++ & 3) == 3) s += sprintf(s,"\n");
   8.201 -		}
   8.202 -	}
   8.203 -	if (j & 3) s += sprintf(s,"\n");
   8.204 -	return s - buf;
   8.205 -}
   8.206 -
   8.207 -// should never panic domain... if it does, stack may have been overrun
   8.208 -void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
   8.209 -{
   8.210 -	struct vcpu *v = current;
   8.211 -
   8.212 -	if (!(PSCB(v,ipsr) & IA64_PSR_DT)) {
   8.213 -		panic_domain(regs,"psr.dt off, trying to deliver nested dtlb!\n");
   8.214 -	}
   8.215 -	vector &= ~0xf;
   8.216 -	if (vector != IA64_DATA_TLB_VECTOR &&
   8.217 -	    vector != IA64_ALT_DATA_TLB_VECTOR &&
   8.218 -	    vector != IA64_VHPT_TRANS_VECTOR) {
   8.219 -		panic_domain(regs,"psr.ic off, delivering fault=%lx,ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
   8.220 -		             vector,regs->cr_ipsr,regs->cr_iip,PSCB(v,ifa),isr,PSCB(v,iip));
   8.221 -	}
   8.222 -}
   8.223 -
   8.224 -void reflect_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
   8.225 -{
   8.226 -	struct vcpu *v = current;
   8.227 -
   8.228 -	if (!PSCB(v,interrupt_collection_enabled))
   8.229 -		check_bad_nested_interruption(isr,regs,vector);
   8.230 -	PSCB(v,unat) = regs->ar_unat;  // not sure if this is really needed?
   8.231 -	PSCB(v,precover_ifs) = regs->cr_ifs;
   8.232 -	vcpu_bsw0(v);
   8.233 -	PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
   8.234 -	PSCB(v,isr) = isr;
   8.235 -	PSCB(v,iip) = regs->cr_iip;
   8.236 -	PSCB(v,ifs) = 0;
   8.237 -	PSCB(v,incomplete_regframe) = 0;
   8.238 -
   8.239 -	regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
   8.240 -	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
   8.241 -	regs->r31 = XSI_IPSR;
   8.242 -
   8.243 -	v->vcpu_info->evtchn_upcall_mask = 1;
   8.244 -	PSCB(v,interrupt_collection_enabled) = 0;
   8.245 -
   8.246 -	inc_slow_reflect_count(vector);
   8.247 -}
   8.248 -
   8.249 -void foodpi(void) {}
   8.250 -
   8.251 -static unsigned long pending_false_positive = 0;
   8.252 -
   8.253 -void reflect_extint(struct pt_regs *regs)
   8.254 -{
   8.255 -	unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
   8.256 -	struct vcpu *v = current;
   8.257 -	static int first_extint = 1;
   8.258 -
   8.259 -	if (first_extint) {
   8.260 -		printf("Delivering first extint to domain: isr=0x%lx, iip=0x%lx\n", isr, regs->cr_iip);
   8.261 -		first_extint = 0;
   8.262 -	}
   8.263 -	if (vcpu_timer_pending_early(v))
   8.264 -printf("*#*#*#* about to deliver early timer to domain %d!!!\n",v->domain->domain_id);
   8.265 -	PSCB(current,itir) = 0;
   8.266 -	reflect_interruption(isr,regs,IA64_EXTINT_VECTOR);
   8.267 -}
   8.268 -
   8.269 -void reflect_event(struct pt_regs *regs)
   8.270 -{
   8.271 -	unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
   8.272 -	struct vcpu *v = current;
   8.273 -
   8.274 -	/* Sanity check */
   8.275 -	if (is_idle_vcpu(v) || !user_mode(regs)) {
   8.276 -		//printk("WARN: invocation to reflect_event in nested xen\n");
   8.277 -		return;
   8.278 -	}
   8.279 -
   8.280 -	if (!event_pending(v))
   8.281 -		return;
   8.282 -
   8.283 -	if (!PSCB(v,interrupt_collection_enabled))
   8.284 -		printf("psr.ic off, delivering event, ipsr=%lx,iip=%lx,isr=%lx,viip=0x%lx\n",
   8.285 -		       regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
   8.286 -	PSCB(v,unat) = regs->ar_unat;  // not sure if this is really needed?
   8.287 -	PSCB(v,precover_ifs) = regs->cr_ifs;
   8.288 -	vcpu_bsw0(v);
   8.289 -	PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
   8.290 -	PSCB(v,isr) = isr;
   8.291 -	PSCB(v,iip) = regs->cr_iip;
   8.292 -	PSCB(v,ifs) = 0;
   8.293 -	PSCB(v,incomplete_regframe) = 0;
   8.294 -
   8.295 -	regs->cr_iip = v->arch.event_callback_ip;
   8.296 -	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
   8.297 -	regs->r31 = XSI_IPSR;
   8.298 -
   8.299 -	v->vcpu_info->evtchn_upcall_mask = 1;
   8.300 -	PSCB(v,interrupt_collection_enabled) = 0;
   8.301 -}
   8.302 -
   8.303 -// ONLY gets called from ia64_leave_kernel
   8.304 -// ONLY call with interrupts disabled?? (else might miss one?)
   8.305 -// NEVER successful if already reflecting a trap/fault because psr.i==0
   8.306 -void deliver_pending_interrupt(struct pt_regs *regs)
   8.307 -{
   8.308 -	struct domain *d = current->domain;
   8.309 -	struct vcpu *v = current;
   8.310 -	// FIXME: Will this work properly if doing an RFI???
   8.311 -	if (!is_idle_domain(d) && user_mode(regs)) {
   8.312 -		if (vcpu_deliverable_interrupts(v))
   8.313 -			reflect_extint(regs);
   8.314 -		else if (PSCB(v,pending_interruption))
   8.315 -			++pending_false_positive;
   8.316 -	}
   8.317 -}
   8.318 -unsigned long lazy_cover_count = 0;
   8.319 -
   8.320 -static int
   8.321 -handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
   8.322 -{
   8.323 -	if (!PSCB(v,interrupt_collection_enabled)) {
   8.324 -		PSCB(v,ifs) = regs->cr_ifs;
   8.325 -		PSCB(v,incomplete_regframe) = 1;
   8.326 -		regs->cr_ifs = 0;
   8.327 -		lazy_cover_count++;
   8.328 -		return(1); // retry same instruction with cr.ifs off
   8.329 -	}
   8.330 -	return(0);
   8.331 -}
   8.332 -
   8.333 -void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
   8.334 -{
   8.335 -	unsigned long iip = regs->cr_iip, iha;
   8.336 -	// FIXME should validate address here
   8.337 -	unsigned long pteval;
   8.338 -	unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
   8.339 -	IA64FAULT fault;
   8.340 -
   8.341 -	if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs)) return;
   8.342 -	if ((isr & IA64_ISR_SP)
   8.343 -	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
   8.344 -	{
   8.345 -		/*
   8.346 -		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
   8.347 -		 * bit in the psr to ensure forward progress.  (Target register will get a
   8.348 -		 * NaT for ld.s, lfetch will be canceled.)
   8.349 -		 */
   8.350 -		ia64_psr(regs)->ed = 1;
   8.351 -		return;
   8.352 -	}
   8.353 -
   8.354 - again:
   8.355 -	fault = vcpu_translate(current,address,is_data,&pteval,&itir,&iha);
   8.356 -	if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
   8.357 -		u64 logps;
   8.358 -		pteval = translate_domain_pte(pteval, address, itir, &logps);
   8.359 -		vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,logps);
   8.360 -		if (fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) {
   8.361 -			/* dtlb has been purged in-between.  This dtlb was
   8.362 -			   matching.  Undo the work.  */
   8.363 -			vcpu_flush_tlb_vhpt_range (address, 1);
   8.364 -			goto again;
   8.365 -		}
   8.366 -		return;
   8.367 -	}
   8.368 -
   8.369 -	if (!user_mode (regs)) {
   8.370 -		/* The fault occurs inside Xen.  */
   8.371 -		if (!ia64_done_with_exception(regs)) {
   8.372 -			// should never happen.  If it does, region 0 addr may
   8.373 -			// indicate a bad xen pointer
   8.374 -			printk("*** xen_handle_domain_access: exception table"
   8.375 -			       " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
   8.376 -				iip, address);
   8.377 -			panic_domain(regs,"*** xen_handle_domain_access: exception table"
   8.378 -			       " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
   8.379 -				iip, address);
   8.380 -		}
   8.381 -		return;
   8.382 -	}
   8.383 -	if (!PSCB(current,interrupt_collection_enabled)) {
   8.384 -		check_bad_nested_interruption(isr,regs,fault);
   8.385 -		//printf("Delivering NESTED DATA TLB fault\n");
   8.386 -		fault = IA64_DATA_NESTED_TLB_VECTOR;
   8.387 -		regs->cr_iip = ((unsigned long) PSCBX(current,iva) + fault) & ~0xffUL;
   8.388 -		regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
   8.389 -		// NOTE: nested trap must NOT pass PSCB address
   8.390 -		//regs->r31 = (unsigned long) &PSCB(current);
   8.391 -		inc_slow_reflect_count(fault);
   8.392 -		return;
   8.393 -	}
   8.394 -
   8.395 -	PSCB(current,itir) = itir;
   8.396 -	PSCB(current,iha) = iha;
   8.397 -	PSCB(current,ifa) = address;
   8.398 -	reflect_interruption(isr, regs, fault);
   8.399 -}
   8.400 -
   8.401 -fpswa_interface_t *fpswa_interface = 0;
   8.402 -
   8.403 -void trap_init (void)
   8.404 -{
   8.405 -	if (ia64_boot_param->fpswa)
   8.406 -		/* FPSWA fixup: make the interface pointer a virtual address: */
   8.407 -		fpswa_interface = __va(ia64_boot_param->fpswa);
   8.408 -	else
   8.409 -		printk("No FPSWA supported.\n");
   8.410 -}
   8.411 -
   8.412 -static fpswa_ret_t
   8.413 -fp_emulate (int fp_fault, void *bundle, unsigned long *ipsr,
   8.414 -	    unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
   8.415 -	    unsigned long *ifs, struct pt_regs *regs)
   8.416 -{
   8.417 -	fp_state_t fp_state;
   8.418 -	fpswa_ret_t ret;
   8.419 -
   8.420 -	if (!fpswa_interface)
   8.421 -		return ((fpswa_ret_t) {-1, 0, 0, 0});
   8.422 -
   8.423 -	memset(&fp_state, 0, sizeof(fp_state_t));
   8.424 -
   8.425 -	/*
   8.426 -	 * compute fp_state.  only FP registers f6 - f11 are used by the
   8.427 -	 * kernel, so set those bits in the mask and set the low volatile
   8.428 -	 * pointer to point to these registers.
   8.429 -	 */
   8.430 -	fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */
   8.431 -
   8.432 -	fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
   8.433 -	/*
   8.434 -	 * unsigned long (*EFI_FPSWA) (
   8.435 -	 *      unsigned long    trap_type,
   8.436 -	 *      void             *Bundle,
   8.437 -	 *      unsigned long    *pipsr,
   8.438 -	 *      unsigned long    *pfsr,
   8.439 -	 *      unsigned long    *pisr,
   8.440 -	 *      unsigned long    *ppreds,
   8.441 -	 *      unsigned long    *pifs,
   8.442 -	 *      void             *fp_state);
   8.443 -	 */
   8.444 -	ret = (*fpswa_interface->fpswa)(fp_fault, bundle,
   8.445 -					ipsr, fpsr, isr, pr, ifs, &fp_state);
   8.446 -
   8.447 -	return ret;
   8.448 -}
   8.449 -
   8.450 -/*
   8.451 - * Handle floating-point assist faults and traps for domain.
   8.452 - */
   8.453 -unsigned long
   8.454 -handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
   8.455 -{
   8.456 -	struct vcpu *v = current;
   8.457 -	IA64_BUNDLE bundle;
   8.458 -	IA64_BUNDLE __get_domain_bundle(UINT64);
   8.459 -	unsigned long fault_ip;
   8.460 -	fpswa_ret_t ret;
   8.461 -
   8.462 -	fault_ip = regs->cr_iip;
   8.463 -	/*
   8.464 -	 * When the FP trap occurs, the trapping instruction is completed.
   8.465 -	 * If ipsr.ri == 0, there is the trapping instruction in previous bundle.
   8.466 -	 */
   8.467 -	if (!fp_fault && (ia64_psr(regs)->ri == 0))
   8.468 -		fault_ip -= 16;
   8.469 -	bundle = __get_domain_bundle(fault_ip);
   8.470 -	if (!bundle.i64[0] && !bundle.i64[1]) {
   8.471 -		printk("%s: floating-point bundle at 0x%lx not mapped\n",
   8.472 -		       __FUNCTION__, fault_ip);
   8.473 -		return -1;
   8.474 -	}
   8.475 -
   8.476 -	ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
   8.477 -	                 &isr, &regs->pr, &regs->cr_ifs, regs);
   8.478 -
   8.479 -	if (ret.status) {
   8.480 -		PSCBX(v, fpswa_ret) = ret;
   8.481 -		printk("%s(%s): fp_emulate() returned %ld\n",
   8.482 -		       __FUNCTION__, fp_fault?"fault":"trap", ret.status);
   8.483 -	}
   8.484 -
   8.485 -	return ret.status;
   8.486 -}
   8.487 -
   8.488 -void
   8.489 -ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
   8.490 -	    unsigned long iim, unsigned long itir, unsigned long arg5,
   8.491 -	    unsigned long arg6, unsigned long arg7, unsigned long stack)
   8.492 -{
   8.493 -	struct pt_regs *regs = (struct pt_regs *) &stack;
   8.494 -	unsigned long code;
   8.495 -	static const char *reason[] = {
   8.496 -		"IA-64 Illegal Operation fault",
   8.497 -		"IA-64 Privileged Operation fault",
   8.498 -		"IA-64 Privileged Register fault",
   8.499 -		"IA-64 Reserved Register/Field fault",
   8.500 -		"Disabled Instruction Set Transition fault",
   8.501 -		"Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
   8.502 -		"Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
   8.503 -		"Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
   8.504 -	};
   8.505 -
   8.506 -	printf("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, ipsr=0x%016lx, isr=0x%016lx\n",
   8.507 -	       vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);
   8.508 -
   8.509 -
   8.510 -	if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
   8.511 -		/*
   8.512 -		 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
   8.513 -		 * the lfetch.
   8.514 -		 */
   8.515 -		ia64_psr(regs)->ed = 1;
   8.516 -		printf("ia64_fault: handled lfetch.fault\n");
   8.517 -		return;
   8.518 -	}
   8.519 -
   8.520 -	switch (vector) {
   8.521 -	    case 0:
   8.522 -		printk("VHPT Translation.\n");
   8.523 -		break;
   8.524 -	  
   8.525 -	    case 4:
   8.526 -		printk("Alt DTLB.\n");
   8.527 -		break;
   8.528 -	  
   8.529 -	    case 6:
   8.530 -		printk("Instruction Key Miss.\n");
   8.531 -		break;
   8.532 -
   8.533 -	    case 7: 
   8.534 -		printk("Data Key Miss.\n");
   8.535 -		break;
   8.536 -
   8.537 -	    case 8: 
   8.538 -		printk("Dirty-bit.\n");
   8.539 -		break;
   8.540 -
   8.541 -	    case 20:
   8.542 -		printk("Page Not Found.\n");
   8.543 -		break;
   8.544 -
   8.545 -	    case 21:
   8.546 -		printk("Key Permission.\n");
   8.547 -		break;
   8.548 -
   8.549 -	    case 22:
   8.550 -		printk("Instruction Access Rights.\n");
   8.551 -		break;
   8.552 -
   8.553 -	    case 24: /* General Exception */
   8.554 -		code = (isr >> 4) & 0xf;
   8.555 -		printk("General Exception: %s%s.\n", reason[code],
   8.556 -		        (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
   8.557 -		                       " (data access)") : "");
   8.558 -		if (code == 8) {
   8.559 -# ifdef CONFIG_IA64_PRINT_HAZARDS
   8.560 -			printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
   8.561 -			       current->comm, current->pid,
   8.562 -			       regs->cr_iip + ia64_psr(regs)->ri,
   8.563 -			       regs->pr);
   8.564 -# endif
   8.565 -			printf("ia64_fault: returning on hazard\n");
   8.566 -			return;
   8.567 -		}
   8.568 -		break;
   8.569 -
   8.570 -	    case 25:
   8.571 -		printk("Disabled FP-Register.\n");
   8.572 -		break;
   8.573 -
   8.574 -	    case 26:
   8.575 -		printk("NaT consumption.\n");
   8.576 -		break;
   8.577 -
   8.578 -	    case 29:
   8.579 -		printk("Debug.\n");
   8.580 -		break;
   8.581 -
   8.582 -	    case 30:
   8.583 -		printk("Unaligned Reference.\n");
   8.584 -		break;
   8.585 -
   8.586 -	    case 31:
   8.587 -		printk("Unsupported data reference.\n");
   8.588 -		break;
   8.589 -
   8.590 -	    case 32:
   8.591 -		printk("Floating-Point Fault.\n");
   8.592 -		break;
   8.593 -
   8.594 -	    case 33:
   8.595 -		printk("Floating-Point Trap.\n");
   8.596 -		break;
   8.597 -
   8.598 -	    case 34:
   8.599 -		printk("Lower Privilege Transfer Trap.\n");
   8.600 -		break;
   8.601 -
   8.602 -	    case 35:
   8.603 -		printk("Taken Branch Trap.\n");
   8.604 -		break;
   8.605 -
   8.606 -	    case 36:
   8.607 -		printk("Single Step Trap.\n");
   8.608 -		break;
   8.609 -    
   8.610 -	    case 45:
   8.611 -		printk("IA-32 Exception.\n");
   8.612 -		break;
   8.613 -
   8.614 -	    case 46:
   8.615 -		printk("IA-32 Intercept.\n");
   8.616 -		break;
   8.617 -
   8.618 -	    case 47:
   8.619 -		printk("IA-32 Interrupt.\n");
   8.620 -		break;
   8.621 -
   8.622 -	    default:
   8.623 -		printk("Fault %lu\n", vector);
   8.624 -		break;
   8.625 -	}
   8.626 -
   8.627 -	show_registers(regs);
   8.628 -	panic("Fault in Xen.\n");
   8.629 -}
   8.630 -
   8.631 -unsigned long running_on_sim = 0;
   8.632 -
   8.633 -void
   8.634 -do_ssc(unsigned long ssc, struct pt_regs *regs)
   8.635 -{
   8.636 -	unsigned long arg0, arg1, arg2, arg3, retval;
   8.637 -	char buf[2];
   8.638 -/**/	static int last_fd, last_count;	// FIXME FIXME FIXME
   8.639 -/**/					// BROKEN FOR MULTIPLE DOMAINS & SMP
   8.640 -/**/	struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
   8.641 -
   8.642 -	arg0 = vcpu_get_gr(current,32);
   8.643 -	switch(ssc) {
   8.644 -	    case SSC_PUTCHAR:
   8.645 -		buf[0] = arg0;
   8.646 -		buf[1] = '\0';
   8.647 -		printf(buf);
   8.648 -		break;
   8.649 -	    case SSC_GETCHAR:
   8.650 -		retval = ia64_ssc(0,0,0,0,ssc);
   8.651 -		vcpu_set_gr(current,8,retval,0);
   8.652 -		break;
   8.653 -	    case SSC_WAIT_COMPLETION:
   8.654 -		if (arg0) {	// metaphysical address
   8.655 -
   8.656 -			arg0 = translate_domain_mpaddr(arg0);
   8.657 -/**/			stat = (struct ssc_disk_stat *)__va(arg0);
   8.658 -///**/			if (stat->fd == last_fd) stat->count = last_count;
   8.659 -/**/			stat->count = last_count;
   8.660 -//if (last_count >= PAGE_SIZE) printf("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
   8.661 -///**/			retval = ia64_ssc(arg0,0,0,0,ssc);
   8.662 -/**/			retval = 0;
   8.663 -		}
   8.664 -		else retval = -1L;
   8.665 -		vcpu_set_gr(current,8,retval,0);
   8.666 -		break;
   8.667 -	    case SSC_OPEN:
   8.668 -		arg1 = vcpu_get_gr(current,33);	// access rights
   8.669 -if (!running_on_sim) { printf("SSC_OPEN, not implemented on hardware.  (ignoring...)\n"); arg0 = 0; }
   8.670 -		if (arg0) {	// metaphysical address
   8.671 -			arg0 = translate_domain_mpaddr(arg0);
   8.672 -			retval = ia64_ssc(arg0,arg1,0,0,ssc);
   8.673 -		}
   8.674 -		else retval = -1L;
   8.675 -		vcpu_set_gr(current,8,retval,0);
   8.676 -		break;
   8.677 -	    case SSC_WRITE:
   8.678 -	    case SSC_READ:
   8.679 -//if (ssc == SSC_WRITE) printf("DOING AN SSC_WRITE\n");
   8.680 -		arg1 = vcpu_get_gr(current,33);
   8.681 -		arg2 = vcpu_get_gr(current,34);
   8.682 -		arg3 = vcpu_get_gr(current,35);
   8.683 -		if (arg2) {	// metaphysical address of descriptor
   8.684 -			struct ssc_disk_req *req;
   8.685 -			unsigned long mpaddr;
   8.686 -			long len;
   8.687 -
   8.688 -			arg2 = translate_domain_mpaddr(arg2);
   8.689 -			req = (struct ssc_disk_req *) __va(arg2);
   8.690 -			req->len &= 0xffffffffL;	// avoid strange bug
   8.691 -			len = req->len;
   8.692 -/**/			last_fd = arg1;
   8.693 -/**/			last_count = len;
   8.694 -			mpaddr = req->addr;
   8.695 -//if (last_count >= PAGE_SIZE) printf("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
   8.696 -			retval = 0;
   8.697 -			if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
   8.698 -				// do partial page first
   8.699 -				req->addr = translate_domain_mpaddr(mpaddr);
   8.700 -				req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
   8.701 -				len -= req->len; mpaddr += req->len;
   8.702 -				retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
   8.703 -				arg3 += req->len; // file offset
   8.704 -/**/				last_stat.fd = last_fd;
   8.705 -/**/				(void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
   8.706 -//if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
   8.707 -			}
   8.708 -			if (retval >= 0) while (len > 0) {
   8.709 -				req->addr = translate_domain_mpaddr(mpaddr);
   8.710 -				req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
   8.711 -				len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
   8.712 -				retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
   8.713 -				arg3 += req->len; // file offset
   8.714 -// TEMP REMOVED AGAIN				arg3 += req->len; // file offset
   8.715 -/**/				last_stat.fd = last_fd;
   8.716 -/**/				(void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
   8.717 -//if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
   8.718 -			}
   8.719 -			// set it back to the original value
   8.720 -			req->len = last_count;
   8.721 -		}
   8.722 -		else retval = -1L;
   8.723 -		vcpu_set_gr(current,8,retval,0);
   8.724 -//if (last_count >= PAGE_SIZE) printf("retval=%x\n",retval);
   8.725 -		break;
   8.726 -	    case SSC_CONNECT_INTERRUPT:
   8.727 -		arg1 = vcpu_get_gr(current,33);
   8.728 -		arg2 = vcpu_get_gr(current,34);
   8.729 -		arg3 = vcpu_get_gr(current,35);
   8.730 -		if (!running_on_sim) { printf("SSC_CONNECT_INTERRUPT, not implemented on hardware.  (ignoring...)\n"); break; }
   8.731 -		(void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
   8.732 -		break;
   8.733 -	    case SSC_NETDEV_PROBE:
   8.734 -		vcpu_set_gr(current,8,-1L,0);
   8.735 -		break;
   8.736 -	    default:
   8.737 -		printf("ia64_handle_break: bad ssc code %lx, iip=0x%lx, b0=0x%lx... spinning\n",
   8.738 -			ssc, regs->cr_iip, regs->b0);
   8.739 -		while(1);
   8.740 -		break;
   8.741 -	}
   8.742 -	vcpu_increment_iip(current);
   8.743 -}
   8.744 -
   8.745 -/* Also read in hyperprivop.S  */
   8.746 -int first_break = 1;
   8.747 -
   8.748 -void
   8.749 -ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
   8.750 -{
   8.751 -	struct domain *d = current->domain;
   8.752 -	struct vcpu *v = current;
   8.753 -	IA64FAULT vector;
   8.754 -
   8.755 -	if (first_break) {
   8.756 -		if (platform_is_hp_ski()) running_on_sim = 1;
   8.757 -		else running_on_sim = 0;
   8.758 -		first_break = 0;
   8.759 -	}
   8.760 -	if (iim == 0x80001 || iim == 0x80002) {	//FIXME: don't hardcode constant
   8.761 -		do_ssc(vcpu_get_gr(current,36), regs);
   8.762 -	} 
   8.763 -#ifdef CRASH_DEBUG
   8.764 -	else if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs)) {
   8.765 -		if (iim == 0)
   8.766 -			show_registers(regs);
   8.767 -		debugger_trap_fatal(0 /* don't care */, regs);
   8.768 -	} 
   8.769 -#endif
   8.770 -	else if (iim == d->arch.breakimm) {
   8.771 -		/* by default, do not continue */
   8.772 -		v->arch.hypercall_continuation = 0;
   8.773 -
   8.774 -		if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
   8.775 -			if (!PSCBX(v, hypercall_continuation))
   8.776 -				vcpu_increment_iip(current);
   8.777 -		}
   8.778 -		else reflect_interruption(isr, regs, vector);
   8.779 -	}
   8.780 -	else if (!PSCB(v,interrupt_collection_enabled)) {
   8.781 -		if (ia64_hyperprivop(iim,regs))
   8.782 -			vcpu_increment_iip(current);
   8.783 -	}
   8.784 -	else {
   8.785 -		if (iim == 0) 
   8.786 -			die_if_kernel("bug check", regs, iim);
   8.787 -		PSCB(v,iim) = iim;
   8.788 -		reflect_interruption(isr,regs,IA64_BREAK_VECTOR);
   8.789 -	}
   8.790 -}
   8.791 -
   8.792 -void
   8.793 -ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
   8.794 -{
   8.795 -	IA64FAULT vector;
   8.796 -
   8.797 -	vector = priv_emulate(current,regs,isr);
   8.798 -	if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
   8.799 -		// Note: if a path results in a vector to reflect that requires
   8.800 -		// iha/itir (e.g. vcpu_force_data_miss), they must be set there
   8.801 -		reflect_interruption(isr,regs,vector);
   8.802 -	}
   8.803 -}
   8.804 -
   8.805 -/* Used in vhpt.h.  */
   8.806 -#define INTR_TYPE_MAX	10
   8.807 -UINT64 int_counts[INTR_TYPE_MAX];
   8.808 -
   8.809 -void
   8.810 -ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
   8.811 -{
   8.812 -	struct vcpu *v = current;
   8.813 -	unsigned long check_lazy_cover = 0;
   8.814 -	unsigned long psr = regs->cr_ipsr;
   8.815 -
   8.816 -	/* Following faults shouldn'g be seen from Xen itself */
   8.817 -	if (!(psr & IA64_PSR_CPL)) BUG();
   8.818 -
   8.819 -	switch(vector) {
   8.820 -	    case 8:
   8.821 -		vector = IA64_DIRTY_BIT_VECTOR; break;
   8.822 -	    case 9:
   8.823 -		vector = IA64_INST_ACCESS_BIT_VECTOR; break;
   8.824 -	    case 10:
   8.825 -		check_lazy_cover = 1;
   8.826 -		vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
   8.827 -	    case 20:
   8.828 -		check_lazy_cover = 1;
   8.829 -		vector = IA64_PAGE_NOT_PRESENT_VECTOR; break;
   8.830 -	    case 22:
   8.831 -		vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
   8.832 -	    case 23:
   8.833 -		check_lazy_cover = 1;
   8.834 -		vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
   8.835 -	    case 25:
   8.836 -		vector = IA64_DISABLED_FPREG_VECTOR;
   8.837 -		break;
   8.838 -	    case 26:
   8.839 -		if (((isr >> 4L) & 0xfL) == 1) {
   8.840 -			//regs->eml_unat = 0;  FIXME: DO WE NEED THIS??
   8.841 -			printf("ia64_handle_reflection: handling regNaT fault\n");
   8.842 -			vector = IA64_NAT_CONSUMPTION_VECTOR; break;
   8.843 -		}
   8.844 -#if 1
   8.845 -		// pass null pointer dereferences through with no error
   8.846 -		// but retain debug output for non-zero ifa
   8.847 -		if (!ifa) {
   8.848 -			vector = IA64_NAT_CONSUMPTION_VECTOR; break;
   8.849 -		}
   8.850 -#endif
   8.851 -		printf("*** NaT fault... attempting to handle as privop\n");
   8.852 -		printf("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
   8.853 -		       isr, ifa, regs->cr_iip, psr);
   8.854 -		//regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
   8.855 -		// certain NaT faults are higher priority than privop faults
   8.856 -		vector = priv_emulate(v,regs,isr);
   8.857 -		if (vector == IA64_NO_FAULT) {
   8.858 -			printf("*** Handled privop masquerading as NaT fault\n");
   8.859 -			return;
   8.860 -		}
   8.861 -		vector = IA64_NAT_CONSUMPTION_VECTOR; break;
   8.862 -	    case 27:
   8.863 -		//printf("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
   8.864 -		PSCB(current,iim) = iim;
   8.865 -		vector = IA64_SPECULATION_VECTOR; break;
   8.866 -	    case 30:
   8.867 -		// FIXME: Should we handle unaligned refs in Xen??
   8.868 -		vector = IA64_UNALIGNED_REF_VECTOR; break;
   8.869 -	    case 32:
   8.870 -		if (!(handle_fpu_swa(1, regs, isr))) {
   8.871 -		    vcpu_increment_iip(v);
   8.872 -		    return;
   8.873 -		}
   8.874 -		printf("ia64_handle_reflection: handling FP fault\n");
   8.875 -		vector = IA64_FP_FAULT_VECTOR; break;
   8.876 -	    case 33:
   8.877 -		if (!(handle_fpu_swa(0, regs, isr))) return;
   8.878 -		printf("ia64_handle_reflection: handling FP trap\n");
   8.879 -		vector = IA64_FP_TRAP_VECTOR; break;
   8.880 -	    case 34:
   8.881 -		printf("ia64_handle_reflection: handling lowerpriv trap\n");
   8.882 -		vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR; break;
   8.883 -	    case 35:
   8.884 -		printf("ia64_handle_reflection: handling taken branch trap\n");
   8.885 -		vector = IA64_TAKEN_BRANCH_TRAP_VECTOR; break;
   8.886 -	    case 36:
   8.887 -		printf("ia64_handle_reflection: handling single step trap\n");
   8.888 -		vector = IA64_SINGLE_STEP_TRAP_VECTOR; break;
   8.889 -
   8.890 -	    default:
   8.891 -		printf("ia64_handle_reflection: unhandled vector=0x%lx\n",vector);
   8.892 -		while(vector);
   8.893 -		return;
   8.894 -	}
   8.895 -	if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, regs)) return;
   8.896 -	PSCB(current,ifa) = ifa;
   8.897 -	PSCB(current,itir) = vcpu_get_itir_on_fault(v,ifa);
   8.898 -	reflect_interruption(isr,regs,vector);
   8.899 -}
   8.900 -
   8.901 -unsigned long hypercall_create_continuation(
   8.902 -	unsigned int op, const char *format, ...)
   8.903 -{
   8.904 -    struct mc_state *mcs = &mc_state[smp_processor_id()];
   8.905 -    struct vcpu *v = current;
   8.906 -    const char *p = format;
   8.907 -    unsigned long arg;
   8.908 -    unsigned int i;
   8.909 -    va_list args;
   8.910 -
   8.911 -    va_start(args, format);
   8.912 -    if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
   8.913 -	panic("PREEMPT happen in multicall\n");	// Not support yet
   8.914 -    } else {
   8.915 -	vcpu_set_gr(v, 2, op, 0);
   8.916 -	for ( i = 0; *p != '\0'; i++) {
   8.917 -            switch ( *p++ )
   8.918 -            {
   8.919 -            case 'i':
   8.920 -                arg = (unsigned long)va_arg(args, unsigned int);
   8.921 -                break;
   8.922 -            case 'l':
   8.923 -                arg = (unsigned long)va_arg(args, unsigned long);
   8.924 -                break;
   8.925 -            case 'h':
   8.926 -                arg = (unsigned long)va_arg(args, void *);
   8.927 -                break;
   8.928 -            default:
   8.929 -                arg = 0;
   8.930 -                BUG();
   8.931 -            }
   8.932 -	    switch (i) {
   8.933 -	    case 0: vcpu_set_gr(v, 14, arg, 0);
   8.934 -		    break;
   8.935 -	    case 1: vcpu_set_gr(v, 15, arg, 0);
   8.936 -		    break;
   8.937 -	    case 2: vcpu_set_gr(v, 16, arg, 0);
   8.938 -		    break;
   8.939 -	    case 3: vcpu_set_gr(v, 17, arg, 0);
   8.940 -		    break;
   8.941 -	    case 4: vcpu_set_gr(v, 18, arg, 0);
   8.942 -		    break;
   8.943 -	    default: panic("Too many args for hypercall continuation\n");
   8.944 -		    break;
   8.945 -	    }
   8.946 -	}
   8.947 -    }
   8.948 -    v->arch.hypercall_continuation = 1;
   8.949 -    va_end(args);
   8.950 -    return op;
   8.951 -}
   8.952 -
     9.1 --- a/xen/arch/ia64/xen/xenmisc.c	Mon Jun 05 14:23:57 2006 -0600
     9.2 +++ b/xen/arch/ia64/xen/xenmisc.c	Mon Jun 05 14:28:39 2006 -0600
     9.3 @@ -19,7 +19,6 @@
     9.4  #include <public/sched.h>
     9.5  #include <asm/vhpt.h>
     9.6  #include <asm/debugger.h>
     9.7 -#include <asm/vmx.h>
     9.8  #include <asm/vmx_vcpu.h>
     9.9  #include <asm/vcpu.h>
    9.10  
    9.11 @@ -56,91 +55,8 @@ is_platform_hp_ski(void)
    9.12  	return 1;
    9.13  }
    9.14  
    9.15 -long
    9.16 -platform_is_hp_ski(void)
    9.17 -{
    9.18 -	extern long running_on_sim;
    9.19 -	return running_on_sim;
    9.20 -}
    9.21 -
    9.22 -
    9.23  struct pt_regs *guest_cpu_user_regs(void) { return vcpu_regs(current); }
    9.24  
    9.25 -unsigned long
    9.26 -gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
    9.27 -{
    9.28 -	unsigned long pte;
    9.29 -
    9.30 -#ifndef CONFIG_XEN_IA64_DOM0_VP
    9.31 -	if (d == dom0)
    9.32 -		return(gpfn);
    9.33 -#endif
    9.34 -	pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT);
    9.35 -	if (!pte) {
    9.36 -		panic("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
    9.37 -	}
    9.38 -	return ((pte & _PFN_MASK) >> PAGE_SHIFT);
    9.39 -}
    9.40 -
    9.41 -#if 0
    9.42 -u32
    9.43 -mfn_to_gmfn(struct domain *d, unsigned long frame)
    9.44 -{
    9.45 -	// FIXME: is this right?
    9.46 -if ((frame << PAGE_SHIFT) & _PAGE_PPN_MASK) {
    9.47 -printk("mfn_to_gmfn: bad frame. spinning...\n");
    9.48 -while(1);
    9.49 -}
    9.50 -	return frame;
    9.51 -}
    9.52 -#endif
    9.53 -
    9.54 -///////////////////////////////
    9.55 -// from arch/x86/memory.c
    9.56 -///////////////////////////////
    9.57 -
    9.58 -
    9.59 -static void free_page_type(struct page_info *page, u32 type)
    9.60 -{
    9.61 -}
    9.62 -
    9.63 -static int alloc_page_type(struct page_info *page, u32 type)
    9.64 -{
    9.65 -	return 1;
    9.66 -}
    9.67 -
    9.68 -///////////////////////////////
    9.69 -//// misc memory stuff
    9.70 -///////////////////////////////
    9.71 -
    9.72 -unsigned long __get_free_pages(unsigned int mask, unsigned int order)
    9.73 -{
    9.74 -	void *p = alloc_xenheap_pages(order);
    9.75 -
    9.76 -	memset(p,0,PAGE_SIZE<<order);
    9.77 -	return (unsigned long)p;
    9.78 -}
    9.79 -
    9.80 -void __free_pages(struct page_info *page, unsigned int order)
    9.81 -{
    9.82 -	if (order) BUG();
    9.83 -	free_xenheap_page(page);
    9.84 -}
    9.85 -
    9.86 -void *pgtable_quicklist_alloc(void)
    9.87 -{
    9.88 -    void *p;
    9.89 -    p = alloc_xenheap_pages(0);
    9.90 -    if (p)
    9.91 -        clear_page(p);
    9.92 -    return p;
    9.93 -}
    9.94 -
    9.95 -void pgtable_quicklist_free(void *pgtable_entry)
    9.96 -{
    9.97 -	free_xenheap_page(pgtable_entry);
    9.98 -}
    9.99 -
   9.100  ///////////////////////////////
   9.101  // from arch/ia64/traps.c
   9.102  ///////////////////////////////
   9.103 @@ -246,74 +162,6 @@ void *search_module_extables(unsigned lo
   9.104  void *__module_text_address(unsigned long addr) { return NULL; }
   9.105  void *module_text_address(unsigned long addr) { return NULL; }
   9.106  
   9.107 -unsigned long context_switch_count = 0;
   9.108 -
   9.109 -extern struct vcpu *ia64_switch_to (struct vcpu *next_task);
   9.110 -
   9.111 -
   9.112 -void context_switch(struct vcpu *prev, struct vcpu *next)
   9.113 -{
   9.114 -    uint64_t spsr;
   9.115 -    uint64_t pta;
   9.116 -
   9.117 -    local_irq_save(spsr);
   9.118 -    context_switch_count++;
   9.119 -
   9.120 -    __ia64_save_fpu(prev->arch._thread.fph);
   9.121 -    __ia64_load_fpu(next->arch._thread.fph);
   9.122 -    if (VMX_DOMAIN(prev))
   9.123 -	    vmx_save_state(prev);
   9.124 -    if (VMX_DOMAIN(next))
   9.125 -	    vmx_load_state(next);
   9.126 -    /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
   9.127 -    prev = ia64_switch_to(next);
   9.128 -
   9.129 -    //cpu_set(smp_processor_id(), current->domain->domain_dirty_cpumask);
   9.130 -
   9.131 -    if (!VMX_DOMAIN(current)){
   9.132 -	    vcpu_set_next_timer(current);
   9.133 -    }
   9.134 -
   9.135 -
   9.136 -// leave this debug for now: it acts as a heartbeat when more than
   9.137 -// one domain is active
   9.138 -{
   9.139 -static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50};
   9.140 -static int i = 100;
   9.141 -int id = ((struct vcpu *)current)->domain->domain_id & 0xf;
   9.142 -if (!cnt[id]--) { cnt[id] = 500000; printk("%x",id); }
   9.143 -if (!i--) { i = 1000000; printk("+"); }
   9.144 -}
   9.145 -
   9.146 -    if (VMX_DOMAIN(current)){
   9.147 -		vmx_load_all_rr(current);
   9.148 -    }else{
   9.149 -    	extern char ia64_ivt;
   9.150 -    	ia64_set_iva(&ia64_ivt);
   9.151 -    	if (!is_idle_domain(current->domain)) {
   9.152 -        	ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
   9.153 -			     VHPT_ENABLED);
   9.154 -	    	load_region_regs(current);
   9.155 -	    	vcpu_load_kernel_regs(current);
   9.156 -		if (vcpu_timer_expired(current))
   9.157 -			vcpu_pend_timer(current);
   9.158 -    	}else {
   9.159 -		/* When switching to idle domain, only need to disable vhpt
   9.160 -		 * walker. Then all accesses happen within idle context will
   9.161 -		 * be handled by TR mapping and identity mapping.
   9.162 -		 */
   9.163 -		pta = ia64_get_pta();
   9.164 -		ia64_set_pta(pta & ~VHPT_ENABLED);
   9.165 -        }
   9.166 -    }
   9.167 -    local_irq_restore(spsr);
   9.168 -    context_saved(prev);
   9.169 -}
   9.170 -
   9.171 -void continue_running(struct vcpu *same)
   9.172 -{
   9.173 -	/* nothing to do */
   9.174 -}
   9.175  
   9.176  void arch_dump_domain_info(struct domain *d)
   9.177  {
   9.178 @@ -340,202 +188,3 @@ void panic_domain(struct pt_regs *regs, 
   9.179  	}
   9.180  	domain_crash_synchronous ();
   9.181  }
   9.182 -
   9.183 -///////////////////////////////
   9.184 -// from arch/x86/mm.c
   9.185 -///////////////////////////////
   9.186 -
   9.187 -#ifdef VERBOSE
   9.188 -#define MEM_LOG(_f, _a...)                           \
   9.189 -  printk("DOM%u: (file=mm.c, line=%d) " _f "\n", \
   9.190 -         current->domain->domain_id , __LINE__ , ## _a )
   9.191 -#else
   9.192 -#define MEM_LOG(_f, _a...) ((void)0)
   9.193 -#endif
   9.194 -
   9.195 -void cleanup_writable_pagetable(struct domain *d)
   9.196 -{
   9.197 -  return;
   9.198 -}
   9.199 -
   9.200 -void put_page_type(struct page_info *page)
   9.201 -{
   9.202 -    u32 nx, x, y = page->u.inuse.type_info;
   9.203 -
   9.204 - again:
   9.205 -    do {
   9.206 -        x  = y;
   9.207 -        nx = x - 1;
   9.208 -
   9.209 -        ASSERT((x & PGT_count_mask) != 0);
   9.210 -
   9.211 -        /*
   9.212 -         * The page should always be validated while a reference is held. The
   9.213 -         * exception is during domain destruction, when we forcibly invalidate
   9.214 -         * page-table pages if we detect a referential loop.
   9.215 -         * See domain.c:relinquish_list().
   9.216 -         */
   9.217 -        ASSERT((x & PGT_validated) ||
   9.218 -               test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags));
   9.219 -
   9.220 -        if ( unlikely((nx & PGT_count_mask) == 0) )
   9.221 -        {
   9.222 -            /* Record TLB information for flush later. Races are harmless. */
   9.223 -            page->tlbflush_timestamp = tlbflush_current_time();
   9.224 -
   9.225 -            if ( unlikely((nx & PGT_type_mask) <= PGT_l4_page_table) &&
   9.226 -                 likely(nx & PGT_validated) )
   9.227 -            {
   9.228 -                /*
   9.229 -                 * Page-table pages must be unvalidated when count is zero. The
   9.230 -                 * 'free' is safe because the refcnt is non-zero and validated
   9.231 -                 * bit is clear => other ops will spin or fail.
   9.232 -                 */
   9.233 -                if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x,
   9.234 -                                           x & ~PGT_validated)) != x) )
   9.235 -                    goto again;
   9.236 -                /* We cleared the 'valid bit' so we do the clean up. */
   9.237 -                free_page_type(page, x);
   9.238 -                /* Carry on, but with the 'valid bit' now clear. */
   9.239 -                x  &= ~PGT_validated;
   9.240 -                nx &= ~PGT_validated;
   9.241 -            }
   9.242 -        }
   9.243 -        else if ( unlikely(((nx & (PGT_pinned | PGT_count_mask)) ==
   9.244 -                            (PGT_pinned | 1)) &&
   9.245 -                           ((nx & PGT_type_mask) != PGT_writable_page)) )
   9.246 -        {
   9.247 -            /* Page is now only pinned. Make the back pointer mutable again. */
   9.248 -            nx |= PGT_va_mutable;
   9.249 -        }
   9.250 -    }
   9.251 -    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
   9.252 -}
   9.253 -
   9.254 -
   9.255 -int get_page_type(struct page_info *page, u32 type)
   9.256 -{
   9.257 -    u32 nx, x, y = page->u.inuse.type_info;
   9.258 -
   9.259 - again:
   9.260 -    do {
   9.261 -        x  = y;
   9.262 -        nx = x + 1;
   9.263 -        if ( unlikely((nx & PGT_count_mask) == 0) )
   9.264 -        {
   9.265 -            MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
   9.266 -            return 0;
   9.267 -        }
   9.268 -        else if ( unlikely((x & PGT_count_mask) == 0) )
   9.269 -        {
   9.270 -            if ( (x & (PGT_type_mask|PGT_va_mask)) != type )
   9.271 -            {
   9.272 -                if ( (x & PGT_type_mask) != (type & PGT_type_mask) )
   9.273 -                {
   9.274 -                    /*
   9.275 -                     * On type change we check to flush stale TLB
   9.276 -                     * entries. This may be unnecessary (e.g., page
   9.277 -                     * was GDT/LDT) but those circumstances should be
   9.278 -                     * very rare.
   9.279 -                     */
   9.280 -                    cpumask_t mask =
   9.281 -                        page_get_owner(page)->domain_dirty_cpumask;
   9.282 -                    tlbflush_filter(mask, page->tlbflush_timestamp);
   9.283 -
   9.284 -                    if ( unlikely(!cpus_empty(mask)) )
   9.285 -                    {
   9.286 -                        perfc_incrc(need_flush_tlb_flush);
   9.287 -                        flush_tlb_mask(mask);
   9.288 -                    }
   9.289 -                }
   9.290 -
   9.291 -                /* We lose existing type, back pointer, and validity. */
   9.292 -                nx &= ~(PGT_type_mask | PGT_va_mask | PGT_validated);
   9.293 -                nx |= type;
   9.294 -
   9.295 -                /* No special validation needed for writable pages. */
   9.296 -                /* Page tables and GDT/LDT need to be scanned for validity. */
   9.297 -                if ( type == PGT_writable_page )
   9.298 -                    nx |= PGT_validated;
   9.299 -            }
   9.300 -        }
   9.301 -        else
   9.302 -        {
   9.303 -            if ( unlikely((x & (PGT_type_mask|PGT_va_mask)) != type) )
   9.304 -            {
   9.305 -                if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask) ) )
   9.306 -                {
   9.307 -                    if ( current->domain == page_get_owner(page) )
   9.308 -                    {
   9.309 -                        /*
   9.310 -                         * This ensures functions like set_gdt() see up-to-date
   9.311 -                         * type info without needing to clean up writable p.t.
   9.312 -                         * state on the fast path.
   9.313 -                         */
   9.314 -                        LOCK_BIGLOCK(current->domain);
   9.315 -                        cleanup_writable_pagetable(current->domain);
   9.316 -                        y = page->u.inuse.type_info;
   9.317 -                        UNLOCK_BIGLOCK(current->domain);
   9.318 -                        /* Can we make progress now? */
   9.319 -                        if ( ((y & PGT_type_mask) == (type & PGT_type_mask)) ||
   9.320 -                             ((y & PGT_count_mask) == 0) )
   9.321 -                            goto again;
   9.322 -                    }
   9.323 -                    if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
   9.324 -                         ((type & PGT_type_mask) != PGT_l1_page_table) )
   9.325 -                        MEM_LOG("Bad type (saw %08x != exp %08x) "
   9.326 -                                "for mfn %016lx (pfn %016lx)",
   9.327 -                                x, type, page_to_mfn(page),
   9.328 -                                get_gpfn_from_mfn(page_to_mfn(page)));
   9.329 -                    return 0;
   9.330 -                }
   9.331 -                else if ( (x & PGT_va_mask) == PGT_va_mutable )
   9.332 -                {
   9.333 -                    /* The va backpointer is mutable, hence we update it. */
   9.334 -                    nx &= ~PGT_va_mask;
   9.335 -                    nx |= type; /* we know the actual type is correct */
   9.336 -                }
   9.337 -                else if ( ((type & PGT_va_mask) != PGT_va_mutable) &&
   9.338 -                          ((type & PGT_va_mask) != (x & PGT_va_mask)) )
   9.339 -                {
   9.340 -#ifdef CONFIG_X86_PAE
   9.341 -                    /* We use backptr as extra typing. Cannot be unknown. */
   9.342 -                    if ( (type & PGT_type_mask) == PGT_l2_page_table )
   9.343 -                        return 0;
   9.344 -#endif
   9.345 -                    /* This table is possibly mapped at multiple locations. */
   9.346 -                    nx &= ~PGT_va_mask;
   9.347 -                    nx |= PGT_va_unknown;
   9.348 -                }
   9.349 -            }
   9.350 -            if ( unlikely(!(x & PGT_validated)) )
   9.351 -            {
   9.352 -                /* Someone else is updating validation of this page. Wait... */
   9.353 -                while ( (y = page->u.inuse.type_info) == x )
   9.354 -                    cpu_relax();
   9.355 -                goto again;
   9.356 -            }
   9.357 -        }
   9.358 -    }
   9.359 -    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
   9.360 -
   9.361 -    if ( unlikely(!(nx & PGT_validated)) )
   9.362 -    {
   9.363 -        /* Try to validate page type; drop the new reference on failure. */
   9.364 -        if ( unlikely(!alloc_page_type(page, type)) )
   9.365 -        {
   9.366 -            MEM_LOG("Error while validating mfn %lx (pfn %lx) for type %08x"
   9.367 -                    ": caf=%08x taf=%" PRtype_info,
   9.368 -                    page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)),
   9.369 -                    type, page->count_info, page->u.inuse.type_info);
   9.370 -            /* Noone else can get a reference. We hold the only ref. */
   9.371 -            page->u.inuse.type_info = 0;
   9.372 -            return 0;
   9.373 -        }
   9.374 -
   9.375 -        /* Noone else is updating simultaneously. */
   9.376 -        __set_bit(_PGT_validated, &page->u.inuse.type_info);
   9.377 -    }
   9.378 -
   9.379 -    return 1;
   9.380 -}
    10.1 --- a/xen/include/asm-ia64/domain.h	Mon Jun 05 14:23:57 2006 -0600
    10.2 +++ b/xen/include/asm-ia64/domain.h	Mon Jun 05 14:28:39 2006 -0600
    10.3 @@ -14,6 +14,9 @@
    10.4  
    10.5  extern void domain_relinquish_resources(struct domain *);
    10.6  
    10.7 +/* given a current domain metaphysical address, return the physical address */
    10.8 +extern unsigned long translate_domain_mpaddr(unsigned long mpaddr);
    10.9 +
   10.10  /* Flush cache of domain d.
   10.11     If sync_only is true, only synchronize I&D caches,
   10.12     if false, flush and invalidate caches.  */