ia64/xen-unstable

changeset 9100:eeac4fdf02ed

merge
author kaf24@firebug.cl.cam.ac.uk
date Thu Mar 02 11:00:49 2006 +0100 (2006-03-02)
parents 7edd64c8bb36 385ddb11971d
children d9f980fabc18
files
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/ia64/Kconfig	Thu Mar 02 10:59:34 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/arch/ia64/Kconfig	Thu Mar 02 11:00:49 2006 +0100
     1.3 @@ -79,6 +79,11 @@ config XEN_BLKDEV_FRONTEND
     1.4  	bool
     1.5  	default y
     1.6  
     1.7 +config XEN_BLKDEV_BACKEND
     1.8 +	depends on XEN
     1.9 +	bool
    1.10 +	default y
    1.11 +
    1.12  config XEN_VT
    1.13  	bool "Override for turning on CONFIG_VT for domU"
    1.14  	default y
    1.15 @@ -495,6 +500,7 @@ source "arch/ia64/oprofile/Kconfig"
    1.16  
    1.17  config KPROBES
    1.18  	bool "Kprobes (EXPERIMENTAL)"
    1.19 +	depends on EXPERIMENTAL && MODULES
    1.20  	help
    1.21  	  Kprobes allows you to trap at almost any kernel address and
    1.22  	  execute a callback function.  register_kprobe() establishes
     2.1 --- a/linux-2.6-xen-sparse/arch/ia64/kernel/head.S	Thu Mar 02 10:59:34 2006 +0100
     2.2 +++ b/linux-2.6-xen-sparse/arch/ia64/kernel/head.S	Thu Mar 02 11:00:49 2006 +0100
     2.3 @@ -363,6 +363,12 @@ 1:	// now we are in virtual mode
     2.4  	;;
     2.5  (isBP)	st8 [r2]=r28		// save the address of the boot param area passed by the bootloader
     2.6  
     2.7 +#ifdef CONFIG_XEN
     2.8 +	//  Note: isBP is relied on by the callee, early_xen_setup.
     2.9 +	br.call.sptk.many rp=early_xen_setup
    2.10 +	;;
    2.11 +#endif
    2.12 +
    2.13  #ifdef CONFIG_SMP
    2.14  (isAP)	br.call.sptk.many rp=start_secondary
    2.15  .ret0:
    2.16 @@ -371,10 +377,6 @@ 1:	// now we are in virtual mode
    2.17  
    2.18  	// This is executed by the bootstrap processor (bsp) only:
    2.19  
    2.20 -#ifdef CONFIG_XEN
    2.21 -	br.call.sptk.many rp=early_xen_setup
    2.22 -	;;
    2.23 -#endif
    2.24  #ifdef CONFIG_IA64_FW_EMU
    2.25  	// initialize PAL & SAL emulator:
    2.26  	br.call.sptk.many rp=sys_fw_init
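
The head.S change above moves the early_xen_setup call ahead of the CONFIG_SMP branch: application processors jump to start_secondary and never reach the BSP-only code further down, so with the old placement only the boot processor ever installed the Xen IVT. A minimal C rendering of the new ordering (illustrative only; the real code is predicated ia64 assembly, and is_bp stands in for the isBP/isAP predicates):

    void virtual_mode_entry(int is_bp)
    {
    #ifdef CONFIG_XEN
    	early_xen_setup();		/* now runs on every CPU, BSP and APs */
    #endif
    #ifdef CONFIG_SMP
    	if (!is_bp)
    		start_secondary();	/* APs branch away here and never return */
    #endif
    	/* bootstrap-processor-only initialization continues below */
    }
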
     3.1 --- a/linux-2.6-xen-sparse/arch/ia64/kernel/sal.c	Thu Mar 02 10:59:34 2006 +0100
     3.2 +++ b/linux-2.6-xen-sparse/arch/ia64/kernel/sal.c	Thu Mar 02 11:00:49 2006 +0100
     3.3 @@ -336,8 +336,10 @@ ia64_sal_init (struct ia64_sal_systab *s
     3.4  		p += SAL_DESC_SIZE(*p);
     3.5  	}
     3.6  
     3.7 +#ifdef CONFIG_XEN
     3.8  	if (!running_on_xen)
     3.9 -		check_sal_cache_flush();
    3.10 +#endif
    3.11 +	check_sal_cache_flush();
    3.12  }
    3.13  
    3.14  int
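
The sal.c hunk above inverts the structure of the Xen special case: the runtime test is now compiled only under CONFIG_XEN, so a native build carries no Xen residue at all. The deliberately unindented call makes both preprocessor expansions valid C:

    /* CONFIG_XEN unset -- the if vanishes and the call is unconditional: */
    check_sal_cache_flush();

    /* CONFIG_XEN set -- the call becomes the body of the if: */
    if (!running_on_xen)
    	check_sal_cache_flush();
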
     4.1 --- a/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c	Thu Mar 02 10:59:34 2006 +0100
     4.2 +++ b/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c	Thu Mar 02 11:00:49 2006 +0100
     4.3 @@ -61,6 +61,9 @@
     4.4  #include <asm/system.h>
     4.5  #include <asm/unistd.h>
     4.6  #include <asm/system.h>
     4.7 +#ifdef CONFIG_XEN
     4.8 +#include <asm/hypervisor.h>
     4.9 +#endif
    4.10  
    4.11  #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
    4.12  # error "struct cpuinfo_ia64 too big!"
    4.13 @@ -243,6 +246,14 @@ reserve_memory (void)
    4.14  	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
    4.15  	n++;
    4.16  
    4.17 +#ifdef CONFIG_XEN
    4.18 +	if (running_on_xen) {
    4.19 +		rsvd_region[n].start = (unsigned long)__va((HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
    4.20 +		rsvd_region[n].end   = rsvd_region[n].start + PAGE_SIZE;
    4.21 +		n++;
    4.22 +	}
    4.23 +#endif
    4.24 +
    4.25  #ifdef CONFIG_BLK_DEV_INITRD
    4.26  	if (ia64_boot_param->initrd_start) {
    4.27  		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
    4.28 @@ -260,6 +271,7 @@ reserve_memory (void)
    4.29  	n++;
    4.30  
    4.31  	num_rsvd_regions = n;
    4.32 +	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);
    4.33  
    4.34  	sort_regions(rsvd_region, num_rsvd_regions);
    4.35  }
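
Two related fixes in reserve_memory() above: on Xen, the page holding the start_info structure is added to rsvd_region[] so the boot allocator never hands it out, and the new BUG_ON catches overflow of the array now that the entry count can grow. A condensed sketch of the pattern, with names taken from the diff:

    #ifdef CONFIG_XEN
    	if (running_on_xen) {
    		/* shared_info records which pfn holds start_info */
    		unsigned long pfn = HYPERVISOR_shared_info->arch.start_info_pfn;

    		rsvd_region[n].start = (unsigned long)__va(pfn << PAGE_SHIFT);
    		rsvd_region[n].end   = rsvd_region[n].start + PAGE_SIZE;
    		n++;
    	}
    #endif
    	/* rsvd_region[] has IA64_MAX_RSVD_REGIONS + 1 slots (see meminit.h
    	 * below), so trap any miscount before sort_regions() walks it */
    	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);
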
     5.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c	Thu Mar 02 10:59:34 2006 +0100
     5.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c	Thu Mar 02 11:00:49 2006 +0100
     5.3 @@ -106,8 +106,10 @@ int bind_virq_to_irqhandler(
     5.4      BUG_ON(HYPERVISOR_event_channel_op(&op) != 0 );
     5.5      evtchn = op.u.bind_virq.port;
     5.6  
     5.7 -    if (!unbound_irq(evtchn))
     5.8 -	return -EINVAL;
     5.9 +    if (!unbound_irq(evtchn)) {
    5.10 +        evtchn = -EINVAL;
    5.11 +        goto out;
    5.12 +    }
    5.13  
    5.14      evtchns[evtchn].handler = handler;
    5.15      evtchns[evtchn].dev_id = dev_id;
    5.16 @@ -115,6 +117,7 @@ int bind_virq_to_irqhandler(
    5.17      irq_info[evtchn] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
    5.18  
    5.19      unmask_evtchn(evtchn);
    5.20 +out:
    5.21      spin_unlock(&irq_mapping_update_lock);
    5.22      return evtchn;
    5.23  }
    5.24 @@ -125,8 +128,10 @@ int bind_evtchn_to_irqhandler(unsigned i
    5.25  {
    5.26      spin_lock(&irq_mapping_update_lock);
    5.27  
    5.28 -    if (!unbound_irq(evtchn))
    5.29 -	return -EINVAL;
    5.30 +    if (!unbound_irq(evtchn)) {
    5.31 +	evtchn = -EINVAL;
    5.32 +	goto out;
    5.33 +    }
    5.34  
    5.35      evtchns[evtchn].handler = handler;
    5.36      evtchns[evtchn].dev_id = dev_id;
    5.37 @@ -134,6 +139,7 @@ int bind_evtchn_to_irqhandler(unsigned i
    5.38      irq_info[evtchn] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
    5.39  
    5.40      unmask_evtchn(evtchn);
    5.41 +out:
    5.42      spin_unlock(&irq_mapping_update_lock);
    5.43      return evtchn;
    5.44  }
    5.45 @@ -158,7 +164,7 @@ void unbind_from_irqhandler(unsigned int
    5.46      spin_lock(&irq_mapping_update_lock);
    5.47  
    5.48      if (unbound_irq(irq))
    5.49 -        return;
    5.50 +        goto out;
    5.51  
    5.52      op.cmd = EVTCHNOP_close;
    5.53      op.u.close.port = evtchn;
    5.54 @@ -179,6 +185,7 @@ void unbind_from_irqhandler(unsigned int
    5.55      evtchns[evtchn].handler = NULL;
    5.56      evtchns[evtchn].opened = 0;
    5.57  
    5.58 +out:
    5.59      spin_unlock(&irq_mapping_update_lock);
    5.60  }
    5.61  
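
The real bug fixed in evtchn_ia64.c above is a lock leak: the early return -EINVAL paths exited with irq_mapping_update_lock still held, deadlocking the next binder; unbind_from_irqhandler had the same flaw. All three functions now funnel every exit through a single unlock. The skeleton of the idiom (bind_example is a made-up name):

    int bind_example(unsigned int evtchn)
    {
    	int ret = evtchn;

    	spin_lock(&irq_mapping_update_lock);
    	if (!unbound_irq(evtchn)) {
    		ret = -EINVAL;
    		goto out;	/* never return with the lock held */
    	}
    	/* ... record handler and irq_info, unmask_evtchn(evtchn) ... */
    out:
    	spin_unlock(&irq_mapping_update_lock);
    	return ret;
    }
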
     6.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S	Thu Mar 02 10:59:34 2006 +0100
     6.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S	Thu Mar 02 11:00:49 2006 +0100
     6.3 @@ -14,20 +14,22 @@
     6.4  running_on_xen:
     6.5  	data4 0
     6.6  
     6.7 +#define isBP	p3	// are we the Bootstrap Processor?
     6.8 +
     6.9  	.text
    6.10  GLOBAL_ENTRY(early_xen_setup)
    6.11 -	mov r8=cr.dcr;;
    6.12 -	extr.u r8=r8,63,1
    6.13 -	movl r9=running_on_xen;;
    6.14 -	st4 [r9]=r8;;
    6.15 +	mov r8=cr.dcr
    6.16 +(isBP)	movl r9=running_on_xen;;
    6.17 +	extr.u r8=r8,63,1;;
    6.18  	cmp.ne p7,p0=r8,r0;;
    6.19 +(isBP)	st4 [r9]=r8
    6.20  (p7)	movl r10=xen_ivt;;
    6.21  (p7)	mov cr.iva=r10
    6.22  	br.ret.sptk.many rp;;
    6.23 -END(xen_init)
    6.24 +END(early_xen_setup)
    6.25  
    6.26  GLOBAL_ENTRY(is_running_on_xen)
    6.27  	movl r9=running_on_xen;;
    6.28 -	ld4 r8=[r9];;
    6.29 +	ld4 r8=[r9]
    6.30  	br.ret.sptk.many rp;;
    6.31  END(is_running_on_xen)
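
Besides fixing the END() label, which previously closed the stale symbol xen_init instead of early_xen_setup, the xensetup.S rewrite predicates the running_on_xen store on isBP, so only the boot processor writes the shared flag while every CPU still probes cr.dcr bit 63 and installs xen_ivt for itself. Roughly, in C (get_dcr_bit63 and set_iva are stand-ins for the cr.dcr read and cr.iva write):

    void early_xen_setup_c(int is_bp)
    {
    	unsigned long on_xen = get_dcr_bit63();	/* set by Xen for PV guests */

    	if (is_bp)
    		running_on_xen = on_xen;	/* written once, by the BSP only */
    	if (on_xen)
    		set_iva(xen_ivt);		/* per-CPU interrupt vector table */
    }
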
     7.1 --- a/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h	Thu Mar 02 10:59:34 2006 +0100
     7.2 +++ b/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h	Thu Mar 02 11:00:49 2006 +0100
     7.3 @@ -37,8 +37,6 @@
     7.4  #include <asm/page.h>
     7.5  #define virt_to_machine(v) __pa(v)
     7.6  #define machine_to_virt(m) __va(m)
     7.7 -//#define virt_to_mfn(v)	(__pa(v) >> 14)
     7.8 -//#define mfn_to_virt(m)	(__va(m << 14))
     7.9  #define virt_to_mfn(v)	((__pa(v)) >> PAGE_SHIFT)
    7.10  #define mfn_to_virt(m)	(__va((m) << PAGE_SHIFT))
    7.11  
    7.12 @@ -46,455 +44,210 @@
    7.13   * Assembler stubs for hyper-calls.
    7.14   */
    7.15  
    7.16 -#if 0
    7.17 -static inline int
    7.18 -HYPERVISOR_set_trap_table(
    7.19 -    trap_info_t *table)
    7.20 -{
    7.21 -#if 0
    7.22 -    int ret;
    7.23 -    unsigned long ignore;
    7.24 -
    7.25 -    __asm__ __volatile__ (
    7.26 -        TRAP_INSTR
    7.27 -        : "=a" (ret), "=b" (ignore)
    7.28 -	: "0" (__HYPERVISOR_set_trap_table), "1" (table)
    7.29 -	: "memory" );
    7.30 -
    7.31 -    return ret;
    7.32 -#endif
    7.33 -    return 1;
    7.34 -}
    7.35 -
    7.36 -static inline int
    7.37 -HYPERVISOR_mmu_update(
    7.38 -    mmu_update_t *req, int count, int *success_count, domid_t domid)
    7.39 -{
    7.40 -#if 0
    7.41 -    int ret;
    7.42 -    unsigned long ign1, ign2, ign3, ign4;
    7.43 +#define _hypercall0(type, name)					\
    7.44 +({								\
    7.45 +	long __res;						\
    7.46 +	__asm__ __volatile__ (";;\n"				\
    7.47 +			      "mov r2=%1\n"			\
    7.48 +			      "break 0x1000 ;;\n"		\
    7.49 +			      "mov %0=r8 ;;\n"			\
    7.50 +			      : "=r" (__res)			\
    7.51 +			      : "i" (__HYPERVISOR_##name)	\
    7.52 +			      : "r2","r8",			\
    7.53 +			        "memory" );			\
    7.54 +	(type)__res;						\
    7.55 +})
    7.56  
    7.57 -    __asm__ __volatile__ (
    7.58 -        TRAP_INSTR
    7.59 -        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
    7.60 -	: "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count),
    7.61 -        "3" (success_count), "4" (domid)
    7.62 -	: "memory" );
    7.63 -
    7.64 -    return ret;
    7.65 -#endif
    7.66 -    return 1;
    7.67 -}
    7.68 -
    7.69 -static inline int
    7.70 -HYPERVISOR_mmuext_op(
    7.71 -    struct mmuext_op *op, int count, int *success_count, domid_t domid)
    7.72 -{
    7.73 -#if 0
    7.74 -    int ret;
    7.75 -    unsigned long ign1, ign2, ign3, ign4;
    7.76 +#define _hypercall1(type, name, a1)				\
    7.77 +({								\
    7.78 +	long __res;						\
    7.79 +	__asm__ __volatile__ (";;\n"				\
    7.80 +			      "mov r14=%2\n"			\
    7.81 +			      "mov r2=%1\n"			\
    7.82 +			      "break 0x1000 ;;\n"		\
    7.83 +			      "mov %0=r8 ;;\n"			\
    7.84 +			      : "=r" (__res)			\
    7.85 +			      : "i" (__HYPERVISOR_##name),	\
    7.86 +				"r" ((unsigned long)(a1))	\
    7.87 +			      : "r14","r2","r8",		\
    7.88 +				"memory" );			\
    7.89 +	(type)__res;						\
    7.90 +})
    7.91  
    7.92 -    __asm__ __volatile__ (
    7.93 -        TRAP_INSTR
    7.94 -        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
    7.95 -	: "0" (__HYPERVISOR_mmuext_op), "1" (op), "2" (count),
    7.96 -        "3" (success_count), "4" (domid)
    7.97 -	: "memory" );
    7.98 -
    7.99 -    return ret;
   7.100 -#endif
   7.101 -    return 1;
   7.102 -}
   7.103 -
   7.104 -static inline int
   7.105 -HYPERVISOR_set_gdt(
   7.106 -    unsigned long *frame_list, int entries)
   7.107 -{
   7.108 -#if 0
   7.109 -    int ret;
   7.110 -    unsigned long ign1, ign2;
   7.111 +#define _hypercall2(type, name, a1, a2)				\
   7.112 +({								\
   7.113 +	long __res;						\
   7.114 +	__asm__ __volatile__ (";;\n"				\
   7.115 +			      "mov r14=%2\n"			\
   7.116 +			      "mov r15=%3\n"			\
   7.117 +			      "mov r2=%1\n"			\
   7.118 +			      "break 0x1000 ;;\n"		\
   7.119 +			      "mov %0=r8 ;;\n"			\
   7.120 +			      : "=r" (__res)			\
   7.121 +			      : "i" (__HYPERVISOR_##name),	\
   7.122 +				"r" ((unsigned long)(a1)),	\
   7.123 +				"r" ((unsigned long)(a2))	\
   7.124 +			      : "r14","r15","r2","r8",		\
   7.125 +				"memory" );			\
   7.126 +	(type)__res;						\
   7.127 +})
   7.128  
   7.129 -    __asm__ __volatile__ (
   7.130 -        TRAP_INSTR
   7.131 -        : "=a" (ret), "=b" (ign1), "=c" (ign2)
   7.132 -	: "0" (__HYPERVISOR_set_gdt), "1" (frame_list), "2" (entries)
   7.133 -	: "memory" );
   7.134 -
   7.135 -
   7.136 -    return ret;
   7.137 -#endif
   7.138 -    return 1;
   7.139 -}
   7.140 -
   7.141 -static inline int
   7.142 -HYPERVISOR_stack_switch(
   7.143 -    unsigned long ss, unsigned long esp)
   7.144 -{
   7.145 -#if 0
   7.146 -    int ret;
   7.147 -    unsigned long ign1, ign2;
   7.148 -
   7.149 -    __asm__ __volatile__ (
   7.150 -        TRAP_INSTR
   7.151 -        : "=a" (ret), "=b" (ign1), "=c" (ign2)
   7.152 -	: "0" (__HYPERVISOR_stack_switch), "1" (ss), "2" (esp)
   7.153 -	: "memory" );
   7.154 -
   7.155 -    return ret;
   7.156 -#endif
   7.157 -    return 1;
   7.158 -}
   7.159 +#define _hypercall3(type, name, a1, a2, a3)			\
   7.160 +({								\
   7.161 +	long __res;						\
   7.162 +	__asm__ __volatile__ (";;\n"                            \
   7.163 +			      "mov r14=%2\n"                    \
   7.164 +			      "mov r15=%3\n"                    \
   7.165 +			      "mov r16=%4\n"                    \
   7.166 +			      "mov r2=%1\n"                     \
   7.167 +			      "break 0x1000 ;;\n"               \
   7.168 +			      "mov %0=r8 ;;\n"                  \
   7.169 +			      : "=r" (__res)                    \
   7.170 +			      : "i" (__HYPERVISOR_##name),      \
   7.171 +				"r" ((unsigned long)(a1)),	\
   7.172 +				"r" ((unsigned long)(a2)),	\
   7.173 +				"r" ((unsigned long)(a3))	\
   7.174 +			      : "r14","r15","r16","r2","r8",	\
   7.175 +			        "memory" );                     \
   7.176 +	(type)__res;                                            \
   7.177 +})
   7.178  
   7.179 -static inline int
   7.180 -HYPERVISOR_set_callbacks(
   7.181 -    unsigned long event_selector, unsigned long event_address,
   7.182 -    unsigned long failsafe_selector, unsigned long failsafe_address)
   7.183 -{
   7.184 -#if 0
   7.185 -    int ret;
   7.186 -    unsigned long ign1, ign2, ign3, ign4;
   7.187 -
   7.188 -    __asm__ __volatile__ (
   7.189 -        TRAP_INSTR
   7.190 -        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
   7.191 -	: "0" (__HYPERVISOR_set_callbacks), "1" (event_selector),
   7.192 -	  "2" (event_address), "3" (failsafe_selector), "4" (failsafe_address)
   7.193 -	: "memory" );
   7.194 +#define _hypercall4(type, name, a1, a2, a3, a4)			\
   7.195 +({								\
   7.196 +	long __res;						\
   7.197 +	__asm__ __volatile__ (";;\n"                            \
   7.198 +			      "mov r14=%2\n"                    \
   7.199 +			      "mov r15=%3\n"                    \
   7.200 +			      "mov r16=%4\n"                    \
   7.201 +			      "mov r17=%5\n"                    \
   7.202 +			      "mov r2=%1\n"                     \
   7.203 +			      "break 0x1000 ;;\n"               \
   7.204 +			      "mov %0=r8 ;;\n"                  \
   7.205 +			      : "=r" (__res)                    \
   7.206 +			      : "i" (__HYPERVISOR_##name),      \
   7.207 +				"r" ((unsigned long)(a1)),	\
   7.208 +				"r" ((unsigned long)(a2)),	\
   7.209 +				"r" ((unsigned long)(a3)),	\
   7.210 +				"r" ((unsigned long)(a4))       \
   7.211 +			      : "r14","r15","r16","r2","r8",	\
   7.212 +			        "r17","memory" );               \
   7.213 +	(type)__res;                                            \
   7.214 +})
   7.215  
   7.216 -    return ret;
   7.217 -#endif
   7.218 -    return 1;
   7.219 -}
   7.220 -
   7.221 -static inline int
   7.222 -HYPERVISOR_fpu_taskswitch(
   7.223 -    int set)
   7.224 -{
   7.225 -#if 0
   7.226 -    int ret;
   7.227 -    unsigned long ign;
   7.228 -
   7.229 -    __asm__ __volatile__ (
   7.230 -        TRAP_INSTR
   7.231 -        : "=a" (ret), "=b" (ign)
   7.232 -        : "0" (__HYPERVISOR_fpu_taskswitch), "1" (set)
   7.233 -        : "memory" );
   7.234 -
   7.235 -    return ret;
   7.236 -#endif
   7.237 -    return 1;
   7.238 -}
   7.239 +#define _hypercall5(type, name, a1, a2, a3, a4, a5)		\
   7.240 +({								\
   7.241 +	long __res;						\
   7.242 +	__asm__ __volatile__ (";;\n"                            \
   7.243 +			      "mov r14=%2\n"                    \
   7.244 +			      "mov r15=%3\n"                    \
   7.245 +			      "mov r16=%4\n"                    \
   7.246 +			      "mov r17=%5\n"                    \
   7.247 +			      "mov r18=%6\n"                    \
   7.248 +			      "mov r2=%1\n"                     \
   7.249 +			      "break 0x1000 ;;\n"               \
   7.250 +			      "mov %0=r8 ;;\n"                  \
   7.251 +			      : "=r" (__res)                    \
   7.252 +			      : "i" (__HYPERVISOR_##name),      \
   7.253 +				"r" ((unsigned long)(a1)),	\
   7.254 +				"r" ((unsigned long)(a2)),	\
   7.255 +				"r" ((unsigned long)(a3)),	\
   7.256 +				"r" ((unsigned long)(a4)),	\
   7.257 +				"r" ((unsigned long)(a5))       \
   7.258 +			      : "r14","r15","r16","r2","r8",	\
   7.259 +			        "r17","r18","memory" );         \
   7.260 +	(type)__res;                                            \
   7.261 +})
   7.262  
   7.263  static inline int
   7.264  HYPERVISOR_sched_op(
   7.265      int cmd, unsigned long arg)
   7.266  {
   7.267 -    return 1;
   7.268 -}
   7.269 -
   7.270 -static inline int
   7.271 -HYPERVISOR_suspend(
   7.272 -    unsigned long srec)
   7.273 -{
   7.274 -    return 1;
   7.275 +	return _hypercall2(int, sched_op, cmd, arg);
   7.276  }
   7.277  
   7.278  static inline long
   7.279  HYPERVISOR_set_timer_op(
   7.280      u64 timeout)
   7.281  {
   7.282 -#if 0
   7.283 -    int ret;
   7.284      unsigned long timeout_hi = (unsigned long)(timeout>>32);
   7.285      unsigned long timeout_lo = (unsigned long)timeout;
   7.286 -    unsigned long ign1, ign2;
   7.287 -
   7.288 -    __asm__ __volatile__ (
   7.289 -        TRAP_INSTR
   7.290 -        : "=a" (ret), "=b" (ign1), "=c" (ign2)
   7.291 -	: "0" (__HYPERVISOR_set_timer_op), "b" (timeout_lo), "c" (timeout_hi)
   7.292 -	: "memory");
   7.293 -
   7.294 -    return ret;
   7.295 -#endif
   7.296 -    return 1;
   7.297 +    return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
   7.298  }
   7.299  
   7.300  static inline int
   7.301  HYPERVISOR_dom0_op(
   7.302      dom0_op_t *dom0_op)
   7.303  {
   7.304 -#if 0
   7.305 -    int ret;
   7.306 -    unsigned long ign1;
   7.307 -
   7.308      dom0_op->interface_version = DOM0_INTERFACE_VERSION;
   7.309 -    __asm__ __volatile__ (
   7.310 -        TRAP_INSTR
   7.311 -        : "=a" (ret), "=b" (ign1)
   7.312 -	: "0" (__HYPERVISOR_dom0_op), "1" (dom0_op)
   7.313 -	: "memory");
   7.314 -
   7.315 -    return ret;
   7.316 -#endif
   7.317 -    return 1;
   7.318 -}
   7.319 -
   7.320 -static inline int
   7.321 -HYPERVISOR_set_debugreg(
   7.322 -    int reg, unsigned long value)
   7.323 -{
   7.324 -#if 0
   7.325 -    int ret;
   7.326 -    unsigned long ign1, ign2;
   7.327 -    __asm__ __volatile__ (
   7.328 -        TRAP_INSTR
   7.329 -        : "=a" (ret), "=b" (ign1), "=c" (ign2)
   7.330 -	: "0" (__HYPERVISOR_set_debugreg), "1" (reg), "2" (value)
   7.331 -	: "memory" );
   7.332 -
   7.333 -    return ret;
   7.334 -#endif
   7.335 -    return 1;
   7.336 -}
   7.337 -
   7.338 -static inline unsigned long
   7.339 -HYPERVISOR_get_debugreg(
   7.340 -    int reg)
   7.341 -{
   7.342 -#if 0
   7.343 -    unsigned long ret;
   7.344 -    unsigned long ign;
   7.345 -    __asm__ __volatile__ (
   7.346 -        TRAP_INSTR
   7.347 -        : "=a" (ret), "=b" (ign)
   7.348 -	: "0" (__HYPERVISOR_get_debugreg), "1" (reg)
   7.349 -	: "memory" );
   7.350 -
   7.351 -    return ret;
   7.352 -#endif
   7.353 -    return 1;
   7.354 -}
   7.355 -
   7.356 -static inline int
   7.357 -HYPERVISOR_update_descriptor(
   7.358 -    unsigned long ma, unsigned long word1, unsigned long word2)
   7.359 -{
   7.360 -#if 0
   7.361 -    int ret;
   7.362 -    unsigned long ign1, ign2, ign3;
   7.363 -
   7.364 -    __asm__ __volatile__ (
   7.365 -        TRAP_INSTR
   7.366 -        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
   7.367 -	: "0" (__HYPERVISOR_update_descriptor), "1" (ma), "2" (word1),
   7.368 -	  "3" (word2)
   7.369 -	: "memory" );
   7.370 -
   7.371 -    return ret;
   7.372 -#endif
   7.373 -    return 1;
   7.374 -}
   7.375 -
   7.376 -static inline int
   7.377 -HYPERVISOR_set_fast_trap(
   7.378 -    int idx)
   7.379 -{
   7.380 -#if 0
   7.381 -    int ret;
   7.382 -    unsigned long ign;
   7.383 -
   7.384 -    __asm__ __volatile__ (
   7.385 -        TRAP_INSTR
   7.386 -        : "=a" (ret), "=b" (ign)
   7.387 -	: "0" (__HYPERVISOR_set_fast_trap), "1" (idx)
   7.388 -	: "memory" );
   7.389 -
   7.390 -    return ret;
   7.391 -#endif
   7.392 -    return 1;
   7.393 -}
   7.394 -
   7.395 -static inline int
   7.396 -HYPERVISOR_dom_mem_op(
   7.397 -    unsigned int op, unsigned long *extent_list,
   7.398 -    unsigned long nr_extents, unsigned int extent_order)
   7.399 -{
   7.400 -#if 0
   7.401 -    int ret;
   7.402 -    unsigned long ign1, ign2, ign3, ign4, ign5;
   7.403 -
   7.404 -    __asm__ __volatile__ (
   7.405 -        TRAP_INSTR
   7.406 -        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4),
   7.407 -	  "=D" (ign5)
   7.408 -	: "0" (__HYPERVISOR_dom_mem_op), "1" (op), "2" (extent_list),
   7.409 -	  "3" (nr_extents), "4" (extent_order), "5" (DOMID_SELF)
   7.410 -        : "memory" );
   7.411 -
   7.412 -    return ret;
   7.413 -#endif
   7.414 -    return 1;
   7.415 +    return _hypercall1(int, dom0_op, dom0_op);
   7.416  }
   7.417  
   7.418  static inline int
   7.419  HYPERVISOR_multicall(
   7.420      void *call_list, int nr_calls)
   7.421  {
   7.422 -#if 0
   7.423 -    int ret;
   7.424 -    unsigned long ign1, ign2;
   7.425 -
   7.426 -    __asm__ __volatile__ (
   7.427 -        TRAP_INSTR
   7.428 -        : "=a" (ret), "=b" (ign1), "=c" (ign2)
   7.429 -	: "0" (__HYPERVISOR_multicall), "1" (call_list), "2" (nr_calls)
   7.430 -	: "memory" );
   7.431 -
   7.432 -    return ret;
   7.433 -#endif
   7.434 -    return 1;
   7.435 -}
   7.436 -#endif
   7.437 -
   7.438 -static inline int
   7.439 -HYPERVISOR_update_va_mapping(
   7.440 -    unsigned long va, pte_t new_val, unsigned long flags)
   7.441 -{
   7.442 -    /* no-op */
   7.443 -    return 1;
   7.444 +    return _hypercall2(int, multicall, call_list, nr_calls);
   7.445  }
   7.446  
   7.447  static inline int
   7.448  HYPERVISOR_memory_op(
   7.449      unsigned int cmd, void *arg)
   7.450  {
   7.451 -    int ret;
   7.452 -    __asm__ __volatile__ ( ";; mov r14=%2 ; mov r15=%3 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
   7.453 -        : "=r" (ret)
   7.454 -        : "i" (__HYPERVISOR_memory_op), "r"(cmd), "r"(arg)
   7.455 -        : "r14","r15","r2","r8","memory" );
   7.456 -    return ret;
   7.457 +    return _hypercall2(int, memory_op, cmd, arg);
   7.458  }
   7.459  
   7.460  static inline int
   7.461  HYPERVISOR_event_channel_op(
   7.462      void *op)
   7.463  {
   7.464 -    int ret;
   7.465 -    __asm__ __volatile__ ( ";; mov r14=%2 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
   7.466 -        : "=r" (ret)
   7.467 -        : "i" (__HYPERVISOR_event_channel_op), "r"(op)
   7.468 -        : "r14","r2","r8","memory" );
   7.469 -    return ret;
   7.470 +    return _hypercall1(int, event_channel_op, op);
   7.471  }
   7.472  
   7.473 -#if 0
   7.474  static inline int
   7.475  HYPERVISOR_xen_version(
   7.476 -    int cmd)
   7.477 +    int cmd, void *arg)
   7.478  {
   7.479 -#if 0
   7.480 -    int ret;
   7.481 -    unsigned long ignore;
   7.482 -
   7.483 -    __asm__ __volatile__ (
   7.484 -        TRAP_INSTR
   7.485 -        : "=a" (ret), "=b" (ignore)
   7.486 -	: "0" (__HYPERVISOR_xen_version), "1" (cmd)
   7.487 -	: "memory" );
   7.488 -
   7.489 -    return ret;
   7.490 -#endif
   7.491 -    return 1;
   7.492 +    return _hypercall2(int, xen_version, cmd, arg);
   7.493  }
   7.494 -#endif
   7.495  
   7.496  static inline int
   7.497  HYPERVISOR_console_io(
   7.498      int cmd, int count, char *str)
   7.499  {
   7.500 -    int ret;
   7.501 -    __asm__ __volatile__ ( ";; mov r14=%2 ; mov r15=%3 ; mov r16=%4 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
   7.502 -        : "=r" (ret)
   7.503 -        : "i" (__HYPERVISOR_console_io), "r"(cmd), "r"(count), "r"(str)
   7.504 -        : "r14","r15","r16","r2","r8","memory" );
   7.505 -    return ret;
   7.506 +    return _hypercall3(int, console_io, cmd, count, str);
   7.507  }
   7.508  
   7.509 -#if 0
   7.510  static inline int
   7.511  HYPERVISOR_physdev_op(
   7.512      void *physdev_op)
   7.513  {
   7.514 -#if 0
   7.515 -    int ret;
   7.516 -    unsigned long ign;
   7.517 -
   7.518 -    __asm__ __volatile__ (
   7.519 -        TRAP_INSTR
   7.520 -        : "=a" (ret), "=b" (ign)
   7.521 -	: "0" (__HYPERVISOR_physdev_op), "1" (physdev_op)
   7.522 -	: "memory" );
   7.523 -
   7.524 -    return ret;
   7.525 -#endif
   7.526 -    return 1;
   7.527 +    return _hypercall1(int, physdev_op, physdev_op);
   7.528  }
   7.529 -#endif
   7.530  
   7.531  static inline int
   7.532  HYPERVISOR_grant_table_op(
   7.533      unsigned int cmd, void *uop, unsigned int count)
   7.534  {
   7.535 -    int ret;
   7.536 -    __asm__ __volatile__ ( ";; mov r14=%2 ; mov r15=%3 ; mov r16=%4 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
   7.537 -        : "=r" (ret)
   7.538 -        : "i" (__HYPERVISOR_grant_table_op), "r"(cmd), "r"(uop), "r"(count)
   7.539 -        : "r14","r15","r16","r2","r8","memory" );
   7.540 -    return ret;
   7.541 +    return _hypercall3(int, grant_table_op, cmd, uop, count);
   7.542  }
   7.543  
   7.544 -#if 0
   7.545  static inline int
   7.546 -HYPERVISOR_update_va_mapping_otherdomain(
   7.547 -    unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
   7.548 +HYPERVISOR_vcpu_op(
   7.549 +	int cmd, int vcpuid, void *extra_args)
   7.550  {
   7.551 -#if 0
   7.552 -    int ret;
   7.553 -    unsigned long ign1, ign2, ign3, ign4;
   7.554 -
   7.555 -    __asm__ __volatile__ (
   7.556 -        TRAP_INSTR
   7.557 -        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
   7.558 -	: "0" (__HYPERVISOR_update_va_mapping_otherdomain),
   7.559 -          "1" (va), "2" ((new_val).pte_low), "3" (flags), "4" (domid) :
   7.560 -        "memory" );
   7.561 -    
   7.562 -    return ret;
   7.563 -#endif
   7.564 -    return 1;
   7.565 +    return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
   7.566  }
   7.567  
   7.568  static inline int
   7.569 -HYPERVISOR_vm_assist(
   7.570 -    unsigned int cmd, unsigned int type)
   7.571 +HYPERVISOR_suspend(
   7.572 +	unsigned long srec)
   7.573  {
   7.574 -#if 0
   7.575 -    int ret;
   7.576 -    unsigned long ign1, ign2;
   7.577 -
   7.578 -    __asm__ __volatile__ (
   7.579 -        TRAP_INSTR
   7.580 -        : "=a" (ret), "=b" (ign1), "=c" (ign2)
   7.581 -	: "0" (__HYPERVISOR_vm_assist), "1" (cmd), "2" (type)
   7.582 -	: "memory" );
   7.583 -
   7.584 -    return ret;
   7.585 -#endif
   7.586 -    return 1;
   7.587 +    return _hypercall3(int, sched_op, SCHEDOP_shutdown,
   7.588 +			SHUTDOWN_suspend, srec);
   7.589  }
   7.590  
   7.591 -#endif
   7.592 +extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
   7.593 +static inline void exit_idle(void) {}
   7.594 +#define do_IRQ(irq, regs) __do_IRQ((irq), (regs))
   7.595  
   7.596  #endif /* __HYPERCALL_H__ */
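
The hypercall.h rewrite above replaces a page of dead, x86-flavored stubs with a _hypercallN macro family: up to five arguments are marshalled into r14-r18, the hypercall number goes in r2, break 0x1000 traps into Xen, and the result is read back from r8. (The clobber lists in _hypercall4/5 happen to name r17/r18 after r8; ordering inside a clobber list is immaterial to the compiler.) Defining a new wrapper is then a one-liner; a sketch with a made-up hypercall name:

    /* assumes __HYPERVISOR_example_op exists in the hypercall table */
    static inline int
    HYPERVISOR_example_op(unsigned int cmd, void *arg)
    {
    	/* expands to: r14=cmd, r15=arg, r2=__HYPERVISOR_example_op,
    	 * break 0x1000, result returned in r8 */
    	return _hypercall2(int, example_op, cmd, arg);
    }
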
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/linux-2.6-xen-sparse/include/asm-ia64/meminit.h	Thu Mar 02 11:00:49 2006 +0100
     8.3 @@ -0,0 +1,67 @@
     8.4 +#ifndef meminit_h
     8.5 +#define meminit_h
     8.6 +
     8.7 +/*
     8.8 + * This file is subject to the terms and conditions of the GNU General Public
     8.9 + * License.  See the file "COPYING" in the main directory of this archive
    8.10 + * for more details.
    8.11 + */
    8.12 +
    8.13 +#include <linux/config.h>
    8.14 +
    8.15 +/*
    8.16 + * Entries defined so far:
    8.17 + * 	- boot param structure itself
    8.18 + * 	- memory map
    8.19 + * 	- initrd (optional)
    8.20 + * 	- command line string
    8.21 + * 	- kernel code & data
    8.22 + * 	- Kernel memory map built from EFI memory map
    8.23 + *	- xen start info
    8.24 + *
    8.25 + * More could be added if necessary
    8.26 + */
    8.27 +#ifndef CONFIG_XEN
    8.28 +#define IA64_MAX_RSVD_REGIONS 6
    8.29 +#else
    8.30 +#define IA64_MAX_RSVD_REGIONS 7
    8.31 +#endif
    8.32 +
    8.33 +struct rsvd_region {
    8.34 +	unsigned long start;	/* virtual address of beginning of element */
    8.35 +	unsigned long end;	/* virtual address of end of element + 1 */
    8.36 +};
    8.37 +
    8.38 +extern struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
    8.39 +extern int num_rsvd_regions;
    8.40 +
    8.41 +extern void find_memory (void);
    8.42 +extern void reserve_memory (void);
    8.43 +extern void find_initrd (void);
    8.44 +extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
    8.45 +extern void efi_memmap_init(unsigned long *, unsigned long *);
    8.46 +
    8.47 +/*
    8.48 + * For rounding an address to the next IA64_GRANULE_SIZE or order
    8.49 + */
    8.50 +#define GRANULEROUNDDOWN(n)	((n) & ~(IA64_GRANULE_SIZE-1))
    8.51 +#define GRANULEROUNDUP(n)	(((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
    8.52 +#define ORDERROUNDDOWN(n)	((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))
    8.53 +
    8.54 +#ifdef CONFIG_NUMA
    8.55 +  extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
    8.56 +#else
    8.57 +# define call_pernode_memory(start, len, func)	(*func)(start, len, 0)
    8.58 +#endif
    8.59 +
    8.60 +#define IGNORE_PFN0	1	/* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
    8.61 +
    8.62 +#ifdef CONFIG_VIRTUAL_MEM_MAP
    8.63 +# define LARGE_GAP	0x40000000 /* Use virtual mem map if hole is > than this */
    8.64 +  extern unsigned long vmalloc_end;
    8.65 +  extern struct page *vmem_map;
    8.66 +  extern int find_largest_hole (u64 start, u64 end, void *arg);
    8.67 +  extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
    8.68 +#endif
    8.69 +
    8.70 +#endif /* meminit_h */
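
This header is newly forked into the sparse tree so that IA64_MAX_RSVD_REGIONS can grow by one under CONFIG_XEN for the start-info page reserved in setup.c above. The granule-rounding macros are carried over from Linux unchanged; a worked example, assuming the usual 16MB granule (IA64_GRANULE_SIZE == 1UL << 24):

    GRANULEROUNDDOWN(0x1234567);	/* -> 0x1000000 */
    GRANULEROUNDUP(0x1234567);		/* -> 0x2000000 */
    GRANULEROUNDUP(0x2000000);		/* -> 0x2000000, already aligned */
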
     9.1 --- a/xen/arch/ia64/asm-xsi-offsets.c	Thu Mar 02 10:59:34 2006 +0100
     9.2 +++ b/xen/arch/ia64/asm-xsi-offsets.c	Thu Mar 02 11:00:49 2006 +0100
     9.3 @@ -84,8 +84,6 @@ void foo(void)
     9.4  	DEFINE(XSI_PEND, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pending_interruption)));
     9.5  	DEFINE(XSI_INCOMPL_REGFR_OFS, offsetof(mapped_regs_t, incomplete_regframe));
     9.6  	DEFINE(XSI_INCOMPL_REGFR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, incomplete_regframe)));
     9.7 -	DEFINE(XSI_DELIV_MASK0_OFS, offsetof(mapped_regs_t, delivery_mask[0]));
     9.8 -	DEFINE(XSI_DELIV_MASK0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, delivery_mask[0])));
     9.9  	DEFINE(XSI_METAPHYS_OFS, offsetof(mapped_regs_t, metaphysical_mode));
    9.10  	DEFINE(XSI_METAPHYS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, metaphysical_mode)));
    9.11  
    10.1 --- a/xen/arch/ia64/linux-xen/README.origin	Thu Mar 02 10:59:34 2006 +0100
    10.2 +++ b/xen/arch/ia64/linux-xen/README.origin	Thu Mar 02 11:00:49 2006 +0100
    10.3 @@ -5,19 +5,24 @@
    10.4  # (e.g. with #ifdef XEN or XEN in a comment) so that they can be
    10.5  # easily updated to future versions of the corresponding Linux files.
    10.6  
    10.7 -efi.c		-> linux/arch/ia64/kernel/efi.c
    10.8 -entry.h		-> linux/arch/ia64/kernel/entry.h
    10.9 -entry.S		-> linux/arch/ia64/kernel/entry.S
   10.10 -hpsim_ssc.h	-> linux/arch/ia64/hp/sim/hpsim_ssc.h
   10.11 -irq_ia64.c	-> linux/arch/ia64/kernel/irq_ia64.c
   10.12 -minstate.h	-> linux/arch/ia64/kernel/minstate.h
   10.13 -mm_contig.c	-> linux/arch/ia64/mm/contig.c
   10.14 -pal.S		-> linux/arch/ia64/kernel/pal.S
   10.15 -sal.c		-> linux/arch/ia64/kernel/sal.c
   10.16 -setup.c		-> linux/arch/ia64/kernel/setup.c
   10.17 -smp.c		-> linux/arch/ia64/kernel/smp.c
   10.18 -smpboot.c	-> linux/arch/ia64/kernel/smpboot.c
   10.19 -sort.c		-> linux/lib/sort.c
   10.20 -time.c		-> linux/arch/ia64/kernel/time.c
   10.21 -tlb.c		-> linux/arch/ia64/mm/tlb.c
   10.22 -unaligned.c	-> linux/arch/ia64/kernel/unaligned.c
   10.23 +efi.c			-> linux/arch/ia64/kernel/efi.c
   10.24 +entry.h			-> linux/arch/ia64/kernel/entry.h
   10.25 +entry.S			-> linux/arch/ia64/kernel/entry.S
   10.26 +head.S			-> linux/arch/ia64/kernel/head.S
   10.27 +hpsim_ssc.h		-> linux/arch/ia64/hp/sim/hpsim_ssc.h
   10.28 +irq_ia64.c		-> linux/arch/ia64/kernel/irq_ia64.c
   10.29 +minstate.h		-> linux/arch/ia64/kernel/minstate.h
   10.30 +mm_contig.c		-> linux/arch/ia64/mm/contig.c
   10.31 +pal.S			-> linux/arch/ia64/kernel/pal.S
   10.32 +process-linux-xen.c	-> linux/arch/ia64/kernel/process.c
   10.33 +sal.c			-> linux/arch/ia64/kernel/sal.c
   10.34 +setup.c			-> linux/arch/ia64/kernel/setup.c
   10.35 +smp.c			-> linux/arch/ia64/kernel/smp.c
   10.36 +smpboot.c		-> linux/arch/ia64/kernel/smpboot.c
   10.37 +sort.c			-> linux/lib/sort.c
   10.38 +time.c			-> linux/arch/ia64/kernel/time.c
   10.39 +tlb.c			-> linux/arch/ia64/mm/tlb.c
   10.40 +unaligned.c		-> linux/arch/ia64/kernel/unaligned.c
   10.41 +unwind.c		-> linux/arch/ia64/kernel/unwind.c
   10.42 +unwind_decoder.c	-> linux/arch/ia64/kernel/unwind_decoder.c
   10.43 +unwind_i.h		-> linux/arch/ia64/kernel/unwind_i.h
    11.1 --- a/xen/arch/ia64/linux-xen/efi.c	Thu Mar 02 10:59:34 2006 +0100
    11.2 +++ b/xen/arch/ia64/linux-xen/efi.c	Thu Mar 02 11:00:49 2006 +0100
    11.3 @@ -534,32 +534,9 @@ efi_map_pal_code (void)
    11.4  {
    11.5  #ifdef XEN
    11.6  	u64 psr;
    11.7 -	static unsigned long last_rr7 = 0;
    11.8 -	unsigned long current_rr7 = ia64_get_rr(7L<<61);
    11.9 -
   11.10 -	// this routine is called only once in Linux but may be called
   11.11 -	// multiple times in Xen.  However, we only need to flush and
   11.12 -	// reset itr[IA64_TR_PALCODE] if rr7 changes
   11.13  	if (!pal_vaddr) {
   11.14  		pal_vaddr = efi_get_pal_addr ();
   11.15 -		last_rr7 = current_rr7;
   11.16  	}
   11.17 -	else if (last_rr7 == current_rr7) return;
   11.18 -	else {
   11.19 -		last_rr7 = current_rr7;
   11.20 -		printk("efi_map_pal_code,remapping pal w/rr7=%lx\n",last_rr7);
   11.21 -	}
   11.22 -
   11.23 -	printf("efi_map_pal_code: about to ia64_ptr(%d,%p,%p)\n",
   11.24 -		0x1, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
   11.25 -		 IA64_GRANULE_SHIFT);
   11.26 -	ia64_ptr(0x1, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
   11.27 -		 IA64_GRANULE_SHIFT);
   11.28 -	ia64_srlz_i();
   11.29 -	printf("efi_map_pal_code: about to ia64_itr(%p,%p,%p,%p,%p)\n",
   11.30 -		0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
   11.31 -		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
   11.32 -		 IA64_GRANULE_SHIFT);
   11.33  #else
   11.34  	void *pal_vaddr = efi_get_pal_addr ();
   11.35  	u64 psr;
    12.1 --- a/xen/arch/ia64/linux-xen/irq_ia64.c	Thu Mar 02 10:59:34 2006 +0100
    12.2 +++ b/xen/arch/ia64/linux-xen/irq_ia64.c	Thu Mar 02 11:00:49 2006 +0100
    12.3 @@ -169,7 +169,7 @@ ia64_handle_irq (ia64_vector vector, str
    12.4  	 * handler needs to be able to wait for further keyboard interrupts, which can't
    12.5  	 * come through until ia64_eoi() has been done.
    12.6  	 */
    12.7 -	xen_irq_exit(regs);
    12.8 +	irq_exit();
    12.9  }
   12.10  
   12.11  #ifdef CONFIG_HOTPLUG_CPU
    13.1 --- a/xen/arch/ia64/linux-xen/minstate.h	Thu Mar 02 10:59:34 2006 +0100
    13.2 +++ b/xen/arch/ia64/linux-xen/minstate.h	Thu Mar 02 11:00:49 2006 +0100
    13.3 @@ -175,7 +175,7 @@
    13.4  	;;											\
    13.5  .mem.offset 0,0; st8.spill [r16]=r13,16;							\
    13.6  .mem.offset 8,0; st8.spill [r17]=r21,16;	/* save ar.fpsr */				\
    13.7 -	/* XEN mov r13=IA64_KR(CURRENT);	/* establish `current' */				\
    13.8 +	/* XEN mov r13=IA64_KR(CURRENT);*/	/* establish `current' */				\
    13.9  	MINSTATE_GET_CURRENT(r13);		/* XEN establish `current' */				\
   13.10  	;;											\
   13.11  .mem.offset 0,0; st8.spill [r16]=r15,16;							\
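
The minstate.h hunk fixes a comment-nesting hazard: C block comments do not nest, so the original line was a single comment running to the trailing */ with a stray /* inside it, which gcc's -Wcomment flags and which silently changes meaning if anyone later closes the inner comment. Before and after:

    /* XEN mov r13=IA64_KR(CURRENT);	/* establish `current' */	<- one comment, stray opener inside
    /* XEN mov r13=IA64_KR(CURRENT);*/	/* establish `current' */	<- two properly closed comments
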
    14.1 --- a/xen/arch/ia64/linux-xen/setup.c	Thu Mar 02 10:59:34 2006 +0100
    14.2 +++ b/xen/arch/ia64/linux-xen/setup.c	Thu Mar 02 11:00:49 2006 +0100
    14.3 @@ -94,6 +94,10 @@ struct io_space io_space[MAX_IO_SPACES];
    14.4  EXPORT_SYMBOL(io_space);
    14.5  unsigned int num_io_spaces;
    14.6  
    14.7 +#ifdef XEN
    14.8 +extern void early_cmdline_parse(char **);
    14.9 +#endif
   14.10 +
   14.11  /*
   14.12   * "flush_icache_range()" needs to know what processor dependent stride size to use
   14.13   * when it makes i-cache(s) coherent with d-caches.
   14.14 @@ -500,6 +504,7 @@ late_setup_arch (char **cmdline_p)
   14.15  	paging_init();
   14.16  }
   14.17  
   14.18 +#ifndef XEN
   14.19  /*
   14.20   * Display cpu info for all cpu's.
   14.21   */
   14.22 @@ -611,14 +616,13 @@ c_stop (struct seq_file *m, void *v)
   14.23  {
   14.24  }
   14.25  
   14.26 -#ifndef XEN
   14.27  struct seq_operations cpuinfo_op = {
   14.28  	.start =	c_start,
   14.29  	.next =		c_next,
   14.30  	.stop =		c_stop,
   14.31  	.show =		show_cpuinfo
   14.32  };
   14.33 -#endif
   14.34 +#endif /* XEN */
   14.35  
   14.36  void
   14.37  identify_cpu (struct cpuinfo_ia64 *c)
    15.1 --- a/xen/arch/ia64/linux-xen/unaligned.c	Thu Mar 02 10:59:34 2006 +0100
    15.2 +++ b/xen/arch/ia64/linux-xen/unaligned.c	Thu Mar 02 11:00:49 2006 +0100
    15.3 @@ -216,6 +216,7 @@ static u16 gr_info[32]={
    15.4  	RPT(r28), RPT(r29), RPT(r30), RPT(r31)
    15.5  };
    15.6  
    15.7 +#ifndef XEN
    15.8  static u16 fr_info[32]={
    15.9  	0,			/* constant : WE SHOULD NEVER GET THIS */
   15.10  	0,			/* constant : WE SHOULD NEVER GET THIS */
   15.11 @@ -285,6 +286,7 @@ invala_fr (int regno)
   15.12  	}
   15.13  #	undef F
   15.14  }
   15.15 +#endif /* XEN */
   15.16  
   15.17  static inline unsigned long
   15.18  rotate_reg (unsigned long sor, unsigned long rrb, unsigned long reg)
   15.19 @@ -299,12 +301,11 @@ rotate_reg (unsigned long sor, unsigned 
   15.20  void
   15.21  set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
   15.22  {
   15.23 -	struct switch_stack *sw = (struct switch_stack *) regs - 1;
   15.24 -	unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
   15.25 +	unsigned long *bsp, *bspstore, *addr, *rnat_addr;
   15.26  	unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
   15.27 -	unsigned long rnats, nat_mask;
   15.28 +	unsigned long nat_mask;
   15.29      unsigned long old_rsc,new_rsc;
   15.30 -	unsigned long on_kbs,rnat;
   15.31 +	unsigned long rnat;
   15.32  	long sof = (regs->cr_ifs) & 0x7f;
   15.33  	long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
   15.34  	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
   15.35 @@ -323,7 +324,7 @@ set_rse_reg (struct pt_regs *regs, unsig
   15.36      new_rsc=old_rsc&(~0x3);
   15.37      ia64_set_rsc(new_rsc);
   15.38  
   15.39 -    bspstore = ia64_get_bspstore();
   15.40 +    bspstore = (unsigned long*)ia64_get_bspstore();
   15.41      bsp =kbs + (regs->loadrs >> 19);//16+3
   15.42  
   15.43  	addr = ia64_rse_skip_regs(bsp, -sof + ridx);
   15.44 @@ -335,7 +336,7 @@ set_rse_reg (struct pt_regs *regs, unsig
   15.45          ia64_flushrs ();
   15.46          ia64_mf ();
   15.47  		*addr = val;
   15.48 -        bspstore = ia64_get_bspstore();
   15.49 +        bspstore = (unsigned long*)ia64_get_bspstore();
   15.50      	rnat = ia64_get_rnat ();
   15.51          if(bspstore < rnat_addr){
   15.52              rnat=rnat&(~nat_mask);
   15.53 @@ -362,13 +363,11 @@ set_rse_reg (struct pt_regs *regs, unsig
   15.54  
   15.55  
   15.56  static void
   15.57 -get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigned long *nat)
   15.58 +get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, int *nat)
   15.59  {
   15.60 -    struct switch_stack *sw = (struct switch_stack *) regs - 1;
   15.61 -    unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
   15.62 +    unsigned long *bsp, *addr, *rnat_addr, *bspstore;
   15.63      unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
   15.64 -    unsigned long rnats, nat_mask;
   15.65 -    unsigned long on_kbs;
   15.66 +    unsigned long nat_mask;
   15.67      unsigned long old_rsc, new_rsc;
   15.68      long sof = (regs->cr_ifs) & 0x7f;
   15.69      long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
   15.70 @@ -388,7 +387,7 @@ get_rse_reg (struct pt_regs *regs, unsig
   15.71      new_rsc=old_rsc&(~(0x3));
   15.72      ia64_set_rsc(new_rsc);
   15.73  
   15.74 -    bspstore = ia64_get_bspstore();
   15.75 +    bspstore = (unsigned long*)ia64_get_bspstore();
   15.76      bsp =kbs + (regs->loadrs >> 19); //16+3;
   15.77  
   15.78      addr = ia64_rse_skip_regs(bsp, -sof + ridx);
   15.79 @@ -399,14 +398,14 @@ get_rse_reg (struct pt_regs *regs, unsig
   15.80  
   15.81          ia64_flushrs ();
   15.82          ia64_mf ();
   15.83 -        bspstore = ia64_get_bspstore();
   15.84 +        bspstore = (unsigned long*)ia64_get_bspstore();
   15.85      }
   15.86      *val=*addr;
   15.87      if(nat){
   15.88          if(bspstore < rnat_addr){
   15.89 -            *nat=!!(ia64_get_rnat()&nat_mask);
   15.90 +            *nat=(int)!!(ia64_get_rnat()&nat_mask);
   15.91          }else{
   15.92 -            *nat = !!((*rnat_addr)&nat_mask);
   15.93 +            *nat = (int)!!((*rnat_addr)&nat_mask);
   15.94          }
   15.95          ia64_set_rsc(old_rsc);
   15.96      }
   15.97 @@ -634,6 +633,7 @@ fph_index (struct pt_regs *regs, long re
   15.98  	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
   15.99  }
  15.100  
  15.101 +#ifndef XEN
  15.102  static void
  15.103  setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
  15.104  {
  15.105 @@ -682,6 +682,7 @@ setfpreg (unsigned long regnum, struct i
  15.106  		regs->cr_ipsr |= IA64_PSR_MFL;
  15.107  	}
  15.108  }
  15.109 +#endif /* XEN */
  15.110  
  15.111  /*
  15.112   * Those 2 inline functions generate the spilled versions of the constant floating point
  15.113 @@ -699,6 +700,7 @@ float_spill_f1 (struct ia64_fpreg *final
  15.114  	ia64_stf_spill(final, 1);
  15.115  }
  15.116  
  15.117 +#ifndef XEN
  15.118  static void
  15.119  getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
  15.120  {
  15.121 @@ -748,6 +750,7 @@ getfpreg (unsigned long regnum, struct i
  15.122  		}
  15.123  	}
  15.124  }
  15.125 +#endif /* XEN */
  15.126  
  15.127  
  15.128  #ifdef XEN
  15.129 @@ -803,6 +806,7 @@ getreg (unsigned long regnum, unsigned l
  15.130  		*nat  = (*unat >> (addr >> 3 & 0x3f)) & 0x1UL;
  15.131  }
  15.132  
  15.133 +#ifndef XEN
  15.134  static void
  15.135  emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsigned long ifa)
  15.136  {
  15.137 @@ -1078,6 +1082,7 @@ emulate_store_int (unsigned long ifa, lo
  15.138  
  15.139  	return 0;
  15.140  }
  15.141 +#endif /* XEN */
  15.142  
  15.143  /*
  15.144   * floating point operations sizes in bytes
  15.145 @@ -1153,6 +1158,7 @@ float2mem_double (struct ia64_fpreg *ini
  15.146  	ia64_stfd(final, 6);
  15.147  }
  15.148  
  15.149 +#ifndef XEN
  15.150  static int
  15.151  emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
  15.152  {
  15.153 @@ -1437,6 +1443,7 @@ within_logging_rate_limit (void)
  15.154  	return 0;
  15.155  
  15.156  }
  15.157 +#endif /* XEN */
  15.158  
  15.159  void
  15.160  ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
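
Most of the unaligned.c churn above fences off the floating-point emulation paths Xen does not use (#ifndef XEN) and tidies get/set_rse_reg: unused locals go away, ia64_get_bspstore() results are cast explicitly, and the nat out-parameter becomes int* to match its callers. One line worth decoding is bsp = kbs + (regs->loadrs >> 19), hinted at by the "16+3" comment; a sketch of the arithmetic, as I read the code:

    /* pt_regs->loadrs holds the RSC.loadrs byte count pre-shifted left
     * by 16, exactly as it sits in ar.rsc; each stacked-register slot
     * is 8 bytes, so dividing by 8 adds another 3 to the shift: */
    unsigned long *kbs = (void *)current + IA64_RBS_OFFSET;	/* kernel backing store */
    unsigned long *bsp = kbs + (regs->loadrs >> 19);		/* (loadrs >> 16) / 8 slots */
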
    16.1 --- a/xen/arch/ia64/linux-xen/unwind.c	Thu Mar 02 10:59:34 2006 +0100
    16.2 +++ b/xen/arch/ia64/linux-xen/unwind.c	Thu Mar 02 11:00:49 2006 +0100
    16.3 @@ -484,7 +484,8 @@ unw_access_fr (struct unw_frame_info *in
    16.4  	} else if (regnum <= 15) {
    16.5  		if (regnum <= 11) {
    16.6  			pt = get_scratch_regs(info);
    16.7 -			addr = &pt->f6  + (regnum - 6);
    16.8 +			//XXX struct ia64_fpreg and struct pt_fpreg are the same.
    16.9 +			addr = (struct ia64_fpreg*)(&pt->f6  + (regnum - 6));
   16.10  		}
   16.11  		else
   16.12  			addr = &info->sw->f12 + (regnum - 12);
    17.1 --- a/xen/arch/ia64/linux/README.origin	Thu Mar 02 10:59:34 2006 +0100
    17.2 +++ b/xen/arch/ia64/linux/README.origin	Thu Mar 02 11:00:49 2006 +0100
    17.3 @@ -5,14 +5,15 @@ needs to be changed, move it to ../linux
    17.4  the instructions in the README there.
    17.5  
    17.6  cmdline.c		-> linux/lib/cmdline.c
    17.7 -efi_stub.S		-> linux/arch/ia64/efi_stub.S
    17.8 +efi_stub.S		-> linux/arch/ia64/kernel/efi_stub.S
    17.9  extable.c		-> linux/arch/ia64/mm/extable.c
   17.10  hpsim.S			-> linux/arch/ia64/hp/sim/hpsim.S
   17.11  ia64_ksyms.c		-> linux/arch/ia64/kernel/ia64_ksyms.c
   17.12 +irq_lsapic.c		-> linux/arch/ia64/kernel/irq_lsapic.c
   17.13  linuxextable.c		-> linux/kernel/extable.c
   17.14  machvec.c		-> linux/arch/ia64/kernel/machvec.c
   17.15  patch.c			-> linux/arch/ia64/kernel/patch.c
   17.16 -pcdp.h			-> drivers/firmware/pcdp.h
   17.17 +pcdp.h			-> linux/drivers/firmware/pcdp.h
   17.18  
   17.19  bitop.c			-> linux/arch/ia64/lib/bitop.c
   17.20  clear_page.S		-> linux/arch/ia64/lib/clear_page.S
    18.1 --- a/xen/arch/ia64/linux/cmdline.c	Thu Mar 02 10:59:34 2006 +0100
    18.2 +++ b/xen/arch/ia64/linux/cmdline.c	Thu Mar 02 11:00:49 2006 +0100
    18.3 @@ -15,6 +15,7 @@
    18.4  #include <linux/module.h>
    18.5  #include <linux/kernel.h>
    18.6  #include <linux/string.h>
    18.7 +#include <xen/lib.h>
    18.8  
    18.9  
   18.10  /**
    19.1 --- a/xen/arch/ia64/linux/linuxextable.c	Thu Mar 02 10:59:34 2006 +0100
    19.2 +++ b/xen/arch/ia64/linux/linuxextable.c	Thu Mar 02 11:00:49 2006 +0100
    19.3 @@ -20,6 +20,10 @@
    19.4  #include <asm/uaccess.h>
    19.5  #include <asm/sections.h>
    19.6  
    19.7 +extern void *search_module_extables(unsigned long addr);
    19.8 +extern void *__module_text_address(unsigned long addr);
    19.9 +extern void *module_text_address(unsigned long addr);
   19.10 +
   19.11  extern struct exception_table_entry __start___ex_table[];
   19.12  extern struct exception_table_entry __stop___ex_table[];
   19.13  
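
The three extern prototypes added above exist because this file is built in the Xen tree, where linux/module.h is not available to declare them; presumably, without them gcc would fall back to implicit declarations, which assume an int return and would truncate these 64-bit pointer results on ia64. Sketch of the failure mode being avoided:

    /* with no prototype in scope, C89 implicit declaration kicks in: */
    void *p = __module_text_address(addr);	/* assumed to return int ->
    						 * pointer truncated on LP64 */
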
    20.1 --- a/xen/arch/ia64/vmx/mm.c	Thu Mar 02 10:59:34 2006 +0100
    20.2 +++ b/xen/arch/ia64/vmx/mm.c	Thu Mar 02 11:00:49 2006 +0100
    20.3 @@ -106,17 +106,18 @@ int vmx_do_mmu_update(mmu_update_t *ureq
    20.4      u64 mfn, gpfn;
    20.5      VCPU *vcpu;
    20.6      mmu_update_t req;
    20.7 -    ia64_rr rr;
    20.8 +    /* ia64_rr rr; */
    20.9      thash_cb_t *hcb;
   20.10 -    thash_data_t entry={0},*ovl;
   20.11 +    /* thash_data_t entry={0},*ovl; */
   20.12      vcpu = current;
   20.13 -    search_section_t sections;
   20.14 +    /* search_section_t sections; */
   20.15      hcb = vmx_vcpu_get_vtlb(vcpu);
   20.16      for ( i = 0; i < count; i++ )
   20.17      {
   20.18          copy_from_user(&req, ureqs, sizeof(req));
   20.19          cmd = req.ptr&3;
   20.20          req.ptr &= ~3;
   20.21 +/*
   20.22          if(cmd ==MMU_NORMAL_PT_UPDATE){
   20.23              entry.page_flags = req.val;
   20.24              entry.locked = 1;
   20.25 @@ -133,10 +134,12 @@ int vmx_do_mmu_update(mmu_update_t *ureq
   20.26              if (ovl) {
   20.27                    // generate MCA.
   20.28                  panic("Tlb conflict!!");
   20.29 -                return;
   20.30 +                return -1;
   20.31              }
   20.32 -            thash_purge_and_insert(hcb, &entry);
   20.33 -        }else if(cmd == MMU_MACHPHYS_UPDATE){
   20.34 +            thash_purge_and_insert(hcb, &entry, req.ptr);
   20.35 +        }else
   20.36 + */
   20.37 +        if(cmd == MMU_MACHPHYS_UPDATE){
   20.38              mfn = req.ptr >>PAGE_SHIFT;
   20.39              gpfn = req.val;
   20.40              set_machinetophys(mfn,gpfn);
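
In vmx_do_mmu_update() the MMU_NORMAL_PT_UPDATE branch is fenced off inside a comment block rather than deleted, since its thash interfaces changed (see the vmmu.c hunks below); the commented-out code is still kept correct-if-restored, with return; fixed to return -1; for the int-returning function and thash_purge_and_insert() given its new third argument. Only the machine-to-physical update still executes; a sketch of the surviving loop body:

    copy_from_user(&req, ureqs, sizeof(req));
    cmd = req.ptr & 3;	/* command is encoded in the low bits of ptr */
    req.ptr &= ~3;
    if (cmd == MMU_MACHPHYS_UPDATE)
    	set_machinetophys(req.ptr >> PAGE_SHIFT, req.val);
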
    21.1 --- a/xen/arch/ia64/vmx/mmio.c	Thu Mar 02 10:59:34 2006 +0100
    21.2 +++ b/xen/arch/ia64/vmx/mmio.c	Thu Mar 02 11:00:49 2006 +0100
    21.3 @@ -32,6 +32,7 @@
    21.4  #include <public/hvm/ioreq.h>
    21.5  #include <asm/mm.h>
    21.6  #include <asm/vmx.h>
    21.7 +#include <public/event_channel.h>
    21.8  
    21.9  /*
   21.10  struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
   21.11 @@ -135,7 +136,6 @@ static void low_mmio_access(VCPU *vcpu, 
   21.12      struct vcpu *v = current;
   21.13      vcpu_iodata_t *vio;
   21.14      ioreq_t *p;
   21.15 -    unsigned long addr;
   21.16  
   21.17      vio = get_vio(v->domain, v->vcpu_id);
   21.18      if (vio == 0) {
   21.19 @@ -168,7 +168,6 @@ static void legacy_io_access(VCPU *vcpu,
   21.20      struct vcpu *v = current;
   21.21      vcpu_iodata_t *vio;
   21.22      ioreq_t *p;
   21.23 -    unsigned long addr;
   21.24  
   21.25      vio = get_vio(v->domain, v->vcpu_id);
   21.26      if (vio == 0) {
   21.27 @@ -406,7 +405,7 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
   21.28  {
   21.29      REGS *regs;
   21.30      IA64_BUNDLE bundle;
   21.31 -    int slot, dir, inst_type;
   21.32 +    int slot, dir=0, inst_type;
   21.33      size_t size;
   21.34      u64 data, value,post_update, slot1a, slot1b, temp;
   21.35      INST64 inst;
    22.1 --- a/xen/arch/ia64/vmx/pal_emul.c	Thu Mar 02 10:59:34 2006 +0100
    22.2 +++ b/xen/arch/ia64/vmx/pal_emul.c	Thu Mar 02 11:00:49 2006 +0100
    22.3 @@ -19,6 +19,7 @@
    22.4   */
    22.5  
    22.6  #include <asm/vmx_vcpu.h>
    22.7 +#include <asm/pal.h>
    22.8  
    22.9  static void
   22.10  get_pal_parameters (VCPU *vcpu, UINT64 *gr29,
   22.11 @@ -180,10 +181,18 @@ pal_fixed_addr(VCPU *vcpu){
   22.12  
   22.13  static struct ia64_pal_retval
   22.14  pal_freq_base(VCPU *vcpu){
   22.15 +    struct ia64_pal_retval result;
   22.16 +
   22.17 +    PAL_CALL(result,PAL_FREQ_BASE, 0, 0, 0);
   22.18 +    return result;
   22.19  }
   22.20  
   22.21  static struct ia64_pal_retval
   22.22  pal_freq_ratios(VCPU *vcpu){
   22.23 +    struct ia64_pal_retval result;
   22.24 +
   22.25 +    PAL_CALL(result,PAL_FREQ_RATIOS, 0, 0, 0);
   22.26 +    return result;
   22.27  }
   22.28  
   22.29  static struct ia64_pal_retval
   22.30 @@ -229,7 +238,6 @@ pal_vm_info(VCPU *vcpu){
   22.31  static struct ia64_pal_retval
   22.32  pal_vm_page_size(VCPU *vcpu){
   22.33  }
   22.34 -
   22.35  void
   22.36  pal_emul( VCPU *vcpu) {
   22.37  	UINT64 gr28;
   22.38 @@ -266,11 +274,19 @@ pal_emul( VCPU *vcpu) {
   22.39  		case PAL_CACHE_WRITE:
   22.40  			result = pal_cache_write (vcpu);
   22.41  			break;
   22.42 -			
   22.43 +
   22.44  		case PAL_PLATFORM_ADDR:
   22.45  			result = pal_platform_addr (vcpu);
   22.46  			break;
   22.47  
   22.48 +		case PAL_FREQ_RATIOS:
   22.49 +			result = pal_freq_ratios (vcpu);
   22.50 +			break;
   22.51 +
   22.52 +		case PAL_FREQ_BASE:
   22.53 +			result = pal_freq_base (vcpu);
   22.54 +			break;
   22.55 +
   22.56  		default:
   22.57  			panic("pal_emul(): guest call unsupported pal" );
   22.58    }
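
pal_freq_base() and pal_freq_ratios() previously had empty bodies, so a struct-returning function fell off its end (undefined behavior) whenever a guest issued those PAL calls; they now forward the query to real firmware, and pal_emul() gains the two dispatch cases to reach them. The forwarding shape, with a made-up PAL index:

    static struct ia64_pal_retval
    pal_example(VCPU *vcpu) {
    	struct ia64_pal_retval result;

    	/* frequency-style queries can be passed straight through to
    	 * physical PAL; nothing needs virtualizing */
    	PAL_CALL(result, PAL_EXAMPLE_QUERY, 0, 0, 0);
    	return result;
    }
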
    23.1 --- a/xen/arch/ia64/vmx/vlsapic.c	Thu Mar 02 10:59:34 2006 +0100
    23.2 +++ b/xen/arch/ia64/vmx/vlsapic.c	Thu Mar 02 11:00:49 2006 +0100
    23.3 @@ -47,6 +47,9 @@
    23.4  /*
    23.5   * Update the checked last_itc.
    23.6   */
    23.7 +
    23.8 +extern void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
    23.9 +     UINT64 vector,REGS *regs);
   23.10  static void update_last_itc(vtime_t *vtm, uint64_t cur_itc)
   23.11  {
   23.12      vtm->last_itc = cur_itc;
   23.13 @@ -483,7 +486,7 @@ int vmx_vcpu_pend_interrupt(VCPU *vcpu, 
   23.14  
   23.15      if (vector & ~0xff) {
   23.16          DPRINTK("vmx_vcpu_pend_interrupt: bad vector\n");
   23.17 -        return;
   23.18 +        return -1;
   23.19      }
   23.20      local_irq_save(spsr);
   23.21      ret = test_and_set_bit(vector, &VCPU(vcpu, irr[0]));
   23.22 @@ -572,12 +575,13 @@ void guest_write_eoi(VCPU *vcpu)
   23.23      VLSAPIC_INSVC(vcpu,vec>>6) &= ~(1UL <<(vec&63));
   23.24      local_irq_restore(spsr);
   23.25      VCPU(vcpu, eoi)=0;    // overwrite the data
   23.26 -    vmx_check_pending_irq(vcpu);
   23.27 +    vcpu->arch.irq_new_pending=1;
   23.28 +//    vmx_check_pending_irq(vcpu);
   23.29  }
   23.30  
   23.31  uint64_t guest_read_vivr(VCPU *vcpu)
   23.32  {
   23.33 -    int vec, next, h_inservice;
   23.34 +    int vec, h_inservice;
   23.35      uint64_t  spsr;
   23.36  
   23.37      local_irq_save(spsr);
   23.38 @@ -609,7 +613,7 @@ static void generate_exirq(VCPU *vcpu)
   23.39      vmx_reflect_interruption(0,isr,0, 12, regs); // EXT IRQ
   23.40  }
   23.41  
   23.42 -vhpi_detection(VCPU *vcpu)
   23.43 +void vhpi_detection(VCPU *vcpu)
   23.44  {
   23.45      uint64_t    threshold,vhpi;
   23.46      tpr_t       vtpr;
   23.47 @@ -626,7 +630,7 @@ vhpi_detection(VCPU *vcpu)
   23.48      }
   23.49  }
   23.50  
   23.51 -vmx_vexirq(VCPU *vcpu)
   23.52 +void vmx_vexirq(VCPU *vcpu)
   23.53  {
   23.54      static  uint64_t  vexirq_count=0;
   23.55  
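
Three of the vlsapic.c changes are pure C hygiene: a bare return; in the int-returning vmx_vcpu_pend_interrupt() becomes return -1;, and vhpi_detection()/vmx_vexirq() get explicit void return types instead of relying on implicit int. The guest_write_eoi() change is behavioral: rather than rescanning for pending interrupts synchronously inside the EOI write, it sets a flag that is presumably acted on later, on the next exit-to-guest path. Sketch of the deferred-check pattern:

    VCPU(vcpu, eoi) = 0;		/* acknowledge: clear the EOI register */
    vcpu->arch.irq_new_pending = 1;	/* defer vmx_check_pending_irq() to a
    					 * later, safer point instead of
    					 * recursing into injection here */
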
    24.1 --- a/xen/arch/ia64/vmx/vmmu.c	Thu Mar 02 10:59:34 2006 +0100
    24.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Thu Mar 02 11:00:49 2006 +0100
    24.3 @@ -31,39 +31,26 @@
    24.4  #include <asm/hw_irq.h>
    24.5  #include <asm/vmx_pal_vsa.h>
    24.6  #include <asm/kregs.h>
    24.7 -
    24.8 -/*
    24.9 - * Architecture ppn is in 4KB unit while XEN
   24.10 - * page may be different(1<<PAGE_SHIFT).
   24.11 - */
   24.12 -static inline u64 arch_ppn_to_xen_ppn(u64 appn)
   24.13 -{
   24.14 -    return (appn << ARCH_PAGE_SHIFT) >> PAGE_SHIFT;
   24.15 -}
   24.16 -
   24.17 -static inline u64 xen_ppn_to_arch_ppn(u64 xppn)
   24.18 -{
   24.19 -    return (xppn << PAGE_SHIFT) >> ARCH_PAGE_SHIFT;
   24.20 -}
   24.21 -
   24.22 +#include <xen/irq.h>
   24.23  
   24.24  /*
   24.25   * Get the machine page frame number in 16KB unit
   24.26   * Input:
   24.27   *  d: 
   24.28   */
   24.29 -u64 get_mfn(domid_t domid, u64 gpfn, u64 pages)
   24.30 +u64 get_mfn(struct domain *d, u64 gpfn)
   24.31  {
   24.32 -    struct domain *d;
   24.33 -    u64    i, xen_gppn, xen_mppn, mpfn;
   24.34 -    
   24.35 +//    struct domain *d;
   24.36 +    u64    xen_gppn, xen_mppn, mpfn;
   24.37 +/*
   24.38      if ( domid == DOMID_SELF ) {
   24.39          d = current->domain;
   24.40      }
   24.41      else {
   24.42          d = find_domain_by_id(domid);
   24.43      }
   24.44 -    xen_gppn = arch_ppn_to_xen_ppn(gpfn);
   24.45 + */
   24.46 +    xen_gppn = arch_to_xen_ppn(gpfn);
   24.47      xen_mppn = gmfn_to_mfn(d, xen_gppn);
   24.48  /*
   24.49      for (i=0; i<pages; i++) {
   24.50 @@ -72,8 +59,8 @@ u64 get_mfn(domid_t domid, u64 gpfn, u64
   24.51          }
   24.52      }
   24.53  */
   24.54 -    mpfn= xen_ppn_to_arch_ppn(xen_mppn);
   24.55 -    mpfn = mpfn | (((1UL <<(PAGE_SHIFT-12))-1)&gpfn);
   24.56 +    mpfn= xen_to_arch_ppn(xen_mppn);
   24.57 +    mpfn = mpfn | (((1UL <<(PAGE_SHIFT-ARCH_PAGE_SHIFT))-1)&gpfn);
   24.58      return mpfn;
   24.59      
   24.60  }
   24.61 @@ -141,66 +128,67 @@ purge_machine_tc_by_domid(domid_t domid)
   24.62  #endif
   24.63  }
   24.64  
   24.65 -static thash_cb_t *init_domain_vhpt(struct vcpu *d)
   24.66 +static thash_cb_t *init_domain_vhpt(struct vcpu *d, void *vbase, void *vcur)
   24.67  {
   24.68 -    struct page_info *page;
   24.69 -    void   *vbase,*vcur;
   24.70 -    vhpt_special *vs;
   24.71 +//    struct page_info *page;
   24.72      thash_cb_t  *vhpt;
   24.73      PTA pta_value;
   24.74 -    
   24.75 -    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0);
   24.76 +/*
   24.77 +    page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
   24.78      if ( page == NULL ) {
   24.79          panic("No enough contiguous memory for init_domain_mm\n");
   24.80      }
   24.81      vbase = page_to_virt(page);
   24.82      printk("Allocate domain vhpt at 0x%lx\n", (u64)vbase);
   24.83 -    memset(vbase, 0, VCPU_TLB_SIZE);
   24.84 -    vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
   24.85 +    memset(vbase, 0, VCPU_VHPT_SIZE);
   24.86 + */
   24.87 +//    vcur = (void*)((u64)vbase + VCPU_VHPT_SIZE);
   24.88      vcur -= sizeof (thash_cb_t);
   24.89      vhpt = vcur;
   24.90      vhpt->ht = THASH_VHPT;
   24.91      vhpt->vcpu = d;
   24.92 -    vhpt->hash_func = machine_thash;
   24.93 -    vcur -= sizeof (vhpt_special);
   24.94 -    vs = vcur;
   24.95 +//    vhpt->hash_func = machine_thash;
   24.96 +//    vcur -= sizeof (vhpt_special);
   24.97 +//    vs = vcur;
   24.98  
   24.99      /* Setup guest pta */
  24.100      pta_value.val = 0;
  24.101      pta_value.ve = 1;
  24.102      pta_value.vf = 1;
  24.103 -    pta_value.size = VCPU_TLB_SHIFT - 1;    /* 2M */
   24.103 +    pta_value.size = VCPU_VHPT_SHIFT - 1;    /* 16M */
  24.105      pta_value.base = ((u64)vbase) >> PTA_BASE_SHIFT;
  24.106      d->arch.arch_vmx.mpta = pta_value.val;
  24.107 -   
  24.108 -    vhpt->vs = vs;
  24.109 -    vhpt->vs->get_mfn = get_mfn;
  24.110 -    vhpt->vs->tag_func = machine_ttag;
  24.111 +
  24.112 +//    vhpt->vs = vs;
  24.113 +//    vhpt->vs->get_mfn = __gpfn_to_mfn_foreign;
  24.114 +//    vhpt->vs->tag_func = machine_ttag;
  24.115      vhpt->hash = vbase;
  24.116 -    vhpt->hash_sz = VCPU_TLB_SIZE/2;
  24.117 -    vhpt->cch_buf = (u64)vbase + vhpt->hash_sz;
  24.118 +    vhpt->hash_sz = VCPU_VHPT_SIZE/2;
  24.119 +    vhpt->cch_buf = (void *)(vbase + vhpt->hash_sz);
  24.120      vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf;
  24.121 -    vhpt->recycle_notifier = recycle_message;
  24.122 -    thash_init(vhpt,VCPU_TLB_SHIFT-1);
  24.123 +//    vhpt->recycle_notifier = recycle_message;
  24.124 +    thash_init(vhpt,VCPU_VHPT_SHIFT-1);
  24.125      return vhpt;
  24.126  }
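
One subtlety worth noting: pta.size is a log2 byte count, and it is set to
VCPU_VHPT_SHIFT - 1 because only the front half of the VHPT region is the hash
array (the back half is the collision-chain buffer, per hash_sz =
VCPU_VHPT_SIZE/2 above). A hedged sanity check, assuming VCPU_VHPT_SHIFT is 24
to match the "16M" comment:

    #include <assert.h>
    #define VCPU_VHPT_SHIFT 24UL                       /* assumed: 16MB region */
    #define VCPU_VHPT_SIZE  (1UL << VCPU_VHPT_SHIFT)

    int main(void)
    {
        /* The guest PTA advertises only the hash half of the region. */
        assert((1UL << (VCPU_VHPT_SHIFT - 1)) == VCPU_VHPT_SIZE / 2);
        return 0;
    }
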
  24.127  
  24.128  
  24.129 +
  24.130  thash_cb_t *init_domain_tlb(struct vcpu *d)
  24.131  {
  24.132      struct page_info *page;
  24.133 -    void    *vbase,*vcur;
  24.134 +    void    *vbase, *vhptbase, *vcur;
  24.135      tlb_special_t  *ts;
  24.136      thash_cb_t  *tlb;
  24.137      
  24.138 -    page = alloc_domheap_pages (NULL, VCPU_TLB_ORDER, 0);
  24.139 +    page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
  24.140      if ( page == NULL ) {
   24.141          panic("Not enough contiguous memory for init_domain_mm\n");
  24.142      }
  24.143 -    vbase = page_to_virt(page);
  24.144 -    printk("Allocate domain tlb at 0x%lx\n", (u64)vbase);
  24.145 -    memset(vbase, 0, VCPU_TLB_SIZE);
  24.146 -    vcur = (void*)((u64)vbase + VCPU_TLB_SIZE);
  24.147 +    vhptbase = page_to_virt(page);
  24.148 +    memset(vhptbase, 0, VCPU_VHPT_SIZE);
  24.149 +    printk("Allocate domain tlb&vhpt at 0x%lx\n", (u64)vhptbase);
   24.150 +    vbase = vhptbase + VCPU_VHPT_SIZE - VCPU_VTLB_SIZE;
  24.151 +    vcur = (void*)((u64)vbase + VCPU_VTLB_SIZE);
  24.152      vcur -= sizeof (thash_cb_t);
  24.153      tlb = vcur;
  24.154      tlb->ht = THASH_TLB;
  24.155 @@ -208,14 +196,14 @@ thash_cb_t *init_domain_tlb(struct vcpu 
  24.156      vcur -= sizeof (tlb_special_t);
  24.157      ts = vcur;
  24.158      tlb->ts = ts;
  24.159 -    tlb->ts->vhpt = init_domain_vhpt(d);
  24.160 -    tlb->hash_func = machine_thash;
  24.161 +    tlb->ts->vhpt = init_domain_vhpt(d,vhptbase,vbase);
  24.162 +//    tlb->hash_func = machine_thash;
  24.163      tlb->hash = vbase;
  24.164 -    tlb->hash_sz = VCPU_TLB_SIZE/2;
  24.165 -    tlb->cch_buf = (u64)vbase + tlb->hash_sz;
  24.166 +    tlb->hash_sz = VCPU_VTLB_SIZE/2;
  24.167 +    tlb->cch_buf = (void *)(vbase + tlb->hash_sz);
  24.168      tlb->cch_sz = (u64)vcur - (u64)tlb->cch_buf;
  24.169 -    tlb->recycle_notifier = recycle_message;
  24.170 -    thash_init(tlb,VCPU_TLB_SHIFT-1);
  24.171 +//    tlb->recycle_notifier = recycle_message;
  24.172 +    thash_init(tlb,VCPU_VTLB_SHIFT-1);
  24.173      return tlb;
  24.174  }
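
After this change the VHPT and the vTLB share a single physically contiguous
allocation of VCPU_VHPT_SIZE bytes instead of two separate ones; the vTLB is
carved out of the tail, and each table keeps half its region for the hash array
with the remainder (minus the control structures peeled off the top) used for
collision chains. A rough map of the arithmetic; proportions are illustrative:

    /*
     * vhptbase                    vbase = vhptbase + VCPU_VHPT_SIZE - VCPU_VTLB_SIZE
     * |                           |                                             top
     * v                           v                                               v
     * [ VHPT hash | VHPT cch | vhpt cb ][ vTLB hash | vTLB cch | ts | tlb cb ]
     *
     * In each init routine, vcur starts at the end of that table's region and
     * walks downward, allocating the thash_cb_t (and tlb_special_t) headers.
     */
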
  24.175  
  24.176 @@ -249,13 +237,14 @@ void machine_tlb_insert(struct vcpu *d, 
  24.177      u64     psr;
  24.178      thash_data_t    mtlb;
  24.179      unsigned int    cl = tlb->cl;
  24.180 -
  24.181 +    unsigned long mtlb_ppn;
  24.182      mtlb.ifa = tlb->vadr;
  24.183      mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
  24.184      //vmx_vcpu_get_rr(d, mtlb.ifa, &vrr.value);
  24.185      mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
  24.186 -    mtlb.ppn = get_mfn(DOMID_SELF,tlb->ppn, 1);
  24.187 -    if (mtlb.ppn == INVALID_MFN)
  24.188 +    mtlb.ppn = get_mfn(d->domain,tlb->ppn);
  24.189 +    mtlb_ppn=mtlb.ppn;
  24.190 +    if (mtlb_ppn == INVALID_MFN)
  24.191      panic("Machine tlb insert with invalid mfn number.\n");
  24.192  
  24.193      psr = ia64_clear_ic();
  24.194 @@ -287,44 +276,33 @@ void machine_tlb_purge(u64 va, u64 ps)
  24.195  //    ia64_srlz_i();
  24.196  //    return;
  24.197  }
  24.198 -
  24.199 -u64 machine_thash(PTA pta, u64 va)
  24.200 +/*
  24.201 +u64 machine_thash(u64 va)
  24.202  {
  24.203 -    u64     saved_pta;
  24.204 -    u64     hash_addr, tag;
  24.205 -    unsigned long psr;
  24.206 -    struct vcpu *v = current;
  24.207 -    ia64_rr vrr;
  24.208 -
  24.209 -    saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
  24.210 -    psr = ia64_clear_ic();
  24.211 -    ia64_setreg(_IA64_REG_CR_PTA, pta.val);
  24.212 -    hash_addr = ia64_thash(va);
  24.213 -    ia64_setreg(_IA64_REG_CR_PTA, saved_pta);
  24.214 -    ia64_set_psr(psr);
  24.215 -    ia64_srlz_i();
  24.216 -    return hash_addr;
  24.217 +    return ia64_thash(va);
  24.218  }
  24.219  
  24.220 -u64 machine_ttag(PTA pta, u64 va)
  24.221 +u64 machine_ttag(u64 va)
  24.222 +{
  24.223 +    return ia64_ttag(va);
  24.224 +}
  24.225 +*/
  24.226 +thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag)
  24.227  {
  24.228 -//    u64     saved_pta;
  24.229 -//    u64     hash_addr, tag;
  24.230 -//    u64     psr;
  24.231 -//    struct vcpu *v = current;
  24.232 -
  24.233 -//    saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
  24.234 -//    psr = ia64_clear_ic();
  24.235 -//    ia64_setreg(_IA64_REG_CR_PTA, pta.val);
  24.236 -//    tag = ia64_ttag(va);
  24.237 -    return ia64_ttag(va);
  24.238 -//    ia64_setreg(_IA64_REG_CR_PTA, saved_pta);
  24.239 -//    ia64_set_psr(psr);
  24.240 -//    ia64_srlz_i();
  24.241 -//    return tag;
  24.242 +    u64 index,pfn,rid,pfn_bits;
  24.243 +    pfn_bits = vpta.size-5-8;
  24.244 +    pfn = REGION_OFFSET(va)>>_REGION_PAGE_SIZE(vrr);
  24.245 +    rid = _REGION_ID(vrr);
  24.246 +    index = ((rid&0xff)<<pfn_bits)|(pfn&((1UL<<pfn_bits)-1));
  24.247 +    *tag = ((rid>>8)&0xffff) | ((pfn >>pfn_bits)<<16);
  24.248 +    return (thash_data_t *)((vpta.base<<PTA_BASE_SHIFT)+(index<<5));
  24.249 +//    return ia64_call_vsa(PAL_VPS_THASH,va,vrr,vpta,0,0,0,0);
  24.250  }
  24.251  
  24.252 -
  24.253 +//u64 vsa_ttag(u64 va, u64 vrr)
  24.254 +//{
  24.255 +//    return ia64_call_vsa(PAL_VPS_TTAG,va,vrr,0,0,0,0,0);
  24.256 +//}
  24.257  
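
vsa_thash now computes the long-format VHPT hash in software rather than
calling into the VSA: the low 8 bits of the region id are folded into the entry
index, the remaining rid bits and the high pfn bits form the tag, and each
entry is 32 bytes (hence the << 5). A worked specialization, assuming
vpta.size = 24 (a 16MB VHPT, so pfn_bits = 24 - 5 - 8 = 11) -- the concrete
numbers are illustrative only:

    /* vsa_thash with vpta.size == 24 substituted in. */
    static thash_data_t *vsa_thash_16m(PTA vpta, u64 rid, u64 pfn, u64 *tag)
    {
        /* 2^24 bytes / 2^5 bytes per entry = 2^19 entries; 8 bits from rid. */
        u64 index = ((rid & 0xff) << 11) | (pfn & ((1UL << 11) - 1));
        *tag = ((rid >> 8) & 0xffff) | ((pfn >> 11) << 16);
        return (thash_data_t *)((vpta.base << PTA_BASE_SHIFT) + (index << 5));
    }
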
  24.258  int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
  24.259  {
  24.260 @@ -371,11 +349,12 @@ int unimplemented_gva(VCPU *vcpu,u64 vad
  24.261   *  num:  number of dword (8byts) to read.
  24.262   */
  24.263  int
  24.264 -fetch_code(VCPU *vcpu, u64 gip, u64 *code)
  24.265 +fetch_code(VCPU *vcpu, u64 gip, u64 *code1, u64 *code2)
  24.266  {
  24.267 -    u64     gpip;   // guest physical IP
  24.268 -    u64     mpa;
  24.269 +    u64     gpip=0;   // guest physical IP
  24.270 +    u64     *vpa;
  24.271      thash_data_t    *tlb;
  24.272 +    thash_cb_t *hcb;
  24.273      ia64_rr vrr;
  24.274      u64     mfn;
  24.275  
  24.276 @@ -384,19 +363,26 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
  24.277      }
  24.278      else {
  24.279          vmx_vcpu_get_rr(vcpu, gip, &vrr.rrval);
  24.280 -        tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu),
  24.281 -                vrr.rid, gip, ISIDE_TLB );
   24.282 +        hcb = vmx_vcpu_get_vtlb(vcpu);
  24.283 +        tlb = vtlb_lookup_ex (hcb, vrr.rid, gip, ISIDE_TLB );
  24.284          if( tlb == NULL )
  24.285 -             tlb = vtlb_lookup_ex (vmx_vcpu_get_vtlb(vcpu),
  24.286 +             tlb = vtlb_lookup_ex (hcb,
  24.287                  vrr.rid, gip, DSIDE_TLB );
  24.288 -        if ( tlb == NULL ) panic("No entry found in ITLB and DTLB\n");
  24.289 -        gpip = (tlb->ppn << 12) | ( gip & (PSIZE(tlb->ps)-1) );
   24.290 +        if (tlb)
   24.291 +            gpip = (tlb->ppn << 12) | (gip & (PSIZE(tlb->ps)-1));
  24.292      }
  24.293 -    mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
  24.294 -    if ( mfn == INVALID_MFN ) return 0;
  24.295 - 
  24.296 -    mpa = (gpip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT);
  24.297 -    *code = *(u64*)__va(mpa);
   24.298 +    if (gpip) {
   24.299 +        mfn = gmfn_to_mfn(vcpu->domain, gpip >> PAGE_SHIFT);
   24.300 +        if (mfn == INVALID_MFN)  panic("fetch_code: invalid memory\n");
   24.301 +        vpa = (u64 *)__va((gip & (PAGE_SIZE-1)) | (mfn << PAGE_SHIFT));
   24.302 +    } else {
   24.303 +        tlb = vhpt_lookup(gip);
   24.304 +        if (tlb == NULL)
   24.305 +            panic("No entry found in ITLB and DTLB\n");
   24.306 +        vpa = (u64 *)__va((tlb->ppn >> (PAGE_SHIFT-ARCH_PAGE_SHIFT) << PAGE_SHIFT) | (gip & (PAGE_SIZE-1)));
   24.307 +    }
  24.308 +    *code1 = *vpa++;
  24.309 +    *code2 = *vpa;
  24.310      return 1;
  24.311  }
  24.312  
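
fetch_code now returns both 8-byte halves of the 16-byte IA-64 bundle in a
single call, which is what lets __vmx_get_domain_bundle in vmx_virt.c collapse
its two fetches into one (see that hunk further below). The caller's side, as
it reads after this change:

    IA64_BUNDLE bundle;
    /* One lookup fills both instruction slots of the bundle. */
    fetch_code(current, iip, &bundle.i64[0], &bundle.i64[1]);
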
  24.313 @@ -414,19 +400,19 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
  24.314      data.vadr=PAGEALIGN(ifa,data.ps);
  24.315      data.tc = 1;
  24.316      data.cl=ISIDE_TLB;
  24.317 -    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
  24.318 +    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
  24.319      data.rid = vrr.rid;
  24.320      
  24.321      sections.tr = 1;
  24.322      sections.tc = 0;
  24.323  
  24.324 -    ovl = thash_find_overlap(hcb, &data, sections);
  24.325 +    ovl = vtr_find_overlap(hcb, &data, ISIDE_TLB);
  24.326      while (ovl) {
  24.327          // generate MCA.
  24.328          panic("Tlb conflict!!");
  24.329 -        return;
  24.330 +        return IA64_FAULT;
  24.331      }
  24.332 -    thash_purge_and_insert(hcb, &data);
  24.333 +    thash_purge_and_insert(hcb, &data, ifa);
  24.334      return IA64_NO_FAULT;
  24.335  }
  24.336  
  24.337 @@ -447,24 +433,26 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
  24.338      data.vadr=PAGEALIGN(ifa,data.ps);
  24.339      data.tc = 1;
  24.340      data.cl=DSIDE_TLB;
  24.341 -    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
  24.342 +    vmx_vcpu_get_rr(vcpu, ifa,(UINT64 *)&vrr);
  24.343      data.rid = vrr.rid;
  24.344      sections.tr = 1;
  24.345      sections.tc = 0;
  24.346  
  24.347 -    ovl = thash_find_overlap(hcb, &data, sections);
  24.348 +    ovl = vtr_find_overlap(hcb, &data, DSIDE_TLB);
  24.349      if (ovl) {
  24.350            // generate MCA.
  24.351          panic("Tlb conflict!!");
  24.352 -        return;
  24.353 +        return IA64_FAULT;
  24.354      }
  24.355 -    thash_purge_and_insert(hcb, &data);
  24.356 +    thash_purge_and_insert(hcb, &data, ifa);
  24.357      return IA64_NO_FAULT;
  24.358  }
  24.359  
  24.360  /*
  24.361   * Return TRUE/FALSE for success of lock operation
  24.362   */
  24.363 +
  24.364 +/*
  24.365  int vmx_lock_guest_dtc (VCPU *vcpu, UINT64 va, int lock)
  24.366  {
  24.367  
  24.368 @@ -478,6 +466,9 @@ int vmx_lock_guest_dtc (VCPU *vcpu, UINT
  24.369      preferred_size = PSIZE(vrr.ps);
  24.370      return thash_lock_tc(hcb, va, preferred_size, vrr.rid, DSIDE_TLB, lock);
  24.371  }
  24.372 + */
  24.373 +
  24.374 +
  24.375  
  24.376  IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
  24.377  {
  24.378 @@ -486,6 +477,7 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UIN
  24.379      thash_cb_t  *hcb;
  24.380      search_section_t sections;
  24.381      ia64_rr vrr;
  24.382 +    /* u64 mfn,psr; */
  24.383  
  24.384      hcb = vmx_vcpu_get_vtlb(vcpu);
  24.385      data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
  24.386 @@ -493,21 +485,38 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UIN
  24.387      data.vadr=PAGEALIGN(ifa,data.ps);
  24.388      data.tc = 0;
  24.389      data.cl=ISIDE_TLB;
  24.390 -    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
  24.391 +    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
  24.392      data.rid = vrr.rid;
  24.393      sections.tr = 1;
  24.394      sections.tc = 0;
  24.395  
  24.396 -    ovl = thash_find_overlap(hcb, &data, sections);
  24.397 +
  24.398 +    ovl = vtr_find_overlap(hcb, &data, ISIDE_TLB);
  24.399      if (ovl) {
  24.400          // generate MCA.
  24.401          panic("Tlb conflict!!");
  24.402 -        return;
  24.403 +        return IA64_FAULT;
  24.404      }
  24.405      sections.tr = 0;
  24.406      sections.tc = 1;
  24.407      thash_purge_entries(hcb, &data, sections);
  24.408 +/*    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
  24.409 +        data.contiguous=1;
  24.410 +    }
  24.411 + */
  24.412      thash_tr_insert(hcb, &data, ifa, idx);
  24.413 +/*
  24.414 +    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
  24.415 +        mfn = __gpfn_to_mfn_foreign(vcpu->domain,arch_to_xen_ppn(data.ppn));
  24.416 +        data.page_flags=pte&~PAGE_FLAGS_RV_MASK;
  24.417 +        data.ppn = xen_to_arch_ppn(mfn);
  24.418 +        psr = ia64_clear_ic();
  24.419 +        ia64_itr(0x1, IA64_ITR_GUEST_KERNEL, data.vadr, data.page_flags, data.ps);
  24.420 +        ia64_set_psr(psr);      // restore psr
  24.421 +        ia64_srlz_i();
  24.422 +//        return IA64_NO_FAULT;
  24.423 +    }
  24.424 +*/
  24.425      return IA64_NO_FAULT;
  24.426  }
  24.427  
  24.428 @@ -518,7 +527,7 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UIN
  24.429      thash_cb_t  *hcb;
  24.430      search_section_t sections;
  24.431      ia64_rr    vrr;
  24.432 -
  24.433 +    /* u64 mfn,psr; */
  24.434  
  24.435      hcb = vmx_vcpu_get_vtlb(vcpu);
  24.436      data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
  24.437 @@ -526,21 +535,39 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UIN
  24.438      data.vadr=PAGEALIGN(ifa,data.ps);
  24.439      data.tc = 0;
  24.440      data.cl=DSIDE_TLB;
  24.441 -    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
  24.442 +    vmx_vcpu_get_rr(vcpu, ifa,(UINT64 *)&vrr);
  24.443      data.rid = vrr.rid;
  24.444      sections.tr = 1;
  24.445      sections.tc = 0;
  24.446  
  24.447 -    ovl = thash_find_overlap(hcb, &data, sections);
  24.448 +    ovl = vtr_find_overlap(hcb, &data, DSIDE_TLB);
  24.449      while (ovl) {
  24.450          // generate MCA.
  24.451          panic("Tlb conflict!!");
  24.452 -        return;
  24.453 +        return IA64_FAULT;
  24.454      }
  24.455      sections.tr = 0;
  24.456      sections.tc = 1;
  24.457      thash_purge_entries(hcb, &data, sections);
  24.458 +/*
  24.459 +    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
  24.460 +        data.contiguous=1;
  24.461 +    }
  24.462 + */
  24.463      thash_tr_insert(hcb, &data, ifa, idx);
  24.464 +/*
  24.465 +    if((idx==IA64_TR_KERNEL)&&(data.ps == KERNEL_TR_PAGE_SHIFT)){
  24.466 +        mfn = __gpfn_to_mfn_foreign(vcpu->domain,arch_to_xen_ppn(data.ppn));
  24.467 +        data.page_flags=pte&~PAGE_FLAGS_RV_MASK;
  24.468 +        data.ppn = xen_to_arch_ppn(mfn);
  24.469 +        psr = ia64_clear_ic();
  24.470 +        ia64_itr(0x2,IA64_DTR_GUEST_KERNEL , data.vadr, data.page_flags, data.ps);
  24.471 +        ia64_set_psr(psr);      // restore psr
  24.472 +        ia64_srlz_i();
  24.473 +//        return IA64_NO_FAULT;
  24.474 +    }
  24.475 +*/
  24.476 +
  24.477      return IA64_NO_FAULT;
  24.478  }
  24.479  
  24.480 @@ -578,7 +605,6 @@ IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UIN
  24.481      thash_cb_t  *hcb;
  24.482      ia64_rr vrr;
  24.483      search_section_t sections;
  24.484 -    thash_data_t data, *ovl;
  24.485      hcb = vmx_vcpu_get_vtlb(vcpu);
  24.486      vrr=vmx_vcpu_rr(vcpu,vadr);
  24.487      sections.tr = 0;
  24.488 @@ -616,7 +642,7 @@ IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UIN
  24.489  {
  24.490      PTA vpta;
  24.491      ia64_rr vrr;
  24.492 -    u64 vhpt_offset,tmp;
  24.493 +    u64 vhpt_offset;
  24.494      vmx_vcpu_get_pta(vcpu, &vpta.val);
  24.495      vrr=vmx_vcpu_rr(vcpu, vadr);
  24.496      if(vpta.vf){
  24.497 @@ -686,7 +712,25 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
  24.498              *padr = (data->ppn<<12) | (vadr&(PSIZE(data->ps)-1));
  24.499              return IA64_NO_FAULT;
  24.500          }
  24.501 -    }else{
  24.502 +    }
  24.503 +    data = vhpt_lookup(vadr);
  24.504 +    if(data){
  24.505 +        if(data->p==0){
  24.506 +            visr.na=1;
  24.507 +            vcpu_set_isr(vcpu,visr.val);
  24.508 +            page_not_present(vcpu, vadr);
  24.509 +            return IA64_FAULT;
  24.510 +        }else if(data->ma == VA_MATTR_NATPAGE){
  24.511 +            visr.na = 1;
  24.512 +            vcpu_set_isr(vcpu, visr.val);
  24.513 +            dnat_page_consumption(vcpu, vadr);
  24.514 +            return IA64_FAULT;
  24.515 +        }else{
  24.516 +            *padr = ((*(mpt_table+arch_to_xen_ppn(data->ppn)))<<PAGE_SHIFT) | (vadr&(PAGE_SIZE-1));
  24.517 +            return IA64_NO_FAULT;
  24.518 +        }
  24.519 +    }
  24.520 +    else{
  24.521          if(!vhpt_enabled(vcpu, vadr, NA_REF)){
  24.522              if(vpsr.ic){
  24.523                  vcpu_set_isr(vcpu, visr.val);
    25.1 --- a/xen/arch/ia64/vmx/vmx_entry.S	Thu Mar 02 10:59:34 2006 +0100
    25.2 +++ b/xen/arch/ia64/vmx/vmx_entry.S	Thu Mar 02 11:00:49 2006 +0100
    25.3 @@ -34,6 +34,7 @@
    25.4  #include <asm/thread_info.h>
    25.5  #include <asm/unistd.h>
    25.6  #include <asm/vhpt.h>
    25.7 +#include <asm/vmmu.h>
    25.8  #include "vmx_minstate.h"
    25.9  
   25.10  /*
   25.11 @@ -696,7 +697,7 @@ 1:
   25.12     movl r25=PAGE_KERNEL
   25.13     ;;
   25.14     or loc5 = r25,loc5          // construct PA | page properties
   25.15 -   mov r23 = IA64_GRANULE_SHIFT <<2
   25.16 +   mov r23 = VCPU_VHPT_SHIFT <<2
   25.17     ;;
   25.18     ptr.d   in3,r23
   25.19     ;;
    26.1 --- a/xen/arch/ia64/vmx/vmx_hypercall.c	Thu Mar 02 10:59:34 2006 +0100
    26.2 +++ b/xen/arch/ia64/vmx/vmx_hypercall.c	Thu Mar 02 11:00:49 2006 +0100
    26.3 @@ -31,6 +31,11 @@
    26.4  #include <xen/mm.h>
    26.5  #include <xen/multicall.h>
    26.6  #include <xen/hypercall.h>
    26.7 +#include <public/version.h>
    26.8 +#include <asm/dom_fw.h>
    26.9 +#include <xen/domain.h>
   26.10 +
   26.11 +extern long do_sched_op(int cmd, unsigned long arg);
   26.12  
   26.13  
   26.14  void hyper_not_support(void)
   26.15 @@ -48,7 +53,7 @@ void hyper_mmu_update(void)
   26.16      vcpu_get_gr_nat(vcpu,17,&r33);
   26.17      vcpu_get_gr_nat(vcpu,18,&r34);
   26.18      vcpu_get_gr_nat(vcpu,19,&r35);
   26.19 -    ret=vmx_do_mmu_update((mmu_update_t*)r32,r33,r34,r35);
   26.20 +    ret=vmx_do_mmu_update((mmu_update_t*)r32,r33,(u64 *)r34,r35);
   26.21      vcpu_set_gr(vcpu, 8, ret, 0);
   26.22      vmx_vcpu_increment_iip(vcpu);
   26.23  }
   26.24 @@ -124,7 +129,6 @@ void hyper_xen_version(void)
   26.25  
   26.26  static int do_lock_page(VCPU *vcpu, u64 va, u64 lock)
   26.27  {
   26.28 -    int i;
   26.29      ia64_rr rr;
   26.30      thash_cb_t *hcb;
   26.31      hcb = vmx_vcpu_get_vtlb(vcpu);
   26.32 @@ -136,6 +140,8 @@ static int do_lock_page(VCPU *vcpu, u64 
   26.33   * Lock guest page in vTLB, so that it's not relinquished by recycle
   26.34   * session when HV is servicing that hypercall.
   26.35   */
   26.36 +
   26.37 +/*
   26.38  void hyper_lock_page(void)
   26.39  {
   26.40  //TODO:
   26.41 @@ -148,6 +154,7 @@ void hyper_lock_page(void)
   26.42  
   26.43      vmx_vcpu_increment_iip(vcpu);
   26.44  }
   26.45 + */
   26.46  
   26.47  static int do_set_shared_page(VCPU *vcpu, u64 gpa)
   26.48  {
   26.49 @@ -169,7 +176,7 @@ static int do_set_shared_page(VCPU *vcpu
   26.50      	 * to xen heap. Or else, leave to domain itself to decide.
   26.51      	 */
   26.52      	if (likely(IS_XEN_HEAP_FRAME(virt_to_page(o_info))))
   26.53 -	    	free_xenheap_page(o_info);
   26.54 +	    	free_xenheap_page((void *)o_info);
   26.55      } else
   26.56          memset(d->shared_info, 0, PAGE_SIZE);
   26.57      return 0;
    27.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Thu Mar 02 10:59:34 2006 +0100
    27.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Thu Mar 02 11:00:49 2006 +0100
    27.3 @@ -96,7 +96,7 @@ identify_vmx_feature(void)
    27.4  	if (!(vp_env_info & VP_OPCODE))
    27.5  		printk("WARNING: no opcode provided from hardware(%lx)!!!\n", vp_env_info);
    27.6  	vm_order = get_order(buffer_size);
    27.7 -	printk("vm buffer size: %d, order: %d\n", buffer_size, vm_order);
    27.8 +	printk("vm buffer size: %ld, order: %ld\n", buffer_size, vm_order);
    27.9  
   27.10  	vmx_enabled = 1;
   27.11  no_vti:
   27.12 @@ -114,7 +114,7 @@ vmx_init_env(void)
   27.13  	u64 status, tmp_base;
   27.14  
   27.15  	if (!vm_buffer) {
   27.16 -		vm_buffer = alloc_xenheap_pages(vm_order);
   27.17 +		vm_buffer = (unsigned long)alloc_xenheap_pages(vm_order);
   27.18  		ASSERT(vm_buffer);
   27.19  		printk("vm_buffer: 0x%lx\n", vm_buffer);
   27.20  	}
   27.21 @@ -126,7 +126,7 @@ vmx_init_env(void)
   27.22  
   27.23  	if (status != PAL_STATUS_SUCCESS) {
   27.24  		printk("ia64_pal_vp_init_env failed.\n");
   27.25 -		return -1;
   27.26 +		return ;
   27.27  	}
   27.28  
   27.29  	if (!__vsa_base)
   27.30 @@ -172,7 +172,15 @@ static vpd_t *alloc_vpd(void)
   27.31  	cpuid3.number = 4;	/* 5 - 1 */
   27.32  	vpd->vcpuid[3] = cpuid3.value;
   27.33  
   27.34 +    vpd->vac.a_from_int_cr = 1;
   27.35 +    vpd->vac.a_to_int_cr = 1;
   27.36 +    vpd->vac.a_from_psr = 1;
   27.37 +    vpd->vac.a_from_cpuid = 1;
   27.38 +    vpd->vac.a_cover = 1;
   27.39 +    vpd->vac.a_bsw = 1;
   27.40 +
   27.41  	vpd->vdc.d_vmsw = 1;
   27.42 +
   27.43  	return vpd;
   27.44  }
   27.45  
   27.46 @@ -190,7 +198,7 @@ vmx_create_vp(struct vcpu *v)
    27.47  	/* ia64_ivt is a function pointer, so this translation is needed */
   27.48  	ivt_base = (u64) &vmx_ia64_ivt;
   27.49  	printk("ivt_base: 0x%lx\n", ivt_base);
   27.50 -	ret = ia64_pal_vp_create(vpd, ivt_base, 0);
   27.51 +	ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
   27.52  	if (ret != PAL_STATUS_SUCCESS)
   27.53  		panic("ia64_pal_vp_create failed. \n");
   27.54  }
   27.55 @@ -199,11 +207,10 @@ vmx_create_vp(struct vcpu *v)
   27.56  void
   27.57  vmx_save_state(struct vcpu *v)
   27.58  {
   27.59 -	u64 status, psr;
   27.60 -	u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
   27.61 +	u64 status;
   27.62  
   27.63  	/* FIXME: about setting of pal_proc_vector... time consuming */
   27.64 -	status = ia64_pal_vp_save(v->arch.privregs, 0);
   27.65 +	status = ia64_pal_vp_save((u64 *)v->arch.privregs, 0);
   27.66  	if (status != PAL_STATUS_SUCCESS)
   27.67  		panic("Save vp status failed\n");
   27.68  
   27.69 @@ -225,10 +232,7 @@ vmx_save_state(struct vcpu *v)
   27.70  void
   27.71  vmx_load_state(struct vcpu *v)
   27.72  {
   27.73 -	u64 status, psr;
   27.74 -	u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
   27.75 -	u64 pte_xen, pte_vhpt;
   27.76 -	int i;
   27.77 +	u64 status;
   27.78  
   27.79  	status = ia64_pal_vp_restore(v->arch.privregs, 0);
   27.80  	if (status != PAL_STATUS_SUCCESS)
   27.81 @@ -304,7 +308,7 @@ io_range_t io_ranges[] = {
   27.82  int vmx_alloc_contig_pages(struct domain *d)
   27.83  {
   27.84  	unsigned int order;
   27.85 -	unsigned long i, j, start, end, pgnr, conf_nr;
    27.86 +	unsigned long i, j, start, tmp, end, pgnr, conf_nr;
   27.87  	struct page_info *page;
   27.88  	struct vcpu *v = d->vcpu[0];
   27.89  
   27.90 @@ -315,57 +319,105 @@ int vmx_alloc_contig_pages(struct domain
   27.91  	    for (j = io_ranges[i].start;
   27.92  		 j < io_ranges[i].start + io_ranges[i].size;
   27.93  		 j += PAGE_SIZE)
   27.94 -		map_domain_page(d, j, io_ranges[i].type);
   27.95 +		assign_domain_page(d, j, io_ranges[i].type);
   27.96  	}
   27.97  
   27.98  	conf_nr = VMX_CONFIG_PAGES(d);
    27.99 +    if((conf_nr<<PAGE_SHIFT)<(1UL<<(_PAGE_SIZE_64M+1)))   /* i.e. < 128M */
   27.100 +        panic("vti domain needs at least 128M memory\n");
  27.101 +/*
  27.102  	order = get_order_from_pages(conf_nr);
  27.103  	if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
  27.104  	    printk("Could not allocate order=%d pages for vmx contig alloc\n",
  27.105  			order);
  27.106  	    return -1;
  27.107  	}
  27.108 +*/
  27.109 + 
  27.110 +/* reserve contiguous 64M for linux kernel */
  27.111 +
  27.112 +    if (unlikely((page = alloc_domheap_pages(d,(KERNEL_TR_PAGE_SHIFT-PAGE_SHIFT), 0)) == NULL)) {
  27.113 +        printk("No enough memory for vti domain!!!\n");
  27.114 +        return -1;
  27.115 +    }
  27.116 +    pgnr = page_to_mfn(page);
  27.117 +	for (i=(1UL<<KERNEL_TR_PAGE_SHIFT);i<(1UL<<(KERNEL_TR_PAGE_SHIFT+1));i+=PAGE_SIZE,pgnr++){
  27.118 +	    assign_domain_page(d, i, pgnr << PAGE_SHIFT);
  27.119 +    }
  27.120 +
  27.121 +	for (i = 0; i < (1UL<<KERNEL_TR_PAGE_SHIFT) ; i += PAGE_SIZE){
  27.122 +        if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
   27.123 +            printk("Not enough memory for vti domain!!!\n");
  27.124 +            return -1;
  27.125 +        }
  27.126 +	    pgnr = page_to_mfn(page);
  27.127 +	    assign_domain_page(d, i, pgnr << PAGE_SHIFT);
  27.128 +    }
  27.129  
  27.130  	/* Map normal memory below 3G */
  27.131 -	pgnr = page_to_mfn(page);
  27.132  	end = conf_nr << PAGE_SHIFT;
  27.133 -	for (i = 0;
  27.134 -	     i < (end < MMIO_START ? end : MMIO_START);
  27.135 -	     i += PAGE_SIZE, pgnr++)
  27.136 -	    map_domain_page(d, i, pgnr << PAGE_SHIFT);
  27.137 -
  27.138 +    tmp = end < MMIO_START ? end : MMIO_START;
  27.139 +	for (i = (1UL<<(KERNEL_TR_PAGE_SHIFT+1)); i < tmp; i += PAGE_SIZE){
  27.140 +        if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
   27.141 +            printk("Not enough memory for vti domain!!!\n");
  27.142 +            return -1;
  27.143 +        }
  27.144 +	    pgnr = page_to_mfn(page);
  27.145 +	    assign_domain_page(d, i, pgnr << PAGE_SHIFT);
  27.146 +    }
  27.147  	/* Map normal memory beyond 4G */
  27.148  	if (unlikely(end > MMIO_START)) {
  27.149  	    start = 4 * MEM_G;
  27.150  	    end = start + (end - 3 * MEM_G);
  27.151 -	    for (i = start; i < end; i += PAGE_SIZE, pgnr++)
  27.152 -		map_domain_page(d, i, pgnr << PAGE_SHIFT);
  27.153 +	    for (i = start; i < end; i += PAGE_SIZE){
  27.154 +            if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
   27.155 +                printk("Not enough memory for vti domain!!!\n");
  27.156 +                return -1;
  27.157 +            }
  27.158 +            pgnr = page_to_mfn(page);
  27.159 +            assign_domain_page(d, i, pgnr << PAGE_SHIFT);
  27.160 +        }
  27.161  	}
  27.162  
  27.163  	d->arch.max_pfn = end >> PAGE_SHIFT;
  27.164 -
  27.165 +/*
  27.166  	order = get_order_from_pages(GFW_SIZE >> PAGE_SHIFT);
  27.167  	if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
  27.168  	    printk("Could not allocate order=%d pages for vmx contig alloc\n",
  27.169 -			order);
   27.170 +			order);
  27.171  	    return -1;
  27.172  	}
  27.173 -
  27.174 +*/
  27.175  	/* Map guest firmware */
  27.176 -	pgnr = page_to_mfn(page);
  27.177 -	for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
  27.178 -	    map_domain_page(d, i, pgnr << PAGE_SHIFT);
   27.179 +	for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE){
  27.180 +        if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
   27.181 +            printk("Not enough memory for vti domain!!!\n");
  27.182 +            return -1;
  27.183 +        }
  27.184 +	    pgnr = page_to_mfn(page);
  27.185 +	    assign_domain_page(d, i, pgnr << PAGE_SHIFT);
  27.186 +    }
  27.187  
  27.188 +/*
  27.189  	if (unlikely((page = alloc_domheap_pages(d, 1, 0)) == NULL)) {
  27.190  	    printk("Could not allocate order=1 pages for vmx contig alloc\n");
  27.191  	    return -1;
  27.192  	}
  27.193 -
  27.194 +*/
  27.195  	/* Map for shared I/O page and xenstore */
  27.196 +    if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
   27.197 +        printk("Not enough memory for vti domain!!!\n");
  27.198 +        return -1;
  27.199 +    }
  27.200  	pgnr = page_to_mfn(page);
  27.201 -	map_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
  27.202 -	pgnr++;
  27.203 -	map_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
  27.204 +	assign_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
  27.205 +
  27.206 +    if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
   27.207 +        printk("Not enough memory for vti domain!!!\n");
  27.208 +        return -1;
  27.209 +    }
  27.210 +	pgnr = page_to_mfn(page);
  27.211 +	assign_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
  27.212  
  27.213  	set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
  27.214  	return 0;
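
With the contiguous-order allocations commented out, every region except the
64MB kernel identity mapping is now populated one page at a time with the same
allocate-then-assign pattern. A hedged helper that captures the repeated idiom
(the name and the factoring are hypothetical, not part of this patch):

    /* Hypothetical consolidation of the per-page loops above. */
    static int assign_domain_range(struct domain *d,
                                   unsigned long gpa_start,
                                   unsigned long gpa_end)
    {
        struct page_info *page;
        unsigned long gpa;

        for (gpa = gpa_start; gpa < gpa_end; gpa += PAGE_SIZE) {
            page = alloc_domheap_pages(d, 0, 0);   /* order 0: one page */
            if (page == NULL) {
                printk("Not enough memory for vti domain!!!\n");
                return -1;
            }
            assign_domain_page(d, gpa, page_to_mfn(page) << PAGE_SHIFT);
        }
        return 0;
    }
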
  27.215 @@ -375,7 +427,7 @@ void vmx_setup_platform(struct domain *d
  27.216  {
  27.217  	ASSERT(d != dom0); /* only for non-privileged vti domain */
  27.218  	d->arch.vmx_platform.shared_page_va =
  27.219 -		__va(__gpa_to_mpa(d, IO_PAGE_START));
  27.220 +		(unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
  27.221  	/* TEMP */
  27.222  	d->arch.vmx_platform.pib_base = 0xfee00000UL;
  27.223  
    28.1 --- a/xen/arch/ia64/vmx/vmx_interrupt.c	Thu Mar 02 10:59:34 2006 +0100
    28.2 +++ b/xen/arch/ia64/vmx/vmx_interrupt.c	Thu Mar 02 11:00:49 2006 +0100
    28.3 @@ -86,7 +86,7 @@ collect_interruption(VCPU *vcpu)
    28.4  
    28.5  }
    28.6  
    28.7 -int
    28.8 +void
    28.9  inject_guest_interruption(VCPU *vcpu, u64 vec)
   28.10  {
   28.11      u64 viva;
   28.12 @@ -334,6 +334,7 @@ static void
   28.13   *  @ Nat Consumption Vector
   28.14   * Refer to SDM Vol2 Table 5-6 & 8-1
   28.15   */
   28.16 +
   28.17  static void
   28.18  ir_nat_page_consumption (VCPU *vcpu, u64 vadr)
   28.19  {
    29.1 --- a/xen/arch/ia64/vmx/vmx_irq_ia64.c	Thu Mar 02 10:59:34 2006 +0100
    29.2 +++ b/xen/arch/ia64/vmx/vmx_irq_ia64.c	Thu Mar 02 11:00:49 2006 +0100
    29.3 @@ -24,6 +24,8 @@
    29.4  #include <asm/pgtable.h>
    29.5  #include <asm/system.h>
    29.6  
    29.7 +#include <asm/vcpu.h>
    29.8 +#include <xen/irq.h>
    29.9  #ifdef CONFIG_SMP
   29.10  #   define IS_RESCHEDULE(vec)   (vec == IA64_IPI_RESCHEDULE)
   29.11  #else
   29.12 @@ -126,6 +128,6 @@ vmx_ia64_handle_irq (ia64_vector vector,
   29.13  	 * come through until ia64_eoi() has been done.
   29.14  	 */
   29.15  	vmx_irq_exit();
   29.16 -	if ( wake_dom0 && current != dom0 ) 
    29.17 +	if (wake_dom0 && current != dom0)
   29.18  		vcpu_wake(dom0->vcpu[0]);
   29.19  }
    30.1 --- a/xen/arch/ia64/vmx/vmx_ivt.S	Thu Mar 02 10:59:34 2006 +0100
    30.2 +++ b/xen/arch/ia64/vmx/vmx_ivt.S	Thu Mar 02 11:00:49 2006 +0100
    30.3 @@ -269,6 +269,10 @@ ENTRY(vmx_alt_itlb_miss)
    30.4  (p7)br.sptk vmx_fault_3
    30.5  vmx_alt_itlb_miss_1:
    30.6  	mov r16=cr.ifa		// get address that caused the TLB miss
    30.7 +    ;;
    30.8 +    tbit.z p6,p7=r16,63
    30.9 +(p6)br.sptk vmx_fault_3
   30.10 +    ;;
   30.11  	movl r17=PAGE_KERNEL
   30.12  	mov r24=cr.ipsr
   30.13  	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
   30.14 @@ -300,6 +304,10 @@ ENTRY(vmx_alt_dtlb_miss)
   30.15  (p7)br.sptk vmx_fault_4
   30.16  vmx_alt_dtlb_miss_1:
   30.17  	mov r16=cr.ifa		// get address that caused the TLB miss
   30.18 +    ;;
   30.19 +    tbit.z p6,p7=r16,63
   30.20 +(p6)br.sptk vmx_fault_4
   30.21 +    ;;
   30.22  	movl r17=PAGE_KERNEL
   30.23  	mov r20=cr.isr
   30.24  	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
   30.25 @@ -397,7 +405,7 @@ END(vmx_break_fault)
   30.26  /////////////////////////////////////////////////////////////////////////////////////////
   30.27  // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
   30.28  ENTRY(vmx_interrupt)
   30.29 -    VMX_DBG_FAULT(12)
   30.30 +//    VMX_DBG_FAULT(12)
   30.31  	mov r31=pr		// prepare to save predicates
   30.32      mov r19=12
   30.33      mov r29=cr.ipsr
   30.34 @@ -734,7 +742,7 @@ END(vmx_single_step_trap)
   30.35  /////////////////////////////////////////////////////////////////////////////////////////
   30.36  // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
   30.37  ENTRY(vmx_virtualization_fault)
   30.38 -    VMX_DBG_FAULT(37)
   30.39 +//    VMX_DBG_FAULT(37)
   30.40  	mov r31=pr
   30.41      mov r19=37
   30.42      adds r16 = IA64_VCPU_CAUSE_OFFSET,r21
   30.43 @@ -1138,5 +1146,5 @@ hyper_call_table:
   30.44      data8 hyper_not_support     //hyper_boot_vcpu
   30.45      data8 hyper_not_support     //hyper_ni_hypercall       /* 25 */
   30.46      data8 hyper_not_support     //hyper_mmuext_op
   30.47 -    data8 hyper_lock_page
    30.48 +    data8 hyper_not_support     //data8 hyper_lock_page
   30.49      data8 hyper_set_shared_page
    31.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Thu Mar 02 10:59:34 2006 +0100
    31.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Thu Mar 02 11:00:49 2006 +0100
    31.3 @@ -27,7 +27,7 @@
    31.4  #include <asm/vmx_phy_mode.h>
    31.5  #include <xen/sched.h>
    31.6  #include <asm/pgtable.h>
    31.7 -
    31.8 +#include <asm/vmmu.h>
    31.9  int valid_mm_mode[8] = {
   31.10      GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
   31.11      INV_MODE,
   31.12 @@ -61,9 +61,9 @@ int mm_switch_table[8][8] = {
   31.13       *  data access can be satisfied though itlb entry for physical
   31.14       *  emulation is hit.
   31.15           */
   31.16 -    SW_SELF,0,  0,  SW_NOP, 0,  0,  0,  SW_P2V,
   31.17 -    0,  0,  0,  0,  0,  0,  0,  0,
   31.18 -    0,  0,  0,  0,  0,  0,  0,  0,
   31.19 +    {SW_SELF,0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
   31.20 +    {0,  0,  0,  0,  0,  0,  0,  0},
   31.21 +    {0,  0,  0,  0,  0,  0,  0,  0},
   31.22      /*
   31.23       *  (it,dt,rt): (0,1,1) -> (1,1,1)
   31.24       *  This kind of transition is found in OSYa.
   31.25 @@ -71,17 +71,17 @@ int mm_switch_table[8][8] = {
   31.26       *  (it,dt,rt): (0,1,1) -> (0,0,0)
   31.27       *  This kind of transition is found in OSYa
   31.28       */
   31.29 -    SW_NOP, 0,  0,  SW_SELF,0,  0,  0,  SW_P2V,
   31.30 +    {SW_NOP, 0,  0,  SW_SELF,0,  0,  0,  SW_P2V},
   31.31      /* (1,0,0)->(1,1,1) */
   31.32 -    0,  0,  0,  0,  0,  0,  0,  SW_P2V,
   31.33 +    {0,  0,  0,  0,  0,  0,  0,  SW_P2V},
   31.34      /*
   31.35           *  (it,dt,rt): (1,0,1) -> (1,1,1)
   31.36           *  This kind of transition usually occurs when Linux returns
   31.37       *  from the low level TLB miss handlers.
   31.38           *  (see "arch/ia64/kernel/ivt.S")
   31.39           */
   31.40 -    0,  0,  0,  0,  0,  SW_SELF,0,  SW_P2V,
   31.41 -    0,  0,  0,  0,  0,  0,  0,  0,
   31.42 +    {0,  0,  0,  0,  0,  SW_SELF,0,  SW_P2V},
   31.43 +    {0,  0,  0,  0,  0,  0,  0,  0},
   31.44      /*
   31.45           *  (it,dt,rt): (1,1,1) -> (1,0,1)
   31.46           *  This kind of transition usually occurs in Linux low level
   31.47 @@ -94,68 +94,18 @@ int mm_switch_table[8][8] = {
   31.48       *  (1,1,1)->(1,0,0)
   31.49       */
   31.50  
   31.51 -    SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF,
   31.52 +    {SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
   31.53  };
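
The added braces make the 8x8 shape of mm_switch_table explicit; a flat
initializer fills the same cells row-major but is easy to miscount and draws
missing-braces warnings. The table is indexed by the (it,dt,rt) PSR bits of
the old and new mode; a sketch of the lookup, assuming the usual 3-bit packing
(the packing itself is defined elsewhere, not in this hunk):

    /* Assumed encoding: mode = (psr.it << 2) | (psr.dt << 1) | psr.rt */
    int old_mode = (old_psr.it << 2) | (old_psr.dt << 1) | old_psr.rt;
    int new_mode = (new_psr.it << 2) | (new_psr.dt << 1) | new_psr.rt;
    int action   = mm_switch_table[old_mode][new_mode];  /* SW_SELF, SW_P2V, ... */
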
   31.54  
   31.55  void
   31.56  physical_mode_init(VCPU *vcpu)
   31.57  {
   31.58 -    UINT64 psr;
   31.59 -    struct domain * d = vcpu->domain;
   31.60 -
   31.61      vcpu->arch.old_rsc = 0;
   31.62      vcpu->arch.mode_flags = GUEST_IN_PHY;
   31.63  }
   31.64  
   31.65 -extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages);
   31.66 -#if 0
   31.67 -void
   31.68 -physical_itlb_miss_domn(VCPU *vcpu, u64 vadr)
   31.69 -{
   31.70 -    u64 psr;
   31.71 -    IA64_PSR vpsr;
   31.72 -    u64 mppn,gppn,mpp1,gpp1;
   31.73 -    struct domain *d;
   31.74 -    static u64 test=0;
   31.75 -    d=vcpu->domain;
   31.76 -    if(test)
   31.77 -        panic("domn physical itlb miss happen\n");
   31.78 -    else
   31.79 -        test=1;
   31.80 -    vpsr.val=vmx_vcpu_get_psr(vcpu);
   31.81 -    gppn=(vadr<<1)>>13;
   31.82 -    mppn = get_mfn(DOMID_SELF,gppn,1);
   31.83 -    mppn=(mppn<<12)|(vpsr.cpl<<7);
   31.84 -    gpp1=0;
   31.85 -    mpp1 = get_mfn(DOMID_SELF,gpp1,1);
   31.86 -    mpp1=(mpp1<<12)|(vpsr.cpl<<7);
   31.87 -//    if(vadr>>63)
   31.88 -//        mppn |= PHY_PAGE_UC;
   31.89 -//    else
   31.90 -//        mppn |= PHY_PAGE_WB;
   31.91 -    mpp1 |= PHY_PAGE_WB;
   31.92 -    psr=ia64_clear_ic();
   31.93 -    ia64_itr(0x1, IA64_TEMP_PHYSICAL, vadr&(~0xfff), (mppn|PHY_PAGE_WB), 24);
   31.94 -    ia64_srlz_i();
   31.95 -    ia64_itr(0x2, IA64_TEMP_PHYSICAL, vadr&(~0xfff), (mppn|PHY_PAGE_WB), 24);
   31.96 -    ia64_stop();
   31.97 -    ia64_srlz_i();
   31.98 -    ia64_itr(0x1, IA64_TEMP_PHYSICAL+1, vadr&(~0x8000000000000fffUL), (mppn|PHY_PAGE_WB), 24);
   31.99 -    ia64_srlz_i();
  31.100 -    ia64_itr(0x2, IA64_TEMP_PHYSICAL+1, vadr&(~0x8000000000000fffUL), (mppn|PHY_PAGE_WB), 24);
  31.101 -    ia64_stop();
  31.102 -    ia64_srlz_i();
  31.103 -    ia64_itr(0x1, IA64_TEMP_PHYSICAL+2, gpp1&(~0xfff), mpp1, 28);
  31.104 -    ia64_srlz_i();
  31.105 -    ia64_itr(0x2, IA64_TEMP_PHYSICAL+2, gpp1&(~0xfff), mpp1, 28);
  31.106 -    ia64_stop();
  31.107 -    ia64_srlz_i();
  31.108 -    ia64_set_psr(psr);
  31.109 -    ia64_srlz_i();
  31.110 -    return;
  31.111 -}
  31.112 -#endif
  31.113 -
  31.114 +extern u64 get_mfn(struct domain *d, u64 gpfn);
   31.115 +extern void vmx_switch_rr7(unsigned long, shared_info_t *, void *, void *, void *);
  31.116  void
  31.117  physical_itlb_miss_dom0(VCPU *vcpu, u64 vadr)
  31.118  {
  31.119 @@ -164,7 +114,7 @@ physical_itlb_miss_dom0(VCPU *vcpu, u64 
  31.120      u64 mppn,gppn;
  31.121      vpsr.val=vmx_vcpu_get_psr(vcpu);
  31.122      gppn=(vadr<<1)>>13;
  31.123 -    mppn = get_mfn(DOMID_SELF,gppn,1);
  31.124 +    mppn = get_mfn(vcpu->domain,gppn);
  31.125      mppn=(mppn<<12)|(vpsr.cpl<<7); 
  31.126  //    if(vadr>>63)
  31.127  //       mppn |= PHY_PAGE_UC;
  31.128 @@ -196,7 +146,7 @@ physical_dtlb_miss(VCPU *vcpu, u64 vadr)
  31.129  //        panic("dom n physical dtlb miss happen\n");
  31.130      vpsr.val=vmx_vcpu_get_psr(vcpu);
  31.131      gppn=(vadr<<1)>>13;
  31.132 -    mppn = get_mfn(DOMID_SELF,gppn,1);
  31.133 +    mppn = get_mfn(vcpu->domain, gppn);
  31.134      mppn=(mppn<<12)|(vpsr.cpl<<7);
  31.135      if(vadr>>63)
  31.136          mppn |= PHY_PAGE_UC;
  31.137 @@ -404,7 +354,7 @@ check_mm_mode_switch (VCPU *vcpu,  IA64_
  31.138          switch_mm_mode (vcpu, old_psr, new_psr);
  31.139      }
  31.140  
  31.141 -    return 0;
  31.142 +    return;
  31.143  }
  31.144  
  31.145  
    32.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Thu Mar 02 10:59:34 2006 +0100
    32.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Thu Mar 02 11:00:49 2006 +0100
    32.3 @@ -47,9 +47,11 @@
    32.4  #include <asm/vmx_vcpu.h>
    32.5  #include <asm/kregs.h>
    32.6  #include <asm/vmx.h>
    32.7 +#include <asm/vmmu.h>
    32.8  #include <asm/vmx_mm_def.h>
    32.9  #include <asm/vmx_phy_mode.h>
   32.10  #include <xen/mm.h>
   32.11 +#include <asm/vmx_pal.h>
   32.12  /* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
   32.13  #define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
   32.14  
   32.15 @@ -65,7 +67,7 @@ static UINT64 vec2off[68] = {0x0,0x400,0
   32.16      0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00,
   32.17      0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400,
   32.18      0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00,
   32.19 -    0x7f00,
   32.20 +    0x7f00
   32.21  };
   32.22  
   32.23  
   32.24 @@ -74,7 +76,7 @@ void vmx_reflect_interruption(UINT64 ifa
   32.25       UINT64 vector,REGS *regs)
   32.26  {
   32.27      VCPU *vcpu = current;
   32.28 -    UINT64 viha,vpsr = vmx_vcpu_get_psr(vcpu);
   32.29 +    UINT64 vpsr = vmx_vcpu_get_psr(vcpu);
   32.30      if(!(vpsr&IA64_PSR_IC)&&(vector!=5)){
   32.31          panic("Guest nested fault!");
   32.32      }
   32.33 @@ -92,10 +94,8 @@ void vmx_reflect_interruption(UINT64 ifa
   32.34  IA64FAULT
   32.35  vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
   32.36  {
   32.37 -	static int first_time = 1;
   32.38  	struct domain *d = (struct domain *) current->domain;
   32.39 -	struct vcpu *v = (struct domain *) current;
   32.40 -	extern unsigned long running_on_sim;
   32.41 +	struct vcpu *v = (struct vcpu *) current;
   32.42  	unsigned long i, sal_param[8];
   32.43  
   32.44  #if 0
   32.45 @@ -160,12 +160,12 @@ vmx_ia64_handle_break (unsigned long ifa
   32.46  		    case FW_HYPERCALL_EFI_GET_TIME:
   32.47  			{
   32.48  			unsigned long *tv, *tc;
   32.49 -			vcpu_get_gr_nat(v, 32, &tv);
   32.50 -			vcpu_get_gr_nat(v, 33, &tc);
   32.51 +			vcpu_get_gr_nat(v, 32, (u64 *)&tv);
   32.52 +			vcpu_get_gr_nat(v, 33, (u64 *)&tc);
   32.53  			printf("efi_get_time(%p,%p) called...",tv,tc);
   32.54 -			tv = __va(translate_domain_mpaddr(tv));
   32.55 -			if (tc) tc = __va(translate_domain_mpaddr(tc));
   32.56 -			regs->r8 = (*efi.get_time)(tv,tc);
   32.57 +			tv = __va(translate_domain_mpaddr((unsigned long)tv));
   32.58 +			if (tc) tc = __va(translate_domain_mpaddr((unsigned long)tc));
   32.59 +			regs->r8 = (*efi.get_time)((efi_time_t *)tv,(efi_time_cap_t *)tc);
   32.60  			printf("and returns %lx\n",regs->r8);
   32.61  			}
   32.62  			break;
   32.63 @@ -200,12 +200,13 @@ vmx_ia64_handle_break (unsigned long ifa
   32.64  			die_if_kernel("bug check", regs, iim);
   32.65  		vmx_reflect_interruption(ifa,isr,iim,11,regs);
   32.66      }
   32.67 +    return IA64_NO_FAULT;
   32.68  }
   32.69  
   32.70  
   32.71  void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
   32.72  {
   32.73 -    unsigned long i, * src,* dst, *sunat, *dunat;
    32.74 +    unsigned long i = 0UL, *src, *dst, *sunat, *dunat;
   32.75      IA64_PSR vpsr;
   32.76      src=&regs->r16;
   32.77      sunat=&regs->eml_unat;
   32.78 @@ -262,10 +263,10 @@ void leave_hypervisor_tail(struct pt_reg
   32.79   		 *
   32.80   		 * Now hardcode the vector as 0x10 temporarily
   32.81   		 */
   32.82 - 		if (event_pending(v)&&(!(VLSAPIC_INSVC(v,0)&(1UL<<0x10)))) {
   32.83 - 			VCPU(v, irr[0]) |= 1UL << 0x10;
   32.84 - 			v->arch.irq_new_pending = 1;
   32.85 - 		}
   32.86 +// 		if (event_pending(v)&&(!(VLSAPIC_INSVC(v,0)&(1UL<<0x10)))) {
   32.87 +// 			VCPU(v, irr[0]) |= 1UL << 0x10;
   32.88 +// 			v->arch.irq_new_pending = 1;
   32.89 +// 		}
   32.90  
   32.91   		if ( v->arch.irq_new_pending ) {
   32.92   			v->arch.irq_new_pending = 0;
   32.93 @@ -287,16 +288,17 @@ static int vmx_handle_lds(REGS* regs)
   32.94  }
   32.95  
   32.96  /* We came here because the H/W VHPT walker failed to find an entry */
   32.97 -void vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
   32.98 +IA64FAULT
   32.99 +vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
  32.100  {
  32.101      IA64_PSR vpsr;
  32.102 -    CACHE_LINE_TYPE type;
  32.103 +    CACHE_LINE_TYPE type=ISIDE_TLB;
  32.104      u64 vhpt_adr, gppa;
  32.105      ISR misr;
  32.106      ia64_rr vrr;
  32.107  //    REGS *regs;
  32.108 -    thash_cb_t *vtlb, *vhpt;
  32.109 -    thash_data_t *data, me;
  32.110 +    thash_cb_t *vtlb;
  32.111 +    thash_data_t *data;
  32.112      VCPU *v = current;
  32.113      vtlb=vmx_vcpu_get_vtlb(v);
  32.114  #ifdef  VTLB_DEBUG
  32.115 @@ -313,10 +315,14 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE
  32.116          return;
  32.117      }
  32.118  */
   32.119 +    if(vadr == 0x1ea18c00 ){   /* XXX: leftover debug trap -- spins forever on this magic address */
  32.120 +        ia64_clear_ic();
  32.121 +        while(1);
  32.122 +    }
  32.123      if(is_physical_mode(v)&&(!(vadr<<1>>62))){
  32.124          if(vec==1){
  32.125              physical_itlb_miss(v, vadr);
  32.126 -            return;
  32.127 +            return IA64_FAULT;
  32.128          }
  32.129          if(vec==2){
  32.130              if(v->domain!=dom0&&__gpfn_is_io(v->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
  32.131 @@ -324,7 +330,7 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE
  32.132              }else{
  32.133                  physical_dtlb_miss(v, vadr);
  32.134              }
  32.135 -            return;
  32.136 +            return IA64_FAULT;
  32.137          }
  32.138      }
  32.139      vrr = vmx_vcpu_rr(v, vadr);
  32.140 @@ -334,19 +340,25 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE
  32.141  
  32.142  //    prepare_if_physical_mode(v);
  32.143  
  32.144 -    if(data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type)){
  32.145 +    if((data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type))!=0){
  32.146  	gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
  32.147          if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
  32.148              emulate_io_inst(v, gppa, data->ma);
  32.149              return IA64_FAULT;
  32.150          }
  32.151  
  32.152 -    	if ( data->ps != vrr.ps ) {
  32.153 +//    	if ( data->ps != vrr.ps ) {
  32.154 +//    		machine_tlb_insert(v, data);
  32.155 +//    	}
  32.156 +//    	else {
  32.157 +/*        if ( data->contiguous&&(!data->tc)){
  32.158      		machine_tlb_insert(v, data);
  32.159 -    	}
  32.160 -    	else {
  32.161 -	        thash_insert(vtlb->ts->vhpt,data,vadr);
  32.162 -	    }
  32.163 +        }
  32.164 +        else{
  32.165 + */
  32.166 +            thash_vhpt_insert(vtlb->ts->vhpt,data,vadr);
  32.167 +//        }
  32.168 +//	    }
  32.169      }else if(type == DSIDE_TLB){
  32.170          if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
  32.171              if(vpsr.ic){
  32.172 @@ -366,8 +378,7 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE
  32.173          } else{
  32.174              vmx_vcpu_thash(v, vadr, &vhpt_adr);
  32.175              vrr=vmx_vcpu_rr(v,vhpt_adr);
  32.176 -            data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
  32.177 -            if(data){
  32.178 +            if(vhpt_lookup(vhpt_adr) ||  vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB)){
  32.179                  if(vpsr.ic){
  32.180                      vcpu_set_isr(v, misr.val);
  32.181                      dtlb_fault(v, vadr);
  32.182 @@ -410,8 +421,7 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE
  32.183          } else{
  32.184              vmx_vcpu_thash(v, vadr, &vhpt_adr);
  32.185              vrr=vmx_vcpu_rr(v,vhpt_adr);
  32.186 -            data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
  32.187 -            if(data){
  32.188 +            if(vhpt_lookup(vhpt_adr) || vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB)){
  32.189                  if(!vpsr.ic){
  32.190                      misr.ni=1;
  32.191                  }
  32.192 @@ -428,6 +438,5 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE
  32.193              }
  32.194          }
  32.195      }
  32.196 +    return IA64_NO_FAULT;
  32.197  }
  32.198 -
  32.199 -
    33.1 --- a/xen/arch/ia64/vmx/vmx_utility.c	Thu Mar 02 10:59:34 2006 +0100
    33.2 +++ b/xen/arch/ia64/vmx/vmx_utility.c	Thu Mar 02 11:00:49 2006 +0100
    33.3 @@ -307,9 +307,8 @@ check_cr_rsv_fields (int index, u64 valu
    33.4              }
    33.5              return 0;
    33.6      }
    33.7 -
    33.8 -
    33.9      panic ("Unsupported CR");
   33.10 +    return 0;
   33.11  }
   33.12  
   33.13  
   33.14 @@ -600,7 +599,6 @@ void set_isr_reg_nat_consumption(VCPU *v
   33.15  
   33.16  void set_isr_for_priv_fault(VCPU *vcpu, u64 non_access)
   33.17  {
   33.18 -    u64 value;
   33.19      ISR isr;
   33.20  
   33.21      isr.val = set_isr_ei_ni(vcpu);
    34.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Thu Mar 02 10:59:34 2006 +0100
    34.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Thu Mar 02 11:00:49 2006 +0100
    34.3 @@ -35,7 +35,7 @@
    34.4  #include <asm/gcc_intrin.h>
    34.5  #include <asm/vmx_mm_def.h>
    34.6  #include <asm/vmx.h>
    34.7 -
    34.8 +#include <asm/vmx_phy_mode.h>
    34.9  //u64  fire_itc;
   34.10  //u64  fire_itc2;
   34.11  //u64  fire_itm;
   34.12 @@ -66,7 +66,6 @@
   34.13  #include <asm/hw_irq.h>
   34.14  #include <asm/vmx_pal_vsa.h>
   34.15  #include <asm/kregs.h>
   34.16 -
   34.17  //unsigned long last_guest_rsm = 0x0;
   34.18  struct guest_psr_bundle{
   34.19      unsigned long ip;
   34.20 @@ -138,7 +137,7 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
   34.21      regs->cr_ipsr = (regs->cr_ipsr & mask ) | ( value & (~mask) );
   34.22  
   34.23      check_mm_mode_switch(vcpu, old_psr, new_psr);
   34.24 -    return IA64_NO_FAULT;
   34.25 +    return ;
   34.26  }
   34.27  
   34.28  /* Adjust slot both in pt_regs and vpd, upon vpsr.ri which
    35.1 --- a/xen/arch/ia64/vmx/vmx_virt.c	Thu Mar 02 10:59:34 2006 +0100
    35.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c	Thu Mar 02 11:00:49 2006 +0100
    35.3 @@ -30,8 +30,9 @@
    35.4  #include <asm/vmmu.h>
    35.5  #include <asm/vmx_mm_def.h>
    35.6  #include <asm/smp.h>
    35.7 -
    35.8 +#include <asm/vmx.h>
    35.9  #include <asm/virt_event.h>
   35.10 +#include <asm/vmx_phy_mode.h>
   35.11  extern UINT64 privop_trace;
   35.12  
   35.13  void
   35.14 @@ -137,6 +138,11 @@ ia64_priv_decoder(IA64_SLOT_TYPE slot_ty
   35.15                  *cause=EVENT_BSW_1;
   35.16              }
   35.17          }
   35.18 +        case I:
   35.19 +        case F:
   35.20 +        case L:
   35.21 +        case ILLEGAL:
   35.22 +        break;
   35.23      }
   35.24  }
   35.25  
   35.26 @@ -157,7 +163,6 @@ IA64FAULT vmx_emul_mov_from_psr(VCPU *vc
   35.27  {
   35.28      UINT64 tgt = inst.M33.r1;
   35.29      UINT64 val;
   35.30 -    IA64FAULT fault;
   35.31  
   35.32  /*
   35.33      if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
   35.34 @@ -176,7 +181,6 @@ IA64FAULT vmx_emul_mov_from_psr(VCPU *vc
   35.35  IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
   35.36  {
   35.37      UINT64 val;
   35.38 -    IA64FAULT fault;
   35.39      if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
   35.40  	panic(" get_psr nat bit fault\n");
   35.41  
   35.42 @@ -255,7 +259,6 @@ IA64FAULT vmx_emul_cover(VCPU *vcpu, INS
   35.43  IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
   35.44  {
   35.45      u64 r2,r3;
   35.46 -    ISR isr;
   35.47      IA64_PSR  vpsr;
   35.48  
   35.49      vpsr.val=vmx_vcpu_get_psr(vcpu);
   35.50 @@ -267,6 +270,7 @@ IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INS
   35.51      }
   35.52      if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
   35.53  #ifdef  VMAL_NO_FAULT_CHECK
   35.54 +        ISR isr;
   35.55          set_isr_reg_nat_consumption(vcpu,0,0);
   35.56          rnat_comsumption(vcpu);
   35.57          return IA64_FAULT;
   35.58 @@ -287,11 +291,11 @@ IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INS
   35.59  IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
   35.60  {
   35.61      u64 r3;
   35.62 -    ISR isr;
   35.63      IA64_PSR  vpsr;
   35.64  
   35.65      vpsr.val=vmx_vcpu_get_psr(vcpu);
   35.66  #ifdef  VMAL_NO_FAULT_CHECK
   35.67 +    ISR isr;
   35.68      if ( vpsr.cpl != 0) {
   35.69          /* Inject Privileged Operation fault into guest */
   35.70          set_privileged_operation_isr (vcpu, 0);
   35.71 @@ -321,10 +325,10 @@ IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, IN
   35.72  
   35.73  IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
   35.74  {
   35.75 -    ISR isr;
   35.76      IA64FAULT	ret1, ret2;
   35.77  
   35.78  #ifdef  VMAL_NO_FAULT_CHECK
   35.79 +    ISR isr;
   35.80      IA64_PSR  vpsr;
   35.81      vpsr.val=vmx_vcpu_get_psr(vcpu);
   35.82      if ( vpsr.cpl != 0) {
   35.83 @@ -373,9 +377,9 @@ IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INS
   35.84  IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
   35.85  {
   35.86      u64 r1,r3;
   35.87 +#ifdef  CHECK_FAULT
   35.88      ISR visr;
   35.89      IA64_PSR vpsr;
   35.90 -#ifdef  CHECK_FAULT
   35.91      if(check_target_register(vcpu, inst.M46.r1)){
   35.92          set_illegal_op_isr(vcpu);
   35.93          illegal_op(vcpu);
   35.94 @@ -403,9 +407,11 @@ IA64FAULT vmx_emul_thash(VCPU *vcpu, INS
   35.95  IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
   35.96  {
   35.97      u64 r1,r3;
   35.98 +#ifdef  CHECK_FAULT
   35.99      ISR visr;
  35.100      IA64_PSR vpsr;
  35.101 - #ifdef  CHECK_FAULT
  35.102 +#endif
  35.103 +#ifdef  CHECK_FAULT
  35.104      if(check_target_register(vcpu, inst.M46.r1)){
  35.105          set_illegal_op_isr(vcpu);
  35.106          illegal_op(vcpu);
  35.107 @@ -433,8 +439,8 @@ IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST
  35.108  IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
  35.109  {
  35.110      u64 r1,r3;
  35.111 +#ifdef  CHECK_FAULT
  35.112      ISR visr;
  35.113 -#ifdef  CHECK_FAULT
  35.114      if(check_target_register(vcpu, inst.M46.r1)){
  35.115          set_illegal_op_isr(vcpu);
  35.116          illegal_op(vcpu);
  35.117 @@ -477,10 +483,10 @@ IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST6
  35.118  IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
  35.119  {
  35.120      u64 r1,r3;
  35.121 +#ifdef  CHECK_FAULT
  35.122      ISR visr;
  35.123      IA64_PSR vpsr;
  35.124      int fault=IA64_NO_FAULT;
  35.125 -#ifdef  CHECK_FAULT
  35.126      visr.val=0;
  35.127      if(check_target_register(vcpu, inst.M46.r1)){
  35.128          set_illegal_op_isr(vcpu);
  35.129 @@ -514,8 +520,7 @@ IA64FAULT vmx_emul_tak(VCPU *vcpu, INST6
  35.130  
  35.131  IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
  35.132  {
  35.133 -    UINT64 fault, itir, ifa, pte, slot;
  35.134 -    ISR isr;
  35.135 +    UINT64 itir, ifa, pte, slot;
  35.136      IA64_PSR  vpsr;
  35.137      vpsr.val=vmx_vcpu_get_psr(vcpu);
  35.138      if ( vpsr.ic ) {
  35.139 @@ -524,6 +529,7 @@ IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INS
  35.140          return IA64_FAULT;
  35.141      }
  35.142  #ifdef  VMAL_NO_FAULT_CHECK
  35.143 +    ISR isr;
  35.144      if ( vpsr.cpl != 0) {
  35.145          /* Inject Privileged Operation fault into guest */
  35.146          set_privileged_operation_isr (vcpu, 0);
  35.147 @@ -571,8 +577,10 @@ IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INS
  35.148  
  35.149  IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
  35.150  {
  35.151 -    UINT64 fault, itir, ifa, pte, slot;
  35.152 +    UINT64 itir, ifa, pte, slot;
  35.153 +#ifdef  VMAL_NO_FAULT_CHECK
  35.154      ISR isr;
  35.155 +#endif
  35.156      IA64_PSR  vpsr;
  35.157      vpsr.val=vmx_vcpu_get_psr(vcpu);
  35.158      if ( vpsr.ic ) {
  35.159 @@ -628,8 +636,6 @@ IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INS
  35.160  
  35.161  IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
  35.162  {
  35.163 -    UINT64 fault;
  35.164 -    ISR isr;
  35.165      IA64_PSR  vpsr;
  35.166      IA64FAULT	ret1;
  35.167  
  35.168 @@ -641,6 +647,8 @@ IA64FAULT itc_fault_check(VCPU *vcpu, IN
  35.169      }
  35.170  
  35.171  #ifdef  VMAL_NO_FAULT_CHECK
  35.172 +    UINT64 fault;
  35.173 +    ISR isr;
  35.174      if ( vpsr.cpl != 0) {
  35.175          /* Inject Privileged Operation fault into guest */
  35.176          set_privileged_operation_isr (vcpu, 0);
  35.177 @@ -1146,7 +1154,7 @@ IA64FAULT vmx_emul_mov_from_cpuid(VCPU *
  35.178  
  35.179  IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
  35.180  {
  35.181 -    u64 r2,cr3;
  35.182 +    u64 r2;
  35.183  #ifdef  CHECK_FAULT
  35.184      IA64_PSR  vpsr;
  35.185      vpsr.val=vmx_vcpu_get_psr(vcpu);
  35.186 @@ -1292,9 +1300,7 @@ extern IA64_SLOT_TYPE  slot_types[0x20][
  35.187  IA64_BUNDLE __vmx_get_domain_bundle(u64 iip)
  35.188  {
  35.189  	IA64_BUNDLE bundle;
  35.190 -
  35.191 -	fetch_code( current,iip, &bundle.i64[0]);
  35.192 -	fetch_code( current,iip+8, &bundle.i64[1]);
  35.193 +	fetch_code( current, iip, &bundle.i64[0], &bundle.i64[1]);
  35.194  	return bundle;
  35.195  }
  35.196  
  35.197 @@ -1309,14 +1315,10 @@ IA64_BUNDLE __vmx_get_domain_bundle(u64 
  35.198  void
  35.199  vmx_emulate(VCPU *vcpu, REGS *regs)
  35.200  {
  35.201 -    IA64_BUNDLE bundle;
  35.202 -    int slot;
  35.203 -    IA64_SLOT_TYPE slot_type;
  35.204      IA64FAULT status;
  35.205      INST64 inst;
  35.206      UINT64 iip, cause, opcode;
  35.207      iip = regs->cr_iip;
  35.208 -    IA64_PSR vpsr;
  35.209      cause = VMX(vcpu,cause);
  35.210      opcode = VMX(vcpu,opcode);
  35.211  
  35.212 @@ -1342,6 +1344,10 @@ if ( (cause == 0xff && opcode == 0x1e000
  35.213  #endif
  35.214  #ifdef BYPASS_VMAL_OPCODE
  35.215      // make a local copy of the bundle containing the privop
  35.216 +    IA64_BUNDLE bundle;
  35.217 +    int slot;
  35.218 +    IA64_SLOT_TYPE slot_type;
  35.219 +    IA64_PSR vpsr;
  35.220      bundle = __vmx_get_domain_bundle(iip);
  35.221      slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
  35.222      if (!slot) inst.inst = bundle.slot0;
  35.223 @@ -1483,11 +1489,11 @@ if ( (cause == 0xff && opcode == 0x1e000
  35.224          status=vmx_emul_mov_from_cpuid(vcpu, inst);
  35.225          break;
  35.226      case EVENT_VMSW:
  35.227 -        printf ("Unimplemented instruction %d\n", cause);
  35.228 +        printf ("Unimplemented instruction %ld\n", cause);
  35.229  	status=IA64_FAULT;
  35.230          break;
  35.231      default:
  35.232 -        printf("unknown cause %d, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
  35.233 +        printf("unknown cause %ld, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
  35.234          while(1);
  35.235  	/* For unknown cause, let hardware to re-execute */
  35.236  	status=IA64_RETRY;
    36.1 --- a/xen/arch/ia64/vmx/vtlb.c	Thu Mar 02 10:59:34 2006 +0100
    36.2 +++ b/xen/arch/ia64/vmx/vtlb.c	Thu Mar 02 11:00:49 2006 +0100
    36.3 @@ -28,8 +28,11 @@
    36.4  #include <asm/gcc_intrin.h>
    36.5  #include <linux/interrupt.h>
    36.6  #include <asm/vmx_vcpu.h>
    36.7 +#include <asm/vmmu.h>
    36.8 +#include <asm/tlbflush.h>
    36.9  #define  MAX_CCH_LENGTH     40
   36.10  
   36.11 +thash_data_t *__alloc_chain(thash_cb_t *, thash_data_t *);
   36.12  
   36.13  static void cch_mem_init(thash_cb_t *hcb)
   36.14  {
   36.15 @@ -50,8 +53,10 @@ static thash_data_t *cch_alloc(thash_cb_
   36.16  
   36.17      if ( (p = hcb->cch_freelist) != NULL ) {
   36.18          hcb->cch_freelist = p->next;
   36.19 +        return (thash_data_t *)p;
   36.20 +    }else{
   36.21 +        return NULL;
   36.22      }
   36.23 -    return &(p->data);
   36.24  }
   36.25  
   36.26  static void cch_free(thash_cb_t *hcb, thash_data_t *cch)
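
The cch_alloc() change above fixes a latent NULL dereference: the old code fell through to return &(p->data) even when the freelist was empty and p was NULL. The new shape returns the node itself, or NULL for the caller to handle. A self-contained sketch of the same pattern, with node_t standing in for thash_data_t chained through ->next:

    #include <stddef.h>

    typedef struct node { struct node *next; } node_t;

    static node_t *freelist_pop(node_t **freelist)
    {
        node_t *p = *freelist;

        if (p != NULL) {
            *freelist = p->next;   /* unlink the head */
            return p;
        }
        return NULL;               /* empty list: the caller must cope */
    }
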
   36.27 @@ -65,36 +70,38 @@ static void cch_free(thash_cb_t *hcb, th
   36.28  /*
   36.29   * Check to see if the address rid:va is translated by the TLB
   36.30   */
   36.31 -static int __is_translated(thash_data_t *tlb, u64 rid, u64 va, CACHE_LINE_TYPE cl)
   36.32 +
   36.33 +static int __is_tr_translated(thash_data_t *tlb, u64 rid, u64 va, CACHE_LINE_TYPE cl)
   36.34  {
   36.35 -    u64  size1,sa1,ea1;
   36.36 -    if ( tlb->rid != rid ||(!tlb->tc && tlb->cl != cl) )
   36.37 -        return 0;
   36.38 -    size1 = PSIZE(tlb->ps);
   36.39 -    sa1 = tlb->vadr & ~(size1-1);   // mask the low address bits
   36.40 -    ea1 = sa1 + size1;
   36.41 -
   36.42 -    if ( va >= sa1 && (va < ea1 || ea1 == 0) )
   36.43 +    u64  size;
   36.44 +    size = PSIZE(tlb->ps);
   36.45 +    if(tlb->vadr&(size-1))
   36.46 +        while(1);
   36.47 +    if ((tlb->rid == rid) && ((va-tlb->vadr)<size))
   36.48          return 1;
   36.49      else
   36.50          return 0;
   36.51  }
   36.52  
   36.53  /*
   36.54 - * Only for TLB format.
   36.55 + * Only for GUEST TR format.
   36.56   */
   36.57  static int
   36.58 -__is_tlb_overlap(thash_cb_t *hcb,thash_data_t *entry,int rid, char cl, u64 sva, u64 eva)
   36.59 +__is_tr_overlap(thash_cb_t *hcb,thash_data_t *entry,int rid, char cl, u64 sva, u64 eva)
   36.60  {
   36.61 -    uint64_t size1,size2,sa1,ea1,ea2;
   36.62 +    uint64_t size, sa1, ea1;
   36.63  
   36.64 -    if ( entry->invalid || entry->rid != rid || (!entry->tc && entry->cl != cl ) ) {
   36.65 +//    if ( entry->invalid || entry->rid != rid || (entry->cl != cl ) ) {
   36.66 +    if ( entry->invalid || entry->rid != rid ) {
   36.67          return 0;
   36.68      }
   36.69 -    size1=PSIZE(entry->ps);
   36.70 -    sa1 = entry->vadr & ~(size1-1); // mask the low address bits
   36.71 -    ea1 = sa1 + size1;
   36.72 -    if ( (sva >= ea1 && ea1 != 0) || (eva <= sa1 && eva != 0) ) 
   36.73 +    size = PSIZE(entry->ps);
   36.74 +    sa1 = entry->vadr;
   36.75 +    ea1 = sa1 + size -1;
   36.76 +    eva -= 1;
   36.77 +    if(sa1&(size-1))
   36.78 +        while(1);
   36.79 +    if ( (sva>ea1) || (sa1>eva) )
   36.80          return 0;
   36.81      else
   36.82          return 1;
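
The rewritten checks work on inclusive bounds: __is_tr_translated() tests containment with a single unsigned comparison, (va - tlb->vadr) < size, and __is_tr_overlap() compares last bytes (ea1 = sa1 + size - 1, with eva -= 1) so a region ending exactly at the top of the 64-bit space no longer needs the ea1 == 0 special case the old code carried. A standalone sketch of the overlap test under those assumptions:

    #include <stdint.h>

    /* Overlap of [sa1, sa1+size1) and [sa2, sa2+size2), compared through
     * their last bytes so a range ending at 2^64 still has a valid end. */
    static int ranges_overlap(uint64_t sa1, uint64_t size1,
                              uint64_t sa2, uint64_t size2)
    {
        uint64_t ea1 = sa1 + size1 - 1;   /* inclusive end, as in ea1 above */
        uint64_t ea2 = sa2 + size2 - 1;   /* matches the eva -= 1 step */

        return !(sa2 > ea1 || sa1 > ea2);
    }
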
   36.83 @@ -103,9 +110,11 @@ static int
   36.84  
   36.85  static void __rem_tr (thash_cb_t *hcb, thash_data_t *tr)
   36.86  {
   36.87 +/*
   36.88      if ( hcb->remove_notifier ) {
   36.89          (hcb->remove_notifier)(hcb,tr);
   36.90      }
   36.91 +*/
   36.92      tr->invalid = 1;
   36.93  }
   36.94  
   36.95 @@ -142,7 +151,7 @@ static void rep_tr(thash_cb_t *hcb,thash
   36.96      else {
   36.97          tr = &DTR(hcb,idx);
   36.98      }
   36.99 -    if ( !INVALID_TLB(tr) ) {
  36.100 +    if ( !INVALID_TR(tr) ) {
  36.101          __rem_tr(hcb, tr);
  36.102      }
  36.103      __set_tr (tr, insert, idx);
  36.104 @@ -151,6 +160,7 @@ static void rep_tr(thash_cb_t *hcb,thash
  36.105  /*
  36.106   * remove TR entry.
  36.107   */
  36.108 +/*
  36.109  static void rem_tr(thash_cb_t *hcb,CACHE_LINE_TYPE cl, int idx)
  36.110  {
  36.111      thash_data_t *tr;
  36.112 @@ -161,17 +171,18 @@ static void rem_tr(thash_cb_t *hcb,CACHE
  36.113      else {
  36.114          tr = &DTR(hcb,idx);
  36.115      }
  36.116 -    if ( !INVALID_TLB(tr) ) {
  36.117 +    if ( !INVALID_TR(tr) ) {
  36.118          __rem_tr(hcb, tr);
  36.119      }
  36.120  }
  36.121 -
  36.122 + */
  36.123  /*
  36.124   * Delete an thash entry in collision chain.
  36.125   *  prev: the previous entry.
  36.126   *  rem: the removed entry.
  36.127   */
  36.128 -static void __rem_chain(thash_cb_t *hcb/*, thash_data_t *prev*/, thash_data_t *rem)
  36.129 +/*
  36.130 +static void __rem_chain(thash_cb_t *hcb, thash_data_t *prev, thash_data_t *rem)
  36.131  {
  36.132      //prev->next = rem->next;
  36.133      if ( hcb->remove_notifier ) {
  36.134 @@ -179,6 +190,7 @@ static void __rem_chain(thash_cb_t *hcb/
  36.135      }
  36.136      cch_free (hcb, rem);
  36.137  }
  36.138 + */
  36.139  
  36.140  /*
  36.141   * Delete an thash entry leading collision chain.
  36.142 @@ -187,15 +199,16 @@ static void __rem_hash_head(thash_cb_t *
  36.143  {
  36.144      thash_data_t *next=hash->next;
  36.145  
  36.146 -    if ( hcb->remove_notifier ) {
  36.147 +/*    if ( hcb->remove_notifier ) {
  36.148          (hcb->remove_notifier)(hcb,hash);
  36.149 -    }
  36.150 +    } */
  36.151      if ( next != NULL ) {
  36.152 +        next->len=hash->len-1;
  36.153          *hash = *next;
  36.154          cch_free (hcb, next);
  36.155      }
  36.156      else {
  36.157 -        INVALIDATE_HASH(hcb, hash);
  36.158 +        INVALIDATE_HASH_HEADER(hcb, hash);
  36.159      }
  36.160  }
  36.161  
  36.162 @@ -215,8 +228,8 @@ thash_data_t *__vtr_lookup(thash_cb_t *h
  36.163          num = NDTRS;
  36.164      }
  36.165      for ( i=0; i<num; i++ ) {
  36.166 -        if ( !INVALID_ENTRY(hcb,&tr[i]) &&
  36.167 -            __is_translated(&tr[i], rid, va, cl) )
  36.168 +        if ( !INVALID_TR(&tr[i]) &&
  36.169 +            __is_tr_translated(&tr[i], rid, va, cl) )
  36.170              return &tr[i];
  36.171      }
  36.172      return NULL;
  36.173 @@ -227,6 +240,7 @@ thash_data_t *__vtr_lookup(thash_cb_t *h
  36.174   * Find overlap VHPT entry within current collision chain
  36.175   * base on internal priv info.
  36.176   */
  36.177 +/*
  36.178  static inline thash_data_t* _vhpt_next_overlap_in_chain(thash_cb_t *hcb)
  36.179  {
  36.180      thash_data_t    *cch;
  36.181 @@ -240,26 +254,27 @@ static inline thash_data_t* _vhpt_next_o
  36.182      }
  36.183      return NULL;
  36.184  }
  36.185 -
  36.186 +*/
  36.187  /*
  36.188   * Find overlap TLB/VHPT entry within current collision chain
  36.189   * base on internal priv info.
  36.190   */
  36.191 +/*
  36.192  static thash_data_t *_vtlb_next_overlap_in_chain(thash_cb_t *hcb)
  36.193  {
  36.194      thash_data_t    *cch;
  36.195      thash_internal_t *priv = &hcb->priv;
  36.196  
  36.197 -    /* Find overlap TLB entry */
  36.198 +    // Find overlap TLB entry
  36.199      for (cch=priv->cur_cch; cch; cch = cch->next) {
  36.200          if ( ( cch->tc ? priv->s_sect.tc : priv->s_sect.tr )  &&
  36.201 -            __is_tlb_overlap(hcb, cch, priv->rid, priv->cl,
  36.202 -                priv->_curva, priv->_eva) ) {
  36.203 +            __is_translated( cch, priv->rid, priv->_curva, priv->cl)) {
  36.204              return cch;
  36.205          }
  36.206      }
  36.207      return NULL;
  36.208  }
  36.209 + */
  36.210  
  36.211  /*
  36.212   * Get the machine format of VHPT entry.
  36.213 @@ -281,26 +296,190 @@ int __tlb_to_vhpt(thash_cb_t *hcb,
  36.214              thash_data_t *tlb, u64 va,
  36.215              thash_data_t *vhpt)
  36.216  {
  36.217 -    u64 pages,mfn;
  36.218 -    ia64_rr vrr;
  36.219 -
  36.220 +    u64 padr,pte;
  36.221 +//    ia64_rr vrr;
  36.222      ASSERT ( hcb->ht == THASH_VHPT );
  36.223 -    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
  36.224 -    pages = PSIZE(vrr.ps) >> PAGE_SHIFT;
  36.225 -    mfn = (hcb->vs->get_mfn)(DOMID_SELF,tlb->ppn, pages);
  36.226 -    if ( mfn == INVALID_MFN ) return 0;
  36.227 -
  36.228 +//    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
  36.229 +    padr = tlb->ppn >>(tlb->ps-ARCH_PAGE_SHIFT)<<tlb->ps;
  36.230 +    padr += va&((1UL<<tlb->ps)-1);
  36.231 +    pte=lookup_domain_mpa(current->domain,padr);
  36.232 +    if((pte>>56))
  36.233 +        return 0;
  36.234      // TODO with machine discontinuous address space issue.
  36.235 -    vhpt->etag = (hcb->vs->tag_func)( hcb->pta, tlb->vadr);
  36.236 +    vhpt->etag = ia64_ttag(va);
  36.237      //vhpt->ti = 0;
  36.238      vhpt->itir = tlb->itir & ~ITIR_RV_MASK;
  36.239      vhpt->page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
  36.240 -    vhpt->ppn = mfn;
  36.241 +    vhpt->ps = PAGE_SHIFT;
  36.242 +    vhpt->ppn = (pte&((1UL<<IA64_MAX_PHYS_BITS)-(1UL<<PAGE_SHIFT)))>>ARCH_PAGE_SHIFT;
  36.243      vhpt->next = 0;
  36.244      return 1;
  36.245  }
  36.246  
  36.247 +static void thash_remove_cch(thash_cb_t *hcb, thash_data_t *hash)
  36.248 +{
  36.249 +    thash_data_t *prev, *next;
  36.250 +    prev = hash; next= hash->next;
  36.251 +    while(next){
   36.252 +        prev=next;
   36.253 +        next=prev->next;
   36.254 +        cch_free(hcb, prev);
  36.255 +    }
  36.256 +    hash->next = NULL;
  36.257 +    hash->len = 0;
  36.258 +}
  36.259  
   36.260 +/* the VHPT only holds entries whose page size is PAGE_SIZE */
  36.261 +
  36.262 +void thash_vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
  36.263 +{
  36.264 +    thash_data_t   vhpt_entry, *hash_table, *cch;
  36.265 +//    ia64_rr vrr;
  36.266 +
  36.267 +    if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) {
  36.268 +        return;
  36.269 +    //panic("Can't convert to machine VHPT entry\n");
  36.270 +    }
  36.271 +
  36.272 +    hash_table = (thash_data_t *)ia64_thash(va);
  36.273 +    if( INVALID_VHPT(hash_table) ) {
  36.274 +        *hash_table = vhpt_entry;
  36.275 +        hash_table->next = 0;
  36.276 +	return;
  36.277 +    }
  36.278 +
  36.279 +    cch = hash_table;
  36.280 +    while(cch){
  36.281 +        if(cch->etag == vhpt_entry.etag){
  36.282 +            if(cch->ppn == vhpt_entry.ppn)
  36.283 +                return;
  36.284 +            else
  36.285 +                while(1);
  36.286 +        }
  36.287 +        cch = cch->next;
  36.288 +    }
   36.289 +    if(hash_table->len>=MAX_CCN_DEPTH){
   36.290 +        thash_remove_cch(hcb, hash_table);
   36.291 +        cch = cch_alloc(hcb);
   36.292 +        *cch = *hash_table;
   36.293 +        *hash_table = vhpt_entry;
   36.294 +        hash_table->len = 1;
   36.295 +        hash_table->next = cch;
   36.296 +        return;
   36.297 +    }
   36.298 +
  36.299 +    // TODO: Add collision chain length limitation.
   36.300 +    cch = __alloc_chain(hcb,entry);
   36.301 +    if(cch == NULL){
   36.302 +        *hash_table = vhpt_entry;
   36.303 +        hash_table->next = 0;
   36.304 +    }else{
   36.305 +        *cch = *hash_table;
   36.306 +        *hash_table = vhpt_entry;
   36.307 +        hash_table->next = cch;
   36.308 +        hash_table->len = cch->len + 1;
   36.309 +        cch->len = 0;
   36.310 +//        if(hash_table->tag==hash_table->next->tag)
   36.311 +//            while(1);
   36.312 +
   36.313 +    }
  36.314 +    return /*hash_table*/;
  36.315 +}
  36.316 +
  36.317 +/*
  36.318 + *   vhpt lookup
  36.319 + */
  36.320 +
  36.321 +thash_data_t * vhpt_lookup(u64 va)
  36.322 +{
  36.323 +    thash_data_t *hash;
  36.324 +    u64 tag;
  36.325 +    hash = (thash_data_t *)ia64_thash(va);
  36.326 +    tag = ia64_ttag(va);
  36.327 +    while(hash){
   36.328 +        if(hash->etag == tag)
   36.329 +            return hash;
  36.330 +        hash=hash->next;
  36.331 +    }
  36.332 +    return NULL;
  36.333 +}
  36.334 +
  36.335 +
  36.336 +/*
  36.337 + *  purge software guest tlb
  36.338 + */
  36.339 +
  36.340 +static void vtlb_purge(thash_cb_t *hcb, u64 va, u64 ps)
  36.341 +{
  36.342 +    thash_data_t *hash_table, *prev, *next;
  36.343 +    u64 start, end, size, tag, rid;
  36.344 +    ia64_rr vrr;
  36.345 +    vrr=vmx_vcpu_rr(current, va);
  36.346 +    rid = vrr.rid;
  36.347 +    size = PSIZE(ps);
  36.348 +    start = va & (-size);
  36.349 +    end = start + size;
  36.350 +    while(start < end){
  36.351 +        hash_table = vsa_thash(hcb->pta, start, vrr.rrval, &tag);
   36.352 +//      tag = ia64_ttag(start);
   36.353 +        if(!INVALID_TLB(hash_table)){
   36.354 +            if(hash_table->etag == tag){
   36.355 +                __rem_hash_head(hcb, hash_table);
   36.356 +            }
   36.357 +            else{
   36.358 +                prev=hash_table;
   36.359 +                next=prev->next;
   36.360 +                while(next){
   36.361 +                    if(next->etag == tag){
   36.362 +                        prev->next=next->next;
   36.363 +                        cch_free(hcb,next);
   36.364 +                        hash_table->len--;
   36.365 +                        break;
   36.366 +                    }
   36.367 +                    prev=next;
   36.368 +                    next=next->next;
   36.369 +                }
   36.370 +            }
   36.371 +        }
   36.372 +        start += PAGE_SIZE;
  36.373 +    }
  36.374 +//    machine_tlb_purge(va, ps);
  36.375 +}
  36.376 +/*
  36.377 + *  purge VHPT and machine TLB
  36.378 + */
  36.379 +
  36.380 +static void vhpt_purge(thash_cb_t *hcb, u64 va, u64 ps)
  36.381 +{
  36.382 +    thash_data_t *hash_table, *prev, *next;
  36.383 +    u64 start, end, size, tag;
  36.384 +    size = PSIZE(ps);
  36.385 +    start = va & (-size);
  36.386 +    end = start + size;
  36.387 +    while(start < end){
   36.388 +        hash_table = (thash_data_t *)ia64_thash(start);
   36.389 +        tag = ia64_ttag(start);
   36.390 +        if(hash_table->etag == tag){
   36.391 +            __rem_hash_head(hcb, hash_table);
   36.392 +        }
   36.393 +        else{
   36.394 +            prev=hash_table;
   36.395 +            next=prev->next;
   36.396 +            while(next){
   36.397 +                if(next->etag == tag){
   36.398 +                    prev->next=next->next;
   36.399 +                    cch_free(hcb,next);
   36.400 +                    hash_table->len--;
   36.401 +                    break;
   36.402 +                }
   36.403 +                prev=next;
   36.404 +                next=next->next;
   36.405 +            }
   36.406 +        }
   36.407 +        start += PAGE_SIZE;
   36.408 +    }
  36.409 +    machine_tlb_purge(va, ps);
  36.410 +}
  36.411  /*
  36.412   * Insert an entry to hash table. 
  36.413   *    NOTES:
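
The block above replaces the notifier-driven VHPT path with direct management: __tlb_to_vhpt() now translates the guest ppn through lookup_domain_mpa() instead of a get_mfn hook, thash_vhpt_insert() caps each collision chain at MAX_CCN_DEPTH by recycling the whole chain, and vtlb_purge()/vhpt_purge() walk the covered range one PAGE_SIZE step at a time, unlinking entries whose tag matches. A self-contained sketch of the capped head-insert; entry_t and DEPTH_MAX are simplified stand-ins for thash_data_t and MAX_CCN_DEPTH, and malloc/free stand in for cch_alloc/cch_free:

    #include <stdlib.h>

    #define DEPTH_MAX 3   /* stand-in for MAX_CCN_DEPTH */

    typedef struct entry {
        unsigned long tag;
        struct entry *next;
        int len;          /* chain length, kept in the head entry only */
    } entry_t;

    /* Drop every chained node behind the head (cf. thash_remove_cch). */
    static void free_chain(entry_t *head)
    {
        entry_t *p = head->next, *q;

        while (p) { q = p->next; free(p); p = q; }
        head->next = NULL;
        head->len = 0;
    }

    /* Insert at the head, demoting the old head into the chain. */
    static void chain_insert(entry_t *head, const entry_t *new_entry)
    {
        entry_t *spill;

        if (head->len >= DEPTH_MAX)   /* over the cap: recycle the chain */
            free_chain(head);

        spill = malloc(sizeof(*spill));
        if (spill == NULL) {          /* no chain memory: replace in place */
            *head = *new_entry;
            head->next = NULL;
            head->len = 0;
            return;
        }
        *spill = *head;               /* old head becomes second element */
        *head = *new_entry;
        head->next = spill;
        head->len = spill->len + 1;
        spill->len = 0;
    }

Keeping the length only in the head entry means the chain walkers never have to update interior nodes; a demoted node's len is simply zeroed.
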
  36.414 @@ -327,61 +506,96 @@ void thash_tr_insert(thash_cb_t *hcb, th
  36.415      entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
  36.416      entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12);
  36.417      rep_tr(hcb, entry, idx);
  36.418 +//    thash_vhpt_insert(hcb->ts->vhpt, entry, va);
  36.419      return ;
  36.420  }
  36.421 +
  36.422 +
  36.423 +/*
   36.424 + * Recycle all collision chains in the VTLB or VHPT.
  36.425 + *
  36.426 + */
  36.427 +
  36.428 +void thash_recycle_cch(thash_cb_t *hcb)
  36.429 +{
  36.430 +    thash_data_t    *hash_table;
  36.431 +
  36.432 +    hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
  36.433 +    for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
  36.434 +        thash_remove_cch(hcb,hash_table);
  36.435 +    }
  36.436 +}
  36.437 +/*
  36.438  thash_data_t *vtlb_alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
  36.439  {
  36.440      thash_data_t *cch;
  36.441 -    
  36.442 +
  36.443      cch = cch_alloc(hcb);
  36.444      if(cch == NULL){
  36.445 -        thash_purge_all(hcb);
  36.446 +        thash_recycle_cch(hcb);
  36.447 +        cch = cch_alloc(hcb);
  36.448      }
  36.449      return cch;
  36.450  }
  36.451 - 
  36.452 +*/
  36.453  
  36.454  thash_data_t *__alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
  36.455  {
  36.456      thash_data_t *cch;
  36.457 -    
  36.458 +
  36.459      cch = cch_alloc(hcb);
  36.460      if(cch == NULL){
  36.461          // recycle
  36.462 -        if ( hcb->recycle_notifier ) {
  36.463 -                hcb->recycle_notifier(hcb,(u64)entry);
  36.464 -        }
  36.465 -        thash_purge_all(hcb);
  36.466 -//        cch = cch_alloc(hcb);
  36.467 +//        if ( hcb->recycle_notifier ) {
  36.468 +//                hcb->recycle_notifier(hcb,(u64)entry);
  36.469 +//        }
  36.470 +        thash_recycle_cch(hcb);
  36.471 +        cch = cch_alloc(hcb);
  36.472      }
  36.473      return cch;
  36.474  }
  36.475 - 
  36.476 +
  36.477  /*
  36.478   * Insert an entry into hash TLB or VHPT.
  36.479   * NOTES:
  36.480   *  1: When inserting VHPT to thash, "va" is a must covered
  36.481   *  address by the inserted machine VHPT entry.
  36.482   *  2: The format of entry is always in TLB.
  36.483 - *  3: The caller need to make sure the new entry will not overlap 
   36.484 + *  3: The caller needs to make sure the new entry will not overlap
  36.485   *     with any existed entry.
  36.486   */
  36.487  void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
  36.488  {
  36.489      thash_data_t    *hash_table, *cch;
  36.490 -    int flag;
  36.491 +    /* int flag; */
  36.492      ia64_rr vrr;
  36.493 -    u64 gppn;
  36.494 -    u64 ppns, ppne;
  36.495 -
  36.496 -    hash_table = (hcb->hash_func)(hcb->pta, va);
  36.497 -    if( INVALID_ENTRY(hcb, hash_table) ) {
  36.498 +    /* u64 gppn, ppns, ppne; */
  36.499 +    u64 tag;
  36.500 +    vrr=vmx_vcpu_rr(current, va);
  36.501 +    if (vrr.ps != entry->ps) {
  36.502 +//        machine_tlb_insert(hcb->vcpu, entry);
   36.503 +        panic("not preferred ps with va: 0x%lx\n", va);
   36.504 +        return;
  36.505 +    }
  36.506 +    entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
  36.507 +    entry->ppn = PAGEALIGN(entry->ppn, entry->ps-12);
  36.508 +    hash_table = vsa_thash(hcb->pta, va, vrr.rrval, &tag);
  36.509 +    entry->etag = tag;
  36.510 +    if( INVALID_TLB(hash_table) ) {
  36.511          *hash_table = *entry;
  36.512          hash_table->next = 0;
  36.513      }
  36.514 +    else if (hash_table->len>=MAX_CCN_DEPTH){
  36.515 +        thash_remove_cch(hcb, hash_table);
  36.516 +        cch = cch_alloc(hcb);
  36.517 +        *cch = *hash_table;
  36.518 +        *hash_table = *entry;
  36.519 +        hash_table->len = 1;
  36.520 +        hash_table->next = cch;
  36.521 +    }
  36.522      else {
  36.523          // TODO: Add collision chain length limitation.
  36.524 -        cch = vtlb_alloc_chain(hcb,entry);
  36.525 +        cch = __alloc_chain(hcb,entry);
  36.526          if(cch == NULL){
  36.527              *hash_table = *entry;
  36.528              hash_table->next = 0;
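
__alloc_chain() above no longer purges the entire table when the chain freelist runs dry: it recycles the collision chains back into the freelist via thash_recycle_cch() and retries the allocation once, which is expected to succeed because recycling just refilled the list. A small self-contained sketch of the recycle-then-retry shape; pop() and the recycle_all callback stand in for cch_alloc() and thash_recycle_cch():

    #include <stddef.h>

    typedef struct node { struct node *next; } node_t;

    typedef struct cache {
        node_t *freelist;
        void (*recycle_all)(struct cache *);  /* refills the freelist */
    } cache_t;

    static node_t *pop(cache_t *c)
    {
        node_t *n = c->freelist;

        if (n)
            c->freelist = n->next;
        return n;
    }

    static node_t *alloc_chain_node(cache_t *c)
    {
        node_t *n = pop(c);

        if (n == NULL) {
            c->recycle_all(c);   /* cf. thash_recycle_cch() */
            n = pop(c);          /* now expected to succeed */
        }
        return n;
    }
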
  36.529 @@ -389,22 +603,17 @@ void vtlb_insert(thash_cb_t *hcb, thash_
  36.530              *cch = *hash_table;
  36.531              *hash_table = *entry;
  36.532              hash_table->next = cch;
  36.533 +            hash_table->len = cch->len + 1;
  36.534 +            cch->len = 0;
  36.535          }
  36.536      }
  36.537 +#if 0
  36.538      if(hcb->vcpu->domain->domain_id==0){
  36.539         thash_insert(hcb->ts->vhpt, entry, va);
  36.540          return;
  36.541      }
  36.542 -
  36.543 -#if 1
  36.544 -    vrr=vmx_vcpu_rr(current, va);
  36.545 -    if (vrr.ps != entry->ps) {
  36.546 -        machine_tlb_insert(hcb->vcpu, entry);
  36.547 -	printk("not preferred ps with va: 0x%lx\n", va);
  36.548 -	return;
  36.549 -    }
  36.550 -#endif 
  36.551 -
  36.552 +#endif
  36.553 +/*
  36.554      flag = 1;
  36.555      gppn = (POFFSET(va,entry->ps)|PAGEALIGN((entry->ppn<<12),entry->ps))>>PAGE_SHIFT;
  36.556      ppns = PAGEALIGN((entry->ppn<<12),entry->ps);
  36.557 @@ -413,46 +622,18 @@ void vtlb_insert(thash_cb_t *hcb, thash_
  36.558          flag = 0;
  36.559      if((__gpfn_is_mem(hcb->vcpu->domain, gppn)&&flag))
  36.560         thash_insert(hcb->ts->vhpt, entry, va);
  36.561 +*/
  36.562      return ;
  36.563  }
  36.564  
  36.565 -static void vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
  36.566 -{
  36.567 -    thash_data_t   vhpt_entry, *hash_table, *cch;
  36.568 -    ia64_rr vrr;
  36.569 -    if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) {
  36.570 -        panic("Can't convert to machine VHPT entry\n");
  36.571 -    }
  36.572 -    hash_table = (hcb->hash_func)(hcb->pta, va);
  36.573 -    if( INVALID_ENTRY(hcb, hash_table) ) {
  36.574 -        *hash_table = vhpt_entry;
  36.575 -        hash_table->next = 0;
  36.576 -    }
  36.577 -    else {
  36.578 -        // TODO: Add collision chain length limitation.
  36.579 -        cch = __alloc_chain(hcb,entry);
  36.580 -        if(cch == NULL){
  36.581 -            *hash_table = vhpt_entry;
  36.582 -            hash_table->next = 0;
  36.583 -        }else{
  36.584 -            *cch = *hash_table;
  36.585 -            *hash_table = vhpt_entry;
  36.586 -            hash_table->next = cch;
  36.587 -            if(hash_table->tag==hash_table->next->tag)
  36.588 -                while(1);
  36.589  
  36.590 -        }
  36.591 -
  36.592 -    }
  36.593 -    return /*hash_table*/;
  36.594 -}
  36.595 -
  36.596 +/*
  36.597  void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
  36.598  {
  36.599      thash_data_t    *hash_table;
  36.600      ia64_rr vrr;
  36.601      
  36.602 -    vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr);
  36.603 +    vrr = vmx_vcpu_rr(hcb->vcpu,entry->vadr);
  36.604      if ( entry->ps != vrr.ps && entry->tc ) {
  36.605          panic("Not support for multiple page size now\n");
  36.606      }
  36.607 @@ -461,7 +642,8 @@ void thash_insert(thash_cb_t *hcb, thash
  36.608      (hcb->ins_hash)(hcb, entry, va);
  36.609      
  36.610  }
  36.611 -
  36.612 +*/
  36.613 +/*
  36.614  static void rem_thash(thash_cb_t *hcb, thash_data_t *entry)
  36.615  {
  36.616      thash_data_t    *hash_table, *p, *q;
  36.617 @@ -482,6 +664,7 @@ static void rem_thash(thash_cb_t *hcb, t
  36.618  //            if ( PURGABLE_ENTRY(hcb,q ) ) {
  36.619                  p->next = q->next;
  36.620                  __rem_chain(hcb, entry);
  36.621 +                hash_table->len--;
  36.622  //            }
  36.623              return ;
  36.624          }
  36.625 @@ -489,7 +672,8 @@ static void rem_thash(thash_cb_t *hcb, t
  36.626      }
  36.627      panic("Entry not existed or bad sequence\n");
  36.628  }
  36.629 -
  36.630 +*/
  36.631 +/*
  36.632  static void rem_vtlb(thash_cb_t *hcb, thash_data_t *entry)
  36.633  {
  36.634      thash_data_t    *hash_table, *p, *q;
  36.635 @@ -501,7 +685,7 @@ static void rem_vtlb(thash_cb_t *hcb, th
  36.636      }
  36.637      rem_thash(hcb, entry);
  36.638  }    
  36.639 -
  36.640 +*/
  36.641  int   cch_depth=0;
  36.642  /*
  36.643   * Purge the collision chain starting from cch.
  36.644 @@ -509,6 +693,7 @@ int   cch_depth=0;
  36.645   *     For those UN-Purgable entries(FM), this function will return
  36.646   * the head of left collision chain.
  36.647   */
  36.648 +/*
  36.649  static thash_data_t *thash_rem_cch(thash_cb_t *hcb, thash_data_t *cch)
  36.650  {
  36.651      thash_data_t *next;
  36.652 @@ -532,6 +717,7 @@ static thash_data_t *thash_rem_cch(thash
  36.653          return cch;
  36.654      }
  36.655  }
  36.656 + */
  36.657  
  36.658  /*
  36.659   * Purge one hash line (include the entry in hash table).
  36.660 @@ -540,10 +726,11 @@ static thash_data_t *thash_rem_cch(thash
  36.661   *  hash: The head of collision chain (hash table)
  36.662   *
  36.663   */
  36.664 +/*
  36.665  static void thash_rem_line(thash_cb_t *hcb, thash_data_t *hash)
  36.666  {
  36.667      if ( INVALID_ENTRY(hcb, hash) ) return;
  36.668 -    
  36.669 +
  36.670      if ( hash->next ) {
  36.671          cch_depth = 0;
  36.672          hash->next = thash_rem_cch(hcb, hash->next);
  36.673 @@ -553,7 +740,7 @@ static void thash_rem_line(thash_cb_t *h
  36.674          __rem_hash_head(hcb, hash);
  36.675      }
  36.676  }
  36.677 -
  36.678 + */
  36.679  
  36.680  /*
  36.681   * Find an overlap entry in hash table and its collision chain.
  36.682 @@ -568,14 +755,18 @@ static void thash_rem_line(thash_cb_t *h
  36.683   *    NOTES:
  36.684   *
  36.685   */
  36.686 -thash_data_t *thash_find_overlap(thash_cb_t *hcb, 
  36.687 +
  36.688 +/*
  36.689 +thash_data_t *thash_find_overlap(thash_cb_t *hcb,
  36.690              thash_data_t *in, search_section_t s_sect)
  36.691  {
  36.692 -    return (hcb->find_overlap)(hcb, in->vadr, 
  36.693 +    return (hcb->find_overlap)(hcb, in->vadr,
  36.694              PSIZE(in->ps), in->rid, in->cl, s_sect);
  36.695  }
  36.696 +*/
  36.697  
  36.698 -static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb, 
  36.699 +/*
  36.700 +static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb,
  36.701          u64 va, u64 size, int rid, char cl, search_section_t s_sect)
  36.702  {
  36.703      thash_data_t    *hash_table;
  36.704 @@ -586,9 +777,9 @@ static thash_data_t *vtlb_find_overlap(t
  36.705      priv->_curva = va & ~(size-1);
  36.706      priv->_eva = priv->_curva + size;
  36.707      priv->rid = rid;
  36.708 -    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
  36.709 +    vrr = vmx_vcpu_rr(hcb->vcpu,va);
  36.710      priv->ps = vrr.ps;
  36.711 -    hash_table = (hcb->hash_func)(hcb->pta, priv->_curva);
  36.712 +    hash_table = vsa_thash(hcb->pta, priv->_curva, vrr.rrval, &tag);
  36.713      priv->s_sect = s_sect;
  36.714      priv->cl = cl;
  36.715      priv->_tr_idx = 0;
  36.716 @@ -596,8 +787,10 @@ static thash_data_t *vtlb_find_overlap(t
  36.717      priv->cur_cch = hash_table;
  36.718      return (hcb->next_overlap)(hcb);
  36.719  }
  36.720 +*/
  36.721  
  36.722 -static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb, 
  36.723 +/*
  36.724 +static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb,
  36.725          u64 va, u64 size, int rid, char cl, search_section_t s_sect)
  36.726  {
  36.727      thash_data_t    *hash_table;
  36.728 @@ -608,17 +801,43 @@ static thash_data_t *vhpt_find_overlap(t
  36.729      priv->_curva = va & ~(size-1);
  36.730      priv->_eva = priv->_curva + size;
  36.731      priv->rid = rid;
  36.732 -    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
  36.733 +    vrr = vmx_vcpu_rr(hcb->vcpu,va);
  36.734      priv->ps = vrr.ps;
  36.735 -    hash_table = (hcb->hash_func)( hcb->pta, priv->_curva);
  36.736 -    tag = (hcb->vs->tag_func)( hcb->pta, priv->_curva);
  36.737 +    hash_table = ia64_thash(priv->_curva);
  36.738 +    tag = ia64_ttag(priv->_curva);
  36.739      priv->tag = tag;
  36.740      priv->hash_base = hash_table;
  36.741      priv->cur_cch = hash_table;
  36.742      return (hcb->next_overlap)(hcb);
  36.743  }
  36.744 +*/
  36.745  
  36.746  
  36.747 +thash_data_t *vtr_find_overlap(thash_cb_t *hcb, thash_data_t *data, char cl)
  36.748 +{
  36.749 +    thash_data_t    *tr;
  36.750 +    int  i,num;
  36.751 +    u64 end;
  36.752 +
  36.753 +    if (cl == ISIDE_TLB ) {
  36.754 +        num = NITRS;
  36.755 +        tr = &ITR(hcb,0);
  36.756 +    }
  36.757 +    else {
  36.758 +        num = NDTRS;
  36.759 +        tr = &DTR(hcb,0);
  36.760 +    }
  36.761 +    end=data->vadr + PSIZE(data->ps);
  36.762 +    for (i=0; i<num; i++ ) {
  36.763 +        if ( __is_tr_overlap(hcb, &tr[i], data->rid, cl, data->vadr, end )) {
  36.764 +            return &tr[i];
  36.765 +        }
  36.766 +    }
  36.767 +    return NULL;
  36.768 +}
  36.769 +
  36.770 +
  36.771 +/*
  36.772  static thash_data_t *vtr_find_next_overlap(thash_cb_t *hcb)
  36.773  {
  36.774      thash_data_t    *tr;
  36.775 @@ -634,7 +853,7 @@ static thash_data_t *vtr_find_next_overl
  36.776          tr = &DTR(hcb,0);
  36.777      }
  36.778      for (; priv->_tr_idx < num; priv->_tr_idx ++ ) {
  36.779 -        if ( __is_tlb_overlap(hcb, &tr[priv->_tr_idx],
  36.780 +        if ( __is_tr_overlap(hcb, &tr[priv->_tr_idx],
  36.781                  priv->rid, priv->cl,
  36.782                  priv->_curva, priv->_eva) ) {
  36.783              return &tr[priv->_tr_idx++];
  36.784 @@ -642,17 +861,19 @@ static thash_data_t *vtr_find_next_overl
  36.785      }
  36.786      return NULL;
  36.787  }
  36.788 +*/
  36.789  
  36.790  /*
  36.791   * Similar with vtlb_next_overlap but find next entry.
  36.792   *    NOTES:
  36.793   *  Intermediate position information is stored in hcb->priv.
  36.794   */
  36.795 +/*
  36.796  static thash_data_t *vtlb_next_overlap(thash_cb_t *hcb)
  36.797  {
  36.798      thash_data_t    *ovl;
  36.799      thash_internal_t *priv = &hcb->priv;
  36.800 -    u64 addr,rr_psize;
  36.801 +    u64 addr,rr_psize,tag;
  36.802      ia64_rr vrr;
  36.803  
  36.804      if ( priv->s_sect.tr ) {
  36.805 @@ -661,7 +882,7 @@ static thash_data_t *vtlb_next_overlap(t
  36.806          priv->s_sect.tr = 0;
  36.807      }
  36.808      if ( priv->s_sect.v == 0 ) return NULL;
  36.809 -    vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva);
  36.810 +    vrr = vmx_vcpu_rr(hcb->vcpu,priv->_curva);
  36.811      rr_psize = PSIZE(vrr.ps);
  36.812  
  36.813      while ( priv->_curva < priv->_eva ) {
  36.814 @@ -673,12 +894,15 @@ static thash_data_t *vtlb_next_overlap(t
  36.815              }
  36.816          }
  36.817          priv->_curva += rr_psize;
  36.818 -        priv->hash_base = (hcb->hash_func)( hcb->pta, priv->_curva);
  36.819 +        priv->hash_base = vsa_thash( hcb->pta, priv->_curva, vrr.rrval, &tag);
  36.820          priv->cur_cch = priv->hash_base;
  36.821      }
  36.822      return NULL;
  36.823  }
  36.824 + */
  36.825  
  36.826 +
  36.827 +/*
  36.828  static thash_data_t *vhpt_next_overlap(thash_cb_t *hcb)
  36.829  {
  36.830      thash_data_t    *ovl;
  36.831 @@ -686,7 +910,7 @@ static thash_data_t *vhpt_next_overlap(t
  36.832      u64 addr,rr_psize;
  36.833      ia64_rr vrr;
  36.834  
  36.835 -    vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva);
  36.836 +    vrr = vmx_vcpu_rr(hcb->vcpu,priv->_curva);
  36.837      rr_psize = PSIZE(vrr.ps);
  36.838  
  36.839      while ( priv->_curva < priv->_eva ) {
  36.840 @@ -698,13 +922,13 @@ static thash_data_t *vhpt_next_overlap(t
  36.841              }
  36.842          }
  36.843          priv->_curva += rr_psize;
  36.844 -        priv->hash_base = (hcb->hash_func)( hcb->pta, priv->_curva);
  36.845 -        priv->tag = (hcb->vs->tag_func)( hcb->pta, priv->_curva);
  36.846 +        priv->hash_base = ia64_thash(priv->_curva);
  36.847 +        priv->tag = ia64_ttag(priv->_curva);
  36.848          priv->cur_cch = priv->hash_base;
  36.849      }
  36.850      return NULL;
  36.851  }
  36.852 -
  36.853 +*/
  36.854  
  36.855  /*
  36.856   * Find and purge overlap entries in hash table and its collision chain.
  36.857 @@ -716,7 +940,7 @@ static thash_data_t *vhpt_next_overlap(t
  36.858   *    NOTES:
  36.859   *
  36.860   */
  36.861 -void thash_purge_entries(thash_cb_t *hcb, 
  36.862 +void thash_purge_entries(thash_cb_t *hcb,
  36.863              thash_data_t *in, search_section_t p_sect)
  36.864  {
  36.865      return thash_purge_entries_ex(hcb, in->rid, in->vadr,
  36.866 @@ -724,10 +948,11 @@ void thash_purge_entries(thash_cb_t *hcb
  36.867  }
  36.868  
  36.869  void thash_purge_entries_ex(thash_cb_t *hcb,
  36.870 -            u64 rid, u64 va, u64 ps, 
  36.871 -            search_section_t p_sect, 
  36.872 +            u64 rid, u64 va, u64 ps,
  36.873 +            search_section_t p_sect,
  36.874              CACHE_LINE_TYPE cl)
  36.875  {
  36.876 +/*
  36.877      thash_data_t    *ovl;
  36.878  
  36.879      ovl = (hcb->find_overlap)(hcb, va, PSIZE(ps), rid, cl, p_sect);
  36.880 @@ -735,19 +960,22 @@ void thash_purge_entries_ex(thash_cb_t *
  36.881          (hcb->rem_hash)(hcb, ovl);
  36.882          ovl = (hcb->next_overlap)(hcb);
  36.883      };
  36.884 + */
  36.885 +    vtlb_purge(hcb, va, ps);
  36.886 +    vhpt_purge(hcb->ts->vhpt, va, ps);
  36.887  }
  36.888  
  36.889  /*
  36.890   * Purge overlap TCs and then insert the new entry to emulate itc ops.
  36.891   *    Notes: Only TC entry can purge and insert.
  36.892   */
  36.893 -void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in)
  36.894 +void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in, u64 va)
  36.895  {
  36.896 -    thash_data_t    *ovl;
  36.897 +    /* thash_data_t    *ovl; */
  36.898      search_section_t sections;
  36.899  
  36.900  #ifdef   XEN_DEBUGGER
  36.901 -    vrr = (hcb->get_rr_fn)(hcb->vcpu,in->vadr);
  36.902 +    vrr = vmx_vcpu_rr(hcb->vcpu,in->vadr);
  36.903  	if ( in->ps != vrr.ps || hcb->ht != THASH_TLB || !in->tc ) {
  36.904  		panic ("Oops, wrong call for purge_and_insert\n");
  36.905  		return;
  36.906 @@ -757,10 +985,14 @@ void thash_purge_and_insert(thash_cb_t *
  36.907      in->ppn = PAGEALIGN(in->ppn, in->ps-12);
  36.908      sections.tr = 0;
  36.909      sections.tc = 1;
  36.910 +/*
  36.911      ovl = (hcb->find_overlap)(hcb, in->vadr, PSIZE(in->ps),
  36.912      				 in->rid, in->cl, sections);
  36.913      if(ovl)
  36.914          (hcb->rem_hash)(hcb, ovl);
  36.915 + */
  36.916 +    vtlb_purge(hcb, va, in->ps);
  36.917 +    vhpt_purge(hcb->ts->vhpt, va, in->ps);
  36.918  #ifdef   XEN_DEBUGGER
  36.919      ovl = (hcb->next_overlap)(hcb);
  36.920      if ( ovl ) {
  36.921 @@ -768,7 +1000,9 @@ void thash_purge_and_insert(thash_cb_t *
  36.922  		return;
  36.923      }
  36.924  #endif
  36.925 -    (hcb->ins_hash)(hcb, in, in->vadr);
  36.926 +    if(in->ps!=PAGE_SHIFT)
  36.927 +        vtlb_insert(hcb, in, va);
  36.928 +    thash_vhpt_insert(hcb->ts->vhpt, in, va);
  36.929  }
  36.930  /*
  36.931   * Purge one hash line (include the entry in hash table).
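
thash_purge_and_insert() above drops the overlap-iterator machinery: it purges the span from both the software vTLB and the machine VHPT, then inserts into the vTLB only when the entry is not a native-sized page (PAGE_SHIFT-sized translations live in the VHPT alone) and into the VHPT unconditionally. A control-flow sketch with stub helpers standing in for vtlb_purge/vhpt_purge and vtlb_insert/thash_vhpt_insert; PAGE_SHIFT_SKETCH is an assumed 16K page configuration, not a hypervisor constant:

    #include <stdint.h>

    #define PAGE_SHIFT_SKETCH 14   /* assumed 16K native pages */

    typedef struct { uint64_t ps; } entry_t;   /* simplified thash_data_t */
    typedef struct { int unused; } tlb_t;      /* simplified thash_cb_t */

    static void purge_sketch(tlb_t *t, uint64_t va, uint64_t ps)
    { (void)t; (void)va; (void)ps; }
    static void insert_sketch(tlb_t *t, entry_t *e, uint64_t va)
    { (void)t; (void)e; (void)va; }

    static void purge_and_insert(tlb_t *vtlb, tlb_t *vhpt,
                                 entry_t *in, uint64_t va)
    {
        purge_sketch(vtlb, va, in->ps);        /* cf. vtlb_purge() */
        purge_sketch(vhpt, va, in->ps);        /* cf. vhpt_purge() */

        if (in->ps != PAGE_SHIFT_SKETCH)       /* odd sizes need the vTLB */
            insert_sketch(vtlb, in, va);
        insert_sketch(vhpt, in, va);           /* the VHPT always gets it */
    }
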
  36.932 @@ -777,6 +1011,7 @@ void thash_purge_and_insert(thash_cb_t *
  36.933   *  hash: The head of collision chain (hash table)
  36.934   *
  36.935   */
  36.936 +/*
  36.937  static void thash_purge_line(thash_cb_t *hcb, thash_data_t *hash)
  36.938  {
  36.939      if ( INVALID_ENTRY(hcb, hash) ) return;
  36.940 @@ -790,6 +1025,16 @@ static void thash_purge_line(thash_cb_t 
  36.941      // Then hash table itself.
  36.942      INVALIDATE_HASH(hcb, hash);
  36.943  }
  36.944 +*/
  36.945 +
  36.946 +
  36.947 +
  36.948 +
  36.949 +
  36.950 +
  36.951 +
  36.952 +
  36.953 +
  36.954  /*
  36.955   * Purge all TCs or VHPT entries including those in Hash table.
  36.956   *
  36.957 @@ -799,7 +1044,10 @@ static void thash_purge_line(thash_cb_t 
  36.958  void thash_purge_all(thash_cb_t *hcb)
  36.959  {
  36.960      thash_data_t    *hash_table;
  36.961 -    
  36.962 +    /* thash_data_t    *entry; */
  36.963 +    thash_cb_t  *vhpt;
  36.964 +    /* u64 i, start, end; */
  36.965 +
  36.966  #ifdef  VTLB_DEBUG
  36.967  	extern u64  sanity_check;
  36.968      static u64 statistics_before_purge_all=0;
  36.969 @@ -808,18 +1056,35 @@ void thash_purge_all(thash_cb_t *hcb)
  36.970          check_vtlb_sanity(hcb);
  36.971      }
  36.972  #endif
  36.973 +    ASSERT ( hcb->ht == THASH_TLB );
  36.974  
  36.975      hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
  36.976      for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
  36.977 -        thash_purge_line(hcb, hash_table);
  36.978 +        INVALIDATE_TLB_HEADER(hash_table);
  36.979 +    }
  36.980 +    cch_mem_init (hcb);
  36.981 +
  36.982 +    vhpt = hcb->ts->vhpt;
  36.983 +    hash_table = (thash_data_t*)((u64)vhpt->hash + vhpt->hash_sz);
  36.984 +    for (--hash_table;(u64)hash_table >= (u64)vhpt->hash;hash_table--) {
  36.985 +        INVALIDATE_VHPT_HEADER(hash_table);
  36.986      }
  36.987 -    if(hcb->ht== THASH_TLB) {
  36.988 -        hcb = hcb->ts->vhpt;
  36.989 -        hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
  36.990 -        for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
  36.991 -            thash_purge_line(hcb, hash_table);
  36.992 +    cch_mem_init (vhpt);
  36.993 +    
  36.994 +/*
  36.995 +    entry = &hcb->ts->itr[0];
  36.996 +    for(i=0; i< (NITRS+NDTRS); i++){
  36.997 +        if(!INVALID_TLB(entry)){
  36.998 +            start=entry->vadr & (-PSIZE(entry->ps));
  36.999 +            end = start + PSIZE(entry->ps);
 36.1000 +            while(start<end){
 36.1001 +                thash_vhpt_insert(vhpt, entry, start);
 36.1002 +                start += PAGE_SIZE;
 36.1003 +            }
 36.1004          }
 36.1005 +        entry++;
 36.1006      }
 36.1007 +*/
 36.1008      local_flush_tlb_all();
 36.1009  }
 36.1010  
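
thash_purge_all() above stops walking chains entry by entry; it invalidates every hash head in the vTLB and its paired VHPT, resets the chain allocator wholesale with cch_mem_init(), and ends with a machine-wide local_flush_tlb_all(). A sketch of the backwards table walk, with entry_t as a simplified stand-in and the body of the loop standing in for INVALIDATE_TLB_HEADER():

    #include <stddef.h>

    typedef struct entry { struct entry *next; int len; } entry_t;

    /* Walk the table from its last slot back down to the first. */
    static void invalidate_all(entry_t *hash, size_t hash_sz)
    {
        entry_t *e = (entry_t *)((char *)hash + hash_sz);

        for (--e; e >= hash; e--) {
            e->next = NULL;   /* cf. INVALIDATE_TLB_HEADER() */
            e->len = 0;
        }
    }
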
 36.1011 @@ -845,20 +1110,21 @@ thash_data_t *vtlb_lookup_ex(thash_cb_t 
 36.1012      u64     tag;
 36.1013      ia64_rr vrr;
 36.1014     
 36.1015 -    ASSERT ( hcb->ht == THASH_VTLB );
 36.1016 +    ASSERT ( hcb->ht == THASH_TLB );
 36.1017      
 36.1018      cch = __vtr_lookup(hcb, rid, va, cl);;
 36.1019      if ( cch ) return cch;
 36.1020  
 36.1021 -    vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
 36.1022 -    hash_table = (hcb->hash_func)( hcb->pta, va);
 36.1023 +    vrr = vmx_vcpu_rr(hcb->vcpu,va);
 36.1024 +    hash_table = vsa_thash( hcb->pta, va, vrr.rrval, &tag);
 36.1025  
 36.1026      if ( INVALID_ENTRY(hcb, hash_table ) )
 36.1027          return NULL;
 36.1028  
 36.1029          
 36.1030      for (cch=hash_table; cch; cch = cch->next) {
 36.1031 -        if ( __is_translated(cch, rid, va, cl) )
 36.1032 +//        if ( __is_translated(cch, rid, va, cl) )
 36.1033 +        if(cch->etag == tag)
 36.1034              return cch;
 36.1035      }
 36.1036      return NULL;
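
The lookup above now keys purely on the tag computed by vsa_thash(): since the hash step already folds rid, va and the region's page size into the tag, the chain walk reduces to one 64-bit compare per node instead of the old per-entry range test. A self-contained sketch of that walk, with entry_t again a simplified stand-in:

    #include <stdint.h>
    #include <stddef.h>

    typedef struct entry {
        uint64_t etag;
        struct entry *next;
    } entry_t;

    static entry_t *chain_lookup(entry_t *head, uint64_t tag)
    {
        entry_t *cch;

        for (cch = head; cch; cch = cch->next)
            if (cch->etag == tag)
                return cch;
        return NULL;
    }
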
 36.1037 @@ -871,6 +1137,7 @@ thash_data_t *vtlb_lookup_ex(thash_cb_t 
 36.1038   *          1: failure
 36.1039   *          0: success
 36.1040   */
 36.1041 +/*
 36.1042  int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock)
 36.1043  {
 36.1044  	thash_data_t	*ovl;
 36.1045 @@ -900,6 +1167,7 @@ int thash_lock_tc(thash_cb_t *hcb, u64 v
 36.1046  	}
 36.1047  	return 1;
 36.1048  }
 36.1049 +*/
 36.1050  
 36.1051  /*
 36.1052   * Notifier when TLB is deleted from hash table and its collision chain.
 36.1053 @@ -911,15 +1179,17 @@ int thash_lock_tc(thash_cb_t *hcb, u64 v
 36.1054   *  2: The format of entry is always in TLB.
 36.1055   *
 36.1056   */
 36.1057 -void tlb_remove_notifier(thash_cb_t *hcb, thash_data_t *entry)
 36.1058 -{
 36.1059 -    thash_cb_t  *vhpt;
 36.1060 -    search_section_t    s_sect;
 36.1061 +//void tlb_remove_notifier(thash_cb_t *hcb, thash_data_t *entry)
 36.1062 +//{
 36.1063 +//    vhpt_purge(hcb->ts->vhpt,entry->vadr,entry->ps);
 36.1064 +//    thash_cb_t  *vhpt;
 36.1065      
 36.1066 -    s_sect.v = 0;
 36.1067 -    thash_purge_entries(hcb->ts->vhpt, entry, s_sect);
 36.1068 -    machine_tlb_purge(entry->vadr, entry->ps);
 36.1069 -}
 36.1070 +//    search_section_t    s_sect;
 36.1071 +    
 36.1072 +//    s_sect.v = 0;
 36.1073 +//    thash_purge_entries(hcb->ts->vhpt, entry, s_sect);
 36.1074 +//    machine_tlb_purge(entry->vadr, entry->ps);
 36.1075 +//}
 36.1076  
 36.1077  /*
 36.1078   * Initialize internal control data before service.
 36.1079 @@ -930,34 +1200,33 @@ void thash_init(thash_cb_t *hcb, u64 sz)
 36.1080  
 36.1081      cch_mem_init (hcb);
 36.1082      hcb->magic = THASH_CB_MAGIC;
 36.1083 -    hcb->pta.val = hcb->hash;
 36.1084 +    hcb->pta.val = (unsigned long)hcb->hash;
 36.1085      hcb->pta.vf = 1;
 36.1086      hcb->pta.ve = 1;
 36.1087      hcb->pta.size = sz;
 36.1088 -    hcb->get_rr_fn = vmmu_get_rr;
 36.1089 +//    hcb->get_rr_fn = vmmu_get_rr;
 36.1090      ASSERT ( hcb->hash_sz % sizeof(thash_data_t) == 0 );
 36.1091      if ( hcb->ht == THASH_TLB ) {
 36.1092 -        hcb->remove_notifier =  tlb_remove_notifier;
 36.1093 -        hcb->find_overlap = vtlb_find_overlap;
 36.1094 -        hcb->next_overlap = vtlb_next_overlap;
 36.1095 -        hcb->rem_hash = rem_vtlb;
 36.1096 -        hcb->ins_hash = vtlb_insert;
 36.1097 +//        hcb->remove_notifier =  NULL;	//tlb_remove_notifier;
 36.1098 +//        hcb->find_overlap = vtlb_find_overlap;
 36.1099 +//        hcb->next_overlap = vtlb_next_overlap;
 36.1100 +//        hcb->rem_hash = rem_vtlb;
 36.1101 +//        hcb->ins_hash = vtlb_insert;
 36.1102          __init_tr(hcb);
 36.1103      }
 36.1104      else {
 36.1105 -        hcb->remove_notifier =  NULL;
 36.1106 -        hcb->find_overlap = vhpt_find_overlap;
 36.1107 -        hcb->next_overlap = vhpt_next_overlap;
 36.1108 -        hcb->rem_hash = rem_thash;
 36.1109 -        hcb->ins_hash = vhpt_insert;
 36.1110 +//        hcb->remove_notifier =  NULL;
 36.1111 +//        hcb->find_overlap = vhpt_find_overlap;
 36.1112 +//        hcb->next_overlap = vhpt_next_overlap;
 36.1113 +//        hcb->rem_hash = rem_thash;
 36.1114 +//        hcb->ins_hash = thash_vhpt_insert;
 36.1115      }
 36.1116      hash_table = (thash_data_t*)((u64)hcb->hash + hcb->hash_sz);
 36.1117 -    
 36.1118 +
 36.1119      for (--hash_table;(u64)hash_table >= (u64)hcb->hash;hash_table--) {
 36.1120 -        INVALIDATE_HASH(hcb,hash_table);
 36.1121 +        INVALIDATE_HASH_HEADER(hcb,hash_table);
 36.1122      }
 36.1123  }
 36.1124 -#define VTLB_DEBUG
 36.1125  #ifdef  VTLB_DEBUG
 36.1126  static  u64 cch_length_statistics[MAX_CCH_LENGTH+1];
 36.1127  u64  sanity_check=0;
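
A pattern running through this whole file: thash_init() stops installing per-table function pointers (hash_func, get_rr_fn, ins_hash and friends) and the call sites name helpers such as vsa_thash(), ia64_thash() and vmx_vcpu_rr() directly, removing a load-and-branch per lookup and letting the compiler inline. A minimal before/after sketch; direct_hash() is a local stub, not one of the hypervisor's helpers:

    #include <stdint.h>

    typedef struct { uint64_t v; } result_t;

    static result_t *direct_hash(uint64_t pta, uint64_t va)   /* stub */
    {
        static result_t r;
        (void)pta; (void)va;
        return &r;
    }

    /* Old style: one indirect call per lookup through the table struct. */
    typedef struct {
        result_t *(*hash_func)(uint64_t pta, uint64_t va);
    } table_t;

    static result_t *lookup_old(table_t *t, uint64_t pta, uint64_t va)
    {
        return (t->hash_func)(pta, va);
    }

    /* New style: the helper is named directly and can be inlined. */
    static result_t *lookup_new(uint64_t pta, uint64_t va)
    {
        return direct_hash(pta, va);
    }
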
 36.1128 @@ -967,7 +1236,7 @@ u64 vtlb_chain_sanity(thash_cb_t *vtlb, 
 36.1129      thash_data_t    *ovl;
 36.1130      search_section_t s_sect;
 36.1131      u64     num=0;
 36.1132 -    
 36.1133 +
 36.1134      s_sect.v = 0;
 36.1135      for (cch=hash; cch; cch=cch->next) {
 36.1136          ovl = thash_find_overlap(vhpt, cch, s_sect);
 36.1137 @@ -997,7 +1266,7 @@ void check_vtlb_sanity(thash_cb_t *vtlb)
 36.1138      search_section_t s_sect;
 36.1139      thash_cb_t *vhpt = vtlb->ts->vhpt;
 36.1140      u64   invalid_ratio;
 36.1141 -    
 36.1142 + 
 36.1143      if ( sanity_check == 0 ) return;
 36.1144      sanity_check --;
 36.1145      s_sect.v = 0;
 36.1146 @@ -1010,7 +1279,7 @@ void check_vtlb_sanity(thash_cb_t *vtlb)
 36.1147  //    vb2 = vb1 + vtlb->hash_sz;
 36.1148      hash_num = vhpt->hash_sz / sizeof(thash_data_t);
 36.1149  //    printf("vb2=%lp, size=%lx hash_num=%lx\n", vb2, vhpt->hash_sz, hash_num);
 36.1150 -    printf("vtlb=%lp, hash=%lp size=0x%lx; vhpt=%lp, hash=%lp size=0x%lx\n", 
 36.1151 +    printf("vtlb=%p, hash=%p size=0x%lx; vhpt=%p, hash=%p size=0x%lx\n", 
 36.1152                  vtlb, vtlb->hash,vtlb->hash_sz,
 36.1153                  vhpt, vhpt->hash, vhpt->hash_sz);
 36.1154      //memcpy(vb1, vtlb->hash, vtlb->hash_sz);
 36.1155 @@ -1018,9 +1287,9 @@ void check_vtlb_sanity(thash_cb_t *vtlb)
 36.1156      for ( i=0; i < sizeof(cch_length_statistics)/sizeof(cch_length_statistics[0]); i++ ) {
 36.1157      	cch_length_statistics[i] = 0;
 36.1158      }
 36.1159 -    
 36.1160 +
 36.1161      local_irq_save(psr);
 36.1162 -    
 36.1163 +
 36.1164      hash = vhpt->hash;
 36.1165      for (i=0; i < hash_num; i++) {
 36.1166          if ( !INVALID_ENTRY(vhpt, hash) ) {
 36.1167 @@ -1043,7 +1312,7 @@ void check_vtlb_sanity(thash_cb_t *vtlb)
 36.1168          }
 36.1169          hash ++;
 36.1170      }
 36.1171 -    printf("Done vtlb entry check, hash=%lp\n", hash);
 36.1172 +    printf("Done vtlb entry check, hash=%p\n", hash);
 36.1173      printf("check_ok_num = 0x%lx check_invalid=0x%lx\n", check_ok_num,check_invalid);
 36.1174      invalid_ratio = 1000*check_invalid / hash_num;
 36.1175      printf("%02ld.%01ld%% entries are invalid\n", 
 36.1176 @@ -1072,7 +1341,7 @@ void check_vtlb_sanity(thash_cb_t *vtlb)
 36.1177          if ( !INVALID_ENTRY(vhpt, hash) ) {
 36.1178              for ( cch= hash; cch; cch=cch->next) {
 36.1179                  if ( !cch->checked ) {
 36.1180 -                    printf ("!!!Hash=%lp cch=%lp not within vtlb\n", hash, cch);
 36.1181 +                    printf ("!!!Hash=%p cch=%p not within vtlb\n", hash, cch);
 36.1182                      check_fail_num ++;
 36.1183                  }
 36.1184                  else {
 36.1185 @@ -1103,18 +1372,18 @@ void dump_vtlb(thash_cb_t *vtlb)
 36.1186      static u64  dump_vtlb=0;
 36.1187      thash_data_t  *hash, *cch, *tr;
 36.1188      u64     hash_num,i;
 36.1189 -    
 36.1190 +
 36.1191      if ( dump_vtlb == 0 ) return;
 36.1192      dump_vtlb --;
 36.1193      hash_num = vtlb->hash_sz / sizeof(thash_data_t);
 36.1194      hash = vtlb->hash;
 36.1195 -    
 36.1196 +
 36.1197      printf("Dump vTC\n");
 36.1198      for ( i = 0; i < hash_num; i++ ) {
 36.1199          if ( !INVALID_ENTRY(vtlb, hash) ) {
 36.1200 -            printf("VTLB at hash=%lp\n", hash);
 36.1201 +            printf("VTLB at hash=%p\n", hash);
 36.1202              for (cch=hash; cch; cch=cch->next) {
 36.1203 -                printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
 36.1204 +                printf("Entry %p va=%lx ps=%d rid=%d\n",
 36.1205                      cch, cch->vadr, cch->ps, cch->rid);
 36.1206              }
 36.1207          }
 36.1208 @@ -1123,13 +1392,13 @@ void dump_vtlb(thash_cb_t *vtlb)
 36.1209      printf("Dump vDTR\n");
 36.1210      for (i=0; i<NDTRS; i++) {
 36.1211          tr = &DTR(vtlb,i);
 36.1212 -        printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
 36.1213 +        printf("Entry %p va=%lx ps=%d rid=%d\n",
 36.1214                      tr, tr->vadr, tr->ps, tr->rid);
 36.1215      }
 36.1216      printf("Dump vITR\n");
 36.1217      for (i=0; i<NITRS; i++) {
 36.1218          tr = &ITR(vtlb,i);
 36.1219 -        printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
 36.1220 +        printf("Entry %p va=%lx ps=%d rid=%d\n",
 36.1221                      tr, tr->vadr, tr->ps, tr->rid);
 36.1222      }
 36.1223      printf("End of vTLB dump\n");
    37.1 --- a/xen/arch/ia64/xen/acpi.c	Thu Mar 02 10:59:34 2006 +0100
    37.2 +++ b/xen/arch/ia64/xen/acpi.c	Thu Mar 02 11:00:49 2006 +0100
    37.3 @@ -178,7 +178,7 @@ acpi_parse_lapic_addr_ovr (
    37.4  
    37.5  	if (lapic->address) {
    37.6  		iounmap((void *) ipi_base_addr);
    37.7 -		ipi_base_addr = (unsigned long) ioremap(lapic->address, 0);
    37.8 +		ipi_base_addr = (void __iomem *) ioremap(lapic->address, 0);
    37.9  	}
   37.10  	return 0;
   37.11  }
   37.12 @@ -265,7 +265,9 @@ acpi_parse_plat_int_src (
   37.13  	acpi_table_entry_header *header, const unsigned long end)
   37.14  {
   37.15  	struct acpi_table_plat_int_src *plintsrc;
   37.16 +#if 0
   37.17  	int vector;
   37.18 +#endif
   37.19  
   37.20  	plintsrc = (struct acpi_table_plat_int_src *) header;
   37.21  
   37.22 @@ -369,9 +371,9 @@ acpi_parse_madt (unsigned long phys_addr
   37.23  	/* Get base address of IPI Message Block */
   37.24  
   37.25  	if (acpi_madt->lapic_address)
   37.26 -		ipi_base_addr = (unsigned long) ioremap(acpi_madt->lapic_address, 0);
   37.27 +		ipi_base_addr = (void __iomem *) ioremap(acpi_madt->lapic_address, 0);
   37.28  
   37.29 -	printk(KERN_INFO PREFIX "Local APIC address 0x%lx\n", ipi_base_addr);
   37.30 +	printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
   37.31  
   37.32  	acpi_madt_oem_check(acpi_madt->header.oem_id,
   37.33  		acpi_madt->header.oem_table_id);
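
The acpi.c changes keep ipi_base_addr typed as void __iomem * from ioremap() through to printk() instead of laundering it through an unsigned long, and %p replaces %lx accordingly. A compilable sketch of the shape; the __iomem annotation only means something to sparse, and ioremap() here is a local stub rather than the kernel's:

    #include <stdint.h>

    #define __iomem   /* only sparse checks this; empty for plain C */

    /* local stub, not the kernel's ioremap() */
    static void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
    {
        (void)size;
        return (void __iomem *)(uintptr_t)phys_addr;
    }

    static void __iomem *ipi_base_addr;

    static void map_ipi_block(unsigned long lapic_address)
    {
        /* keep the __iomem type end to end; no unsigned long detour */
        ipi_base_addr = ioremap(lapic_address, 0);
    }
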
    38.1 --- a/xen/arch/ia64/xen/dom0_ops.c	Thu Mar 02 10:59:34 2006 +0100
    38.2 +++ b/xen/arch/ia64/xen/dom0_ops.c	Thu Mar 02 11:00:49 2006 +0100
    38.3 @@ -17,6 +17,7 @@
    38.4  #include <xen/trace.h>
    38.5  #include <xen/console.h>
    38.6  #include <public/sched_ctl.h>
    38.7 +#include <asm/vmx.h>
    38.8  
    38.9  long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
   38.10  {
   38.11 @@ -143,7 +144,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
   38.12              n += j;
   38.13          }
   38.14  
   38.15 -        free_xenheap_page((unsigned long)l_arr);
   38.16 +        free_xenheap_page((void *) l_arr);
   38.17  
   38.18          put_domain(d);
   38.19      }
   38.20 @@ -160,7 +161,6 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
   38.21          unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff;
   38.22          unsigned long mfn;
   38.23          unsigned long *buffer = op->u.getmemlist.buffer;
   38.24 -        struct page *page;
   38.25  
   38.26          ret = -EINVAL;
   38.27          if ( d != NULL )
    39.1 --- a/xen/arch/ia64/xen/dom_fw.c	Thu Mar 02 10:59:34 2006 +0100
    39.2 +++ b/xen/arch/ia64/xen/dom_fw.c	Thu Mar 02 11:00:49 2006 +0100
    39.3 @@ -39,7 +39,8 @@ unsigned long dom_pa(unsigned long imva)
    39.4  		while(1);
    39.5  	}
    39.6  	if (imva - imva_fw_base > PAGE_SIZE) {
    39.7 -		printf("dom_pa: bad offset! imva=%p, imva_fw_base=%p (spinning...)\n",imva,imva_fw_base);
    39.8 +		printf("dom_pa: bad offset! imva=0x%lx, imva_fw_base=0x%lx (spinning...)\n",
    39.9 +			imva, imva_fw_base);
   39.10  		while(1);
   39.11  	}
   39.12  	return dom_fw_base_mpa + (imva - imva_fw_base);
   39.13 @@ -48,31 +49,29 @@ unsigned long dom_pa(unsigned long imva)
   39.14  // builds a hypercall bundle at domain physical address
   39.15  void dom_efi_hypercall_patch(struct domain *d, unsigned long paddr, unsigned long hypercall)
   39.16  {
   39.17 -	unsigned long imva;
   39.18 +	unsigned long *imva;
   39.19  
   39.20  	if (d == dom0) paddr += dom0_start;
   39.21 -	imva = domain_mpa_to_imva(d,paddr);
   39.22 -	build_hypercall_bundle(imva,d->arch.breakimm,hypercall,1);
   39.23 +	imva = (unsigned long *) domain_mpa_to_imva(d, paddr);
   39.24 +	build_hypercall_bundle(imva, d->arch.breakimm, hypercall, 1);
   39.25  }
   39.26  
   39.27  
   39.28  // builds a hypercall bundle at domain physical address
   39.29  static void dom_fw_hypercall_patch(struct domain *d, unsigned long paddr, unsigned long hypercall,unsigned long ret)
   39.30  {
   39.31 -	unsigned long imva;
   39.32 +	unsigned long *imva;
   39.33  
   39.34 -	imva = domain_mpa_to_imva(d,paddr);
   39.35 -	build_hypercall_bundle(imva,d->arch.breakimm,hypercall,ret);
   39.36 +	imva = (unsigned long *) domain_mpa_to_imva(d, paddr);
   39.37 +	build_hypercall_bundle(imva, d->arch.breakimm, hypercall, ret);
   39.38  }
   39.39  
   39.40  static void dom_fw_pal_hypercall_patch(struct domain *d, unsigned long paddr)
   39.41  {
   39.42  	unsigned long *imva;
   39.43  
   39.44 -	imva = (unsigned long *)domain_mpa_to_imva(d,paddr);
   39.45 -
   39.46 -	build_pal_hypercall_bundles (imva, d->arch.breakimm,
   39.47 -				      FW_HYPERCALL_PAL_CALL);
   39.48 +	imva = (unsigned long *) domain_mpa_to_imva(d, paddr);
   39.49 +	build_pal_hypercall_bundles(imva, d->arch.breakimm, FW_HYPERCALL_PAL_CALL);
   39.50  }
   39.51  
   39.52  
   39.53 @@ -85,16 +84,14 @@ unsigned long dom_fw_setup(struct domain
   39.54  
   39.55  	dom_fw_base_mpa = 0;
   39.56  	if (d == dom0) dom_fw_base_mpa += dom0_start;
   39.57 -	imva_fw_base = domain_mpa_to_imva(d,dom_fw_base_mpa);
   39.58 -	bp = dom_fw_init(d,args,arglen,imva_fw_base,PAGE_SIZE);
   39.59 -	return dom_pa((unsigned long)bp);
   39.60 +	imva_fw_base = domain_mpa_to_imva(d, dom_fw_base_mpa);
   39.61 +	bp = dom_fw_init(d, args, arglen, (char *) imva_fw_base, PAGE_SIZE);
   39.62 +	return dom_pa((unsigned long) bp);
   39.63  }
   39.64  
   39.65  
   39.66  /* the following heavily leveraged from linux/arch/ia64/hp/sim/fw-emu.c */
   39.67  
   39.68 -#define MB	(1024*1024UL)
   39.69 -
   39.70  #define NUM_EFI_SYS_TABLES 6
   39.71  # define NUM_MEM_DESCS	5
   39.72  
   39.73 @@ -256,7 +253,8 @@ sal_emulator (long index, unsigned long 
   39.74  			if (((in1 & ~0xffffffffUL) && (in4 == 0)) ||
   39.75  			    (in4 > 1) ||
   39.76  			    (in2 > 8) || (in2 & (in2-1)))
   39.77 -			    	printf("*** SAL_PCI_CONF_WRITE?!?(adr=%p,typ=%p,sz=%p,val=%p)\n",in1,in4,in2,in3);
   39.78 +				printf("*** SAL_PCI_CONF_WRITE?!?(adr=0x%lx,typ=0x%lx,sz=0x%lx,val=0x%lx)\n",
   39.79 +					in1,in4,in2,in3);
   39.80  			// note that args are in a different order!!
   39.81  			status = ia64_sal_pci_config_write(in1,in4,in2,in3);
   39.82  		}
   39.83 @@ -296,7 +294,7 @@ xen_pal_emulator(unsigned long index, un
   39.84  	long status = -1;
   39.85  
   39.86  	if (running_on_sim) return pal_emulator_static(index);
   39.87 -	printk("xen_pal_emulator: index=%d\n",index);
   39.88 +	printk("xen_pal_emulator: index=%lu\n", index);
   39.89  	// pal code must be mapped by a TR when pal is called, however
   39.90  	// calls are rare enough that we will map it lazily rather than
   39.91  	// at every context switch
   39.92 @@ -312,10 +310,16 @@ xen_pal_emulator(unsigned long index, un
   39.93  		status = ia64_pal_proc_get_features(&r9,&r10,&r11);
   39.94  		break;
   39.95  	    case PAL_BUS_GET_FEATURES:
   39.96 -		status = ia64_pal_bus_get_features(&r9,&r10,&r11);
   39.97 +		status = ia64_pal_bus_get_features(
   39.98 +				(pal_bus_features_u_t *) &r9,
   39.99 +				(pal_bus_features_u_t *) &r10,
  39.100 +				(pal_bus_features_u_t *) &r11);
  39.101  		break;
  39.102  	    case PAL_FREQ_RATIOS:
  39.103 -		status = ia64_pal_freq_ratios(&r9,&r10,&r11);
  39.104 +		status = ia64_pal_freq_ratios(
  39.105 +				(struct pal_freq_ratio *) &r9,
  39.106 +				(struct pal_freq_ratio *) &r10,
  39.107 +				(struct pal_freq_ratio *) &r11);
  39.108  		break;
  39.109  	    case PAL_PTCE_INFO:
  39.110  		{
  39.111 @@ -326,7 +330,9 @@ xen_pal_emulator(unsigned long index, un
  39.112  		}
  39.113  		break;
  39.114  	    case PAL_VERSION:
  39.115 -		status = ia64_pal_version(&r9,&r10);
  39.116 +		status = ia64_pal_version(
  39.117 +				(pal_version_u_t *) &r9,
  39.118 +				(pal_version_u_t *) &r10);
  39.119  		break;
  39.120  	    case PAL_VM_PAGE_SIZE:
  39.121  		status = ia64_pal_vm_page_size(&r9,&r10);
  39.122 @@ -341,13 +347,21 @@ xen_pal_emulator(unsigned long index, un
  39.123  		// FIXME: what should xen return for these, figure out later
  39.124  		// For now, linux does the right thing if pal call fails
  39.125  		// In particular, rid_size must be set properly!
  39.126 -		//status = ia64_pal_vm_summary(&r9,&r10);
  39.127 +		//status = ia64_pal_vm_summary(
  39.128 +		//		(pal_vm_info_1_u_t *) &r9,
  39.129 +		//		(pal_vm_info_2_u_t *) &r10);
  39.130  		break;
  39.131  	    case PAL_RSE_INFO:
  39.132 -		status = ia64_pal_rse_info(&r9,&r10);
  39.133 +		status = ia64_pal_rse_info(
  39.134 +				&r9,
  39.135 +				(pal_hints_u_t *) &r10);
  39.136  		break;
  39.137  	    case PAL_VM_INFO:
  39.138 -		status = ia64_pal_vm_info(in1,in2,&r9,&r10);
  39.139 +		status = ia64_pal_vm_info(
  39.140 +				in1,
  39.141 +				in2,
  39.142 +				(pal_tc_info_u_t *) &r9,
  39.143 +				&r10);
  39.144  		break;
  39.145  	    case PAL_REGISTER_INFO:
  39.146  		status = ia64_pal_register_info(in1,&r9,&r10);
  39.147 @@ -360,11 +374,12 @@ xen_pal_emulator(unsigned long index, un
  39.148  	    case PAL_PERF_MON_INFO:
  39.149  		{
  39.150  			unsigned long pm_buffer[16];
  39.151 -			int i;
  39.152 -			status = ia64_pal_perf_mon_info(pm_buffer,&r9);
  39.153 +			status = ia64_pal_perf_mon_info(
  39.154 +					pm_buffer,
  39.155 +					(pal_perf_mon_info_u_t *) &r9);
  39.156  			if (status != 0) {
  39.157  				while(1)
  39.158 -				printk("PAL_PERF_MON_INFO fails ret=%d\n",status);
  39.159 +				printk("PAL_PERF_MON_INFO fails ret=%ld\n", status);
  39.160  				break;
  39.161  			}
  39.162  			if (copy_to_user((void __user *)in1,pm_buffer,128)) {
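
The PAL emulator keeps raw u64 scratch slots for the r9-r11 return registers, while the ia64_pal_* wrappers want pointers to typed result unions; the casts added above bridge the two without changing the data flow. A sketch under that assumption, with pal_result_u_t as a simplified stand-in for unions like pal_version_u_t (the real ones in asm/pal.h carry named bit-fields):

    #include <stdint.h>

    typedef union {
        uint64_t val;
        struct { uint32_t minor, major; } s;
    } pal_result_u_t;

    static long stub_pal_call(pal_result_u_t *out)   /* stands in for the
                                                        ia64_pal_* wrappers */
    {
        out->val = 0;
        return 0;
    }

    /* Each call site casts a scratch u64's address to the union type its
     * wrapper expects, exactly the shape of the casts above. */
    static long emulate_one_call(uint64_t *r9)
    {
        return stub_pal_call((pal_result_u_t *)r9);
    }
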
  39.163 @@ -409,7 +424,7 @@ xen_pal_emulator(unsigned long index, un
  39.164  			    domain_shutdown (current->domain, 0);
  39.165  		    break;
  39.166  	    default:
  39.167 -		printk("xen_pal_emulator: UNIMPLEMENTED PAL CALL %d!!!!\n",
  39.168 +		printk("xen_pal_emulator: UNIMPLEMENTED PAL CALL %lu!!!!\n",
  39.169  				index);
  39.170  		break;
  39.171  	}
  39.172 @@ -434,7 +449,7 @@ static u32 lsapic_flag=1;
  39.173  
  39.174  /* Provide only one LP to guest */
  39.175  static int 
  39.176 -acpi_update_lsapic (acpi_table_entry_header *header)
  39.177 +acpi_update_lsapic (acpi_table_entry_header *header, const unsigned long end)
  39.178  {
  39.179  	struct acpi_table_lsapic *lsapic;
  39.180  
  39.181 @@ -529,8 +544,8 @@ dom_fw_fake_acpi(struct fake_acpi_tables
  39.182  	strcpy(xsdt->asl_compiler_id, "XEN");
  39.183  	xsdt->asl_compiler_revision = (XEN_VERSION<<16)|(XEN_SUBVERSION);
  39.184  
  39.185 -	xsdt->table_offset_entry[0] = dom_pa(fadt);
  39.186 -	tables->madt_ptr = dom_pa(madt);
  39.187 +	xsdt->table_offset_entry[0] = dom_pa((unsigned long) fadt);
  39.188 +	tables->madt_ptr = dom_pa((unsigned long) madt);
  39.189  
  39.190  	xsdt->checksum = generate_acpi_checksum(xsdt, xsdt->length);
  39.191  
  39.192 @@ -547,8 +562,8 @@ dom_fw_fake_acpi(struct fake_acpi_tables
  39.193  	facs->version = 1;
  39.194  	facs->length = sizeof(struct facs_descriptor_rev2);
  39.195  
  39.196 -	fadt->xfirmware_ctrl = dom_pa(facs);
  39.197 -	fadt->Xdsdt = dom_pa(dsdt);
  39.198 +	fadt->xfirmware_ctrl = dom_pa((unsigned long) facs);
  39.199 +	fadt->Xdsdt = dom_pa((unsigned long) dsdt);
  39.200  
  39.201  	/*
  39.202  	 * All of the below FADT entries are filled it to prevent warnings
  39.203 @@ -558,15 +573,15 @@ dom_fw_fake_acpi(struct fake_acpi_tables
  39.204  	fadt->pm1_evt_len = 4;
  39.205  	fadt->xpm1a_evt_blk.address_space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
  39.206  	fadt->xpm1a_evt_blk.register_bit_width = 8;
  39.207 -	fadt->xpm1a_evt_blk.address = dom_pa(&tables->pm1a_evt_blk);
  39.208 +	fadt->xpm1a_evt_blk.address = dom_pa((unsigned long) &tables->pm1a_evt_blk);
  39.209  	fadt->pm1_cnt_len = 1;
  39.210  	fadt->xpm1a_cnt_blk.address_space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
  39.211  	fadt->xpm1a_cnt_blk.register_bit_width = 8;
  39.212 -	fadt->xpm1a_cnt_blk.address = dom_pa(&tables->pm1a_cnt_blk);
  39.213 +	fadt->xpm1a_cnt_blk.address = dom_pa((unsigned long) &tables->pm1a_cnt_blk);
  39.214  	fadt->pm_tm_len = 4;
  39.215  	fadt->xpm_tmr_blk.address_space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
  39.216  	fadt->xpm_tmr_blk.register_bit_width = 8;
  39.217 -	fadt->xpm_tmr_blk.address = dom_pa(&tables->pm_tmr_blk);
  39.218 +	fadt->xpm_tmr_blk.address = dom_pa((unsigned long) &tables->pm_tmr_blk);
  39.219  
  39.220  	fadt->checksum = generate_acpi_checksum(fadt, fadt->length);
  39.221  
  39.222 @@ -575,7 +590,7 @@ dom_fw_fake_acpi(struct fake_acpi_tables
  39.223  	strcpy(rsdp->oem_id, "XEN");
  39.224  	rsdp->revision = 2; /* ACPI 2.0 includes XSDT */
  39.225  	rsdp->length = sizeof(struct acpi20_table_rsdp);
  39.226 -	rsdp->xsdt_address = dom_pa(xsdt);
  39.227 +	rsdp->xsdt_address = dom_pa((unsigned long) xsdt);
  39.228  
  39.229  	rsdp->checksum = generate_acpi_checksum(rsdp,
  39.230  	                                        ACPI_RSDP_CHECKSUM_LENGTH);
  39.231 @@ -640,7 +655,7 @@ dom_fw_init (struct domain *d, char *arg
  39.232  	unsigned long maxmem = (d->max_pages - d->arch.sys_pgnr) * PAGE_SIZE;
  39.233  	const unsigned long start_mpaddr = ((d==dom0)?dom0_start:0);
  39.234  
  39.235 -#	define MAKE_MD(typ, attr, start, end, abs) 	\	
  39.236 +#	define MAKE_MD(typ, attr, start, end, abs) 	\
  39.237  	do {						\
  39.238  		md = efi_memmap + i++;			\
  39.239  		md->type = typ;				\
  39.240 @@ -669,7 +684,7 @@ dom_fw_init (struct domain *d, char *arg
  39.241  	sal_ed      = (void *) cp; cp += sizeof(*sal_ed);
  39.242  	efi_memmap  = (void *) cp; cp += NUM_MEM_DESCS*sizeof(*efi_memmap);
  39.243  	bp	    = (void *) cp; cp += sizeof(*bp);
  39.244 -	pfn        = (void *) cp; cp += NFUNCPTRS * 2 * sizeof(pfn);
  39.245 +	pfn         = (void *) cp; cp += NFUNCPTRS * 2 * sizeof(pfn);
  39.246  	cmd_line    = (void *) cp;
  39.247  
  39.248  	if (args) {
  39.249 @@ -690,19 +705,19 @@ dom_fw_init (struct domain *d, char *arg
  39.250  	cp += sizeof(FW_VENDOR) + (8-((unsigned long)cp & 7)); // round to 64-bit boundary
  39.251  
  39.252  	memcpy(fw_vendor,FW_VENDOR,sizeof(FW_VENDOR));
  39.253 -	efi_systab->fw_vendor = dom_pa(fw_vendor);
  39.254 +	efi_systab->fw_vendor = dom_pa((unsigned long) fw_vendor);
  39.255  	
  39.256  	efi_systab->fw_revision = 1;
  39.257 -	efi_systab->runtime = (void *) dom_pa(efi_runtime);
  39.258 +	efi_systab->runtime = (void *) dom_pa((unsigned long) efi_runtime);
  39.259  	efi_systab->nr_tables = NUM_EFI_SYS_TABLES;
  39.260 -	efi_systab->tables = dom_pa(efi_tables);
  39.261 +	efi_systab->tables = dom_pa((unsigned long) efi_tables);
  39.262  
  39.263  	efi_runtime->hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE;
  39.264  	efi_runtime->hdr.revision = EFI_RUNTIME_SERVICES_REVISION;
  39.265  	efi_runtime->hdr.headersize = sizeof(efi_runtime->hdr);
  39.266  #define EFI_HYPERCALL_PATCH(tgt,call) do { \
  39.267      dom_efi_hypercall_patch(d,FW_HYPERCALL_##call##_PADDR,FW_HYPERCALL_##call); \
  39.268 -    tgt = dom_pa(pfn); \
  39.269 +    tgt = dom_pa((unsigned long) pfn); \
  39.270      *pfn++ = FW_HYPERCALL_##call##_PADDR + start_mpaddr; \
  39.271      *pfn++ = 0; \
  39.272      } while (0)
  39.273 @@ -719,7 +734,7 @@ dom_fw_init (struct domain *d, char *arg
  39.274  	EFI_HYPERCALL_PATCH(efi_runtime->reset_system,EFI_RESET_SYSTEM);
  39.275  
  39.276  	efi_tables[0].guid = SAL_SYSTEM_TABLE_GUID;
  39.277 -	efi_tables[0].table = dom_pa(sal_systab);
  39.278 +	efi_tables[0].table = dom_pa((unsigned long) sal_systab);
  39.279  	for (i = 1; i < NUM_EFI_SYS_TABLES; i++) {
  39.280  		efi_tables[i].guid = NULL_GUID;
  39.281  		efi_tables[i].table = 0;
  39.282 @@ -730,7 +745,7 @@ dom_fw_init (struct domain *d, char *arg
  39.283  		if (efi.mps) {
  39.284  			efi_tables[i].guid = MPS_TABLE_GUID;
  39.285  			efi_tables[i].table = __pa(efi.mps);
  39.286 -			printf(" MPS=%0xlx",efi_tables[i].table);
  39.287 +			printf(" MPS=0x%lx",efi_tables[i].table);
  39.288  			i++;
  39.289  		}
  39.290  
  39.291 @@ -739,25 +754,25 @@ dom_fw_init (struct domain *d, char *arg
  39.292  		if (efi.acpi20) {
  39.293  			efi_tables[i].guid = ACPI_20_TABLE_GUID;
  39.294  			efi_tables[i].table = __pa(efi.acpi20);
  39.295 -			printf(" ACPI 2.0=%0xlx",efi_tables[i].table);
  39.296 +			printf(" ACPI 2.0=0x%lx",efi_tables[i].table);
  39.297  			i++;
  39.298  		}
  39.299  		if (efi.acpi) {
  39.300  			efi_tables[i].guid = ACPI_TABLE_GUID;
  39.301  			efi_tables[i].table = __pa(efi.acpi);
  39.302 -			printf(" ACPI=%0xlx",efi_tables[i].table);
  39.303 +			printf(" ACPI=0x%lx",efi_tables[i].table);
  39.304  			i++;
  39.305  		}
  39.306  		if (efi.smbios) {
  39.307  			efi_tables[i].guid = SMBIOS_TABLE_GUID;
  39.308  			efi_tables[i].table = __pa(efi.smbios);
  39.309 -			printf(" SMBIOS=%0xlx",efi_tables[i].table);
  39.310 +			printf(" SMBIOS=0x%lx",efi_tables[i].table);
  39.311  			i++;
  39.312  		}
  39.313  		if (efi.hcdp) {
  39.314  			efi_tables[i].guid = HCDP_TABLE_GUID;
  39.315  			efi_tables[i].table = __pa(efi.hcdp);
  39.316 -			printf(" HCDP=%0xlx",efi_tables[i].table);
  39.317 +			printf(" HCDP=0x%lx",efi_tables[i].table);
  39.318  			i++;
  39.319  		}
  39.320  		printf("\n");
  39.321 @@ -773,8 +788,8 @@ dom_fw_init (struct domain *d, char *arg
  39.322  			dom_fw_fake_acpi(acpi_tables);
  39.323  
  39.324  			efi_tables[i].guid = ACPI_20_TABLE_GUID;
  39.325 -			efi_tables[i].table = dom_pa(acpi_tables);
  39.326 -			printf(" ACPI 2.0=%0xlx",efi_tables[i].table);
  39.327 +			efi_tables[i].table = dom_pa((unsigned long) acpi_tables);
  39.328 +			printf(" ACPI 2.0=0x%lx",efi_tables[i].table);
  39.329  			i++;
  39.330  		}
  39.331  	}
  39.332 @@ -850,12 +865,12 @@ dom_fw_init (struct domain *d, char *arg
  39.333  		MAKE_MD(EFI_RESERVED_TYPE,0,0,0,0);
  39.334  	}
  39.335  
  39.336 -	bp->efi_systab = dom_pa(fw_mem);
  39.337 -	bp->efi_memmap = dom_pa(efi_memmap);
  39.338 +	bp->efi_systab = dom_pa((unsigned long) fw_mem);
  39.339 +	bp->efi_memmap = dom_pa((unsigned long) efi_memmap);
  39.340  	bp->efi_memmap_size = NUM_MEM_DESCS*sizeof(efi_memory_desc_t);
  39.341  	bp->efi_memdesc_size = sizeof(efi_memory_desc_t);
  39.342  	bp->efi_memdesc_version = 1;
  39.343 -	bp->command_line = dom_pa(cmd_line);
  39.344 +	bp->command_line = dom_pa((unsigned long) cmd_line);
  39.345  	bp->console_info.num_cols = 80;
  39.346  	bp->console_info.num_rows = 25;
  39.347  	bp->console_info.orig_x = 0;
  39.348 @@ -870,7 +885,7 @@ dom_fw_init (struct domain *d, char *arg
  39.349  		bp->initrd_start = d->arch.initrd_start;
  39.350  		bp->initrd_size  = d->arch.initrd_len;
  39.351  	}
  39.352 -	printf(" initrd start %0xlx", bp->initrd_start);
  39.353 -	printf(" initrd size %0xlx", bp->initrd_size);
  39.354 +	printf(" initrd start 0x%lx", bp->initrd_start);
  39.355 +	printf(" initrd size 0x%lx\n", bp->initrd_size);
  39.356  	return bp;
  39.357  }
    40.1 --- a/xen/arch/ia64/xen/domain.c	Thu Mar 02 10:59:34 2006 +0100
    40.2 +++ b/xen/arch/ia64/xen/domain.c	Thu Mar 02 11:00:49 2006 +0100
    40.3 @@ -45,7 +45,9 @@
    40.4  #include <asm/vmx.h>
    40.5  #include <asm/vmx_vcpu.h>
    40.6  #include <asm/vmx_vpd.h>
    40.7 +#include <asm/vmx_phy_mode.h>
    40.8  #include <asm/pal.h>
    40.9 +#include <asm/vhpt.h>
   40.10  #include <public/hvm/ioreq.h>
   40.11  
   40.12  #define CONFIG_DOMAIN0_CONTIGUOUS
   40.13 @@ -63,8 +65,16 @@ extern unsigned long running_on_sim;
   40.14  extern int readelfimage_base_and_size(char *, unsigned long,
   40.15  	              unsigned long *, unsigned long *, unsigned long *);
   40.16  
   40.17 -unsigned long map_domain_page0(struct domain *);
   40.18  extern unsigned long dom_fw_setup(struct domain *, char *, int);
    40.19 +/* FIXME: where should these declarations live? */
   40.20 +extern void domain_pend_keyboard_interrupt(int);
   40.21 +extern long platform_is_hp_ski(void);
   40.22 +extern unsigned long allocate_metaphysical_rr(void);
   40.23 +extern int allocate_rid_range(struct domain *, unsigned long);
   40.24 +extern void sync_split_caches(void);
   40.25 +extern void init_all_rr(struct vcpu *);
   40.26 +extern void serial_input_init(void);
   40.27 +
   40.28  static void init_switch_stack(struct vcpu *v);
   40.29  
   40.30  /* this belongs in include/asm, but there doesn't seem to be a suitable place */
   40.31 @@ -251,9 +261,12 @@ int arch_domain_create(struct domain *d)
   40.32  	return 0;
   40.33  
   40.34  fail_nomem:
   40.35 -	free_xenheap_page(d->shared_info);
   40.36 -	xfree(d->arch.mm);
   40.37 -	pgd_free(d->arch.mm->pgd);
    40.38 +	if (d->arch.mm != NULL && d->arch.mm->pgd != NULL)
    40.39 +	    pgd_free(d->arch.mm->pgd);
    40.40 +	if (d->arch.mm != NULL)
    40.41 +	    xfree(d->arch.mm);
    40.42 +	if (d->shared_info != NULL)
    40.43 +	    free_xenheap_page(d->shared_info);
   40.44  	return -ENOMEM;
   40.45  }
   40.46  
   40.47 @@ -272,8 +285,6 @@ int arch_set_info_guest(struct vcpu *v, 
   40.48  {
   40.49  	struct pt_regs *regs = vcpu_regs (v);
   40.50  	struct domain *d = v->domain;
   40.51 -	int i, rc, ret;
   40.52 -	unsigned long progress = 0;
   40.53  
   40.54  	printf("arch_set_info_guest\n");
   40.55  	if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
   40.56 @@ -301,7 +312,7 @@ int arch_set_info_guest(struct vcpu *v, 
   40.57   	v->vcpu_info->arch.evtchn_vector = c->vcpu.evtchn_vector;
   40.58  	if ( c->vcpu.privregs && copy_from_user(v->arch.privregs,
   40.59  			   c->vcpu.privregs, sizeof(mapped_regs_t))) {
   40.60 -	    printk("Bad ctxt address in arch_set_info_guest: 0x%lx\n", c->vcpu.privregs);
   40.61 +	    printk("Bad ctxt address in arch_set_info_guest: %p\n", c->vcpu.privregs);
   40.62  	    return -EFAULT;
   40.63  	}
   40.64  
   40.65 @@ -328,10 +339,8 @@ void new_thread(struct vcpu *v,
   40.66  {
   40.67  	struct domain *d = v->domain;
   40.68  	struct pt_regs *regs;
   40.69 -	struct ia64_boot_param *bp;
   40.70  	extern char saved_command_line[];
   40.71  
   40.72 -
   40.73  #ifdef CONFIG_DOMAIN0_CONTIGUOUS
   40.74  	if (d == dom0) start_pc += dom0_start;
   40.75  #endif
   40.76 @@ -378,18 +387,19 @@ void new_thread(struct vcpu *v,
   40.77  	}
   40.78  }
   40.79  
   40.80 -static struct page * map_new_domain0_page(unsigned long mpaddr)
   40.81 +static struct page * assign_new_domain0_page(unsigned long mpaddr)
   40.82  {
   40.83  	if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
   40.84 -		printk("map_new_domain0_page: bad domain0 mpaddr %p!\n",mpaddr);
   40.85 -printk("map_new_domain0_page: start=%p,end=%p!\n",dom0_start,dom0_start+dom0_size);
   40.86 +		printk("assign_new_domain0_page: bad domain0 mpaddr 0x%lx!\n",mpaddr);
   40.87 +		printk("assign_new_domain0_page: start=0x%lx,end=0x%lx!\n",
   40.88 +			dom0_start, dom0_start+dom0_size);
   40.89  		while(1);
   40.90  	}
   40.91  	return mfn_to_page((mpaddr >> PAGE_SHIFT));
   40.92  }
   40.93  
   40.94  /* allocate new page for domain and map it to the specified metaphysical addr */
   40.95 -struct page * map_new_domain_page(struct domain *d, unsigned long mpaddr)
   40.96 +struct page * assign_new_domain_page(struct domain *d, unsigned long mpaddr)
   40.97  {
   40.98  	struct mm_struct *mm = d->arch.mm;
   40.99  	struct page *p = (struct page *)0;
  40.100 @@ -397,10 +407,9 @@ struct page * map_new_domain_page(struct
  40.101  	pud_t *pud;
  40.102  	pmd_t *pmd;
  40.103  	pte_t *pte;
  40.104 -extern unsigned long vhpt_paddr, vhpt_pend;
  40.105  
  40.106  	if (!mm->pgd) {
  40.107 -		printk("map_new_domain_page: domain pgd must exist!\n");
  40.108 +		printk("assign_new_domain_page: domain pgd must exist!\n");
  40.109  		return(p);
  40.110  	}
  40.111  	pgd = pgd_offset(mm,mpaddr);
  40.112 @@ -419,7 +428,7 @@ extern unsigned long vhpt_paddr, vhpt_pe
  40.113  	pte = pte_offset_map(pmd, mpaddr);
  40.114  	if (pte_none(*pte)) {
  40.115  #ifdef CONFIG_DOMAIN0_CONTIGUOUS
  40.116 -		if (d == dom0) p = map_new_domain0_page(mpaddr);
  40.117 +		if (d == dom0) p = assign_new_domain0_page(mpaddr);
  40.118  		else
  40.119  #endif
  40.120  		{
  40.121 @@ -428,21 +437,23 @@ extern unsigned long vhpt_paddr, vhpt_pe
  40.122  			if (p) memset(__va(page_to_maddr(p)),0,PAGE_SIZE);
  40.123  		}
  40.124  		if (unlikely(!p)) {
  40.125 -printf("map_new_domain_page: Can't alloc!!!! Aaaargh!\n");
  40.126 +			printf("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
  40.127  			return(p);
  40.128  		}
  40.129 -if (unlikely(page_to_maddr(p) > vhpt_paddr && page_to_maddr(p) < vhpt_pend)) {
  40.130 -  printf("map_new_domain_page: reassigned vhpt page %p!!\n",page_to_maddr(p));
  40.131 -}
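          +		/* Sanity check: a freshly allocated domain page should
          +		 * never fall inside this cpu's VHPT region. */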
  40.132 +		if (unlikely(page_to_maddr(p) > __get_cpu_var(vhpt_paddr)
  40.133 +			     && page_to_maddr(p) < __get_cpu_var(vhpt_pend))) {
  40.134 +			printf("assign_new_domain_page: reassigned vhpt page %lx!!\n",
  40.135 +				page_to_maddr(p));
  40.136 +		}
  40.137  		set_pte(pte, pfn_pte(page_to_maddr(p) >> PAGE_SHIFT,
  40.138  			__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
  40.139  	}
  40.140 -	else printk("map_new_domain_page: mpaddr %lx already mapped!\n",mpaddr);
  40.141 +	else printk("assign_new_domain_page: mpaddr %lx already mapped!\n",mpaddr);
  40.142  	return p;
  40.143  }
  40.144  
  40.145  /* map a physical address to the specified metaphysical addr */
  40.146 -void map_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
  40.147 +void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
  40.148  {
  40.149  	struct mm_struct *mm = d->arch.mm;
  40.150  	pgd_t *pgd;
  40.151 @@ -451,7 +462,7 @@ void map_domain_page(struct domain *d, u
  40.152  	pte_t *pte;
  40.153  
  40.154  	if (!mm->pgd) {
  40.155 -		printk("map_domain_page: domain pgd must exist!\n");
  40.156 +		printk("assign_domain_page: domain pgd must exist!\n");
  40.157  		return;
  40.158  	}
  40.159  	pgd = pgd_offset(mm,mpaddr);
  40.160 @@ -472,11 +483,14 @@ void map_domain_page(struct domain *d, u
  40.161  		set_pte(pte, pfn_pte(physaddr >> PAGE_SHIFT,
  40.162  			__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
  40.163  	}
  40.164 -	else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
  40.165 +	else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
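          +	/* Also record the reverse (machine-to-physical) mapping. */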
   40.166 +	if ((physaddr >> PAGE_SHIFT) < max_page) {
   40.167 +		*(mpt_table + (physaddr >> PAGE_SHIFT)) = (mpaddr >> PAGE_SHIFT);
   40.168 +	}
  40.169  }
  40.170  #if 0
  40.171  /* map a physical address with specified I/O flag */
  40.172 -void map_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
  40.173 +void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
  40.174  {
  40.175  	struct mm_struct *mm = d->arch.mm;
  40.176  	pgd_t *pgd;
  40.177 @@ -486,7 +500,7 @@ void map_domain_io_page(struct domain *d
  40.178  	pte_t io_pte;
  40.179  
  40.180  	if (!mm->pgd) {
  40.181 -		printk("map_domain_page: domain pgd must exist!\n");
  40.182 +		printk("assign_domain_page: domain pgd must exist!\n");
  40.183  		return;
  40.184  	}
  40.185  	ASSERT(flags & GPFN_IO_MASK);
  40.186 @@ -509,7 +523,7 @@ void map_domain_io_page(struct domain *d
  40.187  		pte_val(io_pte) = flags;
  40.188  		set_pte(pte, io_pte);
  40.189  	}
  40.190 -	else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr);
  40.191 +	else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
  40.192  }
  40.193  #endif
  40.194  void mpafoo(unsigned long mpaddr)
  40.195 @@ -530,8 +544,8 @@ unsigned long lookup_domain_mpa(struct d
  40.196  #ifdef CONFIG_DOMAIN0_CONTIGUOUS
  40.197  	if (d == dom0) {
  40.198  		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
  40.199 -			//printk("lookup_domain_mpa: bad dom0 mpaddr %p!\n",mpaddr);
  40.200 -//printk("lookup_domain_mpa: start=%p,end=%p!\n",dom0_start,dom0_start+dom0_size);
  40.201 +			//printk("lookup_domain_mpa: bad dom0 mpaddr 0x%lx!\n",mpaddr);
  40.202 +			//printk("lookup_domain_mpa: start=0x%lx,end=0x%lx!\n",dom0_start,dom0_start+dom0_size);
  40.203  			mpafoo(mpaddr);
  40.204  		}
  40.205  		pte_t pteval = pfn_pte(mpaddr >> PAGE_SHIFT,
  40.206 @@ -557,10 +571,10 @@ tryagain:
  40.207  	}
  40.208  	/* if lookup fails and mpaddr is "legal", "create" the page */
  40.209  	if ((mpaddr >> PAGE_SHIFT) < d->max_pages) {
  40.210 -		if (map_new_domain_page(d,mpaddr)) goto tryagain;
  40.211 +		if (assign_new_domain_page(d,mpaddr)) goto tryagain;
  40.212  	}
  40.213 -	printk("lookup_domain_mpa: bad mpa %p (> %p\n",
  40.214 -		mpaddr,d->max_pages<<PAGE_SHIFT);
  40.215 +	printk("lookup_domain_mpa: bad mpa 0x%lx (> 0x%lx)\n",
  40.216 +		mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT);
  40.217  	mpafoo(mpaddr);
  40.218  	return 0;
  40.219  }
  40.220 @@ -573,7 +587,7 @@ unsigned long domain_mpa_to_imva(struct 
  40.221  	unsigned long imva;
  40.222  
  40.223  	pte &= _PAGE_PPN_MASK;
  40.224 -	imva = __va(pte);
  40.225 +	imva = (unsigned long) __va(pte);
  40.226  	imva |= mpaddr & ~PAGE_MASK;
  40.227  	return(imva);
  40.228  }
  40.229 @@ -602,13 +616,13 @@ static void copy_memory(void *dst, void 
  40.230  {
  40.231  	int remain;
  40.232  
  40.233 -	if (IS_XEN_ADDRESS(dom0,src)) {
  40.234 +	if (IS_XEN_ADDRESS(dom0,(unsigned long) src)) {
  40.235  		memcpy(dst,src,size);
  40.236  	}
  40.237  	else {
  40.238  		printf("About to call __copy_from_user(%p,%p,%d)\n",
  40.239  			dst,src,size);
  40.240 -		while (remain = __copy_from_user(dst,src,size)) {
  40.241 +		while ((remain = __copy_from_user(dst,src,size)) != 0) {
  40.242  			printf("incomplete user copy, %d remain of %d\n",
  40.243  				remain,size);
  40.244  			dst += size - remain; src += size - remain;
  40.245 @@ -619,16 +633,15 @@ static void copy_memory(void *dst, void 
  40.246  
  40.247  void loaddomainelfimage(struct domain *d, unsigned long image_start)
  40.248  {
  40.249 -	char *elfbase = image_start;
  40.250 +	char *elfbase = (char *) image_start;
  40.251  	//Elf_Ehdr *ehdr = (Elf_Ehdr *)image_start;
  40.252  	Elf_Ehdr ehdr;
  40.253  	Elf_Phdr phdr;
  40.254 -	int h, filesz, memsz, paddr;
  40.255 +	int h, filesz, memsz;
  40.256  	unsigned long elfaddr, dom_mpaddr, dom_imva;
  40.257  	struct page *p;
  40.258 -	unsigned long pteval;
  40.259    
  40.260 -	copy_memory(&ehdr,image_start,sizeof(Elf_Ehdr));
  40.261 +	copy_memory(&ehdr, (void *) image_start, sizeof(Elf_Ehdr));
  40.262  	for ( h = 0; h < ehdr.e_phnum; h++ ) {
  40.263  		copy_memory(&phdr,elfbase + ehdr.e_phoff + (h*ehdr.e_phentsize),
  40.264  		sizeof(Elf_Phdr));
  40.265 @@ -637,7 +650,7 @@ void loaddomainelfimage(struct domain *d
  40.266  	        continue;
  40.267  	}
  40.268  	filesz = phdr.p_filesz; memsz = phdr.p_memsz;
  40.269 -	elfaddr = elfbase + phdr.p_offset;
  40.270 +	elfaddr = (unsigned long) elfbase + phdr.p_offset;
  40.271  	dom_mpaddr = phdr.p_paddr;
  40.272  //printf("p_offset: %x, size=%x\n",elfaddr,filesz);
  40.273  #ifdef CONFIG_DOMAIN0_CONTIGUOUS
  40.274 @@ -646,37 +659,31 @@ void loaddomainelfimage(struct domain *d
  40.275  			printf("Domain0 doesn't fit in allocated space!\n");
  40.276  			while(1);
  40.277  		}
  40.278 -		dom_imva = __va(dom_mpaddr + dom0_start);
  40.279 -		copy_memory(dom_imva,elfaddr,filesz);
  40.280 -		if (memsz > filesz) memset(dom_imva+filesz,0,memsz-filesz);
  40.281 +		dom_imva = (unsigned long) __va(dom_mpaddr + dom0_start);
  40.282 +		copy_memory((void *) dom_imva, (void *) elfaddr, filesz);
  40.283 +		if (memsz > filesz) memset((void *) dom_imva+filesz, 0, memsz-filesz);
  40.284  //FIXME: This test for code seems to find a lot more than objdump -x does
  40.285  		if (phdr.p_flags & PF_X) privify_memory(dom_imva,filesz);
  40.286  	}
  40.287  	else
  40.288  #endif
  40.289  	while (memsz > 0) {
  40.290 -#ifdef DOMU_AUTO_RESTART
  40.291 -		pteval = lookup_domain_mpa(d,dom_mpaddr);
  40.292 -		if (pteval) dom_imva = __va(pteval & _PFN_MASK);
  40.293 -		else { printf("loaddomainelfimage: BAD!\n"); while(1); }
  40.294 -#else
  40.295 -		p = map_new_domain_page(d,dom_mpaddr);
  40.296 +		p = assign_new_domain_page(d,dom_mpaddr);
  40.297  		if (unlikely(!p)) BUG();
  40.298 -		dom_imva = __va(page_to_maddr(p));
  40.299 -#endif
  40.300 +		dom_imva = (unsigned long) __va(page_to_maddr(p));
  40.301  		if (filesz > 0) {
  40.302  			if (filesz >= PAGE_SIZE)
  40.303 -				copy_memory(dom_imva,elfaddr,PAGE_SIZE);
  40.304 +				copy_memory((void *) dom_imva, (void *) elfaddr, PAGE_SIZE);
  40.305  			else { // copy partial page, zero the rest of page
  40.306 -				copy_memory(dom_imva,elfaddr,filesz);
  40.307 -				memset(dom_imva+filesz,0,PAGE_SIZE-filesz);
  40.308 +				copy_memory((void *) dom_imva, (void *) elfaddr, filesz);
  40.309 +				memset((void *) dom_imva+filesz, 0, PAGE_SIZE-filesz);
  40.310  			}
  40.311  //FIXME: This test for code seems to find a lot more than objdump -x does
  40.312  			if (phdr.p_flags & PF_X)
  40.313  				privify_memory(dom_imva,PAGE_SIZE);
  40.314  		}
  40.315  		else if (memsz > 0) // always zero out entire page
  40.316 -			memset(dom_imva,0,PAGE_SIZE);
  40.317 +			memset((void *) dom_imva, 0, PAGE_SIZE);
  40.318  		memsz -= PAGE_SIZE; filesz -= PAGE_SIZE;
  40.319  		elfaddr += PAGE_SIZE; dom_mpaddr += PAGE_SIZE;
  40.320  	}
  40.321 @@ -691,33 +698,33 @@ parsedomainelfimage(char *elfbase, unsig
  40.322  	copy_memory(&ehdr,elfbase,sizeof(Elf_Ehdr));
  40.323  
  40.324  	if ( !elf_sanity_check(&ehdr) ) {
  40.325 -	    printk("ELF sanity check failed.\n");
  40.326 -	    return -EINVAL;
  40.327 +		printk("ELF sanity check failed.\n");
  40.328 +		return -EINVAL;
  40.329  	}
  40.330  
  40.331  	if ( (ehdr.e_phoff + (ehdr.e_phnum * ehdr.e_phentsize)) > elfsize )
  40.332  	{
  40.333 -	    printk("ELF program headers extend beyond end of image.\n");
  40.334 -	    return -EINVAL;
  40.335 +		printk("ELF program headers extend beyond end of image.\n");
  40.336 +		return -EINVAL;
  40.337  	}
  40.338  
  40.339  	if ( (ehdr.e_shoff + (ehdr.e_shnum * ehdr.e_shentsize)) > elfsize )
  40.340  	{
  40.341 -	    printk("ELF section headers extend beyond end of image.\n");
  40.342 -	    return -EINVAL;
  40.343 +		printk("ELF section headers extend beyond end of image.\n");
  40.344 +		return -EINVAL;
  40.345  	}
  40.346  
  40.347  #if 0
  40.348  	/* Find the section-header strings table. */
  40.349  	if ( ehdr.e_shstrndx == SHN_UNDEF )
  40.350  	{
  40.351 -	    printk("ELF image has no section-header strings table (shstrtab).\n");
  40.352 -	    return -EINVAL;
  40.353 +		printk("ELF image has no section-header strings table (shstrtab).\n");
  40.354 +		return -EINVAL;
  40.355  	}
  40.356  #endif
  40.357  
  40.358  	*entry = ehdr.e_entry;
  40.359 -printf("parsedomainelfimage: entry point = %p\n",*entry);
  40.360 +	printf("parsedomainelfimage: entry point = 0x%lx\n", *entry);
  40.361  
  40.362  	return 0;
  40.363  }
  40.364 @@ -729,22 +736,21 @@ void alloc_dom0(void)
  40.365  	if (platform_is_hp_ski()) {
  40.366  	dom0_size = 128*1024*1024; //FIXME: Should be configurable
  40.367  	}
  40.368 -	printf("alloc_dom0: starting (initializing %d MB...)\n",dom0_size/(1024*1024));
  40.369 +	printf("alloc_dom0: starting (initializing %lu MB...)\n",dom0_size/(1024*1024));
  40.370   
  40.371 -     /* FIXME: The first trunk (say 256M) should always be assigned to
  40.372 -      * Dom0, since Dom0's physical == machine address for DMA purpose.
  40.373 -      * Some old version linux, like 2.4, assumes physical memory existing
  40.374 -      * in 2nd 64M space.
  40.375 -      */
  40.376 -     dom0_start = alloc_boot_pages(
  40.377 -         dom0_size >> PAGE_SHIFT, dom0_align >> PAGE_SHIFT);
  40.378 -     dom0_start <<= PAGE_SHIFT;
  40.379 +	/* FIXME: The first trunk (say 256M) should always be assigned to
  40.380 +	 * Dom0, since Dom0's physical == machine address for DMA purpose.
  40.381 +	 * Some old version linux, like 2.4, assumes physical memory existing
  40.382 +	 * in 2nd 64M space.
  40.383 +	 */
  40.384 +	dom0_start = alloc_boot_pages(dom0_size >> PAGE_SHIFT, dom0_align >> PAGE_SHIFT);
  40.385 +	dom0_start <<= PAGE_SHIFT;
  40.386  	if (!dom0_start) {
  40.387 -	printf("construct_dom0: can't allocate contiguous memory size=%p\n",
  40.388 +	printf("alloc_dom0: can't allocate contiguous memory size=%lu\n",
  40.389  		dom0_size);
  40.390  	while(1);
  40.391  	}
  40.392 -	printf("alloc_dom0: dom0_start=%p\n",dom0_start);
  40.393 +	printf("alloc_dom0: dom0_start=0x%lx\n", dom0_start);
  40.394  #else
  40.395  	dom0_start = 0;
  40.396  #endif
  40.397 @@ -772,13 +778,8 @@ int construct_dom0(struct domain *d,
  40.398  	               unsigned long initrd_start, unsigned long initrd_len,
  40.399  	               char *cmdline)
  40.400  {
  40.401 -	char *dst;
  40.402  	int i, rc;
  40.403 -	unsigned long pfn, mfn;
  40.404 -	unsigned long nr_pt_pages;
  40.405 -	unsigned long count;
  40.406  	unsigned long alloc_start, alloc_end;
  40.407 -	struct page_info *page = NULL;
  40.408  	start_info_t *si;
  40.409  	struct vcpu *v = d->vcpu[0];
  40.410  
  40.411 @@ -788,16 +789,23 @@ int construct_dom0(struct domain *d,
  40.412  	unsigned long pkern_entry;
  40.413  	unsigned long pkern_end;
  40.414  	unsigned long pinitrd_start = 0;
  40.415 -	unsigned long ret, progress = 0;
  40.416 +	unsigned long pstart_info;
  40.417 +#if 0
  40.418 +	char *dst;
  40.419 +	unsigned long nr_pt_pages;
  40.420 +	unsigned long count;
  40.421 +#endif
  40.422 +#ifdef VALIDATE_VT
  40.423 +	unsigned long mfn;
  40.424 +	struct page_info *page = NULL;
  40.425 +#endif
  40.426  
  40.427  //printf("construct_dom0: starting\n");
  40.428  
  40.429 -#ifndef CLONE_DOMAIN0
  40.430  	/* Sanity! */
  40.431  	BUG_ON(d != dom0);
  40.432  	BUG_ON(d->vcpu[0] == NULL);
  40.433  	BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
  40.434 -#endif
  40.435  
  40.436  	memset(&dsi, 0, sizeof(struct domain_setup_info));
  40.437  
  40.438 @@ -846,20 +854,26 @@ int construct_dom0(struct domain *d,
  40.439               pinitrd_start=(dom0_start+dom0_size) -
  40.440                            (PAGE_ALIGN(initrd_len) + 4*1024*1024);
  40.441  
  40.442 -             memcpy(__va(pinitrd_start),initrd_start,initrd_len);
  40.443 +             memcpy(__va(pinitrd_start), (void *) initrd_start, initrd_len);
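          +             /* Place the start info page in guest memory, just past
          +              * the initrd (or past the kernel image when no initrd
          +              * is present). */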
  40.444 +             pstart_info = PAGE_ALIGN(pinitrd_start + initrd_len);
  40.445 +        } else {
  40.446 +             pstart_info = PAGE_ALIGN(pkern_end);
  40.447          }
  40.448  
  40.449  	printk("METAPHYSICAL MEMORY ARRANGEMENT:\n"
  40.450  	       " Kernel image:  %lx->%lx\n"
  40.451  	       " Entry address: %lx\n"
  40.452 -               " Init. ramdisk: %lx len %lx\n",
  40.453 -               pkern_start, pkern_end, pkern_entry, pinitrd_start, initrd_len);
  40.454 +	       " Init. ramdisk: %lx len %lx\n"
  40.455 +	       " Start info.:   %lx->%lx\n",
  40.456 +	       pkern_start, pkern_end, pkern_entry, pinitrd_start, initrd_len,
  40.457 +	       pstart_info, pstart_info + PAGE_SIZE);
  40.458  
  40.459  	if ( (pkern_end - pkern_start) > (d->max_pages * PAGE_SIZE) )
  40.460  	{
  40.461  	    printk("Initial guest OS requires too much space\n"
  40.462  	           "(%luMB is greater than %luMB limit)\n",
  40.463 -	           (pkern_end-pkern_start)>>20, (d->max_pages<<PAGE_SHIFT)>>20);
  40.464 +	           (pkern_end-pkern_start)>>20,
  40.465 +	           (unsigned long) (d->max_pages<<PAGE_SHIFT)>>20);
  40.466  	    return -ENOMEM;
  40.467  	}
  40.468  
  40.469 @@ -908,9 +922,9 @@ int construct_dom0(struct domain *d,
  40.470  
  40.471  
  40.472  	/* Set up start info area. */
  40.473 -	si = (start_info_t *)alloc_xenheap_page();
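          +	/* The pfn is published through shared_info so the guest can
          +	 * locate and reserve the start_info page in its own memory. */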
  40.474 +	d->shared_info->arch.start_info_pfn = pstart_info >> PAGE_SHIFT;
  40.475 +	si = __va(pstart_info);
  40.476  	memset(si, 0, PAGE_SIZE);
  40.477 -	d->shared_info->arch.start_info_pfn = __pa(si) >> PAGE_SHIFT;
  40.478  	sprintf(si->magic, "xen-%i.%i-ia64", XEN_VERSION, XEN_SUBVERSION);
  40.479  	si->nr_pages     = d->tot_pages;
  40.480  
  40.481 @@ -962,80 +976,11 @@ int construct_dom0(struct domain *d,
  40.482  	sync_split_caches();
  40.483  
  40.484  	// FIXME: Hack for keyboard input
  40.485 -#ifdef CLONE_DOMAIN0
  40.486 -if (d == dom0)
  40.487 -#endif
  40.488  	serial_input_init();
  40.489 -	if (d == dom0) {
  40.490 -		VCPU(v, delivery_mask[0]) = -1L;
  40.491 -		VCPU(v, delivery_mask[1]) = -1L;
  40.492 -		VCPU(v, delivery_mask[2]) = -1L;
  40.493 -		VCPU(v, delivery_mask[3]) = -1L;
  40.494 -	}
  40.495 -	else __set_bit(0x30, VCPU(v, delivery_mask));
  40.496  
  40.497  	return 0;
  40.498  }
  40.499  
  40.500 -// FIXME: When dom0 can construct domains, this goes away (or is rewritten)
  40.501 -int construct_domU(struct domain *d,
  40.502 -		   unsigned long image_start, unsigned long image_len,
  40.503 -	           unsigned long initrd_start, unsigned long initrd_len,
  40.504 -	           char *cmdline)
  40.505 -{
  40.506 -	int i, rc;
  40.507 -	struct vcpu *v = d->vcpu[0];
  40.508 -	unsigned long pkern_entry;
  40.509 -
  40.510 -#ifndef DOMU_AUTO_RESTART
  40.511 -	BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
  40.512 -#endif
  40.513 -
  40.514 -	printk("*** LOADING DOMAIN %d ***\n",d->domain_id);
  40.515 -
  40.516 -	d->max_pages = dom0_size/PAGE_SIZE;	// FIXME: use dom0 size
  40.517 -	// FIXME: use domain0 command line
  40.518 -	rc = parsedomainelfimage(image_start, image_len, &pkern_entry);
  40.519 -	printk("parsedomainelfimage returns %d\n",rc);
  40.520 -	if ( rc != 0 ) return rc;
  40.521 -
  40.522 -	/* Mask all upcalls... */
  40.523 -	for ( i = 0; i < MAX_VIRT_CPUS; i++ )
  40.524 -		d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
  40.525 -
  40.526 -	/* Copy the OS image. */
  40.527 -	printk("calling loaddomainelfimage(%p,%p)\n",d,image_start);
  40.528 -	loaddomainelfimage(d,image_start);
  40.529 -	printk("loaddomainelfimage returns\n");
  40.530 -
  40.531 -	set_bit(_VCPUF_initialised, &v->vcpu_flags);
  40.532 -
  40.533 -	printk("calling new_thread, entry=%p\n",pkern_entry);
  40.534 -#ifdef DOMU_AUTO_RESTART
  40.535 -	v->domain->arch.image_start = image_start;
  40.536 -	v->domain->arch.image_len = image_len;
  40.537 -	v->domain->arch.entry = pkern_entry;
  40.538 -#endif
  40.539 -	new_thread(v, pkern_entry, 0, 0);
  40.540 -	printk("new_thread returns\n");
  40.541 -	sync_split_caches();
  40.542 -	__set_bit(0x30, VCPU(v, delivery_mask));
  40.543 -
  40.544 -	return 0;
  40.545 -}
  40.546 -
  40.547 -#ifdef DOMU_AUTO_RESTART
  40.548 -void reconstruct_domU(struct vcpu *v)
  40.549 -{
  40.550 -	/* re-copy the OS image to reset data values to original */
  40.551 -	printk("reconstruct_domU: restarting domain %d...\n",
  40.552 -		v->domain->domain_id);
  40.553 -	loaddomainelfimage(v->domain,v->domain->arch.image_start);
  40.554 -	new_thread(v, v->domain->arch.entry, 0, 0);
  40.555 -	sync_split_caches();
  40.556 -}
  40.557 -#endif
  40.558 -
  40.559  void machine_restart(char * __unused)
  40.560  {
  40.561  	if (platform_is_hp_ski()) dummy();
    41.1 --- a/xen/arch/ia64/xen/hypercall.c	Thu Mar 02 10:59:34 2006 +0100
    41.2 +++ b/xen/arch/ia64/xen/hypercall.c	Thu Mar 02 11:00:49 2006 +0100
    41.3 @@ -9,24 +9,66 @@
    41.4  #include <xen/config.h>
    41.5  #include <xen/sched.h>
    41.6  #include <xen/hypercall.h>
    41.7 +#include <xen/multicall.h>
    41.8  
    41.9  #include <linux/efi.h>	/* FOR EFI_UNIMPLEMENTED */
   41.10  #include <asm/sal.h>	/* FOR struct ia64_sal_retval */
   41.11  
   41.12  #include <asm/vcpu.h>
   41.13  #include <asm/dom_fw.h>
   41.14 +#include <public/dom0_ops.h>
   41.15 +#include <public/event_channel.h>
   41.16  #include <public/memory.h>
   41.17  #include <public/sched.h>
   41.18  
   41.19  extern unsigned long translate_domain_mpaddr(unsigned long);
    41.20 +/* FIXME: where should these declarations live? */
   41.21 +extern int dump_privop_counts_to_user(char *, int);
   41.22 +extern int zero_privop_counts_to_user(char *, int);
   41.23  
   41.24  unsigned long idle_when_pending = 0;
   41.25  unsigned long pal_halt_light_count = 0;
   41.26  
   41.27 +hypercall_t ia64_hypercall_table[] =
   41.28 +	{
   41.29 +	(hypercall_t)do_ni_hypercall,		/* do_set_trap_table */		/*  0 */
   41.30 +	(hypercall_t)do_ni_hypercall,		/* do_mmu_update */
   41.31 +	(hypercall_t)do_ni_hypercall,		/* do_set_gdt */
   41.32 +	(hypercall_t)do_ni_hypercall,		/* do_stack_switch */
   41.33 +	(hypercall_t)do_ni_hypercall,		/* do_set_callbacks */
   41.34 +	(hypercall_t)do_ni_hypercall,		/* do_fpu_taskswitch */		/*  5 */
   41.35 +	(hypercall_t)do_ni_hypercall,		/* do_sched_op */
   41.36 +	(hypercall_t)do_dom0_op,
   41.37 +	(hypercall_t)do_ni_hypercall,		/* do_set_debugreg */
   41.38 +	(hypercall_t)do_ni_hypercall,		/* do_get_debugreg */
   41.39 +	(hypercall_t)do_ni_hypercall,		/* do_update_descriptor */	/* 10 */
   41.40 +	(hypercall_t)do_ni_hypercall,		/* do_ni_hypercall */
   41.41 +	(hypercall_t)do_memory_op,
   41.42 +	(hypercall_t)do_multicall,
   41.43 +	(hypercall_t)do_ni_hypercall,		/* do_update_va_mapping */
   41.44 +	(hypercall_t)do_ni_hypercall,		/* do_set_timer_op */		/* 15 */
   41.45 +	(hypercall_t)do_event_channel_op,
   41.46 +	(hypercall_t)do_xen_version,
   41.47 +	(hypercall_t)do_console_io,
   41.48 +	(hypercall_t)do_ni_hypercall,           /* do_physdev_op */
   41.49 +	(hypercall_t)do_grant_table_op,						/* 20 */
   41.50 +	(hypercall_t)do_ni_hypercall,		/* do_vm_assist */
   41.51 +	(hypercall_t)do_ni_hypercall,		/* do_update_va_mapping_otherdomain */
   41.52 +	(hypercall_t)do_ni_hypercall,		/* (x86 only) */
   41.53 +	(hypercall_t)do_ni_hypercall,		/* do_vcpu_op */
   41.54 +	(hypercall_t)do_ni_hypercall,		/* (x86_64 only) */		/* 25 */
   41.55 +	(hypercall_t)do_ni_hypercall,		/* do_mmuext_op */
   41.56 +	(hypercall_t)do_ni_hypercall,		/* do_acm_op */
   41.57 +	(hypercall_t)do_ni_hypercall,		/* do_nmi_op */
    41.58 +	(hypercall_t)do_ni_hypercall,		/* reserved */
    41.59 +	(hypercall_t)do_ni_hypercall,		/* reserved */			/* 30 */
    41.60 +	(hypercall_t)do_ni_hypercall		/* reserved */
   41.61 +	};
   41.62 +
   41.63  int
   41.64  ia64_hypercall (struct pt_regs *regs)
   41.65  {
   41.66 -	struct vcpu *v = (struct domain *) current;
   41.67 +	struct vcpu *v = current;
   41.68  	struct sal_ret_values x;
   41.69  	unsigned long *tv, *tc;
   41.70  	int pi;
   41.71 @@ -94,23 +136,16 @@ ia64_hypercall (struct pt_regs *regs)
   41.72  			printf("(by dom0)\n ");
   41.73  			(*efi.reset_system)(EFI_RESET_WARM,0,0,NULL);
   41.74  		}
   41.75 -#ifdef DOMU_AUTO_RESTART
   41.76 -		else {
   41.77 -			reconstruct_domU(current);
   41.78 -			return 0;  // don't increment ip!
   41.79 -		}
   41.80 -#else	
   41.81  		printf("(not supported for non-0 domain)\n");
   41.82  		regs->r8 = EFI_UNSUPPORTED;
   41.83 -#endif
   41.84  		break;
   41.85  	    case FW_HYPERCALL_EFI_GET_TIME:
   41.86 -		tv = vcpu_get_gr(v,32);
   41.87 -		tc = vcpu_get_gr(v,33);
   41.88 +		tv = (unsigned long *) vcpu_get_gr(v,32);
   41.89 +		tc = (unsigned long *) vcpu_get_gr(v,33);
   41.90  		//printf("efi_get_time(%p,%p) called...",tv,tc);
   41.91 -		tv = __va(translate_domain_mpaddr(tv));
   41.92 -		if (tc) tc = __va(translate_domain_mpaddr(tc));
   41.93 -		regs->r8 = (*efi.get_time)(tv,tc);
   41.94 +		tv = (unsigned long *) __va(translate_domain_mpaddr((unsigned long) tv));
   41.95 +		if (tc) tc = (unsigned long *) __va(translate_domain_mpaddr((unsigned long) tc));
   41.96 +		regs->r8 = (*efi.get_time)((efi_time_t *) tv, (efi_time_cap_t *) tc);
   41.97  		//printf("and returns %lx\n",regs->r8);
   41.98  		break;
   41.99  	    case FW_HYPERCALL_EFI_SET_TIME:
  41.100 @@ -131,23 +166,23 @@ ia64_hypercall (struct pt_regs *regs)
  41.101  		break;
  41.102  	    case 0xffff:
  41.103  		regs->r8 = dump_privop_counts_to_user(
  41.104 -			vcpu_get_gr(v,32),
  41.105 -			vcpu_get_gr(v,33));
  41.106 +			(char *) vcpu_get_gr(v,32),
  41.107 +			(int) vcpu_get_gr(v,33));
  41.108  		break;
  41.109  	    case 0xfffe:
  41.110  		regs->r8 = zero_privop_counts_to_user(
  41.111 -			vcpu_get_gr(v,32),
  41.112 -			vcpu_get_gr(v,33));
  41.113 +			(char *) vcpu_get_gr(v,32),
  41.114 +			(int) vcpu_get_gr(v,33));
  41.115  		break;
  41.116  	    case __HYPERVISOR_dom0_op:
  41.117 -		regs->r8 = do_dom0_op(regs->r14);
  41.118 +		regs->r8 = do_dom0_op((struct dom0_op *) regs->r14);
  41.119  		break;
  41.120  
  41.121  	    case __HYPERVISOR_memory_op:
  41.122  		/* we don't handle reservations; just return success */
  41.123  		{
  41.124  		    struct xen_memory_reservation reservation;
  41.125 -		    void *arg = regs->r15;
  41.126 +		    void *arg = (void *) regs->r15;
  41.127  
  41.128  		    switch(regs->r14) {
  41.129  		    case XENMEM_increase_reservation:
  41.130 @@ -159,31 +194,35 @@ ia64_hypercall (struct pt_regs *regs)
  41.131  			    regs->r8 = reservation.nr_extents;
  41.132  			break;
  41.133  		    default:
  41.134 -			regs->r8 = do_memory_op(regs->r14, regs->r15);
  41.135 +			regs->r8 = do_memory_op((int) regs->r14, (void *)regs->r15);
  41.136  			break;
  41.137  		    }
  41.138  		}
  41.139  		break;
  41.140  
  41.141  	    case __HYPERVISOR_event_channel_op:
  41.142 -		regs->r8 = do_event_channel_op(regs->r14);
  41.143 +		regs->r8 = do_event_channel_op((struct evtchn_op *) regs->r14);
  41.144  		break;
  41.145  
  41.146  	    case __HYPERVISOR_grant_table_op:
  41.147 -		regs->r8 = do_grant_table_op(regs->r14, regs->r15, regs->r16);
  41.148 +		regs->r8 = do_grant_table_op((unsigned int) regs->r14, (void *) regs->r15, (unsigned int) regs->r16);
  41.149  		break;
  41.150  
  41.151  	    case __HYPERVISOR_console_io:
  41.152 -		regs->r8 = do_console_io(regs->r14, regs->r15, regs->r16);
  41.153 +		regs->r8 = do_console_io((int) regs->r14, (int) regs->r15, (char *) regs->r16);
  41.154  		break;
  41.155  
  41.156  	    case __HYPERVISOR_xen_version:
  41.157 -		regs->r8 = do_xen_version(regs->r14, regs->r15);
  41.158 +		regs->r8 = do_xen_version((int) regs->r14, (void *) regs->r15);
  41.159 +		break;
  41.160 +
  41.161 +	    case __HYPERVISOR_multicall:
  41.162 +		regs->r8 = do_multicall((struct multicall_entry *) regs->r14, (unsigned int) regs->r15);
  41.163  		break;
  41.164  
  41.165  	    default:
  41.166 -		printf("unknown hypercall %x\n", regs->r2);
  41.167 -		regs->r8 = (unsigned long)-1;
  41.168 +		printf("unknown hypercall %lx\n", regs->r2);
  41.169 +		regs->r8 = do_ni_hypercall();
  41.170  	}
  41.171  	return 1;
  41.172  }
    42.1 --- a/xen/arch/ia64/xen/hyperprivop.S	Thu Mar 02 10:59:34 2006 +0100
    42.2 +++ b/xen/arch/ia64/xen/hyperprivop.S	Thu Mar 02 11:00:49 2006 +0100
    42.3 @@ -1336,7 +1336,7 @@ ENTRY(hyper_ssm_dt)
    42.4  	movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
    42.5  	ld8 r22=[r22];;
    42.6  	adds r22=IA64_VCPU_META_SAVED_RR0_OFFSET,r22;;
    42.7 -	ld4 r23=[r22];;
    42.8 +	ld8 r23=[r22];;
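          +	// (the saved metaphysical rr0 is a full 64-bit value, so it
          +	//  must be accessed with 8-byte loads and stores)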
    42.9  	mov rr[r0]=r23;;
   42.10  	srlz.i;;
   42.11  	st4 [r20]=r0 ;;
   42.12 @@ -1372,7 +1372,7 @@ ENTRY(hyper_rsm_dt)
   42.13  	movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
   42.14  	ld8 r22=[r22];;
   42.15  	adds r22=IA64_VCPU_META_RR0_OFFSET,r22;;
   42.16 -	ld4 r23=[r22];;
   42.17 +	ld8 r23=[r22];;
   42.18  	mov rr[r0]=r23;;
   42.19  	srlz.i;;
   42.20  	adds r21=1,r0 ;;
   42.21 @@ -1733,7 +1733,7 @@ ENTRY(hyper_set_rr)
   42.22  	dep r23=r28,r23,16,8;;
   42.23  	dep r23=r29,r23,8,8
   42.24  	cmp.eq p6,p0=r25,r0;;	// if rr0, save for metaphysical
   42.25 -(p6)	st4 [r24]=r23
   42.26 +(p6)	st8 [r24]=r23
   42.27  	mov rr[r8]=r23;;
   42.28  	// done, mosey on back
   42.29  1:	mov r24=cr.ipsr
    43.1 --- a/xen/arch/ia64/xen/irq.c	Thu Mar 02 10:59:34 2006 +0100
    43.2 +++ b/xen/arch/ia64/xen/irq.c	Thu Mar 02 11:00:49 2006 +0100
    43.3 @@ -129,7 +129,9 @@ unsigned int __ia64_local_vector_to_irq 
    43.4  }
    43.5  #endif
    43.6  
    43.7 +#ifndef XEN
    43.8  static void register_irq_proc (unsigned int irq);
    43.9 +#endif
   43.10  
   43.11  /*
   43.12   * Special irq handlers.
   43.13 @@ -286,7 +288,9 @@ EXPORT_SYMBOL(synchronize_irq);
   43.14  int handle_IRQ_event(unsigned int irq,
   43.15  		struct pt_regs *regs, struct irqaction *action)
   43.16  {
   43.17 +#ifndef XEN
   43.18  	int status = 1;	/* Force the "do bottom halves" bit */
   43.19 +#endif
   43.20  	int retval = 0;
   43.21  
   43.22  #ifndef XEN
   43.23 @@ -657,8 +661,10 @@ int request_irq(unsigned int irq,
   43.24  	if (!action)
   43.25  		return -ENOMEM;
   43.26  
   43.27 +#ifdef XEN
   43.28 +	action->handler = (void *) handler;
   43.29 +#else
   43.30  	action->handler = handler;
   43.31 -#ifndef XEN
   43.32  	action->flags = irqflags;
   43.33  	action->mask = 0;
   43.34  #endif
   43.35 @@ -698,7 +704,9 @@ void free_irq(unsigned int irq, void *de
   43.36  #endif
   43.37  {
   43.38  	irq_desc_t *desc;
   43.39 +#ifndef XEN
   43.40  	struct irqaction **p;
   43.41 +#endif
   43.42  	unsigned long flags;
   43.43  
   43.44  	if (irq >= NR_IRQS)
   43.45 @@ -755,7 +763,8 @@ EXPORT_SYMBOL(free_irq);
   43.46   * disabled.
   43.47   */
   43.48  
   43.49 -static DECLARE_MUTEX(probe_sem);
   43.50 +#ifndef XEN
    43.51 +static DECLARE_MUTEX(probe_sem);
   43.52  
   43.53  /**
   43.54   *	probe_irq_on	- begin an interrupt autodetect
   43.55 @@ -765,7 +774,6 @@ static DECLARE_MUTEX(probe_sem);
   43.56   *
   43.57   */
   43.58  
   43.59 -#ifndef XEN
   43.60  unsigned long probe_irq_on(void)
   43.61  {
   43.62  	unsigned int i;
   43.63 @@ -936,7 +944,9 @@ EXPORT_SYMBOL(probe_irq_off);
   43.64  
   43.65  int setup_irq(unsigned int irq, struct irqaction * new)
   43.66  {
   43.67 +#ifndef XEN
   43.68  	int shared = 0;
   43.69 +#endif
   43.70  	unsigned long flags;
   43.71  	struct irqaction *old, **p;
   43.72  	irq_desc_t *desc = irq_descp(irq);
   43.73 @@ -1371,7 +1381,7 @@ int pirq_guest_unmask(struct domain *d)
   43.74      return 0;
   43.75  }
   43.76  
   43.77 -int pirq_guest_bind(struct vcpu *d, int irq, int will_share)
   43.78 +int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
   43.79  {
   43.80      irq_desc_t         *desc = &irq_desc[irq];
   43.81      irq_guest_action_t *action;
   43.82 @@ -1431,7 +1441,7 @@ int pirq_guest_bind(struct vcpu *d, int 
   43.83          goto out;
   43.84      }
   43.85  
   43.86 -    action->guest[action->nr_guests++] = d;
   43.87 +    action->guest[action->nr_guests++] = v->domain;
   43.88  
   43.89   out:
   43.90      spin_unlock_irqrestore(&desc->lock, flags);
   43.91 @@ -1480,9 +1490,11 @@ int pirq_guest_unbind(struct domain *d, 
   43.92  #ifdef XEN
   43.93  #ifdef IA64
   43.94  // this is a temporary hack until real console input is implemented
   43.95 +extern void domain_pend_keyboard_interrupt(int irq);
   43.96  irqreturn_t guest_forward_keyboard_input(int irq, void *nada, struct pt_regs *regs)
   43.97  {
   43.98  	domain_pend_keyboard_interrupt(irq);
   43.99 +	return 0;
  43.100  }
  43.101  
  43.102  void serial_input_init(void)
    44.1 --- a/xen/arch/ia64/xen/ivt.S	Thu Mar 02 10:59:34 2006 +0100
    44.2 +++ b/xen/arch/ia64/xen/ivt.S	Thu Mar 02 11:00:49 2006 +0100
    44.3 @@ -298,12 +298,83 @@ ENTRY(dtlb_miss)
    44.4  	DBG_FAULT(2)
    44.5  #ifdef XEN
    44.6  	VHPT_CCHAIN_LOOKUP(dtlb_miss,d)
    44.7 +#if VHPT_ENABLED
    44.8 +	// XXX TODO optimization
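          +	// Fast path: a dtlb miss taken at cpl 0 from vmm code, on an
          +	// address in the identity-mapped areas (0xf0.../0xe8...), is
          +	// satisfied directly with itc.d instead of taking the slower
          +	// reflect/page-fault path below.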
    44.9 +	mov r31=pr				// save predicates
   44.10 +	mov r30=cr.ipsr
   44.11 +	mov r28=cr.iip			
   44.12 +	mov r16=cr.ifa				// get virtual address
   44.13 +	mov r17=cr.isr				// get fault status (isr)
   44.14 +	;;
   44.15 +
   44.16 +	extr.u r18 = r30, IA64_PSR_CPL0_BIT, 2	// extract psr.cpl
   44.17 +	;; 
   44.18 +	cmp.ne p6, p0 = r0, r18			// cpl == 0?
   44.19 +(p6)	br.cond.sptk 2f
   44.20 +
   44.21 +	// is speculation bit on?
   44.22 +	tbit.nz p7,p0=r17,IA64_ISR_SP_BIT	
   44.23 +	;; 
   44.24 +(p7)	br.cond.spnt 2f
   44.25 +
   44.26 +	// is non-access bit on?
   44.27 +	tbit.nz p8,p0=r17,IA64_ISR_NA_BIT	
   44.28 +	;;
   44.29 +(p8)	br.cond.spnt 2f
   44.30 +
   44.31 +	// cr.isr.code == IA64_ISR_CODE_LFETCH?
   44.32 +	and r18=IA64_ISR_CODE_MASK,r17		// get the isr.code field
   44.33 +	;; 
   44.34 +	cmp.eq p9,p0=IA64_ISR_CODE_LFETCH,r18	// check isr.code field
   44.35 +(p9)	br.cond.spnt 2f
   44.36 +
   44.37 +	// Is the faulted iip in vmm area?
   44.38 +	// check [59:58] bit
   44.39 +	// 00, 11: guest
   44.40 +	// 01, 10: vmm
   44.41 +	extr.u r19 = r28, 58, 2
   44.42 +	;; 
   44.43 +	cmp.eq p10, p0 = 0x0, r19
   44.44 +(p10)	br.cond.sptk 2f
   44.45 +	cmp.eq p11, p0 = 0x3, r19
   44.46 +(p11)	br.cond.sptk 2f
   44.47 +
   44.48 +	// Is the faulted address in the identity mapping area?
   44.49 +	// 0xf000... or 0xe8000...
   44.50 +	extr.u r20 = r16, 59, 5
   44.51 +	;; 
   44.52 +	cmp.eq p12, p0 = 0x1e, r20 // (0xf0 >> 3) = 0x1e
   44.53 +(p12)	br.cond.spnt 1f
   44.54 +	cmp.eq p0, p13 = 0x1d, r20 // (0xe8 >> 3) = 0x1d
   44.55 +(p13)	br.cond.sptk 2f
   44.56 +
   44.57 +1:
   44.58 +	// xen identity mapping area.
   44.59 +	movl r24=PAGE_KERNEL
   44.60 +	movl r25=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
   44.61 +	;;
   44.62 +	shr.u r26=r16,55	// move address bit 59 to bit 4
   44.63 +	and r25=r25,r16		// clear ed, reserved bits, and PTE control bits
   44.64 +	;;
   44.65 +	and r26=0x10,r26	// bit 4=address-bit(59)
   44.66 +	;; 
   44.67 +	or r25=r25,r24		// insert PTE control bits into r25
   44.68 +	;;
   44.69 +	or r25=r25,r26		// set bit 4 (uncached) if the access was to region 6
   44.70 +	;;
   44.71 +	itc.d r25		// insert the TLB entry
   44.72 +	mov pr=r31,-1
   44.73 +	rfi
   44.74 +
   44.75 +2:
   44.76 +#endif	
   44.77  #ifdef VHPT_GLOBAL
   44.78  //	br.cond.sptk page_fault
   44.79  	br.cond.sptk fast_tlb_miss_reflect
   44.80  	;;
   44.81  #endif
   44.82 -#endif
   44.83 +	mov r29=b0				// save b0
   44.84 +#else	
   44.85  	/*
   44.86  	 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
   44.87  	 * page table.  If a nested TLB miss occurs, we switch into physical
   44.88 @@ -313,6 +384,7 @@ ENTRY(dtlb_miss)
   44.89  	mov r16=cr.ifa				// get virtual address
   44.90  	mov r29=b0				// save b0
   44.91  	mov r31=pr				// save predicates
   44.92 +#endif
   44.93  dtlb_fault:
   44.94  	mov r17=cr.iha				// get virtual address of L3 PTE
   44.95  	movl r30=1f				// load nested fault continuation point
   44.96 @@ -399,6 +471,9 @@ late_alt_itlb_miss:
   44.97  	;;
   44.98  	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
   44.99  (p8)	br.cond.spnt page_fault
  44.100 +#ifdef XEN
  44.101 +	FORCE_CRASH
  44.102 +#endif	
  44.103  	;;
  44.104  	itc.i r19		// insert the TLB entry
  44.105  	mov pr=r31,-1
    45.1 --- a/xen/arch/ia64/xen/mm_init.c	Thu Mar 02 10:59:34 2006 +0100
    45.2 +++ b/xen/arch/ia64/xen/mm_init.c	Thu Mar 02 11:00:49 2006 +0100
    45.3 @@ -47,6 +47,7 @@
    45.4  #include <asm/uaccess.h>
    45.5  #include <asm/unistd.h>
    45.6  #include <asm/mca.h>
    45.7 +#include <asm/vhpt.h>
    45.8  
    45.9  #ifndef XEN
   45.10  DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
   45.11 @@ -63,7 +64,7 @@ struct page *vmem_map;
   45.12  EXPORT_SYMBOL(vmem_map);
   45.13  #endif
   45.14  
   45.15 -static int pgt_cache_water[2] = { 25, 50 };
   45.16 +// static int pgt_cache_water[2] = { 25, 50 };
   45.17  
   45.18  struct page *zero_page_memmap_ptr;		/* map entry for zero page */
   45.19  EXPORT_SYMBOL(zero_page_memmap_ptr);
   45.20 @@ -222,7 +223,7 @@ inline void
   45.21  ia64_set_rbs_bot (void)
   45.22  {
   45.23  #ifdef XEN
   45.24 -	unsigned stack_size = MAX_USER_STACK_SIZE;
   45.25 +	unsigned long stack_size = MAX_USER_STACK_SIZE;
   45.26  #else
   45.27  	unsigned long stack_size = current->rlim[RLIMIT_STACK].rlim_max & -16;
   45.28  #endif
   45.29 @@ -279,7 +280,7 @@ printf("ia64_init_addr_space: called, no
   45.30  #endif
   45.31  }
   45.32  
   45.33 -setup_gate (void)
   45.34 +void setup_gate (void)
   45.35  {
   45.36  	printk("setup_gate not-implemented.\n");
   45.37  }
   45.38 @@ -287,7 +288,10 @@ setup_gate (void)
   45.39  void __devinit
   45.40  ia64_mmu_init (void *my_cpu_data)
   45.41  {
   45.42 -	unsigned long psr, pta, impl_va_bits;
   45.43 +	unsigned long psr, impl_va_bits;
   45.44 +#if 0
   45.45 +	unsigned long pta;
   45.46 +#endif
   45.47  	extern void __devinit tlb_init (void);
   45.48  	int cpu;
   45.49  
    46.1 --- a/xen/arch/ia64/xen/pcdp.c	Thu Mar 02 10:59:34 2006 +0100
    46.2 +++ b/xen/arch/ia64/xen/pcdp.c	Thu Mar 02 11:00:49 2006 +0100
    46.3 @@ -71,7 +71,9 @@ efi_setup_pcdp_console(char *cmdline)
    46.4  {
    46.5  	struct pcdp *pcdp;
    46.6  	struct pcdp_uart *uart;
    46.7 +#ifndef XEN
    46.8  	struct pcdp_device *dev, *end;
    46.9 +#endif
   46.10  	int i, serial = 0;
   46.11  
   46.12  	pcdp = efi.hcdp;
    47.1 --- a/xen/arch/ia64/xen/privop.c	Thu Mar 02 10:59:34 2006 +0100
    47.2 +++ b/xen/arch/ia64/xen/privop.c	Thu Mar 02 11:00:49 2006 +0100
    47.3 @@ -11,8 +11,13 @@
    47.4  #include <asm/processor.h>
    47.5  #include <asm/delay.h>	// Debug only
    47.6  #include <asm/dom_fw.h>
    47.7 +#include <asm/vhpt.h>
    47.8  //#include <debug.h>
    47.9  
    47.10 +/* FIXME: where should these declarations live? */
   47.11 +extern int dump_reflect_counts(char *);
   47.12 +extern void zero_reflect_counts(void);
   47.13 +
   47.14  long priv_verbose=0;
   47.15  
   47.16  /**************************************************************************
   47.17 @@ -524,7 +529,7 @@ IA64FAULT priv_mov_from_psr(VCPU *vcpu, 
   47.18  Privileged operation decode and dispatch routines
   47.19  **************************************************************************/
   47.20  
   47.21 -IA64_SLOT_TYPE slot_types[0x20][3] = {
   47.22 +static const IA64_SLOT_TYPE slot_types[0x20][3] = {
   47.23  	{M, I, I}, {M, I, I}, {M, I, I}, {M, I, I},
   47.24  	{M, I, ILLEGAL}, {M, I, ILLEGAL},
   47.25  	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
   47.26 @@ -544,7 +549,7 @@ IA64_SLOT_TYPE slot_types[0x20][3] = {
   47.27  // pointer to privileged emulation function
   47.28  typedef IA64FAULT (*PPEFCN)(VCPU *vcpu, INST64 inst);
   47.29  
   47.30 -PPEFCN Mpriv_funcs[64] = {
   47.31 +static const PPEFCN Mpriv_funcs[64] = {
   47.32    priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
   47.33    priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
   47.34    0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
   47.35 @@ -600,7 +605,7 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
   47.36  	if (__copy_from_user(&bundle,iip,sizeof(bundle)))
   47.37  #endif
   47.38  	{
   47.39 -//printf("*** priv_handle_op: privop bundle @%p not mapped, retrying\n",iip);
   47.40 +//printf("*** priv_handle_op: privop bundle at 0x%lx not mapped, retrying\n",iip);
   47.41  		return vcpu_force_data_miss(vcpu,regs->cr_iip);
   47.42  	}
   47.43  #if 0
   47.44 @@ -613,8 +618,8 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
   47.45  #endif
   47.46  	if (privop_trace) {
   47.47  		static long i = 400;
   47.48 -		//if (i > 0) printf("privop @%p\n",iip);
   47.49 -		if (i > 0) printf("priv_handle_op: @%p, itc=%lx, itm=%lx\n",
   47.50 +		//if (i > 0) printf("priv_handle_op: at 0x%lx\n",iip);
   47.51 +		if (i > 0) printf("priv_handle_op: privop trace at 0x%lx, itc=%lx, itm=%lx\n",
   47.52  			iip,ia64_get_itc(),ia64_get_itm());
   47.53  		i--;
   47.54  	}
   47.55 @@ -727,7 +732,7 @@ priv_handle_op(VCPU *vcpu, REGS *regs, i
   47.56  		break;
   47.57  	}
   47.58          //printf("We who are about do die salute you\n");
   47.59 -	printf("handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d), ipsr=%p\n",
   47.60 +	printf("priv_handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d), ipsr=0x%lx\n",
   47.61  		 iip, (UINT64)inst.inst, slot, slot_type, ipsr);
   47.62          //printf("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
   47.63          //thread_mozambique("privop fault\n");
   47.64 @@ -768,7 +773,7 @@ priv_emulate(VCPU *vcpu, REGS *regs, UIN
   47.65  		(void)vcpu_increment_iip(vcpu);
   47.66  	}
   47.67  	if (fault == IA64_ILLOP_FAULT)
   47.68 -		printf("priv_emulate: priv_handle_op fails, isr=%p\n",isr);
   47.69 +		printf("priv_emulate: priv_handle_op fails, isr=0x%lx\n",isr);
   47.70  	return fault;
   47.71  }
   47.72  
   47.73 @@ -794,11 +799,10 @@ priv_emulate(VCPU *vcpu, REGS *regs, UIN
   47.74  #define HYPERPRIVOP_SET_KR		0x12
   47.75  #define HYPERPRIVOP_MAX			0x12
   47.76  
   47.77 -char *hyperpriv_str[HYPERPRIVOP_MAX+1] = {
   47.78 +static const char * const hyperpriv_str[HYPERPRIVOP_MAX+1] = {
   47.79  	0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i", "ssm.i",
   47.80  	"=ivr", "=tpr", "tpr=", "eoi", "itm=", "thash", "ptc.ga", "itr.d",
   47.81 -	"=rr", "rr=", "kr=",
   47.82 -	0
   47.83 +	"=rr", "rr=", "kr="
   47.84  };
   47.85  
   47.86  unsigned long slow_hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
   47.87 @@ -809,15 +813,14 @@ unsigned long fast_hyperpriv_cnt[HYPERPR
   47.88  int
   47.89  ia64_hyperprivop(unsigned long iim, REGS *regs)
   47.90  {
   47.91 -	struct vcpu *v = (struct domain *) current;
   47.92 -	INST64 inst;
   47.93 +	struct vcpu *v = current;
   47.94  	UINT64 val;
   47.95  	UINT64 itir, ifa;
   47.96  
   47.97  // FIXME: Handle faults appropriately for these
   47.98  	if (!iim || iim > HYPERPRIVOP_MAX) {
   47.99  		printf("bad hyperprivop; ignored\n");
  47.100 -		printf("iim=%d, iip=%p\n",iim,regs->cr_iip);
  47.101 +		printf("iim=%lx, iip=0x%lx\n", iim, regs->cr_iip);
  47.102  		return 1;
  47.103  	}
  47.104  	slow_hyperpriv_cnt[iim]++;
  47.105 @@ -895,7 +898,7 @@ ia64_hyperprivop(unsigned long iim, REGS
  47.106  Privileged operation instrumentation routines
  47.107  **************************************************************************/
  47.108  
  47.109 -char *Mpriv_str[64] = {
  47.110 +static const char * const Mpriv_str[64] = {
  47.111    "mov_to_rr", "mov_to_dbr", "mov_to_ibr", "mov_to_pkr",
  47.112    "mov_to_pmc", "mov_to_pmd", "<0x06>", "<0x07>",
  47.113    "<0x08>", "ptc_l", "ptc_g", "ptc_ga",
  47.114 @@ -915,7 +918,7 @@ char *Mpriv_str[64] = {
  47.115  };
  47.116  
  47.117  #define RS "Rsvd"
  47.118 -char *cr_str[128] = {
  47.119 +static const char * const cr_str[128] = {
  47.120    "dcr","itm","iva",RS,RS,RS,RS,RS,
  47.121    "pta",RS,RS,RS,RS,RS,RS,RS,
  47.122    "ipsr","isr",RS,"iip","ifa","itir","iipa","ifs",
  47.123 @@ -946,48 +949,48 @@ int dump_privop_counts(char *buf)
  47.124  	for (i=0; i < 64; i++) sum += privcnt.Mpriv_cnt[i];
  47.125  	s += sprintf(s,"Privop statistics: (Total privops: %ld)\n",sum);
  47.126  	if (privcnt.mov_to_ar_imm)
  47.127 -		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.mov_to_ar_imm,
  47.128 +		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.mov_to_ar_imm,
  47.129  			"mov_to_ar_imm", (privcnt.mov_to_ar_imm*100L)/sum);
  47.130  	if (privcnt.mov_to_ar_reg)
  47.131 -		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.mov_to_ar_reg,
  47.132 +		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.mov_to_ar_reg,
  47.133  			"mov_to_ar_reg", (privcnt.mov_to_ar_reg*100L)/sum);
  47.134  	if (privcnt.mov_from_ar)
  47.135 -		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.mov_from_ar,
  47.136 +		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.mov_from_ar,
  47.137  			"privified-mov_from_ar", (privcnt.mov_from_ar*100L)/sum);
  47.138  	if (privcnt.ssm)
  47.139 -		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.ssm,
  47.140 +		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.ssm,
  47.141  			"ssm", (privcnt.ssm*100L)/sum);
  47.142  	if (privcnt.rsm)
  47.143 -		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.rsm,
  47.144 +		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.rsm,
  47.145  			"rsm", (privcnt.rsm*100L)/sum);
  47.146  	if (privcnt.rfi)
  47.147 -		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.rfi,
  47.148 +		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.rfi,
  47.149  			"rfi", (privcnt.rfi*100L)/sum);
  47.150  	if (privcnt.bsw0)
  47.151 -		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.bsw0,
  47.152 +		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.bsw0,
  47.153  			"bsw0", (privcnt.bsw0*100L)/sum);
  47.154  	if (privcnt.bsw1)
  47.155 -		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.bsw1,
  47.156 +		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.bsw1,
  47.157  			"bsw1", (privcnt.bsw1*100L)/sum);
  47.158  	if (privcnt.cover)
  47.159 -		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.cover,
  47.160 +		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.cover,
  47.161  			"cover", (privcnt.cover*100L)/sum);
  47.162  	if (privcnt.fc)
  47.163 -		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.fc,
  47.164 +		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.fc,
  47.165  			"privified-fc", (privcnt.fc*100L)/sum);
  47.166  	if (privcnt.cpuid)
  47.167 -		s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.cpuid,
  47.168 +		s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.cpuid,
  47.169  			"privified-getcpuid", (privcnt.cpuid*100L)/sum);
  47.170  	for (i=0; i < 64; i++) if (privcnt.Mpriv_cnt[i]) {
  47.171  		if (!Mpriv_str[i]) s += sprintf(s,"PRIVSTRING NULL!!\n");
  47.172 -		else s += sprintf(s,"%10d  %s [%d%%]\n", privcnt.Mpriv_cnt[i],
  47.173 +		else s += sprintf(s,"%10ld  %s [%ld%%]\n", privcnt.Mpriv_cnt[i],
  47.174  			Mpriv_str[i], (privcnt.Mpriv_cnt[i]*100L)/sum);
  47.175  		if (i == 0x24) { // mov from CR
  47.176  			s += sprintf(s,"            [");
  47.177  			for (j=0; j < 128; j++) if (from_cr_cnt[j]) {
  47.178  				if (!cr_str[j])
  47.179  					s += sprintf(s,"PRIVSTRING NULL!!\n");
  47.180 -				s += sprintf(s,"%s(%d),",cr_str[j],from_cr_cnt[j]);
  47.181 +				s += sprintf(s,"%s(%ld),",cr_str[j],from_cr_cnt[j]);
  47.182  			}
  47.183  			s += sprintf(s,"]\n");
  47.184  		}
  47.185 @@ -996,7 +999,7 @@ int dump_privop_counts(char *buf)
  47.186  			for (j=0; j < 128; j++) if (to_cr_cnt[j]) {
  47.187  				if (!cr_str[j])
  47.188  					s += sprintf(s,"PRIVSTRING NULL!!\n");
  47.189 -				s += sprintf(s,"%s(%d),",cr_str[j],to_cr_cnt[j]);
  47.190 +				s += sprintf(s,"%s(%ld),",cr_str[j],to_cr_cnt[j]);
  47.191  			}
  47.192  			s += sprintf(s,"]\n");
  47.193  		}
  47.194 @@ -1050,7 +1053,7 @@ int dump_privop_addrs(char *buf)
  47.195  		s += sprintf(s,"%s:\n",v->instname);
  47.196  		for (j = 0; j < PRIVOP_COUNT_NADDRS; j++) {
  47.197  			if (!v->addr[j]) break;
  47.198 -			s += sprintf(s," @%p #%ld\n",v->addr[j],v->count[j]);
  47.199 +			s += sprintf(s," at 0x%lx #%ld\n",v->addr[j],v->count[j]);
  47.200  		}
  47.201  		if (v->overflow) 
  47.202  			s += sprintf(s," other #%ld\n",v->overflow);
  47.203 @@ -1085,17 +1088,17 @@ extern unsigned long context_switch_coun
  47.204  int dump_misc_stats(char *buf)
  47.205  {
  47.206  	char *s = buf;
  47.207 -	s += sprintf(s,"Virtual TR translations: %d\n",tr_translate_count);
  47.208 -	s += sprintf(s,"Virtual VHPT slow translations: %d\n",vhpt_translate_count);
  47.209 -	s += sprintf(s,"Virtual VHPT fast translations: %d\n",fast_vhpt_translate_count);
  47.210 -	s += sprintf(s,"Virtual DTLB translations: %d\n",dtlb_translate_count);
  47.211 -	s += sprintf(s,"Physical translations: %d\n",phys_translate_count);
  47.212 -	s += sprintf(s,"Recoveries to page fault: %d\n",recover_to_page_fault_count);
  47.213 -	s += sprintf(s,"Recoveries to break fault: %d\n",recover_to_break_fault_count);
  47.214 -	s += sprintf(s,"Idle when pending: %d\n",idle_when_pending);
  47.215 -	s += sprintf(s,"PAL_HALT_LIGHT (no pending): %d\n",pal_halt_light_count);
  47.216 -	s += sprintf(s,"context switches: %d\n",context_switch_count);
  47.217 -	s += sprintf(s,"Lazy covers: %d\n",lazy_cover_count);
  47.218 +	s += sprintf(s,"Virtual TR translations: %ld\n",tr_translate_count);
  47.219 +	s += sprintf(s,"Virtual VHPT slow translations: %ld\n",vhpt_translate_count);
  47.220 +	s += sprintf(s,"Virtual VHPT fast translations: %ld\n",fast_vhpt_translate_count);
  47.221 +	s += sprintf(s,"Virtual DTLB translations: %ld\n",dtlb_translate_count);
  47.222 +	s += sprintf(s,"Physical translations: %ld\n",phys_translate_count);
  47.223 +	s += sprintf(s,"Recoveries to page fault: %ld\n",recover_to_page_fault_count);
  47.224 +	s += sprintf(s,"Recoveries to break fault: %ld\n",recover_to_break_fault_count);
  47.225 +	s += sprintf(s,"Idle when pending: %ld\n",idle_when_pending);
  47.226 +	s += sprintf(s,"PAL_HALT_LIGHT (no pending): %ld\n",pal_halt_light_count);
  47.227 +	s += sprintf(s,"context switches: %ld\n",context_switch_count);
  47.228 +	s += sprintf(s,"Lazy covers: %ld\n",lazy_cover_count);
  47.229  	return s - buf;
  47.230  }
  47.231  
  47.232 @@ -1120,17 +1123,17 @@ int dump_hyperprivop_counts(char *buf)
  47.233  	char *s = buf;
  47.234  	unsigned long total = 0;
  47.235  	for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += slow_hyperpriv_cnt[i];
  47.236 -	s += sprintf(s,"Slow hyperprivops (total %d):\n",total);
  47.237 +	s += sprintf(s,"Slow hyperprivops (total %ld):\n",total);
  47.238  	for (i = 1; i <= HYPERPRIVOP_MAX; i++)
  47.239  		if (slow_hyperpriv_cnt[i])
  47.240 -			s += sprintf(s,"%10d %s\n",
  47.241 +			s += sprintf(s,"%10ld %s\n",
  47.242  				slow_hyperpriv_cnt[i], hyperpriv_str[i]);
  47.243  	total = 0;
  47.244  	for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += fast_hyperpriv_cnt[i];
  47.245 -	s += sprintf(s,"Fast hyperprivops (total %d):\n",total);
  47.246 +	s += sprintf(s,"Fast hyperprivops (total %ld):\n",total);
  47.247  	for (i = 1; i <= HYPERPRIVOP_MAX; i++)
  47.248  		if (fast_hyperpriv_cnt[i])
  47.249 -			s += sprintf(s,"%10d %s\n",
  47.250 +			s += sprintf(s,"%10ld %s\n",
  47.251  				fast_hyperpriv_cnt[i], hyperpriv_str[i]);
  47.252  	return s - buf;
  47.253  }
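
Most of the privop.c hunks above swap %d and %p for %ld and 0x%lx. On an LP64 target such as ia64, int is 32 bits while unsigned long is 64, so a mismatched %d pops only half of each vararg and the printed counters come out as garbage. A minimal standalone sketch of the corrected pattern (the counter value is hypothetical; strictly %lu is the unsigned specifier, but the patch's %ld is safe for values below 2^63):

    #include <stdio.h>

    int main(void)
    {
        /* LP64 (as on ia64): int is 32-bit, unsigned long is 64-bit. */
        unsigned long cnt = 1UL << 40;  /* hypothetical counter value */

        /* Wrong: %d consumes a 32-bit int for a 64-bit argument --
         * undefined behaviour, typically printing garbage. */
        /* printf("%10d\n", cnt); */

        /* Right: the l length modifier matches unsigned long. */
        printf("%10ld\n", cnt);
        return 0;
    }
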
    48.1 --- a/xen/arch/ia64/xen/process.c	Thu Mar 02 10:59:34 2006 +0100
    48.2 +++ b/xen/arch/ia64/xen/process.c	Thu Mar 02 11:00:49 2006 +0100
    48.3 @@ -33,8 +33,14 @@
    48.4  #include <xen/multicall.h>
    48.5  #include <asm/debugger.h>
    48.6  
    48.7 -extern unsigned long vcpu_get_itir_on_fault(struct vcpu *, UINT64);
    48.8  extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
    48.9 +/* FIXME: where should these declarations be? */
   48.10 +extern void load_region_regs(struct vcpu *);
   48.11 +extern void panic_domain(struct pt_regs *, const char *, ...);
   48.12 +extern long platform_is_hp_ski(void);
   48.13 +extern int ia64_hyperprivop(unsigned long, REGS *);
   48.14 +extern int ia64_hypercall(struct pt_regs *regs);
   48.15 +extern void vmx_do_launch(struct vcpu *);
   48.16  
   48.17  extern unsigned long dom0_start, dom0_size;
   48.18  
   48.19 @@ -94,18 +100,21 @@ unsigned long translate_domain_pte(unsig
   48.20  	extern unsigned long dom0_start, dom0_size;
   48.21  
   48.22  	// FIXME address had better be pre-validated on insert
   48.23 -	mask = (1L << ((itir >> 2) & 0x3f)) - 1;
   48.24 +	mask = ~itir_mask(itir);
   48.25  	mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
   48.26  	if (d == dom0) {
   48.27  		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
   48.28 -			//printk("translate_domain_pte: out-of-bounds dom0 mpaddr %p! itc=%lx...\n",mpaddr,ia64_get_itc());
   48.29 +			/*
   48.30 +			printk("translate_domain_pte: out-of-bounds dom0 mpaddr 0x%lx! itc=%lx...\n",
   48.31 +				mpaddr, ia64_get_itc());
   48.32 +			*/
   48.33  			tdpfoo();
   48.34  		}
   48.35  	}
   48.36  	else if ((mpaddr >> PAGE_SHIFT) > d->max_pages) {
   48.37  		if ((mpaddr & ~0x1fffL ) != (1L << 40))
   48.38 -		printf("translate_domain_pte: bad mpa=%p (> %p),vadr=%p,pteval=%p,itir=%p\n",
   48.39 -			mpaddr,d->max_pages<<PAGE_SHIFT,address,pteval,itir);
   48.40 +		printf("translate_domain_pte: bad mpa=0x%lx (> 0x%lx),vadr=0x%lx,pteval=0x%lx,itir=0x%lx\n",
   48.41 +			mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT, address, pteval, itir);
   48.42  		tdpfoo();
   48.43  	}
   48.44  	pteval2 = lookup_domain_mpa(d,mpaddr);
   48.45 @@ -123,7 +132,8 @@ unsigned long translate_domain_mpaddr(un
   48.46  
   48.47  	if (current->domain == dom0) {
   48.48  		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
   48.49 -			printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr %p! continuing...\n",mpaddr);
   48.50 +			printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr 0x%lx! continuing...\n",
   48.51 +				mpaddr);
   48.52  			tdpfoo();
   48.53  		}
   48.54  	}
   48.55 @@ -150,7 +160,7 @@ int dump_reflect_counts(char *buf)
   48.56  
   48.57  	s += sprintf(s,"Slow reflections by vector:\n");
   48.58  	for (i = 0, j = 0; i < 0x80; i++) {
   48.59 -		if (cnt = slow_reflect_count[i]) {
   48.60 +		if ( (cnt = slow_reflect_count[i]) != 0 ) {
   48.61  			s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
   48.62  			if ((j++ & 3) == 3) s += sprintf(s,"\n");
   48.63  		}
   48.64 @@ -158,7 +168,7 @@ int dump_reflect_counts(char *buf)
   48.65  	if (j & 3) s += sprintf(s,"\n");
   48.66  	s += sprintf(s,"Fast reflections by vector:\n");
   48.67  	for (i = 0, j = 0; i < 0x80; i++) {
   48.68 -		if (cnt = fast_reflect_count[i]) {
   48.69 +		if ( (cnt = fast_reflect_count[i]) != 0 ) {
   48.70  			s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
   48.71  			if ((j++ & 3) == 3) s += sprintf(s,"\n");
   48.72  		}
   48.73 @@ -186,7 +196,6 @@ panic_domain(regs,"psr.ic off, deliverin
   48.74  
   48.75  void reflect_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector)
   48.76  {
   48.77 -	unsigned long vcpu_get_ipsr_int_state(struct vcpu *,unsigned long);
   48.78  	struct vcpu *v = current;
   48.79  
   48.80  	if (!PSCB(v,interrupt_collection_enabled))
   48.81 @@ -205,7 +214,7 @@ void reflect_interruption(unsigned long 
   48.82  #ifdef CONFIG_SMP
   48.83  #warning "SMP FIXME: sharedinfo doesn't handle smp yet, need page per vcpu"
   48.84  #endif
   48.85 -	regs->r31 = &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr);
   48.86 +	regs->r31 = (unsigned long) &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr);
   48.87  
   48.88  	PSCB(v,interrupt_delivery_enabled) = 0;
   48.89  	PSCB(v,interrupt_collection_enabled) = 0;
   48.90 @@ -219,14 +228,12 @@ unsigned long pending_false_positive = 0
   48.91  
   48.92  void reflect_extint(struct pt_regs *regs)
   48.93  {
   48.94 -	extern unsigned long vcpu_verbose, privop_trace;
   48.95  	unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
   48.96  	struct vcpu *v = current;
   48.97 -	static first_extint = 1;
   48.98 +	static int first_extint = 1;
   48.99  
  48.100  	if (first_extint) {
  48.101 -		printf("Delivering first extint to domain: isr=%p, iip=%p\n",isr,regs->cr_iip);
  48.102 -		//privop_trace = 1; vcpu_verbose = 1;
  48.103 +		printf("Delivering first extint to domain: isr=0x%lx, iip=0x%lx\n", isr, regs->cr_iip);
  48.104  		first_extint = 0;
  48.105  	}
  48.106  	if (vcpu_timer_pending_early(v))
  48.107 @@ -297,11 +304,11 @@ void ia64_do_page_fault (unsigned long a
  48.108  			// should never happen.  If it does, region 0 addr may
  48.109  			// indicate a bad xen pointer
  48.110  			printk("*** xen_handle_domain_access: exception table"
  48.111 -			       " lookup failed, iip=%p, addr=%p, spinning...\n",
  48.112 -				iip,address);
  48.113 +			       " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
  48.114 +				iip, address);
  48.115  			panic_domain(regs,"*** xen_handle_domain_access: exception table"
  48.116 -			       " lookup failed, iip=%p, addr=%p, spinning...\n",
  48.117 -				iip,address);
  48.118 +			       " lookup failed, iip=0x%lx, addr=0x%lx, spinning...\n",
  48.119 +				iip, address);
  48.120  		}
  48.121  		return;
  48.122  	}
  48.123 @@ -329,10 +336,9 @@ ia64_fault (unsigned long vector, unsign
  48.124  	    unsigned long arg6, unsigned long arg7, unsigned long stack)
  48.125  {
  48.126  	struct pt_regs *regs = (struct pt_regs *) &stack;
  48.127 -	unsigned long code, error = isr;
  48.128 +	unsigned long code;
  48.129  	char buf[128];
  48.130 -	int result, sig;
  48.131 -	static const char *reason[] = {
  48.132 +	static const char * const reason[] = {
  48.133  		"IA-64 Illegal Operation fault",
  48.134  		"IA-64 Privileged Operation fault",
  48.135  		"IA-64 Privileged Register fault",
  48.136 @@ -543,7 +549,6 @@ do_ssc(unsigned long ssc, struct pt_regs
  48.137  /**/	static int last_fd, last_count;	// FIXME FIXME FIXME
  48.138  /**/					// BROKEN FOR MULTIPLE DOMAINS & SMP
  48.139  /**/	struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
  48.140 -	extern unsigned long vcpu_verbose, privop_trace;
  48.141  
  48.142  	arg0 = vcpu_get_gr(current,32);
  48.143  	switch(ssc) {
  48.144 @@ -588,11 +593,11 @@ if (!running_on_sim) { printf("SSC_OPEN,
  48.145  		arg3 = vcpu_get_gr(current,35);
  48.146  		if (arg2) {	// metaphysical address of descriptor
  48.147  			struct ssc_disk_req *req;
  48.148 -			unsigned long mpaddr, paddr;
  48.149 +			unsigned long mpaddr;
  48.150  			long len;
  48.151  
  48.152  			arg2 = translate_domain_mpaddr(arg2);
  48.153 -			req = (struct disk_req *)__va(arg2);
  48.154 +			req = (struct ssc_disk_req *) __va(arg2);
  48.155  			req->len &= 0xffffffffL;	// avoid strange bug
  48.156  			len = req->len;
  48.157  /**/			last_fd = arg1;
  48.158 @@ -640,7 +645,8 @@ if (!running_on_sim) { printf("SSC_OPEN,
  48.159  		vcpu_set_gr(current,8,-1L,0);
  48.160  		break;
  48.161  	    default:
  48.162 -		printf("ia64_handle_break: bad ssc code %lx, iip=%p, b0=%p... spinning\n",ssc,regs->cr_iip,regs->b0);
  48.163 +		printf("ia64_handle_break: bad ssc code %lx, iip=0x%lx, b0=0x%lx... spinning\n",
  48.164 +			ssc, regs->cr_iip, regs->b0);
  48.165  		while(1);
  48.166  		break;
  48.167  	}
  48.168 @@ -696,8 +702,7 @@ void
  48.169  ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
  48.170  {
  48.171  	IA64FAULT vector;
  48.172 -	struct domain *d = current->domain;
  48.173 -	struct vcpu *v = current;
  48.174 +
  48.175  	vector = priv_emulate(current,regs,isr);
  48.176  	if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
  48.177  		// Note: if a path results in a vector to reflect that requires
  48.178 @@ -712,8 +717,7 @@ UINT64 int_counts[INTR_TYPE_MAX];
  48.179  void
  48.180  ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
  48.181  {
  48.182 -	struct domain *d = (struct domain *) current->domain;
  48.183 -	struct vcpu *v = (struct domain *) current;
  48.184 +	struct vcpu *v = current;
  48.185  	unsigned long check_lazy_cover = 0;
  48.186  	unsigned long psr = regs->cr_ipsr;
  48.187  
  48.188 @@ -753,7 +757,8 @@ ia64_handle_reflection (unsigned long if
  48.189  		}
  48.190  #endif
  48.191  printf("*** NaT fault... attempting to handle as privop\n");
  48.192 -printf("isr=%p, ifa=%p,iip=%p,ipsr=%p\n",isr,ifa,regs->cr_iip,psr);
  48.193 +printf("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
  48.194 +       isr, ifa, regs->cr_iip, psr);
  48.195  		//regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
  48.196  		// certain NaT faults are higher priority than privop faults
  48.197  		vector = priv_emulate(v,regs,isr);
  48.198 @@ -800,8 +805,7 @@ unsigned long hypercall_create_continuat
  48.199  	unsigned int op, const char *format, ...)
  48.200  {
  48.201      struct mc_state *mcs = &mc_state[smp_processor_id()];
  48.202 -    VCPU *vcpu = current;
  48.203 -    struct cpu_user_regs *regs = vcpu_regs(vcpu);
  48.204 +    struct vcpu *v = current;
  48.205      const char *p = format;
  48.206      unsigned long arg;
  48.207      unsigned int i;
  48.208 @@ -811,7 +815,7 @@ unsigned long hypercall_create_continuat
  48.209      if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
  48.210  	panic("PREEMPT happen in multicall\n");	// Not support yet
  48.211      } else {
  48.212 -	vcpu_set_gr(vcpu, 2, op, 0);
  48.213 +	vcpu_set_gr(v, 2, op, 0);
  48.214  	for ( i = 0; *p != '\0'; i++) {
  48.215              switch ( *p++ )
  48.216              {
  48.217 @@ -830,22 +834,22 @@ unsigned long hypercall_create_continuat
  48.218                  BUG();
  48.219              }
  48.220  	    switch (i) {
  48.221 -	    case 0: vcpu_set_gr(vcpu, 14, arg, 0);
  48.222 +	    case 0: vcpu_set_gr(v, 14, arg, 0);
  48.223  		    break;
  48.224 -	    case 1: vcpu_set_gr(vcpu, 15, arg, 0);
  48.225 +	    case 1: vcpu_set_gr(v, 15, arg, 0);
  48.226  		    break;
  48.227 -	    case 2: vcpu_set_gr(vcpu, 16, arg, 0);
  48.228 +	    case 2: vcpu_set_gr(v, 16, arg, 0);
  48.229  		    break;
  48.230 -	    case 3: vcpu_set_gr(vcpu, 17, arg, 0);
  48.231 +	    case 3: vcpu_set_gr(v, 17, arg, 0);
  48.232  		    break;
  48.233 -	    case 4: vcpu_set_gr(vcpu, 18, arg, 0);
  48.234 +	    case 4: vcpu_set_gr(v, 18, arg, 0);
  48.235  		    break;
  48.236  	    default: panic("Too many args for hypercall continuation\n");
  48.237  		    break;
  48.238  	    }
  48.239  	}
  48.240      }
  48.241 -    vcpu->arch.hypercall_continuation = 1;
  48.242 +    v->arch.hypercall_continuation = 1;
  48.243      va_end(args);
  48.244      return op;
  48.245  }
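
The translate_domain_pte() hunk (48.23-24) replaces the open-coded mask with ~itir_mask(itir); the helpers are the same pair deleted from vcpu.c later in this changeset (hunk 51.228-230), so they presumably now live in a shared header. For reference:

    /* itir bits 2..7 encode log2 of the page size. */
    #define itir_ps(itir)   (((itir) >> 2) & 0x3f)
    /* itir_mask() keeps the page-number bits for that page size... */
    #define itir_mask(itir) (~((1UL << itir_ps(itir)) - 1))

    /* ...so ~itir_mask(itir) == (1UL << ps) - 1, which is exactly the
     * old open-coded expression (1L << ((itir >> 2) & 0x3f)) - 1. */
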
    49.1 --- a/xen/arch/ia64/xen/regionreg.c	Thu Mar 02 10:59:34 2006 +0100
    49.2 +++ b/xen/arch/ia64/xen/regionreg.c	Thu Mar 02 11:00:49 2006 +0100
    49.3 @@ -18,6 +18,8 @@
    49.4  extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info, unsigned long p_vhpt, unsigned long v_pal);
    49.5  extern void *pal_vaddr;
    49.6  
     49.7 +/* FIXME: where should these declarations be? */
    49.8 +extern void panic_domain(struct pt_regs *, const char *, ...);
    49.9  
   49.10  #define	IA64_MIN_IMPL_RID_BITS	(IA64_MIN_IMPL_RID_MSB+1)
   49.11  #define	IA64_MAX_IMPL_RID_BITS	24
   49.12 @@ -142,7 +144,7 @@ int allocate_rid_range(struct domain *d,
   49.13  	// setup domain struct
   49.14  	d->arch.rid_bits = ridbits;
   49.15  	d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS; d->arch.ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
   49.16 -printf("###allocating rid_range, domain %p: starting_rid=%lx, ending_rid=%lx\n",
   49.17 +printf("###allocating rid_range, domain %p: starting_rid=%x, ending_rid=%x\n",
   49.18  d,d->arch.starting_rid, d->arch.ending_rid);
   49.19  	
   49.20  	return 1;
   49.21 @@ -211,7 +213,6 @@ int set_one_rr(unsigned long rr, unsigne
   49.22  	unsigned long rreg = REGION_NUMBER(rr);
   49.23  	ia64_rr rrv, newrrv, memrrv;
   49.24  	unsigned long newrid;
   49.25 -	extern unsigned long vhpt_paddr;
   49.26  
   49.27  	if (val == -1) return 1;
   49.28  
   49.29 @@ -220,8 +221,8 @@ int set_one_rr(unsigned long rr, unsigne
   49.30  	newrid = v->arch.starting_rid + rrv.rid;
   49.31  
   49.32  	if (newrid > v->arch.ending_rid) {
   49.33 -		printk("can't set rr%d to %lx, starting_rid=%lx,"
   49.34 -			"ending_rid=%lx, val=%lx\n", rreg, newrid,
   49.35 +		printk("can't set rr%d to %lx, starting_rid=%x,"
   49.36 +			"ending_rid=%x, val=%lx\n", (int) rreg, newrid,
   49.37  			v->arch.starting_rid,v->arch.ending_rid,val);
   49.38  		return 0;
   49.39  	}
   49.40 @@ -249,10 +250,12 @@ int set_one_rr(unsigned long rr, unsigne
   49.41  	newrrv.rid = newrid;
   49.42  	newrrv.ve = 1;  // VHPT now enabled for region 7!!
   49.43  	newrrv.ps = PAGE_SHIFT;
   49.44 -	if (rreg == 0) v->arch.metaphysical_saved_rr0 =
   49.45 -		vmMangleRID(newrrv.rrval);
   49.46 -	if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
   49.47 -				v->arch.privregs, vhpt_paddr, pal_vaddr);
   49.48 +	if (rreg == 0)
   49.49 +		v->arch.metaphysical_saved_rr0 = vmMangleRID(newrrv.rrval);
   49.50 +	else if (rreg == 7)
   49.51 +		ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
   49.52 +			     v->arch.privregs, __get_cpu_var(vhpt_paddr),
   49.53 +			     (unsigned long) pal_vaddr);
   49.54  	else set_rr(rr,newrrv.rrval);
   49.55  #endif
   49.56  	return 1;
   49.57 @@ -262,11 +265,12 @@ int set_one_rr(unsigned long rr, unsigne
   49.58  int set_metaphysical_rr0(void)
   49.59  {
   49.60  	struct vcpu *v = current;
   49.61 -	ia64_rr rrv;
   49.62 +//	ia64_rr rrv;
   49.63  	
   49.64  //	rrv.ve = 1; 	FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
   49.65  	ia64_set_rr(0,v->arch.metaphysical_rr0);
   49.66  	ia64_srlz_d();
   49.67 +	return 1;
   49.68  }
   49.69  
   49.70  // validates/changes region registers 0-6 in the currently executing domain
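
The set_one_rr() rewrite (49.44-54) is more than style. With two independent ifs, the trailing else bound to the second if, so a region-0 update fell through into set_rr() as well; the chained form runs exactly one arm. Schematically, with placeholder helpers standing in for the real calls:

    /* Before: the else pairs with `if (rreg == 7)`, so rreg == 0
     * executed both save_rr0() and set_rr(). */
    if (rreg == 0) save_rr0();
    if (rreg == 7) new_rr7();
    else           set_rr();        /* runs for every rreg != 7 */

    /* After: exactly one arm per call. */
    if (rreg == 0)      save_rr0();
    else if (rreg == 7) new_rr7();
    else                set_rr();
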
    50.1 --- a/xen/arch/ia64/xen/sn_console.c	Thu Mar 02 10:59:34 2006 +0100
    50.2 +++ b/xen/arch/ia64/xen/sn_console.c	Thu Mar 02 11:00:49 2006 +0100
    50.3 @@ -9,7 +9,13 @@
    50.4  #include <asm/sn/sn_sal.h>
    50.5  #include <xen/serial.h>
    50.6  
    50.7 -void sn_putc(struct serial_port *, char);
    50.8 +/*
    50.9 + * sn_putc - Send a character to the console, polled or interrupt mode
   50.10 + */
   50.11 +static void sn_putc(struct serial_port *port, char c)
   50.12 +{
   50.13 +	ia64_sn_console_putc(c);
   50.14 +}
   50.15  
   50.16  static struct uart_driver sn_sal_console = {
   50.17  	.putc = sn_putc,
   50.18 @@ -75,11 +81,3 @@ int __init sn_serial_console_early_setup
   50.19  
   50.20  	return 0;
   50.21  }
   50.22 -
   50.23 -/*
   50.24 - * sn_putc - Send a character to the console, polled or interrupt mode
   50.25 - */
   50.26 -void sn_putc(struct serial_port *port, char c)
   50.27 -{
   50.28 -	return ia64_sn_console_putc(c);
   50.29 -}
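
The sn_console.c change defines sn_putc() once, static and ahead of its only user, instead of a forward declaration plus a trailing definition that returned the value of a call from a void function. Reduced to its shape:

    /* Defining the handler static and before the table lets the
     * initializer reference it without a separate prototype. */
    static void sn_putc(struct serial_port *port, char c)
    {
        ia64_sn_console_putc(c);    /* no `return` of the call's value */
    }

    static struct uart_driver sn_sal_console = {
        .putc = sn_putc,
    };
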
    51.1 --- a/xen/arch/ia64/xen/vcpu.c	Thu Mar 02 10:59:34 2006 +0100
    51.2 +++ b/xen/arch/ia64/xen/vcpu.c	Thu Mar 02 11:00:49 2006 +0100
    51.3 @@ -21,8 +21,16 @@ int in_tpa = 0;
    51.4  #include <asm/processor.h>
    51.5  #include <asm/delay.h>
    51.6  #include <asm/vmx_vcpu.h>
    51.7 +#include <asm/vhpt.h>
    51.8 +#include <asm/tlbflush.h>
    51.9  #include <xen/event.h>
   51.10  
    51.11 +/* FIXME: where should these declarations be? */
   51.12 +extern void getreg(unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs);
   51.13 +extern void setreg(unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs);
   51.14 +extern void panic_domain(struct pt_regs *, const char *, ...);
   51.15 +extern int set_metaphysical_rr0(void);
   51.16 +
   51.17  typedef	union {
   51.18  	struct ia64_psr ia64_psr;
   51.19  	unsigned long i64;
   51.20 @@ -47,10 +55,10 @@ typedef	union {
   51.21  #define STATIC
   51.22  
   51.23  #ifdef PRIVOP_ADDR_COUNT
   51.24 -struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS] = {
   51.25 -	{ "=ifa", { 0 }, { 0 }, 0 },
   51.26 +struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS+1] = {
   51.27 +	{ "=ifa",  { 0 }, { 0 }, 0 },
   51.28  	{ "thash", { 0 }, { 0 }, 0 },
   51.29 -	0
   51.30 +	{ 0,       { 0 }, { 0 }, 0 }
   51.31  };
   51.32  extern void privop_count_addr(unsigned long addr, int inst);
   51.33  #define	PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst)
   51.34 @@ -81,7 +89,7 @@ unsigned long vcpu_verbose = 0;
   51.35  **************************************************************************/
   51.36  #ifdef XEN
   51.37  UINT64
   51.38 -vcpu_get_gr(VCPU *vcpu, unsigned reg)
   51.39 +vcpu_get_gr(VCPU *vcpu, unsigned long reg)
   51.40  {
   51.41  	REGS *regs = vcpu_regs(vcpu);
   51.42  	UINT64 val;
   51.43 @@ -90,7 +98,7 @@ vcpu_get_gr(VCPU *vcpu, unsigned reg)
   51.44  	return val;
   51.45  }
   51.46  IA64FAULT
   51.47 -vcpu_get_gr_nat(VCPU *vcpu, unsigned reg, UINT64 *val)
   51.48 +vcpu_get_gr_nat(VCPU *vcpu, unsigned long reg, UINT64 *val)
   51.49  {
   51.50  	REGS *regs = vcpu_regs(vcpu);
   51.51      int nat;
   51.52 @@ -104,7 +112,7 @@ vcpu_get_gr_nat(VCPU *vcpu, unsigned reg
   51.53  //   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
   51.54  //   IA64_NO_FAULT otherwise
   51.55  IA64FAULT
   51.56 -vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value, int nat)
   51.57 +vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value, int nat)
   51.58  {
   51.59  	REGS *regs = vcpu_regs(vcpu);
   51.60  	if (!reg) return IA64_ILLOP_FAULT;
   51.61 @@ -118,7 +126,7 @@ vcpu_set_gr(VCPU *vcpu, unsigned reg, UI
   51.62  //   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
   51.63  //   IA64_NO_FAULT otherwise
   51.64  IA64FAULT
   51.65 -vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value)
   51.66 +vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value)
   51.67  {
   51.68  	REGS *regs = vcpu_regs(vcpu);
   51.69  	long sof = (regs->cr_ifs) & 0x7f;
   51.70 @@ -375,7 +383,7 @@ BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
   51.71  UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
   51.72  {
   51.73  	UINT64 dcr = PSCBX(vcpu,dcr);
   51.74 -	PSR psr = {0};
   51.75 +	PSR psr;
   51.76  
   51.77  	//printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
   51.78  	psr.i64 = prevpsr;
   51.79 @@ -397,7 +405,7 @@ UINT64 vcpu_get_ipsr_int_state(VCPU *vcp
   51.80  
   51.81  IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
   51.82  {
   51.83 -extern unsigned long privop_trace;
   51.84 +//extern unsigned long privop_trace;
   51.85  //privop_trace=0;
   51.86  //verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
   51.87  	// Reads of cr.dcr on Xen always have the sign bit set, so
   51.88 @@ -525,7 +533,7 @@ IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT6
   51.89  
   51.90  IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
   51.91  {
   51.92 -extern unsigned long privop_trace;
   51.93 +//extern unsigned long privop_trace;
   51.94  //privop_trace=1;
   51.95  	// Reads of cr.dcr on SP always have the sign bit set, so
   51.96  	// a domain can differentiate whether it is running on SP or not
   51.97 @@ -643,7 +651,6 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
   51.98  	    set_bit(vector,VCPU(vcpu,irr));
   51.99      } else
  51.100      {
  51.101 -	/* if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; */
  51.102  	if (test_bit(vector,PSCBX(vcpu,irr))) {
  51.103  //printf("vcpu_pend_interrupt: overrun\n");
  51.104  	}
  51.105 @@ -683,7 +690,6 @@ check_start:
  51.106  		vcpu_pend_interrupt(vcpu, vcpu->vcpu_info->arch.evtchn_vector);
  51.107  
  51.108  	p = &PSCBX(vcpu,irr[3]);
  51.109 -	/* q = &PSCB(vcpu,delivery_mask[3]); */
  51.110  	r = &PSCBX(vcpu,insvc[3]);
  51.111  	for (i = 3; ; p--, q--, r--, i--) {
  51.112  		bits = *p /* & *q */;
  51.113 @@ -747,7 +753,7 @@ UINT64 vcpu_deliverable_timer(VCPU *vcpu
  51.114  
  51.115  IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
  51.116  {
  51.117 -extern unsigned long privop_trace;
  51.118 +//extern unsigned long privop_trace;
  51.119  //privop_trace=1;
  51.120  	//TODO: Implement this
  51.121  	printf("vcpu_get_lid: WARNING: Getting cr.lid always returns zero\n");
  51.122 @@ -764,9 +770,10 @@ IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT6
  51.123  #define HEARTBEAT_FREQ 16	// period in seconds
  51.124  #ifdef HEARTBEAT_FREQ
  51.125  #define N_DOMS 16	// period in seconds
  51.126 +#if 0
  51.127  	static long count[N_DOMS] = { 0 };
  51.128 +#endif
  51.129  	static long nonclockcount[N_DOMS] = { 0 };
  51.130 -	REGS *regs = vcpu_regs(vcpu);
  51.131  	unsigned domid = vcpu->domain->domain_id;
  51.132  #endif
  51.133  #ifdef IRQ_DEBUG
  51.134 @@ -803,7 +810,7 @@ IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT6
  51.135  	// getting ivr has "side effects"
  51.136  #ifdef IRQ_DEBUG
  51.137  	if (firsttime[vector]) {
  51.138 -		printf("*** First get_ivr on vector=%d,itc=%lx\n",
  51.139 +		printf("*** First get_ivr on vector=%lu,itc=%lx\n",
  51.140  			vector,ia64_get_itc());
  51.141  		firsttime[vector]=0;
  51.142  	}
  51.143 @@ -817,7 +824,7 @@ IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT6
  51.144  
  51.145  	i = vector >> 6;
  51.146  	mask = 1L << (vector & 0x3f);
  51.147 -//printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %ld\n",vector);
  51.148 +//printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %lu\n",vector);
  51.149  	PSCBX(vcpu,insvc[i]) |= mask;
  51.150  	PSCBX(vcpu,irr[i]) &= ~mask;
  51.151  	//PSCB(vcpu,pending_interruption)--;
  51.152 @@ -978,27 +985,18 @@ IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT
  51.153  	return (IA64_NO_FAULT);
  51.154  }
  51.155  
  51.156 -// parameter is a time interval specified in cycles
  51.157 -void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
  51.158 -{
  51.159 -    PSCBX(vcpu,xen_timer_interval) = cycles;
  51.160 -    vcpu_set_next_timer(vcpu);
  51.161 -    printf("vcpu_enable_timer(%d): interval set to %d cycles\n",
  51.162 -             PSCBX(vcpu,xen_timer_interval));
  51.163 -    __set_bit(PSCB(vcpu,itv), PSCB(vcpu,delivery_mask));
  51.164 -}
  51.165 -
  51.166  IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
  51.167  {
  51.168 -extern unsigned long privop_trace;
  51.169 +//extern unsigned long privop_trace;
  51.170  //privop_trace=1;
  51.171  	if (val & 0xef00) return (IA64_ILLOP_FAULT);
  51.172  	PSCB(vcpu,itv) = val;
  51.173  	if (val & 0x10000) {
  51.174 -printf("**** vcpu_set_itv(%d): vitm=%lx, setting to 0\n",val,PSCBX(vcpu,domain_itm));
  51.175 +		printf("**** vcpu_set_itv(%lu): vitm=%lx, setting to 0\n",
  51.176 +		       val,PSCBX(vcpu,domain_itm));
  51.177  		PSCBX(vcpu,domain_itm) = 0;
  51.178  	}
  51.179 -	else vcpu_enable_timer(vcpu,1000000L);
  51.180 +	else vcpu_set_next_timer(vcpu);
  51.181  	return (IA64_NO_FAULT);
  51.182  }
  51.183  
  51.184 @@ -1080,7 +1078,6 @@ void vcpu_set_next_timer(VCPU *vcpu)
  51.185  	//UINT64 s = PSCBX(vcpu,xen_itm);
  51.186  	UINT64 s = local_cpu_data->itm_next;
  51.187  	UINT64 now = ia64_get_itc();
  51.188 -	//UINT64 interval = PSCBX(vcpu,xen_timer_interval);
  51.189  
  51.190  	/* gloss over the wraparound problem for now... we know it exists
  51.191  	 * but it doesn't matter right now */
  51.192 @@ -1103,7 +1100,7 @@ void vcpu_set_next_timer(VCPU *vcpu)
  51.193  
  51.194  IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
  51.195  {
  51.196 -	UINT now = ia64_get_itc();
  51.197 +	//UINT now = ia64_get_itc();
  51.198  
  51.199  	//if (val < now) val = now + 1000;
  51.200  //printf("*** vcpu_set_itm: called with %lx\n",val);
  51.201 @@ -1114,7 +1111,10 @@ IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT6
  51.202  
  51.203  IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
  51.204  {
  51.205 -
  51.206 +#define DISALLOW_SETTING_ITC_FOR_NOW
  51.207 +#ifdef DISALLOW_SETTING_ITC_FOR_NOW
  51.208 +printf("vcpu_set_itc: Setting ar.itc is currently disabled\n");
  51.209 +#else
  51.210  	UINT64 oldnow = ia64_get_itc();
  51.211  	UINT64 olditm = PSCBX(vcpu,domain_itm);
  51.212  	unsigned long d = olditm - oldnow;
  51.213 @@ -1122,10 +1122,6 @@ IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT6
  51.214  
  51.215  	UINT64 newnow = val, min_delta;
  51.216  
  51.217 -#define DISALLOW_SETTING_ITC_FOR_NOW
  51.218 -#ifdef DISALLOW_SETTING_ITC_FOR_NOW
  51.219 -printf("vcpu_set_itc: Setting ar.itc is currently disabled\n");
  51.220 -#else
  51.221  	local_irq_disable();
  51.222  	if (olditm) {
  51.223  printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
  51.224 @@ -1293,9 +1289,6 @@ IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 v
  51.225  	return (IA64_ILLOP_FAULT);
  51.226  }
  51.227  
  51.228 -#define itir_ps(itir)	((itir >> 2) & 0x3f)
  51.229 -#define itir_mask(itir) (~((1UL << itir_ps(itir)) - 1))
  51.230 -
  51.231  unsigned long vhpt_translate_count = 0;
  51.232  unsigned long fast_vhpt_translate_count = 0;
  51.233  unsigned long recover_to_page_fault_count = 0;
  51.234 @@ -1317,7 +1310,7 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
  51.235  // this down, but since it has been apparently harmless, just flag it for now
  51.236  //			panic_domain(vcpu_regs(vcpu),
  51.237  			printk(
  51.238 -			 "vcpu_translate: bad physical address: %p\n",address);
  51.239 +			 "vcpu_translate: bad physical address: 0x%lx\n",address);
  51.240  		}
  51.241  		*pteval = (address & _PAGE_PPN_MASK) | __DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX;
  51.242  		*itir = PAGE_SHIFT << 2;
  51.243 @@ -1330,7 +1323,8 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
  51.244  		unsigned long vipsr = PSCB(vcpu,ipsr);
  51.245  		unsigned long iip = regs->cr_iip;
  51.246  		unsigned long ipsr = regs->cr_ipsr;
  51.247 -		printk("vcpu_translate: bad address %p, viip=%p, vipsr=%p, iip=%p, ipsr=%p continuing\n", address, viip, vipsr, iip, ipsr);
  51.248 +		printk("vcpu_translate: bad address 0x%lx, viip=0x%lx, vipsr=0x%lx, iip=0x%lx, ipsr=0x%lx continuing\n",
  51.249 +			address, viip, vipsr, iip, ipsr);
  51.250  	}
  51.251  
  51.252  	rr = PSCB(vcpu,rrs)[region];
  51.253 @@ -1798,7 +1792,7 @@ void vcpu_itc_no_srlz(VCPU *vcpu, UINT64
  51.254  
  51.255  IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
  51.256  {
  51.257 -	unsigned long pteval, logps = (itir >> 2) & 0x3f;
  51.258 +	unsigned long pteval, logps = itir_ps(itir);
  51.259  	unsigned long translate_domain_pte(UINT64,UINT64,UINT64);
  51.260  	BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
  51.261  
  51.262 @@ -1818,7 +1812,7 @@ IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 
  51.263  
  51.264  IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
  51.265  {
  51.266 -	unsigned long pteval, logps = (itir >> 2) & 0x3f;
  51.267 +	unsigned long pteval, logps = itir_ps(itir);
  51.268  	unsigned long translate_domain_pte(UINT64,UINT64,UINT64);
  51.269  	BOOLEAN swap_rr0 = (!(ifa>>61) && PSCB(vcpu,metaphysical_mode));
  51.270  
  51.271 @@ -1891,7 +1885,7 @@ IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 
  51.272  
  51.273  IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
  51.274  {
  51.275 -	extern ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
  51.276 +	extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
  51.277  	// FIXME: validate not flushing Xen addresses
  51.278  	// if (Xen address) return(IA64_ILLOP_FAULT);
  51.279  	// FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
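
Among the vcpu.c fixes, the privop_addr_counter hunk (51.24-31) deserves a note: the old initializer packed three entries into a PRIVOP_COUNT_NINSTS-sized array and terminated with a bare 0 instead of a full struct. A reduced sketch of the corrected sentinel pattern (the two-field struct stands in for the real privop_addr_count, and NINSTS == 2 is assumed to match the two named entries):

    struct counter {
        const char *name;
        unsigned long count;
    };

    #define NINSTS 2

    /* Old: three initializers in a NINSTS(=2)-sized array -- the bare
     * 0 sentinel overflowed the bound. */
    /* struct counter tab[NINSTS] = { { "=ifa", 0 }, { "thash", 0 }, 0 }; */

    /* New: size the array for the sentinel and spell it out in full. */
    struct counter tab[NINSTS + 1] = {
        { "=ifa",  0 },
        { "thash", 0 },
        { 0,       0 }          /* terminator */
    };
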
    52.1 --- a/xen/arch/ia64/xen/vhpt.c	Thu Mar 02 10:59:34 2006 +0100
    52.2 +++ b/xen/arch/ia64/xen/vhpt.c	Thu Mar 02 11:00:49 2006 +0100
    52.3 @@ -15,12 +15,13 @@
    52.4  #include <asm/dma.h>
    52.5  #include <asm/vhpt.h>
    52.6  
    52.7 -unsigned long vhpt_paddr, vhpt_pend, vhpt_pte;
    52.8 +DEFINE_PER_CPU (unsigned long, vhpt_paddr);
    52.9 +DEFINE_PER_CPU (unsigned long, vhpt_pend);
   52.10  
   52.11  void vhpt_flush(void)
   52.12  {
   52.13  	struct vhpt_lf_entry *v = (void *)VHPT_ADDR;
   52.14 -	int i, cnt = 0;
   52.15 +	int i;
   52.16  #if 0
   52.17  static int firsttime = 2;
   52.18  
   52.19 @@ -47,7 +48,6 @@ printf("vhpt_flush: ********************
   52.20  #ifdef VHPT_GLOBAL
   52.21  void vhpt_flush_address(unsigned long vadr, unsigned long addr_range)
   52.22  {
   52.23 -	unsigned long ps;
   52.24  	struct vhpt_lf_entry *vlfe;
   52.25  
   52.26  	if ((vadr >> 61) == 7) {
   52.27 @@ -77,12 +77,12 @@ void vhpt_flush_address(unsigned long va
   52.28  }
   52.29  #endif
   52.30  
   52.31 -void vhpt_map(void)
   52.32 +static void vhpt_map(unsigned long pte)
   52.33  {
   52.34  	unsigned long psr;
   52.35  
   52.36  	psr = ia64_clear_ic();
   52.37 -	ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, vhpt_pte, VHPT_SIZE_LOG2);
   52.38 +	ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, pte, VHPT_SIZE_LOG2);
   52.39  	ia64_set_psr(psr);
   52.40  	ia64_srlz_i();
   52.41  }
   52.42 @@ -121,29 +121,35 @@ void vhpt_multiple_insert(unsigned long 
   52.43  
   52.44  void vhpt_init(void)
   52.45  {
   52.46 -	unsigned long vhpt_total_size, vhpt_alignment, vhpt_imva;
   52.47 +	unsigned long vhpt_total_size, vhpt_alignment;
   52.48 +	unsigned long paddr, pte;
   52.49 +	struct page_info *page;
   52.50  #if !VHPT_ENABLED
   52.51  	return;
   52.52  #endif
   52.53  	// allocate a huge chunk of physical memory.... how???
   52.54  	vhpt_total_size = 1 << VHPT_SIZE_LOG2;	// 4MB, 16MB, 64MB, or 256MB
   52.55  	vhpt_alignment = 1 << VHPT_SIZE_LOG2;	// 4MB, 16MB, 64MB, or 256MB
   52.56 -	printf("vhpt_init: vhpt size=%p, align=%p\n",vhpt_total_size,vhpt_alignment);
   52.57 +	printf("vhpt_init: vhpt size=0x%lx, align=0x%lx\n",
   52.58 +		vhpt_total_size, vhpt_alignment);
   52.59  	/* This allocation only holds true if vhpt table is unique for
   52.60  	 * all domains. Or else later new vhpt table should be allocated
   52.61  	 * from domain heap when each domain is created. Assume xen buddy
   52.62  	 * allocator can provide natural aligned page by order?
   52.63  	 */
   52.64 -	vhpt_imva = alloc_xenheap_pages(VHPT_SIZE_LOG2 - PAGE_SHIFT);
   52.65 -	if (!vhpt_imva) {
   52.66 +//	vhpt_imva = alloc_xenheap_pages(VHPT_SIZE_LOG2 - PAGE_SHIFT);
   52.67 +	page = alloc_domheap_pages(NULL, VHPT_SIZE_LOG2 - PAGE_SHIFT, 0);
   52.68 +	if (!page) {
   52.69  		printf("vhpt_init: can't allocate VHPT!\n");
   52.70  		while(1);
   52.71  	}
   52.72 -	vhpt_paddr = __pa(vhpt_imva);
   52.73 -	vhpt_pend = vhpt_paddr + vhpt_total_size - 1;
   52.74 -	printf("vhpt_init: vhpt paddr=%p, end=%p\n",vhpt_paddr,vhpt_pend);
   52.75 -	vhpt_pte = pte_val(pfn_pte(vhpt_paddr >> PAGE_SHIFT, PAGE_KERNEL));
   52.76 -	vhpt_map();
   52.77 +	paddr = page_to_maddr(page);
   52.78 +	__get_cpu_var(vhpt_paddr) = paddr;
   52.79 +	__get_cpu_var(vhpt_pend) = paddr + vhpt_total_size - 1;
   52.80 +	printf("vhpt_init: vhpt paddr=0x%lx, end=0x%lx\n",
   52.81 +		paddr, __get_cpu_var(vhpt_pend));
   52.82 +	pte = pte_val(pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL));
   52.83 +	vhpt_map(pte);
   52.84  	ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
   52.85  		VHPT_ENABLED);
   52.86  	vhpt_flush();
   52.87 @@ -167,6 +173,6 @@ int dump_vhpt_stats(char *buf)
   52.88  		if (v->CChain) vhpt_chains++;
   52.89  	}
   52.90  	s += sprintf(s,"VHPT usage: %ld/%ld (%ld collision chains)\n",
   52.91 -		vhpt_valid,VHPT_NUM_ENTRIES,vhpt_chains);
   52.92 +		vhpt_valid, (unsigned long) VHPT_NUM_ENTRIES, vhpt_chains);
   52.93  	return s - buf;
   52.94  }
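
vhpt.c converts the VHPT bookkeeping from globals to per-CPU variables so each processor can own its own hash table -- a prerequisite for the CONFIG_XEN_SMP flip in config.h below. The access idiom, as a minimal sketch (the helper function is hypothetical):

    /* One instance of each variable per processor. */
    DEFINE_PER_CPU(unsigned long, vhpt_paddr);
    DEFINE_PER_CPU(unsigned long, vhpt_pend);

    /* Hypothetical helper showing the accessor. */
    static void record_vhpt(unsigned long paddr, unsigned long size)
    {
        /* __get_cpu_var() resolves to the executing CPU's copy; no
         * locking is needed since only the owning CPU writes it. */
        __get_cpu_var(vhpt_paddr) = paddr;
        __get_cpu_var(vhpt_pend)  = paddr + size - 1;
    }
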
    53.1 --- a/xen/arch/ia64/xen/xenirq.c	Thu Mar 02 10:59:34 2006 +0100
    53.2 +++ b/xen/arch/ia64/xen/xenirq.c	Thu Mar 02 11:00:49 2006 +0100
    53.3 @@ -24,7 +24,7 @@ xen_debug_irq(ia64_vector vector, struct
    53.4  		firstirq = 0;
    53.5  	}
    53.6  	if (firsttime[vector]) {
    53.7 -		printf("**** (entry) First received int on vector=%d,itc=%lx\n",
    53.8 +		printf("**** (entry) First received int on vector=%lu,itc=%lx\n",
    53.9  			(unsigned long) vector, ia64_get_itc());
   53.10  		firsttime[vector] = 0;
   53.11  	}
   53.12 @@ -38,13 +38,13 @@ xen_do_IRQ(ia64_vector vector)
   53.13  		extern void vcpu_pend_interrupt(void *, int);
   53.14  #if 0
   53.15  		if (firsttime[vector]) {
   53.16 -			printf("**** (iterate) First received int on vector=%d,itc=%lx\n",
   53.17 -			(unsigned long) vector, ia64_get_itc());
   53.18 +			printf("**** (iterate) First received int on vector=%lu,itc=%lx\n",
   53.19 +				(unsigned long) vector, ia64_get_itc());
   53.20  			firsttime[vector] = 0;
   53.21  		}
   53.22  		if (firstpend[vector]) {
   53.23 -			printf("**** First pended int on vector=%d,itc=%lx\n",
   53.24 -				(unsigned long) vector,ia64_get_itc());
   53.25 +			printf("**** First pended int on vector=%lu,itc=%lx\n",
   53.26 +				(unsigned long) vector, ia64_get_itc());
   53.27  			firstpend[vector] = 0;
   53.28  		}
   53.29  #endif
   53.30 @@ -59,7 +59,7 @@ xen_do_IRQ(ia64_vector vector)
   53.31  /*
   53.32   * Exit an interrupt context. Process softirqs if needed and possible:
   53.33   */
   53.34 -void xen_irq_exit(struct pt_regs *regs)
   53.35 +void irq_exit(void)
   53.36  {
   53.37  	sub_preempt_count(IRQ_EXIT_OFFSET);
   53.38  }
    54.1 --- a/xen/arch/ia64/xen/xenmem.c	Thu Mar 02 10:59:34 2006 +0100
    54.2 +++ b/xen/arch/ia64/xen/xenmem.c	Thu Mar 02 11:00:49 2006 +0100
    54.3 @@ -34,7 +34,6 @@ unsigned long mpt_table_size;
    54.4  void
    54.5  paging_init (void)
    54.6  {
    54.7 -	struct page_info *pg;
    54.8  	unsigned int mpt_order;
    54.9  	/* Create machine to physical mapping table
   54.10  	 * NOTE: similar to frame table, later we may need virtually
   54.11 @@ -61,7 +60,7 @@ paging_init (void)
   54.12  #define FT_ALIGN_SIZE	(16UL << 20)
   54.13  void __init init_frametable(void)
   54.14  {
   54.15 -	unsigned long i, pfn;
   54.16 +	unsigned long pfn;
   54.17  	frame_table_size = max_page * sizeof(struct page_info);
   54.18  	frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
   54.19  
    55.1 --- a/xen/arch/ia64/xen/xenmisc.c	Thu Mar 02 10:59:34 2006 +0100
    55.2 +++ b/xen/arch/ia64/xen/xenmisc.c	Thu Mar 02 11:00:49 2006 +0100
    55.3 @@ -19,6 +19,8 @@
    55.4  #include <public/sched.h>
    55.5  #include <asm/vhpt.h>
    55.6  #include <asm/debugger.h>
    55.7 +#include <asm/vmx.h>
    55.8 +#include <asm/vmx_vcpu.h>
    55.9  
   55.10  efi_memory_desc_t ia64_efi_io_md;
   55.11  EXPORT_SYMBOL(ia64_efi_io_md);
   55.12 @@ -26,6 +28,10 @@ unsigned long wait_init_idle;
   55.13  int phys_proc_id[NR_CPUS];
   55.14  unsigned long loops_per_jiffy = (1<<12);	// from linux/init/main.c
   55.15  
    55.16 +/* FIXME: where should these declarations be? */
   55.17 +extern void load_region_regs(struct vcpu *);
   55.18 +extern void show_registers(struct pt_regs *regs);
   55.19 +
   55.20  void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
   55.21  void ia64_mca_cpu_init(void *x) { }
   55.22  void ia64_patch_mckinley_e9(unsigned long a, unsigned long b) { }
   55.23 @@ -168,7 +174,11 @@ void __free_pages(struct page *page, uns
   55.24  
   55.25  void *pgtable_quicklist_alloc(void)
   55.26  {
   55.27 -	return alloc_xenheap_pages(0);
   55.28 +    void *p;
   55.29 +    p = alloc_xenheap_pages(0);
   55.30 +    if (p) 
   55.31 +        clear_page(p);
   55.32 +    return p;
   55.33  }
   55.34  
   55.35  void pgtable_quicklist_free(void *pgtable_entry)
   55.36 @@ -247,6 +257,7 @@ ia64_peek (struct task_struct *child, st
   55.37  	   unsigned long user_rbs_end, unsigned long addr, long *val)
   55.38  {
   55.39  	printk("ia64_peek: called, not implemented\n");
   55.40 +	return 1;
   55.41  }
   55.42  
   55.43  long
   55.44 @@ -254,6 +265,7 @@ ia64_poke (struct task_struct *child, st
   55.45  	   unsigned long user_rbs_end, unsigned long addr, long val)
   55.46  {
   55.47  	printk("ia64_poke: called, not implemented\n");
   55.48 +	return 1;
   55.49  }
   55.50  
   55.51  void
   55.52 @@ -291,6 +303,7 @@ unsigned long context_switch_count = 0;
   55.53  void context_switch(struct vcpu *prev, struct vcpu *next)
   55.54  {
   55.55      uint64_t spsr;
   55.56 +    uint64_t pta;
   55.57  
   55.58      local_irq_save(spsr);
   55.59      if(VMX_DOMAIN(prev)){
   55.60 @@ -298,9 +311,9 @@ void context_switch(struct vcpu *prev, s
   55.61      }
   55.62  	context_switch_count++;
   55.63  	switch_to(prev,next,prev);
   55.64 -    if(VMX_DOMAIN(current)){
   55.65 -        vtm_domain_in(current);
   55.66 -    }
   55.67 +//    if(VMX_DOMAIN(current)){
   55.68 +//        vtm_domain_in(current);
   55.69 +//    }
   55.70  
   55.71  // leave this debug for now: it acts as a heartbeat when more than
   55.72  // one domain is active
   55.73 @@ -309,22 +322,30 @@ static long cnt[16] = { 50,50,50,50,50,5
   55.74  static int i = 100;
   55.75  int id = ((struct vcpu *)current)->domain->domain_id & 0xf;
   55.76  if (!cnt[id]--) { printk("%x",id); cnt[id] = 500000; }
   55.77 -if (!i--) { printk("+",id); i = 1000000; }
   55.78 +if (!i--) { printk("+"); i = 1000000; }
   55.79  }
   55.80  
   55.81      if (VMX_DOMAIN(current)){
   55.82 +        vtm_domain_in(current);
   55.83  		vmx_load_all_rr(current);
   55.84      }else{
   55.85 -	extern char ia64_ivt;
   55.86 -	ia64_set_iva(&ia64_ivt);
   55.87 -	ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
   55.88 -		VHPT_ENABLED);
   55.89 +    	extern char ia64_ivt;
   55.90 +    	ia64_set_iva(&ia64_ivt);
   55.91      	if (!is_idle_domain(current->domain)) {
   55.92 +        	ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
   55.93 +		        VHPT_ENABLED);
   55.94  	    	load_region_regs(current);
   55.95  	    	vcpu_load_kernel_regs(current);
   55.96 -		    if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
   55.97 -    	}
   55.98 -	    if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
   55.99 +		    if (vcpu_timer_expired(current))
  55.100 +                vcpu_pend_timer(current);
  55.101 +    	}else {
   55.102 +        /* When switching to the idle domain, we only need to disable
   55.103 +        * the VHPT walker. All accesses within the idle context will
   55.104 +        * then be handled by the TR mapping and the identity mapping.
   55.105 +        */
  55.106 +           pta = ia64_get_pta();
  55.107 +           ia64_set_pta(pta & ~VHPT_ENABLED);
  55.108 +        }
  55.109      }
  55.110  
  55.111      local_irq_restore(spsr);
  55.112 @@ -345,12 +366,12 @@ void panic_domain(struct pt_regs *regs, 
  55.113  	va_list args;
  55.114  	char buf[128];
  55.115  	struct vcpu *v = current;
  55.116 -	static volatile int test = 1;	// so can continue easily in debug
  55.117 -	extern spinlock_t console_lock;
  55.118 -	unsigned long flags;
  55.119 +//	static volatile int test = 1;	// so can continue easily in debug
  55.120 +//	extern spinlock_t console_lock;
  55.121 +//	unsigned long flags;
  55.122      
  55.123  loop:
  55.124 -	printf("$$$$$ PANIC in domain %d (k6=%p): ",
  55.125 +	printf("$$$$$ PANIC in domain %d (k6=0x%lx): ",
  55.126  		v->domain->domain_id, 
  55.127  		__get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT]);
  55.128  	va_start(args, fmt);
  55.129 @@ -365,7 +386,7 @@ loop:
  55.130  	}
  55.131  	domain_pause_by_systemcontroller(current->domain);
  55.132  	v->domain->shutdown_code = SHUTDOWN_crash;
  55.133 -	set_bit(_DOMF_shutdown, v->domain->domain_flags);
  55.134 +	set_bit(_DOMF_shutdown, &v->domain->domain_flags);
  55.135  	if (v->domain->domain_id == 0) {
  55.136  		int i = 1000000000L;
  55.137  		// if domain0 crashes, just periodically print out panic
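
In context_switch() (55.73-109), the idle-domain path no longer reprograms the whole PTA; it just clears the walker-enable bit, keeping the base and size fields intact for the next real vcpu. The two paths, extracted:

    uint64_t pta;

    /* Running a real vcpu: point cr.pta at the VHPT and enable it. */
    ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
                 VHPT_ENABLED);

    /* Entering the idle domain: only drop VHPT_ENABLED; idle-context
     * accesses are served by the TR and identity mappings. */
    pta = ia64_get_pta();
    ia64_set_pta(pta & ~VHPT_ENABLED);
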
    56.1 --- a/xen/arch/ia64/xen/xensetup.c	Thu Mar 02 10:59:34 2006 +0100
    56.2 +++ b/xen/arch/ia64/xen/xensetup.c	Thu Mar 02 11:00:49 2006 +0100
    56.3 @@ -14,7 +14,7 @@
    56.4  #include <public/version.h>
    56.5  //#include <xen/delay.h>
    56.6  #include <xen/compile.h>
    56.7 -//#include <xen/console.h>
    56.8 +#include <xen/console.h>
    56.9  #include <xen/serial.h>
   56.10  #include <xen/trace.h>
   56.11  #include <asm/meminit.h>
   56.12 @@ -22,6 +22,7 @@
   56.13  #include <asm/setup.h>
   56.14  #include <xen/string.h>
   56.15  #include <asm/vmx.h>
   56.16 +#include <linux/efi.h>
   56.17  
   56.18  unsigned long xenheap_phys_end;
   56.19  
   56.20 @@ -31,14 +32,21 @@ struct vcpu *idle_vcpu[NR_CPUS];
   56.21  
   56.22  cpumask_t cpu_present_map;
   56.23  
   56.24 -#ifdef CLONE_DOMAIN0
   56.25 -struct domain *clones[CLONE_DOMAIN0];
   56.26 -#endif
   56.27  extern unsigned long domain0_ready;
   56.28  
   56.29  int find_max_pfn (unsigned long, unsigned long, void *);
   56.30  void start_of_day(void);
   56.31  
    56.32 +/* FIXME: which header should these declarations go in? */
   56.33 +extern long is_platform_hp_ski(void);
   56.34 +extern void early_setup_arch(char **);
   56.35 +extern void late_setup_arch(char **);
   56.36 +extern void hpsim_serial_init(void);
   56.37 +extern void alloc_dom0(void);
   56.38 +extern void setup_per_cpu_areas(void);
   56.39 +extern void mem_init(void);
   56.40 +extern void init_IRQ(void);
   56.41 +
   56.42  /* opt_nosmp: If true, secondary processors are ignored. */
   56.43  static int opt_nosmp = 0;
   56.44  boolean_param("nosmp", opt_nosmp);
   56.45 @@ -147,13 +155,30 @@ struct ns16550_defaults ns16550_com2 = {
   56.46      .parity    = 'n',
   56.47      .stop_bits = 1
   56.48  };
    56.49 +/*  This is a wrapper around init_domheap_pages();
    56.50 + *  memory beyond ((max_page+1)<<PAGE_SHIFT) will not be reclaimed.
    56.51 + *  This function will go away when the virtual memmap/discontig
    56.52 + *  memory issues are solved.
    56.53 + */
   56.54 +void init_domheap_pages_wrapper(unsigned long ps, unsigned long pe)
   56.55 +{
   56.56 +    unsigned long s_nrm, e_nrm, max_mem;
   56.57 +    max_mem = (max_page+1)<<PAGE_SHIFT;
   56.58 +    s_nrm = (ps+PAGE_SIZE-1)&PAGE_MASK;
   56.59 +    e_nrm = pe&PAGE_MASK;
   56.60 +    s_nrm = min(s_nrm, max_mem);
   56.61 +    e_nrm = min(e_nrm, max_mem);
   56.62 +    if(s_nrm < e_nrm)
   56.63 +         init_domheap_pages(s_nrm, e_nrm);
   56.64 +}
   56.65 +
   56.66 +
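
The clamping above is easiest to follow with numbers. A standalone sketch with hypothetical values (4KB pages, max_page = 0xFFFF):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long max_page = 0xFFFF;            /* hypothetical */
        unsigned long ps = 0x12345, pe = 0x1000000000UL;
        unsigned long max_mem = (max_page + 1) << PAGE_SHIFT;
        unsigned long s_nrm = (ps + PAGE_SIZE - 1) & PAGE_MASK; /* 0x13000 */
        unsigned long e_nrm = pe & PAGE_MASK;

        if (s_nrm > max_mem) s_nrm = max_mem;
        if (e_nrm > max_mem) e_nrm = max_mem;       /* capped: 0x10000000 */
        if (s_nrm < e_nrm)
            printf("would free [0x%lx, 0x%lx)\n", s_nrm, e_nrm);
        return 0;
    }
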
   56.67  
   56.68  void start_kernel(void)
   56.69  {
   56.70      unsigned char *cmdline;
   56.71      void *heap_start;
   56.72 -    int i;
   56.73 -    unsigned long max_mem, nr_pages, firsthole_start;
   56.74 +    unsigned long nr_pages, firsthole_start;
   56.75      unsigned long dom0_memory_start, dom0_memory_size;
   56.76      unsigned long dom0_initrd_start, dom0_initrd_size;
   56.77      unsigned long initial_images_start, initial_images_end;
   56.78 @@ -163,7 +188,7 @@ void start_kernel(void)
   56.79      /* Kernel may be relocated by EFI loader */
   56.80      xen_pstart = ia64_tpa(KERNEL_START);
   56.81  
   56.82 -    early_setup_arch(&cmdline);
   56.83 +    early_setup_arch((char **) &cmdline);
   56.84  
   56.85      /* We initialise the serial devices very early so we can get debugging. */
   56.86      if (running_on_sim) hpsim_serial_init();
   56.87 @@ -251,9 +276,9 @@ void start_kernel(void)
   56.88  	max_page);
   56.89  
   56.90      heap_start = memguard_init(ia64_imva(&_end));
   56.91 -    printf("Before heap_start: 0x%lx\n", heap_start);
   56.92 +    printf("Before heap_start: %p\n", heap_start);
   56.93      heap_start = __va(init_boot_allocator(__pa(heap_start)));
   56.94 -    printf("After heap_start: 0x%lx\n", heap_start);
   56.95 +    printf("After heap_start: %p\n", heap_start);
   56.96  
   56.97      reserve_memory();
   56.98  
   56.99 @@ -284,7 +309,7 @@ printk("About to call scheduler_init()\n
  56.100      idle_domain = domain_create(IDLE_DOMAIN_ID, 0);
  56.101      BUG_ON(idle_domain == NULL);
  56.102  
  56.103 -    late_setup_arch(&cmdline);
  56.104 +    late_setup_arch((char **) &cmdline);
  56.105      setup_per_cpu_areas();
  56.106      mem_init();
  56.107  
  56.108 @@ -301,6 +326,8 @@ printk("About to call timer_init()\n");
  56.109  #endif
  56.110  
  56.111  #ifdef CONFIG_SMP
  56.112 +    int i;
  56.113 +
  56.114      if ( opt_nosmp )
  56.115      {
  56.116          max_cpus = 0;
  56.117 @@ -342,16 +369,6 @@ printk("About to call sort_main_extable(
  56.118  printk("About to call domain_create()\n");
  56.119      dom0 = domain_create(0, 0);
  56.120  
  56.121 -#ifdef CLONE_DOMAIN0
  56.122 -    {
  56.123 -    int i;
  56.124 -    for (i = 0; i < CLONE_DOMAIN0; i++) {
  56.125 -	clones[i] = domain_create(i+1, 0);
  56.126 -        if ( clones[i] == NULL )
  56.127 -            panic("Error creating domain0 clone %d\n",i);
  56.128 -    }
  56.129 -    }
  56.130 -#endif
  56.131      if ( dom0 == NULL )
  56.132          panic("Error creating domain 0\n");
  56.133  
  56.134 @@ -362,9 +379,9 @@ printk("About to call domain_create()\n"
  56.135       * above our heap. The second module, if present, is an initrd ramdisk.
  56.136       */
  56.137      printk("About to call construct_dom0()\n");
  56.138 -    dom0_memory_start = __va(initial_images_start);
  56.139 +    dom0_memory_start = (unsigned long) __va(initial_images_start);
  56.140      dom0_memory_size = ia64_boot_param->domain_size;
  56.141 -    dom0_initrd_start = __va(initial_images_start +
  56.142 +    dom0_initrd_start = (unsigned long) __va(initial_images_start +
  56.143  			     PAGE_ALIGN(ia64_boot_param->domain_size));
  56.144      dom0_initrd_size = ia64_boot_param->initrd_size;
  56.145   
  56.146 @@ -376,29 +393,15 @@ printk("About to call domain_create()\n"
  56.147      /* PIN domain0 on CPU 0.  */
  56.148      dom0->vcpu[0]->cpu_affinity = cpumask_of_cpu(0);
  56.149  
  56.150 -#ifdef CLONE_DOMAIN0
  56.151 -    {
  56.152 -    int i;
  56.153 -    dom0_memory_start = __va(ia64_boot_param->domain_start);
  56.154 -    dom0_memory_size = ia64_boot_param->domain_size;
  56.155 -
  56.156 -    for (i = 0; i < CLONE_DOMAIN0; i++) {
  56.157 -      printk("CONSTRUCTING DOMAIN0 CLONE #%d\n",i+1);
  56.158 -      if ( construct_domU(clones[i], dom0_memory_start, dom0_memory_size,
  56.159 -			  dom0_initrd_start,dom0_initrd_size,
  56.160 -			  0) != 0)
  56.161 -            panic("Could not set up DOM0 clone %d\n",i);
  56.162 -    }
  56.163 -    }
  56.164 -#endif
  56.165 -
  56.166      /* The stash space for the initial kernel image can now be freed up. */
  56.167 -    init_domheap_pages(ia64_boot_param->domain_start,
  56.168 -                       ia64_boot_param->domain_size);
   56.169 +    /* init_domheap_pages_wrapper is a temporary solution; see the
   56.170 +     * description of this function */
  56.171 +    init_domheap_pages_wrapper(ia64_boot_param->domain_start,
  56.172 +           ia64_boot_param->domain_start+ia64_boot_param->domain_size);
  56.173      /* throw away initrd area passed from elilo */
  56.174      if (ia64_boot_param->initrd_size) {
  56.175 -        init_domheap_pages(ia64_boot_param->initrd_start,
  56.176 -                          ia64_boot_param->initrd_size);
  56.177 +        init_domheap_pages_wrapper(ia64_boot_param->initrd_start,
  56.178 +           ia64_boot_param->initrd_start+ia64_boot_param->initrd_size);
  56.179      }
  56.180  
  56.181      if (!running_on_sim)  // slow on ski and pages are pre-initialized to zero
  56.182 @@ -412,13 +415,6 @@ printk("About to call init_trace_bufs()\
  56.183      console_endboot(cmdline && strstr(cmdline, "tty0"));
  56.184  #endif
  56.185  
  56.186 -#ifdef CLONE_DOMAIN0
  56.187 -    {
  56.188 -    int i;
  56.189 -    for (i = 0; i < CLONE_DOMAIN0; i++)
  56.190 -	domain_unpause_by_systemcontroller(clones[i]);
  56.191 -    }
  56.192 -#endif
  56.193      domain0_ready = 1;
  56.194  
  56.195      local_irq_enable();
    57.1 --- a/xen/arch/ia64/xen/xentime.c	Thu Mar 02 10:59:34 2006 +0100
    57.2 +++ b/xen/arch/ia64/xen/xentime.c	Thu Mar 02 11:00:49 2006 +0100
    57.3 @@ -30,6 +30,9 @@
    57.4  #include <linux/jiffies.h>	// not included by xen/sched.h
    57.5  #include <xen/softirq.h>
    57.6  
     57.7 +/* FIXME: where should these declarations be? */
    57.8 +extern void ia64_init_itm(void);
    57.9 +
   57.10  seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
   57.11  
   57.12  #define TIME_KEEPER_ID  0
   57.13 @@ -70,7 +73,7 @@ static inline u64 get_time_delta(void)
   57.14  s_time_t get_s_time(void)
   57.15  {
   57.16      s_time_t now;
   57.17 -    unsigned long flags, seq;
   57.18 +    unsigned long seq;
   57.19  
   57.20      do {
   57.21  	seq = read_seqbegin(&xtime_lock);
   57.22 @@ -202,7 +205,7 @@ xen_timer_interrupt (int irq, void *dev_
   57.23  }
   57.24  
   57.25  static struct irqaction xen_timer_irqaction = {
   57.26 -	.handler =	xen_timer_interrupt,
   57.27 +	.handler =	(void *) xen_timer_interrupt,
   57.28  	.name =		"timer"
   57.29  };
   57.30  
   57.31 @@ -217,8 +220,6 @@ ia64_time_init (void)
   57.32  /* Late init function (after all CPUs are booted). */
   57.33  int __init init_xen_time()
   57.34  {
   57.35 -    struct timespec tm;
   57.36 -
   57.37      ia64_time_init();
   57.38      itc_scale  = 1000000000UL << 32 ;
   57.39      itc_scale /= local_cpu_data->itc_freq;
   57.40 @@ -253,7 +254,7 @@ int reprogram_timer(s_time_t timeout)
   57.41  	} while (unlikely(read_seqretry(&xtime_lock, seq)));
   57.42  
   57.43  	local_cpu_data->itm_next = itm_next;
   57.44 -	vcpu_set_next_timer(current);
   57.45 +	vcpu_set_next_timer(v);
   57.46  	return 1;
   57.47  }
   57.48  
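
get_s_time() and reprogram_timer() both use the standard lockless seqlock
read pattern against xtime_lock: sample the sequence counter, compute, and
retry if a writer ran in between. A hedged sketch of the pattern (the body
of the loop is illustrative, not the exact Xen computation):

    s_time_t read_time_sample(void)
    {
        s_time_t now;
        unsigned long seq;

        do {
            seq = read_seqbegin(&xtime_lock);   /* snapshot the write generation */
            now = get_time_delta();             /* illustrative computation */
        } while (unlikely(read_seqretry(&xtime_lock, seq)));  /* writer ran: retry */

        return now;
    }
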
    58.1 --- a/xen/include/asm-ia64/config.h	Thu Mar 02 10:59:34 2006 +0100
    58.2 +++ b/xen/include/asm-ia64/config.h	Thu Mar 02 11:00:49 2006 +0100
    58.3 @@ -3,11 +3,8 @@
    58.4  
    58.5  #undef USE_PAL_EMULATOR
    58.6  // control flags for turning on/off features under test
    58.7 -#undef CLONE_DOMAIN0
    58.8 -//#define CLONE_DOMAIN0 1
    58.9  #undef DOMU_BUILD_STAGING
   58.10  #define VHPT_GLOBAL
   58.11 -#define DOMU_AUTO_RESTART
   58.12  
   58.13  #undef DEBUG_PFMON
   58.14  
   58.15 @@ -24,7 +21,7 @@
   58.16  #define CONFIG_EFI_PCDP
   58.17  #define CONFIG_SERIAL_SGI_L1_CONSOLE
   58.18  
   58.19 -#undef CONFIG_XEN_SMP
   58.20 +#define CONFIG_XEN_SMP
   58.21  
   58.22  #ifdef CONFIG_XEN_SMP
   58.23  #define CONFIG_SMP 1
   58.24 @@ -72,7 +69,7 @@ typedef unsigned long paddr_t;
   58.25  extern unsigned long xenheap_phys_end;
   58.26  extern unsigned long xen_pstart;
   58.27  extern unsigned long xenheap_size;
   58.28 -extern struct domain *dom0;
   58.29 +//extern struct domain *dom0;
   58.30  extern unsigned long dom0_start;
   58.31  extern unsigned long dom0_size;
   58.32  
   58.33 @@ -211,9 +208,9 @@ void sort_main_extable(void);
   58.34  
   58.35  // see include/asm-ia64/mm.h, handle remaining page_info uses until gone
   58.36  #define page_info page
   58.37 -
   58.38 -// see common/memory.c
   58.39 -#define set_gpfn_from_mfn(x,y)	do { } while (0)
    58.40 +// Deprecated Linux interfaces, kept here for short-term compatibility
   58.41 +#define kmalloc(s, t) xmalloc_bytes((s))
   58.42 +#define kfree(s) xfree((s))
   58.43  
   58.44  // see common/keyhandler.c
   58.45  #define	nop()	asm volatile ("nop 0")
   58.46 @@ -254,10 +251,8 @@ struct screen_info { };
   58.47  #define seq_printf(a,b...) printf(b)
   58.48  #define CONFIG_BLK_DEV_INITRD // needed to reserve memory for domain0
   58.49  
   58.50 -#define FORCE_CRASH()	asm("break 0;;");
   58.51 -
   58.52  void dummy_called(char *function);
   58.53 -#define dummy()	dummy_called(__FUNCTION__)
   58.54 +#define dummy()	dummy_called((char *) __FUNCTION__)
   58.55  
   58.56  // these declarations got moved at some point, find a better place for them
   58.57  extern int ht_per_core;
   58.58 @@ -295,14 +290,17 @@ extern int ht_per_core;
   58.59  #endif /* __XEN_IA64_CONFIG_H__ */
   58.60  
   58.61  // needed for include/xen/smp.h
   58.62 -#ifdef CONFIG_SMP
   58.63 -#define raw_smp_processor_id()	current->processor
   58.64 -#else
   58.65 -#define raw_smp_processor_id()	0
   58.66 -#endif
   58.67 +//#ifdef CONFIG_SMP
   58.68 +//#define raw_smp_processor_id()	current->processor
   58.69 +//#else
   58.70 +//#define raw_smp_processor_id()	0
   58.71 +//#endif
   58.72  
   58.73  #ifndef __ASSEMBLY__
   58.74  #include <linux/linkage.h>
   58.75 +#define FORCE_CRASH()	asm("break.m 0;;");
   58.76 +#else
   58.77 +#define FORCE_CRASH	break.m 0;;
   58.78  #endif
   58.79  
   58.80  #endif	/* _IA64_CONFIG_H_ */
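
A note on the kmalloc/kfree shims introduced above: they let ported Linux
code compile against Xen's allocator unchanged, but the gfp-flags argument
is silently discarded by the macro. An illustration (the struct and call
site are invented):

    struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);  /* expands to xmalloc_bytes(sizeof(*p));
                                                         GFP_KERNEL is dropped */
    if (p != NULL)
        kfree(p);                                     /* expands to xfree(p) */
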
    59.1 --- a/xen/include/asm-ia64/debugger.h	Thu Mar 02 10:59:34 2006 +0100
    59.2 +++ b/xen/include/asm-ia64/debugger.h	Thu Mar 02 11:00:49 2006 +0100
    59.3 @@ -40,6 +40,8 @@
    59.4  
    59.5  #include <xen/gdbstub.h>
    59.6  
    59.7 +void show_registers(struct cpu_user_regs *regs);
    59.8 +
    59.9  // NOTE: on xen struct pt_regs = struct cpu_user_regs
   59.10  //       see include/asm-ia64/linux-xen/asm/ptrace.h
   59.11  #ifdef CRASH_DEBUG
    60.1 --- a/xen/include/asm-ia64/domain.h	Thu Mar 02 10:59:34 2006 +0100
    60.2 +++ b/xen/include/asm-ia64/domain.h	Thu Mar 02 11:00:49 2006 +0100
    60.3 @@ -9,14 +9,15 @@
    60.4  #include <public/arch-ia64.h>
    60.5  #include <asm/vmx_platform.h>
    60.6  #include <xen/list.h>
    60.7 +#include <xen/cpumask.h>
    60.8  
    60.9  extern void domain_relinquish_resources(struct domain *);
   60.10  
   60.11  struct arch_domain {
   60.12      struct mm_struct *active_mm;
   60.13      struct mm_struct *mm;
   60.14 -    int metaphysical_rr0;
   60.15 -    int metaphysical_rr4;
   60.16 +    unsigned long metaphysical_rr0;
   60.17 +    unsigned long metaphysical_rr4;
   60.18      int starting_rid;		/* first RID assigned to domain */
   60.19      int ending_rid;		/* one beyond highest RID assigned to domain */
   60.20      int rid_bits;		/* number of virtual rid bits (default: 18) */
   60.21 @@ -32,11 +33,6 @@ struct arch_domain {
   60.22      u64 xen_vastart;
   60.23      u64 xen_vaend;
   60.24      u64 shared_info_va;
   60.25 -#ifdef DOMU_AUTO_RESTART
   60.26 -    u64 image_start;
   60.27 -    u64 image_len;
   60.28 -    u64 entry;
   60.29 -#endif
   60.30      unsigned long initrd_start;
   60.31      unsigned long initrd_len;
   60.32      char *cmdline;
   60.33 @@ -63,13 +59,12 @@ struct arch_vcpu {
   60.34  	unsigned long domain_itm;
   60.35  	unsigned long domain_itm_last;
   60.36  	unsigned long xen_itm;
   60.37 -	unsigned long xen_timer_interval;
   60.38  #endif
   60.39      mapped_regs_t *privregs; /* save the state of vcpu */
   60.40 -    int metaphysical_rr0;		// from arch_domain (so is pinned)
   60.41 -    int metaphysical_rr4;		// from arch_domain (so is pinned)
   60.42 -    int metaphysical_saved_rr0;		// from arch_domain (so is pinned)
   60.43 -    int metaphysical_saved_rr4;		// from arch_domain (so is pinned)
   60.44 +    unsigned long metaphysical_rr0;		// from arch_domain (so is pinned)
   60.45 +    unsigned long metaphysical_rr4;		// from arch_domain (so is pinned)
   60.46 +    unsigned long metaphysical_saved_rr0;	// from arch_domain (so is pinned)
   60.47 +    unsigned long metaphysical_saved_rr4;	// from arch_domain (so is pinned)
   60.48      int breakimm;			// from arch_domain (so is pinned)
   60.49      int starting_rid;		/* first RID assigned to domain */
   60.50      int ending_rid;		/* one beyond highest RID assigned to domain */
   60.51 @@ -112,6 +107,7 @@ struct mm_struct {
   60.52  						 * by mmlist_lock
   60.53  						 */
   60.54  
   60.55 +#ifndef XEN
   60.56  	unsigned long start_code, end_code, start_data, end_data;
   60.57  	unsigned long start_brk, brk, start_stack;
   60.58  	unsigned long arg_start, arg_end, env_start, env_end;
   60.59 @@ -121,6 +117,7 @@ struct mm_struct {
   60.60  	unsigned long saved_auxv[40]; /* for /proc/PID/auxv */
   60.61  
   60.62  	unsigned dumpable:1;
   60.63 +#endif
   60.64  #ifdef CONFIG_HUGETLB_PAGE
   60.65  	int used_hugetlb;
   60.66  #endif
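
Widening the metaphysical_rr* fields from int to unsigned long matters
because an ia64 region register value uses the full 64 bits; an int
truncates the RID and control fields. A sketch of the encoding, with field
positions taken from the IA-64 architecture (the helper is illustrative,
not part of this patch):

    /* rr layout: ve (bit 0), ps (bits 2-7), rid (bits 8-31) */
    static inline unsigned long make_rr(unsigned long rid, unsigned long ps,
                                        unsigned long ve)
    {
        return (rid << 8) | (ps << 2) | (ve & 1);
    }
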
    61.1 --- a/xen/include/asm-ia64/grant_table.h	Thu Mar 02 10:59:34 2006 +0100
    61.2 +++ b/xen/include/asm-ia64/grant_table.h	Thu Mar 02 11:00:49 2006 +0100
    61.3 @@ -17,7 +17,7 @@
    61.4  #define gnttab_shared_gmfn(d, t, i)                                     \
    61.5      ( ((d) == dom0) ?                                                   \
    61.6        ((virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i)) :              \
    61.7 -      (map_domain_page((d), 1UL<<40, virt_to_maddr((t)->shared)),       \
    61.8 +      (assign_domain_page((d), 1UL<<40, virt_to_maddr((t)->shared)),       \
    61.9         1UL << (40 - PAGE_SHIFT))                                        \
   61.10      )
   61.11  
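
The macro above reads more easily as a function. A hedged restatement,
assuming only what the hunk shows (assign_domain_page maps machine address
maddr at guest pseudo-physical address gpaddr):

    static unsigned long shared_gmfn(struct domain *d, struct grant_table *t,
                                     unsigned long i)
    {
        if (d == dom0)  /* dom0 sees grant-table frames at their machine frames */
            return (virt_to_maddr(t->shared) >> PAGE_SHIFT) + i;
        /* other domains: map the frames at pseudo-physical 1UL << 40 */
        assign_domain_page(d, 1UL << 40, virt_to_maddr(t->shared));
        return 1UL << (40 - PAGE_SHIFT);
    }
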
    62.1 --- a/xen/include/asm-ia64/hypercall.h	Thu Mar 02 10:59:34 2006 +0100
    62.2 +++ b/xen/include/asm-ia64/hypercall.h	Thu Mar 02 11:00:49 2006 +0100
    62.3 @@ -16,15 +16,4 @@ vmx_do_mmu_update(
    62.4      u64 *pdone,
    62.5      u64 foreigndom);
    62.6  
    62.7 -extern int
    62.8 -do_lock_page(
    62.9 -    VCPU *vcpu,
   62.10 -    u64 va,
   62.11 -    u64 lock);
   62.12 -
   62.13 -extern int
   62.14 -do_set_shared_page(
   62.15 -    VCPU *vcpu,
   62.16 -    u64 gpa);
   62.17 -
   62.18  #endif /* __ASM_IA64_HYPERCALL_H__ */
    63.1 --- a/xen/include/asm-ia64/linux-xen/asm/README.origin	Thu Mar 02 10:59:34 2006 +0100
    63.2 +++ b/xen/include/asm-ia64/linux-xen/asm/README.origin	Thu Mar 02 11:00:49 2006 +0100
    63.3 @@ -5,6 +5,7 @@
    63.4  # (e.g. with #ifdef XEN or XEN in a comment) so that they can be
    63.5  # easily updated to future versions of the corresponding Linux files.
    63.6  
    63.7 +cache.h		-> linux/include/asm-ia64/cache.h
    63.8  gcc_intrin.h		-> linux/include/asm-ia64/gcc_intrin.h
    63.9  ia64regs.h		-> linux/include/asm-ia64/ia64regs.h
   63.10  io.h			-> linux/include/asm-ia64/io.h
   63.11 @@ -16,6 +17,7 @@ pgalloc.h		-> linux/include/asm-ia64/pga
   63.12  pgtable.h		-> linux/include/asm-ia64/pgtable.h
   63.13  processor.h		-> linux/include/asm-ia64/processor.h
   63.14  ptrace.h		-> linux/include/asm-ia64/ptrace.h
   63.15 +smp.h			-> linux/include/asm-ia64/smp.h
   63.16  spinlock.h		-> linux/include/asm-ia64/spinlock.h
   63.17  system.h		-> linux/include/asm-ia64/system.h
   63.18  tlbflush.h		-> linux/include/asm-ia64/tlbflush.h
    64.1 --- a/xen/include/asm-ia64/linux-xen/asm/processor.h	Thu Mar 02 10:59:34 2006 +0100
    64.2 +++ b/xen/include/asm-ia64/linux-xen/asm/processor.h	Thu Mar 02 11:00:49 2006 +0100
    64.3 @@ -639,6 +639,19 @@ ia64_get_ivr (void)
    64.4  	return r;
    64.5  }
    64.6  
    64.7 +#ifdef XEN
    64.8 +/* Get the page table address and control bits.  */
    64.9 +static inline __u64
   64.10 +ia64_get_pta (void)
   64.11 +{
   64.12 +   __u64 r;
   64.13 +   ia64_srlz_d();
   64.14 +   r = ia64_getreg(_IA64_REG_CR_PTA);
   64.15 +   ia64_srlz_d();
   64.16 +   return r;
   64.17 +}
   64.18 +#endif
   64.19 +
   64.20  static inline void
   64.21  ia64_set_dbr (__u64 regnum, __u64 value)
   64.22  {
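
The new ia64_get_pta() reads cr.pta with a data-serialize on either side of
the move. For context, a sketch of how the returned value decomposes, with
field positions per the IA-64 architecture (the decoding is illustrative,
not part of this patch):

    unsigned long pta  = ia64_get_pta();
    unsigned long ve   = pta & 1;                   /* VHPT walker enable */
    unsigned long size = (pta >> 2) & 0x3f;         /* log2 of the VHPT size */
    unsigned long base = pta & ~((1UL << 15) - 1);  /* VHPT base address */
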
    65.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    65.2 +++ b/xen/include/asm-ia64/linux-xen/asm/smp.h	Thu Mar 02 11:00:49 2006 +0100
    65.3 @@ -0,0 +1,143 @@
    65.4 +/*
    65.5 + * SMP Support
    65.6 + *
    65.7 + * Copyright (C) 1999 VA Linux Systems
    65.8 + * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
    65.9 + * (c) Copyright 2001-2003, 2005 Hewlett-Packard Development Company, L.P.
   65.10 + *	David Mosberger-Tang <davidm@hpl.hp.com>
   65.11 + *	Bjorn Helgaas <bjorn.helgaas@hp.com>
   65.12 + */
   65.13 +#ifndef _ASM_IA64_SMP_H
   65.14 +#define _ASM_IA64_SMP_H
   65.15 +
   65.16 +#include <linux/config.h>
   65.17 +#include <linux/init.h>
   65.18 +#include <linux/threads.h>
   65.19 +#include <linux/kernel.h>
   65.20 +#include <linux/cpumask.h>
   65.21 +
   65.22 +#include <asm/bitops.h>
   65.23 +#include <asm/io.h>
   65.24 +#include <asm/param.h>
   65.25 +#include <asm/processor.h>
   65.26 +#include <asm/ptrace.h>
   65.27 +
   65.28 +static inline unsigned int
   65.29 +ia64_get_lid (void)
   65.30 +{
   65.31 +	union {
   65.32 +		struct {
   65.33 +			unsigned long reserved : 16;
   65.34 +			unsigned long eid : 8;
   65.35 +			unsigned long id : 8;
   65.36 +			unsigned long ignored : 32;
   65.37 +		} f;
   65.38 +		unsigned long bits;
   65.39 +	} lid;
   65.40 +
   65.41 +	lid.bits = ia64_getreg(_IA64_REG_CR_LID);
   65.42 +	return lid.f.id << 8 | lid.f.eid;
   65.43 +}
   65.44 +
   65.45 +#ifdef CONFIG_SMP
   65.46 +
   65.47 +#define XTP_OFFSET		0x1e0008
   65.48 +
   65.49 +#define SMP_IRQ_REDIRECTION	(1 << 0)
   65.50 +#define SMP_IPI_REDIRECTION	(1 << 1)
   65.51 +
   65.52 +#ifdef XEN
   65.53 +#define raw_smp_processor_id() (current->processor)
   65.54 +#else
   65.55 +#define raw_smp_processor_id() (current_thread_info()->cpu)
   65.56 +#endif
   65.57 +
   65.58 +extern struct smp_boot_data {
   65.59 +	int cpu_count;
   65.60 +	int cpu_phys_id[NR_CPUS];
   65.61 +} smp_boot_data __initdata;
   65.62 +
   65.63 +extern char no_int_routing __devinitdata;
   65.64 +
   65.65 +extern cpumask_t cpu_online_map;
   65.66 +extern cpumask_t cpu_core_map[NR_CPUS];
   65.67 +extern cpumask_t cpu_sibling_map[NR_CPUS];
   65.68 +extern int smp_num_siblings;
   65.69 +extern int smp_num_cpucores;
   65.70 +extern void __iomem *ipi_base_addr;
   65.71 +extern unsigned char smp_int_redirect;
   65.72 +
   65.73 +extern volatile int ia64_cpu_to_sapicid[];
   65.74 +#define cpu_physical_id(i)	ia64_cpu_to_sapicid[i]
   65.75 +
   65.76 +extern unsigned long ap_wakeup_vector;
   65.77 +
   65.78 +/*
   65.79 + * Function to map hard smp processor id to logical id.  Slow, so don't use this in
   65.80 + * performance-critical code.
   65.81 + */
   65.82 +static inline int
   65.83 +cpu_logical_id (int cpuid)
   65.84 +{
   65.85 +	int i;
   65.86 +
   65.87 +	for (i = 0; i < NR_CPUS; ++i)
   65.88 +		if (cpu_physical_id(i) == cpuid)
   65.89 +			break;
   65.90 +	return i;
   65.91 +}
   65.92 +
   65.93 +/*
   65.94 + * XTP control functions:
   65.95 + *	min_xtp   : route all interrupts to this CPU
   65.96 + *	normal_xtp: nominal XTP value
   65.97 + *	max_xtp   : never deliver interrupts to this CPU.
   65.98 + */
   65.99 +
  65.100 +static inline void
  65.101 +min_xtp (void)
  65.102 +{
  65.103 +	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
  65.104 +		writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
  65.105 +}
  65.106 +
  65.107 +static inline void
  65.108 +normal_xtp (void)
  65.109 +{
  65.110 +	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
  65.111 +		writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */
  65.112 +}
  65.113 +
  65.114 +static inline void
  65.115 +max_xtp (void)
  65.116 +{
  65.117 +	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
  65.118 +		writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
  65.119 +}
  65.120 +
  65.121 +#define hard_smp_processor_id()		ia64_get_lid()
  65.122 +
  65.123 +/* Upping and downing of CPUs */
  65.124 +extern int __cpu_disable (void);
  65.125 +extern void __cpu_die (unsigned int cpu);
  65.126 +extern void cpu_die (void) __attribute__ ((noreturn));
  65.127 +extern int __cpu_up (unsigned int cpu);
  65.128 +extern void __init smp_build_cpu_map(void);
  65.129 +
  65.130 +extern void __init init_smp_config (void);
  65.131 +extern void smp_do_timer (struct pt_regs *regs);
  65.132 +
  65.133 +extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
  65.134 +				     int retry, int wait);
  65.135 +extern void smp_send_reschedule (int cpu);
  65.136 +extern void lock_ipi_calllock(void);
  65.137 +extern void unlock_ipi_calllock(void);
  65.138 +extern void identify_siblings (struct cpuinfo_ia64 *);
  65.139 +
  65.140 +#else
  65.141 +
  65.142 +#define cpu_logical_id(i)		0
  65.143 +#define cpu_physical_id(i)		ia64_get_lid()
  65.144 +
  65.145 +#endif /* CONFIG_SMP */
  65.146 +#endif /* _ASM_IA64_SMP_H */
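
The only Xen-specific change in this otherwise verbatim copy of the Linux
header is raw_smp_processor_id(): under Xen the vcpu structure carries the
processor number, so current->processor replaces
current_thread_info()->cpu. As a worked example of the LID packing done by
ia64_get_lid() (register values invented): for cr.lid with id=0x12 and
eid=0x34,

    unsigned int lid = ia64_get_lid();     /* == 0x1234 in this example */
    unsigned int id  = (lid >> 8) & 0xff;  /* == 0x12 */
    unsigned int eid = lid & 0xff;         /* == 0x34 */
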
    66.1 --- a/xen/include/asm-ia64/linux-xen/asm/tlbflush.h	Thu Mar 02 10:59:34 2006 +0100
    66.2 +++ b/xen/include/asm-ia64/linux-xen/asm/tlbflush.h	Thu Mar 02 11:00:49 2006 +0100
    66.3 @@ -103,6 +103,10 @@ flush_tlb_pgtables (struct mm_struct *mm
    66.4  	 */
    66.5  }
    66.6  
    66.7 +
    66.8  #define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* XXX fix me */
    66.9 +#ifdef XEN
   66.10 +extern void flush_tlb_mask(cpumask_t mask);
   66.11 +#endif
   66.12  
   66.13  #endif /* _ASM_IA64_TLBFLUSH_H */
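
A hedged usage sketch for the flush_tlb_mask() declaration added above:
flushing the TLB on every online CPU (the call site is illustrative;
cpu_online_map is the mask of online processors):

    flush_tlb_mask(cpu_online_map);
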
    67.1 --- a/xen/include/asm-ia64/linux-xen/linux/README.origin	Thu Mar 02 10:59:34 2006 +0100
    67.2 +++ b/xen/include/asm-ia64/linux-xen/linux/README.origin	Thu Mar 02 11:00:49 2006 +0100
    67.3 @@ -5,7 +5,6 @@
    67.4  # (e.g. with #ifdef XEN or XEN in a comment) so that they can be
    67.5  # easily updated to future versions of the corresponding Linux files.
    67.6  
    67.7 -cpumask.h 		-> linux/include/linux/cpumask.h
    67.8  gfp.h	 		-> linux/include/linux/gfp.h
    67.9  hardirq.h 		-> linux/include/linux/hardirq.h
   67.10  interrupt.h 		-> linux/include/linux/interrupt.h
    68.1 --- a/xen/include/asm-ia64/linux-xen/linux/cpumask.h	Thu Mar 02 10:59:34 2006 +0100
    68.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    68.3 @@ -1,397 +0,0 @@
    68.4 -#ifndef __LINUX_CPUMASK_H
    68.5 -#define __LINUX_CPUMASK_H
    68.6 -
    68.7 -/*
    68.8 - * Cpumasks provide a bitmap suitable for representing the
    68.9 - * set of CPU's in a system, one bit position per CPU number.
   68.10 - *
   68.11 - * See detailed comments in the file linux/bitmap.h describing the
   68.12 - * data type on which these cpumasks are based.
   68.13 - *
   68.14 - * For details of cpumask_scnprintf() and cpumask_parse(),
   68.15 - * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
   68.16 - * For details of cpulist_scnprintf() and cpulist_parse(), see
   68.17 - * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
   68.18 - *
   68.19 - * The available cpumask operations are:
   68.20 - *
   68.21 - * void cpu_set(cpu, mask)		turn on bit 'cpu' in mask
   68.22 - * void cpu_clear(cpu, mask)		turn off bit 'cpu' in mask
   68.23 - * void cpus_setall(mask)		set all bits
   68.24 - * void cpus_clear(mask)		clear all bits
   68.25 - * int cpu_isset(cpu, mask)		true iff bit 'cpu' set in mask
   68.26 - * int cpu_test_and_set(cpu, mask)	test and set bit 'cpu' in mask
   68.27 - *
   68.28 - * void cpus_and(dst, src1, src2)	dst = src1 & src2  [intersection]
   68.29 - * void cpus_or(dst, src1, src2)	dst = src1 | src2  [union]
   68.30 - * void cpus_xor(dst, src1, src2)	dst = src1 ^ src2
   68.31 - * void cpus_andnot(dst, src1, src2)	dst = src1 & ~src2
   68.32 - * void cpus_complement(dst, src)	dst = ~src
   68.33 - *
   68.34 - * int cpus_equal(mask1, mask2)		Does mask1 == mask2?
   68.35 - * int cpus_intersects(mask1, mask2)	Do mask1 and mask2 intersect?
   68.36 - * int cpus_subset(mask1, mask2)	Is mask1 a subset of mask2?
    68.37 - * int cpus_empty(mask)			Is mask empty (no bits set)?
    68.38 - * int cpus_full(mask)			Is mask full (all bits set)?
    68.39 - * int cpus_weight(mask)		Hamming weight - number of set bits
   68.40 - *
   68.41 - * void cpus_shift_right(dst, src, n)	Shift right
   68.42 - * void cpus_shift_left(dst, src, n)	Shift left
   68.43 - *
   68.44 - * int first_cpu(mask)			Number lowest set bit, or NR_CPUS
   68.45 - * int next_cpu(cpu, mask)		Next cpu past 'cpu', or NR_CPUS
   68.46 - *
   68.47 - * cpumask_t cpumask_of_cpu(cpu)	Return cpumask with bit 'cpu' set
   68.48 - * CPU_MASK_ALL				Initializer - all bits set
   68.49 - * CPU_MASK_NONE			Initializer - no bits set
   68.50 - * unsigned long *cpus_addr(mask)	Array of unsigned long's in mask
   68.51 - *
   68.52 - * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
   68.53 - * int cpumask_parse(ubuf, ulen, mask)	Parse ascii string as cpumask
   68.54 - * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
   68.55 - * int cpulist_parse(buf, map)		Parse ascii string as cpulist
   68.56 - *
   68.57 - * for_each_cpu_mask(cpu, mask)		for-loop cpu over mask
   68.58 - *
   68.59 - * int num_online_cpus()		Number of online CPUs
   68.60 - * int num_possible_cpus()		Number of all possible CPUs
   68.61 - * int num_present_cpus()		Number of present CPUs
   68.62 - *
   68.63 - * int cpu_online(cpu)			Is some cpu online?
   68.64 - * int cpu_possible(cpu)		Is some cpu possible?
   68.65 - * int cpu_present(cpu)			Is some cpu present (can schedule)?
   68.66 - *
   68.67 - * int any_online_cpu(mask)		First online cpu in mask
   68.68 - *
   68.69 - * for_each_cpu(cpu)			for-loop cpu over cpu_possible_map
   68.70 - * for_each_online_cpu(cpu)		for-loop cpu over cpu_online_map
   68.71 - * for_each_present_cpu(cpu)		for-loop cpu over cpu_present_map
   68.72 - *
   68.73 - * Subtlety:
   68.74 - * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
   68.75 - *    to generate slightly worse code.  Note for example the additional
   68.76 - *    40 lines of assembly code compiling the "for each possible cpu"
   68.77 - *    loops buried in the disk_stat_read() macros calls when compiling
   68.78 - *    drivers/block/genhd.c (arch i386, CONFIG_SMP=y).  So use a simple
   68.79 - *    one-line #define for cpu_isset(), instead of wrapping an inline
   68.80 - *    inside a macro, the way we do the other calls.
   68.81 - */
   68.82 -
   68.83 -#include <linux/kernel.h>
   68.84 -#include <linux/threads.h>
   68.85 -#include <linux/bitmap.h>
   68.86 -#include <asm/bug.h>
   68.87 -
   68.88 -typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
   68.89 -extern cpumask_t _unused_cpumask_arg_;
   68.90 -
   68.91 -#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
   68.92 -static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
   68.93 -{
   68.94 -	set_bit(cpu, dstp->bits);
   68.95 -}
   68.96 -
   68.97 -#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
   68.98 -static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
   68.99 -{
  68.100 -	clear_bit(cpu, dstp->bits);
  68.101 -}
  68.102 -
  68.103 -#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
  68.104 -static inline void __cpus_setall(cpumask_t *dstp, int nbits)
  68.105 -{
  68.106 -	bitmap_fill(dstp->bits, nbits);
  68.107 -}
  68.108 -
  68.109 -#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
  68.110 -static inline void __cpus_clear(cpumask_t *dstp, int nbits)
  68.111 -{
  68.112 -	bitmap_zero(dstp->bits, nbits);
  68.113 -}
  68.114 -
  68.115 -/* No static inline type checking - see Subtlety (1) above. */
  68.116 -#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
  68.117 -
  68.118 -#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
  68.119 -static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
  68.120 -{
  68.121 -	return test_and_set_bit(cpu, addr->bits);
  68.122 -}
  68.123 -
  68.124 -#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
  68.125 -static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
  68.126 -					const cpumask_t *src2p, int nbits)
  68.127 -{
  68.128 -	bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
  68.129 -}
  68.130 -
  68.131 -#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
  68.132 -static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
  68.133 -					const cpumask_t *src2p, int nbits)
  68.134 -{
  68.135 -	bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
  68.136 -}
  68.137 -
  68.138 -#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
  68.139 -static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
  68.140 -					const cpumask_t *src2p, int nbits)
  68.141 -{
  68.142 -	bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
  68.143 -}
  68.144 -
  68.145 -#define cpus_andnot(dst, src1, src2) \
  68.146 -				__cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
  68.147 -static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
  68.148 -					const cpumask_t *src2p, int nbits)
  68.149 -{
  68.150 -	bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
  68.151 -}
  68.152 -
  68.153 -#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
  68.154 -static inline void __cpus_complement(cpumask_t *dstp,
  68.155 -					const cpumask_t *srcp, int nbits)
  68.156 -{
  68.157 -	bitmap_complement(dstp->bits, srcp->bits, nbits);
  68.158 -}
  68.159 -
  68.160 -#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
  68.161 -static inline int __cpus_equal(const cpumask_t *src1p,
  68.162 -					const cpumask_t *src2p, int nbits)
  68.163 -{
  68.164 -	return bitmap_equal(src1p->bits, src2p->bits, nbits);
  68.165 -}
  68.166 -
  68.167 -#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
  68.168 -static inline int __cpus_intersects(const cpumask_t *src1p,
  68.169 -					const cpumask_t *src2p, int nbits)
  68.170 -{
  68.171 -	return bitmap_intersects(src1p->bits, src2p->bits, nbits);
  68.172 -}
  68.173 -
  68.174 -#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
  68.175 -static inline int __cpus_subset(const cpumask_t *src1p,
  68.176 -					const cpumask_t *src2p, int nbits)
  68.177 -{
  68.178 -	return bitmap_subset(src1p->bits, src2p->bits, nbits);
  68.179 -}
  68.180 -
  68.181 -#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
  68.182 -static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
  68.183 -{
  68.184 -	return bitmap_empty(srcp->bits, nbits);
  68.185 -}
  68.186 -
  68.187 -#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
  68.188 -static inline int __cpus_full(const cpumask_t *srcp, int nbits)
  68.189 -{
  68.190 -	return bitmap_full(srcp->bits, nbits);
  68.191 -}
  68.192 -
  68.193 -#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
  68.194 -static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
  68.195 -{
  68.196 -	return bitmap_weight(srcp->bits, nbits);
  68.197 -}
  68.198 -
  68.199 -#define cpus_shift_right(dst, src, n) \
  68.200 -			__cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
  68.201 -static inline void __cpus_shift_right(cpumask_t *dstp,
  68.202 -					const cpumask_t *srcp, int n, int nbits)
  68.203 -{
  68.204 -	bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
  68.205 -}
  68.206 -
  68.207 -#define cpus_shift_left(dst, src, n) \
  68.208 -			__cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
  68.209 -static inline void __cpus_shift_left(cpumask_t *dstp,
  68.210 -					const cpumask_t *srcp, int n, int nbits)
  68.211 -{
  68.212 -	bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
  68.213 -}
  68.214 -
  68.215 -#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
  68.216 -static inline int __first_cpu(const cpumask_t *srcp, int nbits)
  68.217 -{
  68.218 -	return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
  68.219 -}
  68.220 -
  68.221 -#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
  68.222 -static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
  68.223 -{
  68.224 -	return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
  68.225 -}
  68.226 -
  68.227 -#define cpumask_of_cpu(cpu)						\
  68.228 -({									\
  68.229 -	typeof(_unused_cpumask_arg_) m;					\
  68.230 -	if (sizeof(m) == sizeof(unsigned long)) {			\
  68.231 -		m.bits[0] = 1UL<<(cpu);					\
  68.232 -	} else {							\
  68.233 -		cpus_clear(m);						\
  68.234 -		cpu_set((cpu), m);					\
  68.235 -	}								\
  68.236 -	m;								\
  68.237 -})
  68.238 -
  68.239 -#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
  68.240 -
  68.241 -#if NR_CPUS <= BITS_PER_LONG
  68.242 -
  68.243 -#define CPU_MASK_ALL							\
  68.244 -(cpumask_t) { {								\
  68.245 -	[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD			\
  68.246 -} }
  68.247 -
  68.248 -#else
  68.249 -
  68.250 -#define CPU_MASK_ALL							\
  68.251 -(cpumask_t) { {								\
  68.252 -	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
  68.253 -	[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD			\
  68.254 -} }
  68.255 -
  68.256 -#endif
  68.257 -
  68.258 -#define CPU_MASK_NONE							\
  68.259 -(cpumask_t) { {								\
  68.260 -	[0 ... BITS_TO_LONGS(NR_CPUS)-1] =  0UL				\
  68.261 -} }
  68.262 -
  68.263 -#define CPU_MASK_CPU0							\
  68.264 -(cpumask_t) { {								\
  68.265 -	[0] =  1UL							\
  68.266 -} }
  68.267 -
  68.268 -#define cpus_addr(src) ((src).bits)
  68.269 -
  68.270 -#define cpumask_scnprintf(buf, len, src) \
  68.271 -			__cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
  68.272 -static inline int __cpumask_scnprintf(char *buf, int len,
  68.273 -					const cpumask_t *srcp, int nbits)
  68.274 -{
  68.275 -	return bitmap_scnprintf(buf, len, srcp->bits, nbits);
  68.276 -}
  68.277 -
  68.278 -#define cpumask_parse(ubuf, ulen, dst) \
  68.279 -			__cpumask_parse((ubuf), (ulen), &(dst), NR_CPUS)
  68.280 -static inline int __cpumask_parse(const char __user *buf, int len,
  68.281 -					cpumask_t *dstp, int nbits)
  68.282 -{
  68.283 -	return bitmap_parse(buf, len, dstp->bits, nbits);
  68.284 -}
  68.285 -
  68.286 -#define cpulist_scnprintf(buf, len, src) \
  68.287 -			__cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
  68.288 -static inline int __cpulist_scnprintf(char *buf, int len,
  68.289 -					const cpumask_t *srcp, int nbits)
  68.290 -{
  68.291 -	return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
  68.292 -}
  68.293 -
  68.294 -#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
  68.295 -static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
  68.296 -{
  68.297 -	return bitmap_parselist(buf, dstp->bits, nbits);
  68.298 -}
  68.299 -
  68.300 -#if NR_CPUS > 1
  68.301 -#define for_each_cpu_mask(cpu, mask)		\
  68.302 -	for ((cpu) = first_cpu(mask);		\
  68.303 -		(cpu) < NR_CPUS;		\
  68.304 -		(cpu) = next_cpu((cpu), (mask)))
  68.305 -#else /* NR_CPUS == 1 */
  68.306 -#define for_each_cpu_mask(cpu, mask) for ((cpu) = 0; (cpu) < 1; (cpu)++)
  68.307 -#endif /* NR_CPUS */
  68.308 -
  68.309 -/*
  68.310 - * The following particular system cpumasks and operations manage
  68.311 - * possible, present and online cpus.  Each of them is a fixed size
  68.312 - * bitmap of size NR_CPUS.
  68.313 - *
  68.314 - *  #ifdef CONFIG_HOTPLUG_CPU
  68.315 - *     cpu_possible_map - all NR_CPUS bits set
  68.316 - *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
  68.317 - *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
  68.318 - *  #else
  68.319 - *     cpu_possible_map - has bit 'cpu' set iff cpu is populated
  68.320 - *     cpu_present_map  - copy of cpu_possible_map
  68.321 - *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
  68.322 - *  #endif
  68.323 - *
  68.324 - *  In either case, NR_CPUS is fixed at compile time, as the static
  68.325 - *  size of these bitmaps.  The cpu_possible_map is fixed at boot
  68.326 - *  time, as the set of CPU id's that it is possible might ever
  68.327 - *  be plugged in at anytime during the life of that system boot.
  68.328 - *  The cpu_present_map is dynamic(*), representing which CPUs
  68.329 - *  are currently plugged in.  And cpu_online_map is the dynamic
  68.330 - *  subset of cpu_present_map, indicating those CPUs available
  68.331 - *  for scheduling.
  68.332 - *
  68.333 - *  If HOTPLUG is enabled, then cpu_possible_map is forced to have
  68.334 - *  all NR_CPUS bits set, otherwise it is just the set of CPUs that
  68.335 - *  ACPI reports present at boot.
  68.336 - *
  68.337 - *  If HOTPLUG is enabled, then cpu_present_map varies dynamically,
  68.338 - *  depending on what ACPI reports as currently plugged in, otherwise
  68.339 - *  cpu_present_map is just a copy of cpu_possible_map.
  68.340 - *
  68.341 - *  (*) Well, cpu_present_map is dynamic in the hotplug case.  If not
  68.342 - *      hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
  68.343 - *
  68.344 - * Subtleties:
  68.345 - * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
  68.346 - *    assumption that their single CPU is online.  The UP
  68.347 - *    cpu_{online,possible,present}_maps are placebos.  Changing them
   68.348 - *    will have no useful effect on the following num_*_cpus()
  68.349 - *    and cpu_*() macros in the UP case.  This ugliness is a UP
  68.350 - *    optimization - don't waste any instructions or memory references
  68.351 - *    asking if you're online or how many CPUs there are if there is
  68.352 - *    only one CPU.
  68.353 - * 2) Most SMP arch's #define some of these maps to be some
  68.354 - *    other map specific to that arch.  Therefore, the following
  68.355 - *    must be #define macros, not inlines.  To see why, examine
  68.356 - *    the assembly code produced by the following.  Note that
  68.357 - *    set1() writes phys_x_map, but set2() writes x_map:
  68.358 - *        int x_map, phys_x_map;
  68.359 - *        #define set1(a) x_map = a
  68.360 - *        inline void set2(int a) { x_map = a; }
  68.361 - *        #define x_map phys_x_map
  68.362 - *        main(){ set1(3); set2(5); }
  68.363 - */
  68.364 -
  68.365 -extern cpumask_t cpu_possible_map;
  68.366 -#ifndef XEN
  68.367 -extern cpumask_t cpu_online_map;
  68.368 -#endif
  68.369 -extern cpumask_t cpu_present_map;
  68.370 -
  68.371 -#if NR_CPUS > 1
  68.372 -#define num_online_cpus()	cpus_weight(cpu_online_map)
  68.373 -#define num_possible_cpus()	cpus_weight(cpu_possible_map)
  68.374 -#define num_present_cpus()	cpus_weight(cpu_present_map)
  68.375 -#define cpu_online(cpu)		cpu_isset((cpu), cpu_online_map)
  68.376 -#define cpu_possible(cpu)	cpu_isset((cpu), cpu_possible_map)
  68.377 -#define cpu_present(cpu)	cpu_isset((cpu), cpu_present_map)
  68.378 -#else
  68.379 -#define num_online_cpus()	1
  68.380 -#define num_possible_cpus()	1
  68.381 -#define num_present_cpus()	1
  68.382 -#define cpu_online(cpu)		((cpu) == 0)
  68.383 -#define cpu_possible(cpu)	((cpu) == 0)
  68.384 -#define cpu_present(cpu)	((cpu) == 0)
  68.385 -#endif
  68.386 -
  68.387 -#define any_online_cpu(mask)			\
  68.388 -({						\
  68.389 -	int cpu;				\
  68.390 -	for_each_cpu_mask(cpu, (mask))		\
  68.391 -		if (cpu_online(cpu))		\
  68.392 -			break;			\
  68.393 -	cpu;					\
  68.394 -})
  68.395 -
  68.396 -#define for_each_cpu(cpu)	  for_each_cpu_mask((cpu), cpu_possible_map)
  68.397 -#define for_each_online_cpu(cpu)  for_each_cpu_mask((cpu), cpu_online_map)
  68.398 -#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
  68.399 -
  68.400 -#endif /* __LINUX_CPUMASK_H */
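
This private copy of cpumask.h is dropped in favour of the common
xen/cpumask.h (note the new #include <xen/cpumask.h> in asm-ia64/domain.h
earlier in this changeset). The documented API is unchanged; a short usage
sketch, assuming the same operations in the common header:

    cpumask_t mask = CPU_MASK_NONE;
    int cpu;

    cpu_set(2, mask);                  /* turn on bit 2 */
    cpu_set(5, mask);                  /* turn on bit 5 */
    for_each_cpu_mask(cpu, mask)       /* iterates cpu = 2, then cpu = 5 */
        printk("cpu %d is set\n", cpu);
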
    69.1 --- a/xen/include/asm-ia64/linux/README.origin	Thu Mar 02 10:59:34 2006 +0100
    69.2 +++ b/xen/include/asm-ia64/linux/README.origin	Thu Mar 02 11:00:49 2006 +0100
    69.3 @@ -4,31 +4,29 @@
    69.4  # needs to be changed, move it to ../linux-xen and follow
    69.5  # the instructions in the README there.
    69.6  
    69.7 -bcd.h			->linux/include/linux/bcd.h
    69.8 -bitmap.h		->linux/include/linux/bitmap.h
    69.9 -bitops.h		->linux/include/linux/bitops.h
   69.10 -dma-mapping.h		->linux/include/linux/dma-mapping.h
   69.11 -efi.h			->linux/include/linux/efi.h
   69.12 -err.h			->linux/include/linux/err.h
   69.13 -initrd.h		->linux/include/linux/initrd.h
   69.14 -jiffies.h		->linux/include/linux/jiffies.h
   69.15 -kmalloc_sizes.h		->linux/include/linux/kmalloc_sizes.h
   69.16 -linkage.h		->linux/include/linux/linkage.h
   69.17 -mmzone.h		->linux/include/linux/mmzone.h
   69.18 -notifier.h		->linux/include/linux/notifier.h
   69.19 -numa.h			->linux/include/linux/numa.h
   69.20 -page-flags.h		->linux/include/linux/page-flags.h
   69.21 -percpu.h		->linux/include/linux/percpu.h
   69.22 -preempt.h		->linux/include/linux/preempt.h
   69.23 -rbtree.h		->linux/include/linux/rbtree.h
   69.24 -rwsem.h			->linux/include/linux/rwsem.h
   69.25 -seqlock.h		->linux/include/linux/seqlock.h
   69.26 -slab.h			->linux/include/linux/slab.h
   69.27 -sort.h			->linux/include/linux/sort.h
   69.28 -stddef.h		->linux/include/linux/stddef.h
   69.29 -thread_info.h		->linux/include/linux/thread_info.h
   69.30 -threads.h		->linux/include/linux/threads.h
   69.31 -time.h			->linux/include/linux/time.h
   69.32 -timex.h			->linux/include/linux/timex.h
   69.33 -topology.h		->linux/include/linux/topology.h
   69.34 -wait.h			->linux/include/linux/wait.h
   69.35 +bcd.h			-> linux/include/linux/bcd.h
   69.36 +bitmap.h		-> linux/include/linux/bitmap.h
   69.37 +bitops.h		-> linux/include/linux/bitops.h
   69.38 +dma-mapping.h		-> linux/include/linux/dma-mapping.h
   69.39 +efi.h			-> linux/include/linux/efi.h
   69.40 +err.h			-> linux/include/linux/err.h
   69.41 +initrd.h		-> linux/include/linux/initrd.h
   69.42 +jiffies.h		-> linux/include/linux/jiffies.h
   69.43 +kmalloc_sizes.h		-> linux/include/linux/kmalloc_sizes.h
   69.44 +linkage.h		-> linux/include/linux/linkage.h
   69.45 +mmzone.h		-> linux/include/linux/mmzone.h
   69.46 +notifier.h		-> linux/include/linux/notifier.h
   69.47 +numa.h			-> linux/include/linux/numa.h
   69.48 +page-flags.h		-> linux/include/linux/page-flags.h
   69.49 +percpu.h		-> linux/include/linux/percpu.h
   69.50 +preempt.h		-> linux/include/linux/preempt.h
   69.51 +rbtree.h		-> linux/include/linux/rbtree.h
   69.52 +rwsem.h			-> linux/include/linux/rwsem.h
   69.53 +seqlock.h		-> linux/include/linux/seqlock.h
   69.54 +sort.h			-> linux/include/linux/sort.h
   69.55 +stddef.h		-> linux/include/linux/stddef.h
   69.56 +thread_info.h		-> linux/include/linux/thread_info.h
   69.57 +time.h			-> linux/include/linux/time.h
   69.58 +timex.h			-> linux/include/linux/timex.h
   69.59 +topology.h		-> linux/include/linux/topology.h
   69.60 +wait.h			-> linux/include/linux/wait.h
    70.1 --- a/xen/include/asm-ia64/linux/asm/README.origin	Thu Mar 02 10:59:34 2006 +0100
    70.2 +++ b/xen/include/asm-ia64/linux/asm/README.origin	Thu Mar 02 11:00:49 2006 +0100
    70.3 @@ -4,6 +4,7 @@
    70.4  # needs to be changed, move it to ../linux-xen and follow
    70.5  # the instructions in the README there.
    70.6  
    70.7 +acpi.h			-> linux/include/asm-ia64/acpi.h
    70.8  asmmacro.h		-> linux/include/asm-ia64/asmmacro.h
    70.9  atomic.h		-> linux/include/asm-ia64/atomic.h
   70.10  bitops.h		-> linux/include/asm-ia64/bitops.h
   70.11 @@ -11,7 +12,6 @@ break.h			-> linux/include/asm-ia64/brea
   70.12  bug.h			-> linux/include/asm-ia64/bug.h
   70.13  byteorder.h		-> linux/include/asm-ia64/byteorder.h
   70.14  cacheflush.h		-> linux/include/asm-ia64/cacheflush.h
   70.15 -cache.h			-> linux/include/asm-ia64/cache.h
   70.16  checksum.h		-> linux/include/asm-ia64/checksum.h
   70.17  current.h		-> linux/include/asm-ia64/current.h
   70.18  delay.h			-> linux/include/asm-ia64/delay.h
   70.19 @@ -46,9 +46,6 @@ scatterlist.h		-> linux/include/asm-ia64
   70.20  sections.h		-> linux/include/asm-ia64/sections.h
   70.21  semaphore.h		-> linux/include/asm-ia64/semaphore.h
   70.22  setup.h			-> linux/include/asm-ia64/setup.h
   70.23 -sigcontext.h		-> linux/include/asm-ia64/sigcontext.h
   70.24 -signal.h		-> linux/include/asm-ia64/signal.h
   70.25 -smp.h			-> linux/include/asm-ia64/smp.h
   70.26  string.h		-> linux/include/asm-ia64/string.h
   70.27  thread_info.h		-> linux/include/asm-ia64/thread_info.h
   70.28  timex.h			-> linux/include/asm-ia64/timex.h
    71.1 --- a/xen/include/asm-ia64/linux/asm/sigcontext.h	Thu Mar 02 10:59:34 2006 +0100
    71.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    71.3 @@ -1,70 +0,0 @@
    71.4 -#ifndef _ASM_IA64_SIGCONTEXT_H
    71.5 -#define _ASM_IA64_SIGCONTEXT_H
    71.6 -
    71.7 -/*
    71.8 - * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
    71.9 - * Copyright (C) 1998, 1999, 2001 David Mosberger-Tang <davidm@hpl.hp.com>
   71.10 - */
   71.11 -
   71.12 -#include <asm/fpu.h>
   71.13 -
   71.14 -#define IA64_SC_FLAG_ONSTACK_BIT		0	/* is handler running on signal stack? */
   71.15 -#define IA64_SC_FLAG_IN_SYSCALL_BIT		1	/* did signal interrupt a syscall? */
   71.16 -#define IA64_SC_FLAG_FPH_VALID_BIT		2	/* is state in f[32]-f[127] valid? */
   71.17 -
   71.18 -#define IA64_SC_FLAG_ONSTACK		(1 << IA64_SC_FLAG_ONSTACK_BIT)
   71.19 -#define IA64_SC_FLAG_IN_SYSCALL		(1 << IA64_SC_FLAG_IN_SYSCALL_BIT)
   71.20 -#define IA64_SC_FLAG_FPH_VALID		(1 << IA64_SC_FLAG_FPH_VALID_BIT)
   71.21 -
   71.22 -# ifndef __ASSEMBLY__
   71.23 -
   71.24 -/*
   71.25 - * Note on handling of register backing store: sc_ar_bsp contains the address that would
   71.26 - * be found in ar.bsp after executing a "cover" instruction the context in which the
   71.27 - * signal was raised.  If signal delivery required switching to an alternate signal stack
   71.28 - * (sc_rbs_base is not NULL), the "dirty" partition (as it would exist after executing the
   71.29 - * imaginary "cover" instruction) is backed by the *alternate* signal stack, not the
   71.30 - * original one.  In this case, sc_rbs_base contains the base address of the new register
   71.31 - * backing store.  The number of registers in the dirty partition can be calculated as:
   71.32 - *
   71.33 - *   ndirty = ia64_rse_num_regs(sc_rbs_base, sc_rbs_base + (sc_loadrs >> 16))
   71.34 - *
   71.35 - */
   71.36 -
   71.37 -struct sigcontext {
   71.38 -	unsigned long		sc_flags;	/* see manifest constants above */
   71.39 -	unsigned long		sc_nat;		/* bit i == 1 iff scratch reg gr[i] is a NaT */
   71.40 -	stack_t			sc_stack;	/* previously active stack */
   71.41 -
   71.42 -	unsigned long		sc_ip;		/* instruction pointer */
   71.43 -	unsigned long		sc_cfm;		/* current frame marker */
   71.44 -	unsigned long		sc_um;		/* user mask bits */
   71.45 -	unsigned long		sc_ar_rsc;	/* register stack configuration register */
   71.46 -	unsigned long		sc_ar_bsp;	/* backing store pointer */
   71.47 -	unsigned long		sc_ar_rnat;	/* RSE NaT collection register */
   71.48 -	unsigned long		sc_ar_ccv;	/* compare and exchange compare value register */
   71.49 -	unsigned long		sc_ar_unat;	/* ar.unat of interrupted context */
   71.50 -	unsigned long		sc_ar_fpsr;	/* floating-point status register */
   71.51 -	unsigned long		sc_ar_pfs;	/* previous function state */
   71.52 -	unsigned long		sc_ar_lc;	/* loop count register */
   71.53 -	unsigned long		sc_pr;		/* predicate registers */
   71.54 -	unsigned long		sc_br[8];	/* branch registers */
   71.55 -	/* Note: sc_gr[0] is used as the "uc_link" member of ucontext_t */
   71.56 -	unsigned long		sc_gr[32];	/* general registers (static partition) */
   71.57 -	struct ia64_fpreg	sc_fr[128];	/* floating-point registers */
   71.58 -
   71.59 -	unsigned long		sc_rbs_base;	/* NULL or new base of sighandler's rbs */
   71.60 -	unsigned long		sc_loadrs;	/* see description above */
   71.61 -
   71.62 -	unsigned long		sc_ar25;	/* cmp8xchg16 uses this */
   71.63 -	unsigned long		sc_ar26;	/* rsvd for scratch use */
   71.64 -	unsigned long		sc_rsvd[12];	/* reserved for future use */
   71.65 -	/*
   71.66 -	 * The mask must come last so we can increase _NSIG_WORDS
   71.67 -	 * without breaking binary compatibility.
   71.68 -	 */
   71.69 -	sigset_t		sc_mask;	/* signal mask to restore after handler returns */
   71.70 -};
   71.71 -
   71.72 -# endif /* __ASSEMBLY__ */
   71.73 -#endif /* _ASM_IA64_SIGCONTEXT_H */
    72.1 --- a/xen/include/asm-ia64/linux/asm/signal.h	Thu Mar 02 10:59:34 2006 +0100
    72.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    72.3 @@ -1,166 +0,0 @@
    72.4 -#ifndef _ASM_IA64_SIGNAL_H
    72.5 -#define _ASM_IA64_SIGNAL_H
    72.6 -
    72.7 -/*
    72.8 - * Modified 1998-2001, 2003
    72.9 - *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
   72.10 - *
   72.11 - * Unfortunately, this file is being included by bits/signal.h in
   72.12 - * glibc-2.x.  Hence the #ifdef __KERNEL__ ugliness.
   72.13 - */
   72.14 -
   72.15 -#define SIGHUP		 1
   72.16 -#define SIGINT		 2
   72.17 -#define SIGQUIT		 3
   72.18 -#define SIGILL		 4
   72.19 -#define SIGTRAP		 5
   72.20 -#define SIGABRT		 6
   72.21 -#define SIGIOT		 6
   72.22 -#define SIGBUS		 7
   72.23 -#define SIGFPE		 8
   72.24 -#define SIGKILL		 9
   72.25 -#define SIGUSR1		10
   72.26 -#define SIGSEGV		11
   72.27 -#define SIGUSR2		12
   72.28 -#define SIGPIPE		13
   72.29 -#define SIGALRM		14
   72.30 -#define SIGTERM		15
   72.31 -#define SIGSTKFLT	16
   72.32 -#define SIGCHLD		17
   72.33 -#define SIGCONT		18
   72.34 -#define SIGSTOP		19
   72.35 -#define SIGTSTP		20
   72.36 -#define SIGTTIN		21
   72.37 -#define SIGTTOU		22
   72.38 -#define SIGURG		23
   72.39 -#define SIGXCPU		24
   72.40 -#define SIGXFSZ		25
   72.41 -#define SIGVTALRM	26
   72.42 -#define SIGPROF		27
   72.43 -#define SIGWINCH	28
   72.44 -#define SIGIO		29
   72.45 -#define SIGPOLL		SIGIO
   72.46 -/*
   72.47 -#define SIGLOST		29
   72.48 -*/
   72.49 -#define SIGPWR		30
   72.50 -#define SIGSYS		31
   72.51 -/* signal 31 is no longer "unused", but the SIGUNUSED macro remains for backwards compatibility */
   72.52 -#define	SIGUNUSED	31
   72.53 -
   72.54 -/* These should not be considered constants from userland.  */
   72.55 -#define SIGRTMIN	32
   72.56 -#define SIGRTMAX	_NSIG
   72.57 -
   72.58 -/*
   72.59 - * SA_FLAGS values:
   72.60 - *
   72.61 - * SA_ONSTACK indicates that a registered stack_t will be used.
   72.62 - * SA_INTERRUPT is a no-op, but left due to historical reasons.
   72.63 - * SA_RESTART flag to get restarting signals (which were the default long ago)
   72.64 - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
   72.65 - * SA_RESETHAND clears the handler when the signal is delivered.
   72.66 - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
   72.67 - * SA_NODEFER prevents the current signal from being masked in the handler.
   72.68 - *
   72.69 - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
   72.70 - * Unix names RESETHAND and NODEFER respectively.
   72.71 - */
   72.72 -#define SA_NOCLDSTOP	0x00000001
   72.73 -#define SA_NOCLDWAIT	0x00000002
   72.74 -#define SA_SIGINFO	0x00000004
   72.75 -#define SA_ONSTACK	0x08000000
   72.76 -#define SA_RESTART	0x10000000
   72.77 -#define SA_NODEFER	0x40000000
   72.78 -#define SA_RESETHAND	0x80000000
   72.79 -
   72.80 -#define SA_NOMASK	SA_NODEFER
   72.81 -#define SA_ONESHOT	SA_RESETHAND
   72.82 -#define SA_INTERRUPT	0x20000000 /* dummy -- ignored */
   72.83 -
   72.84 -#define SA_RESTORER	0x04000000
   72.85 -
   72.86 -/*
   72.87 - * sigaltstack controls
   72.88 - */
   72.89 -#define SS_ONSTACK	1
   72.90 -#define SS_DISABLE	2
   72.91 -
   72.92 -/*
   72.93 - * The minimum stack size needs to be fairly large because we want to
   72.94 - * be sure that an app compiled for today's CPUs will continue to run
   72.95 - * on all future CPU models.  The CPU model matters because the signal
   72.96 - * frame needs to have space for the complete machine state, including
   72.97 - * all physical stacked registers.  The number of physical stacked
   72.98 - * registers is CPU model dependent, but given that the width of
   72.99 - * ar.rsc.loadrs is 14 bits, we can assume that they'll never take up
  72.100 - * more than 16KB of space.
  72.101 - */
  72.102 -#if 1
  72.103 -  /*
  72.104 -   * This is a stupid typo: the value was _meant_ to be 131072 (0x20000), but I typed it
  72.105 -   * in wrong. ;-(  To preserve backwards compatibility, we leave the kernel at the
  72.106 -   * incorrect value and fix libc only.
  72.107 -   */
  72.108 -# define MINSIGSTKSZ	131027	/* min. stack size for sigaltstack() */
  72.109 -#else
  72.110 -# define MINSIGSTKSZ	131072	/* min. stack size for sigaltstack() */
  72.111 -#endif
  72.112 -#define SIGSTKSZ	262144	/* default stack size for sigaltstack() */
  72.113 -
  72.114 -#ifdef __KERNEL__
  72.115 -
  72.116 -#define _NSIG		64
  72.117 -#define _NSIG_BPW	64
  72.118 -#define _NSIG_WORDS	(_NSIG / _NSIG_BPW)
  72.119 -
  72.120 -#define SA_PERCPU_IRQ		0x02000000
  72.121 -
  72.122 -#endif /* __KERNEL__ */
  72.123 -
  72.124 -#include <asm-generic/signal.h>
  72.125 -
  72.126 -# ifndef __ASSEMBLY__
  72.127 -
  72.128 -#  include <linux/types.h>
  72.129 -
  72.130 -/* Avoid too many header ordering problems.  */
  72.131 -struct siginfo;
  72.132 -
  72.133 -typedef struct sigaltstack {
  72.134 -	void __user *ss_sp;
  72.135 -	int ss_flags;
  72.136 -	size_t ss_size;
  72.137 -} stack_t;
  72.138 -
  72.139 -#ifdef __KERNEL__
  72.140 -
  72.141 -/* Most things should be clean enough to redefine this at will, if care
  72.142 -   is taken to make libc match.  */
  72.143 -
  72.144 -typedef unsigned long old_sigset_t;
  72.145 -
  72.146 -typedef struct {
  72.147 -	unsigned long sig[_NSIG_WORDS];
  72.148 -} sigset_t;
  72.149 -
  72.150 -struct sigaction {
  72.151 -	__sighandler_t sa_handler;
  72.152 -	unsigned long sa_flags;
  72.153 -	sigset_t sa_mask;		/* mask last for extensibility */
  72.154 -};
  72.155 -
  72.156 -struct k_sigaction {
  72.157 -	struct sigaction sa;
  72.158 -};
  72.159 -
  72.160 -#  include <asm/sigcontext.h>
  72.161 -
  72.162 -#define ptrace_signal_deliver(regs, cookie) do { } while (0)
  72.163 -
  72.164 -void set_sigdelayed(pid_t pid, int signo, int code, void __user *addr);
  72.165 -
  72.166 -#endif /* __KERNEL__ */
  72.167 -
  72.168 -# endif /* !__ASSEMBLY__ */
  72.169 -#endif /* _ASM_IA64_SIGNAL_H */
    73.1 --- a/xen/include/asm-ia64/linux/asm/smp.h	Thu Mar 02 10:59:34 2006 +0100
    73.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    73.3 @@ -1,139 +0,0 @@
    73.4 -/*
    73.5 - * SMP Support
    73.6 - *
    73.7 - * Copyright (C) 1999 VA Linux Systems
    73.8 - * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
    73.9 - * (c) Copyright 2001-2003, 2005 Hewlett-Packard Development Company, L.P.
   73.10 - *	David Mosberger-Tang <davidm@hpl.hp.com>
   73.11 - *	Bjorn Helgaas <bjorn.helgaas@hp.com>
   73.12 - */
   73.13 -#ifndef _ASM_IA64_SMP_H
   73.14 -#define _ASM_IA64_SMP_H
   73.15 -
   73.16 -#include <linux/config.h>
   73.17 -#include <linux/init.h>
   73.18 -#include <linux/threads.h>
   73.19 -#include <linux/kernel.h>
   73.20 -#include <linux/cpumask.h>
   73.21 -
   73.22 -#include <asm/bitops.h>
   73.23 -#include <asm/io.h>
   73.24 -#include <asm/param.h>
   73.25 -#include <asm/processor.h>
   73.26 -#include <asm/ptrace.h>
   73.27 -
   73.28 -static inline unsigned int
   73.29 -ia64_get_lid (void)
   73.30 -{
   73.31 -	union {
   73.32 -		struct {
   73.33 -			unsigned long reserved : 16;
   73.34 -			unsigned long eid : 8;
   73.35 -			unsigned long id : 8;
   73.36 -			unsigned long ignored : 32;
   73.37 -		} f;
   73.38 -		unsigned long bits;
   73.39 -	} lid;
   73.40 -
   73.41 -	lid.bits = ia64_getreg(_IA64_REG_CR_LID);
   73.42 -	return lid.f.id << 8 | lid.f.eid;
   73.43 -}
   73.44 -
   73.45 -#ifdef CONFIG_SMP
   73.46 -
   73.47 -#define XTP_OFFSET		0x1e0008
   73.48 -
   73.49 -#define SMP_IRQ_REDIRECTION	(1 << 0)
   73.50 -#define SMP_IPI_REDIRECTION	(1 << 1)
   73.51 -
   73.52 -#define raw_smp_processor_id() (current_thread_info()->cpu)
   73.53 -
   73.54 -extern struct smp_boot_data {
   73.55 -	int cpu_count;
   73.56 -	int cpu_phys_id[NR_CPUS];
   73.57 -} smp_boot_data __initdata;
   73.58 -
   73.59 -extern char no_int_routing __devinitdata;
   73.60 -
   73.61 -extern cpumask_t cpu_online_map;
   73.62 -extern cpumask_t cpu_core_map[NR_CPUS];
   73.63 -extern cpumask_t cpu_sibling_map[NR_CPUS];
   73.64 -extern int smp_num_siblings;
   73.65 -extern int smp_num_cpucores;
   73.66 -extern void __iomem *ipi_base_addr;
   73.67 -extern unsigned char smp_int_redirect;
   73.68 -
   73.69 -extern volatile int ia64_cpu_to_sapicid[];
   73.70 -#define cpu_physical_id(i)	ia64_cpu_to_sapicid[i]
   73.71 -
   73.72 -extern unsigned long ap_wakeup_vector;
   73.73 -
   73.74 -/*
   73.75 - * Function to map hard smp processor id to logical id.  Slow, so don't use this in
   73.76 - * performance-critical code.
   73.77 - */
   73.78 -static inline int
   73.79 -cpu_logical_id (int cpuid)
   73.80 -{
   73.81 -	int i;
   73.82 -
   73.83 -	for (i = 0; i < NR_CPUS; ++i)
   73.84 -		if (cpu_physical_id(i) == cpuid)
   73.85 -			break;
   73.86 -	return i;
   73.87 -}
   73.88 -
   73.89 -/*
   73.90 - * XTP control functions:
   73.91 - *	min_xtp   : route all interrupts to this CPU
   73.92 - *	normal_xtp: nominal XTP value
   73.93 - *	max_xtp   : never deliver interrupts to this CPU.
   73.94 - */
   73.95 -
   73.96 -static inline void
   73.97 -min_xtp (void)
   73.98 -{
   73.99 -	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
  73.100 -		writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
  73.101 -}
  73.102 -
  73.103 -static inline void
  73.104 -normal_xtp (void)
  73.105 -{
  73.106 -	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
  73.107 -		writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */
  73.108 -}
  73.109 -
  73.110 -static inline void
  73.111 -max_xtp (void)
  73.112 -{
  73.113 -	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
  73.114 -		writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
  73.115 -}
  73.116 -
  73.117 -#define hard_smp_processor_id()		ia64_get_lid()
  73.118 -
  73.119 -/* Upping and downing of CPUs */
  73.120 -extern int __cpu_disable (void);
  73.121 -extern void __cpu_die (unsigned int cpu);
  73.122 -extern void cpu_die (void) __attribute__ ((noreturn));
  73.123 -extern int __cpu_up (unsigned int cpu);
  73.124 -extern void __init smp_build_cpu_map(void);
  73.125 -
  73.126 -extern void __init init_smp_config (void);
  73.127 -extern void smp_do_timer (struct pt_regs *regs);
  73.128 -
  73.129 -extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
  73.130 -				     int retry, int wait);
  73.131 -extern void smp_send_reschedule (int cpu);
  73.132 -extern void lock_ipi_calllock(void);
  73.133 -extern void unlock_ipi_calllock(void);
  73.134 -extern void identify_siblings (struct cpuinfo_ia64 *);
  73.135 -
  73.136 -#else
  73.137 -
  73.138 -#define cpu_logical_id(i)		0
  73.139 -#define cpu_physical_id(i)		ia64_get_lid()
  73.140 -
  73.141 -#endif /* CONFIG_SMP */
  73.142 -#endif /* _ASM_IA64_SMP_H */
    74.1 --- a/xen/include/asm-ia64/linux/byteorder/README.origin	Thu Mar 02 10:59:34 2006 +0100
    74.2 +++ b/xen/include/asm-ia64/linux/byteorder/README.origin	Thu Mar 02 11:00:49 2006 +0100
    74.3 @@ -4,6 +4,6 @@
    74.4  # needs to be changed, move it to ../linux-xen and follow
    74.5  # the instructions in the README there.
    74.6  
    74.7 -generic.h		-> linux/include/byteorder/generic.h
    74.8 -little_endian.h		-> linux/include/byteorder/little_endian.h
    74.9 -swab.h			-> linux/include/byteorder/swab.h
   74.10 +generic.h		-> linux/include/linux/byteorder/generic.h
   74.11 +little_endian.h		-> linux/include/linux/byteorder/little_endian.h
   74.12 +swab.h			-> linux/include/linux/byteorder/swab.h
    75.1 --- a/xen/include/asm-ia64/linux/slab.h	Thu Mar 02 10:59:34 2006 +0100
    75.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    75.3 @@ -1,137 +0,0 @@
    75.4 -/*
    75.5 - * linux/mm/slab.h
    75.6 - * Written by Mark Hemment, 1996.
    75.7 - * (markhe@nextd.demon.co.uk)
    75.8 - */
    75.9 -
   75.10 -#ifndef _LINUX_SLAB_H
   75.11 -#define	_LINUX_SLAB_H
   75.12 -
   75.13 -#if	defined(__KERNEL__)
   75.14 -
   75.15 -typedef struct kmem_cache_s kmem_cache_t;
   75.16 -
   75.17 -#include	<linux/config.h>	/* kmalloc_sizes.h needs CONFIG_ options */
   75.18 -#include	<linux/gfp.h>
   75.19 -#include	<linux/init.h>
   75.20 -#include	<linux/types.h>
   75.21 -#include	<asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
   75.22 -#include	<asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
   75.23 -
   75.24 -/* flags for kmem_cache_alloc() */
   75.25 -#define	SLAB_NOFS		GFP_NOFS
   75.26 -#define	SLAB_NOIO		GFP_NOIO
   75.27 -#define	SLAB_ATOMIC		GFP_ATOMIC
   75.28 -#define	SLAB_USER		GFP_USER
   75.29 -#define	SLAB_KERNEL		GFP_KERNEL
   75.30 -#define	SLAB_DMA		GFP_DMA
   75.31 -
   75.32 -#define SLAB_LEVEL_MASK		GFP_LEVEL_MASK
   75.33 -
   75.34 -#define	SLAB_NO_GROW		__GFP_NO_GROW	/* don't grow a cache */
   75.35 -
   75.36 -/* flags to pass to kmem_cache_create().
    75.37 - * The first 3 are only valid when the allocator has been built with
    75.38 - * SLAB_DEBUG_SUPPORT.
   75.39 - */
    75.40 -#define	SLAB_DEBUG_FREE		0x00000100UL	/* Perform (expensive) checks on free */
   75.41 -#define	SLAB_DEBUG_INITIAL	0x00000200UL	/* Call constructor (as verifier) */
   75.42 -#define	SLAB_RED_ZONE		0x00000400UL	/* Red zone objs in a cache */
   75.43 -#define	SLAB_POISON		0x00000800UL	/* Poison objects */
   75.44 -#define	SLAB_NO_REAP		0x00001000UL	/* never reap from the cache */
    75.45 -#define	SLAB_HWCACHE_ALIGN	0x00002000UL	/* align objs on h/w cache lines */
   75.46 -#define SLAB_CACHE_DMA		0x00004000UL	/* use GFP_DMA memory */
   75.47 -#define SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* force alignment */
   75.48 -#define SLAB_STORE_USER		0x00010000UL	/* store the last owner for bug hunting */
   75.49 -#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* track pages allocated to indicate
   75.50 -						   what is reclaimable later*/
   75.51 -#define SLAB_PANIC		0x00040000UL	/* panic if kmem_cache_create() fails */
   75.52 -#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* defer freeing pages to RCU */
   75.53 -
   75.54 -/* flags passed to a constructor func */
   75.55 -#define	SLAB_CTOR_CONSTRUCTOR	0x001UL		/* if not set, then deconstructor */
   75.56 -#define SLAB_CTOR_ATOMIC	0x002UL		/* tell constructor it can't sleep */
   75.57 -#define	SLAB_CTOR_VERIFY	0x004UL		/* tell constructor it's a verify call */
   75.58 -
   75.59 -/* prototypes */
   75.60 -extern void __init kmem_cache_init(void);
   75.61 -
   75.62 -extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
   75.63 -				       void (*)(void *, kmem_cache_t *, unsigned long),
   75.64 -				       void (*)(void *, kmem_cache_t *, unsigned long));
   75.65 -extern int kmem_cache_destroy(kmem_cache_t *);
   75.66 -extern int kmem_cache_shrink(kmem_cache_t *);
   75.67 -extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
   75.68 -extern void kmem_cache_free(kmem_cache_t *, void *);
   75.69 -extern unsigned int kmem_cache_size(kmem_cache_t *);
   75.70 -extern const char *kmem_cache_name(kmem_cache_t *);
   75.71 -extern kmem_cache_t *kmem_find_general_cachep(size_t size, unsigned int __nocast gfpflags);
   75.72 -
   75.73 -/* Size description struct for general caches. */
   75.74 -struct cache_sizes {
   75.75 -	size_t		 cs_size;
   75.76 -	kmem_cache_t	*cs_cachep;
   75.77 -	kmem_cache_t	*cs_dmacachep;
   75.78 -};
   75.79 -extern struct cache_sizes malloc_sizes[];
   75.80 -extern void *__kmalloc(size_t, unsigned int __nocast);
   75.81 -
   75.82 -static inline void *kmalloc(size_t size, unsigned int __nocast flags)
   75.83 -{
   75.84 -	if (__builtin_constant_p(size)) {
   75.85 -		int i = 0;
   75.86 -#define CACHE(x) \
   75.87 -		if (size <= x) \
   75.88 -			goto found; \
   75.89 -		else \
   75.90 -			i++;
   75.91 -#include "kmalloc_sizes.h"
   75.92 -#undef CACHE
   75.93 -		{
   75.94 -			extern void __you_cannot_kmalloc_that_much(void);
   75.95 -			__you_cannot_kmalloc_that_much();
   75.96 -		}
   75.97 -found:
   75.98 -		return kmem_cache_alloc((flags & GFP_DMA) ?
   75.99 -			malloc_sizes[i].cs_dmacachep :
  75.100 -			malloc_sizes[i].cs_cachep, flags);
  75.101 -	}
  75.102 -	return __kmalloc(size, flags);
  75.103 -}
  75.104 -
  75.105 -extern void *kcalloc(size_t, size_t, unsigned int __nocast);
  75.106 -extern void kfree(const void *);
  75.107 -extern unsigned int ksize(const void *);
  75.108 -
  75.109 -#ifdef CONFIG_NUMA
  75.110 -extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node);
  75.111 -extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node);
  75.112 -#else
  75.113 -static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
  75.114 -{
  75.115 -	return kmem_cache_alloc(cachep, flags);
  75.116 -}
  75.117 -static inline void *kmalloc_node(size_t size, unsigned int __nocast flags, int node)
  75.118 -{
  75.119 -	return kmalloc(size, flags);
  75.120 -}
  75.121 -#endif
  75.122 -
  75.123 -extern int FASTCALL(kmem_cache_reap(int));
  75.124 -extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
  75.125 -
  75.126 -/* System wide caches */
  75.127 -extern kmem_cache_t	*vm_area_cachep;
  75.128 -extern kmem_cache_t	*names_cachep;
  75.129 -extern kmem_cache_t	*files_cachep;
  75.130 -extern kmem_cache_t	*filp_cachep;
  75.131 -extern kmem_cache_t	*fs_cachep;
  75.132 -extern kmem_cache_t	*signal_cachep;
  75.133 -extern kmem_cache_t	*sighand_cachep;
  75.134 -extern kmem_cache_t	*bio_cachep;
  75.135 -
  75.136 -extern atomic_t slab_reclaim_pages;
  75.137 -
  75.138 -#endif	/* __KERNEL__ */
  75.139 -
  75.140 -#endif	/* _LINUX_SLAB_H */
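
The kmalloc() inline removed above picks a general cache at compile time: kmalloc_sizes.h expands CACHE(x) once per cache size in ascending order, so for a constant size the if-chain folds down to a single index into malloc_sizes[]. A minimal stand-alone sketch of that technique; the sizes 32/64/128 are placeholders, not the real table:

    /* Compile-time size-to-index mapping in the style of the removed
     * kmalloc(); with a constant argument the chain constant-folds. */
    #include <stddef.h>

    static inline int size_to_cache_index(size_t size)
    {
            int i = 0;
    #define CACHE(x) if (size <= (x)) return i; else i++;
            CACHE(32)
            CACHE(64)
            CACHE(128)  /* the real header continues to the largest cache */
    #undef CACHE
            return -1;  /* too large for any general cache */
    }
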
    76.1 --- a/xen/include/asm-ia64/linux/threads.h	Thu Mar 02 10:59:34 2006 +0100
    76.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    76.3 @@ -1,36 +0,0 @@
    76.4 -#ifndef _LINUX_THREADS_H
    76.5 -#define _LINUX_THREADS_H
    76.6 -
    76.7 -#include <linux/config.h>
    76.8 -
    76.9 -/*
   76.10 - * The default limit for the nr of threads is now in
   76.11 - * /proc/sys/kernel/threads-max.
   76.12 - */
   76.13 -
   76.14 -/*
   76.15 - * Maximum supported processors that can run under SMP.  This value is
   76.16 - * set via configure setting.  The maximum is equal to the size of the
   76.17 - * bitmasks used on that platform, i.e. 32 or 64.  Setting this smaller
   76.18 - * saves quite a bit of memory.
   76.19 - */
   76.20 -#ifdef CONFIG_SMP
   76.21 -#define NR_CPUS		CONFIG_NR_CPUS
   76.22 -#else
   76.23 -#define NR_CPUS		1
   76.24 -#endif
   76.25 -
   76.26 -#define MIN_THREADS_LEFT_FOR_ROOT 4
   76.27 -
   76.28 -/*
   76.29 - * This controls the default maximum pid allocated to a process
   76.30 - */
   76.31 -#define PID_MAX_DEFAULT (CONFIG_BASE_SMALL ? 0x1000 : 0x8000)
   76.32 -
   76.33 -/*
   76.34 - * A maximum of 4 million PIDs should be enough for a while:
   76.35 - */
   76.36 -#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \
   76.37 -	(sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT))
   76.38 -
   76.39 -#endif
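
The removed PID limits are straight arithmetic. A quick user-space check of what they evaluate to on a 64-bit build with CONFIG_BASE_SMALL unset (assumed typical for ia64):

    /* PID_MAX_DEFAULT = 0x8000 = 32768; with sizeof(long) == 8 the
     * PID_MAX_LIMIT branch selects 4*1024*1024 = 4194304. */
    #include <stdio.h>

    int main(void)
    {
            long pid_max_default = 0x8000;
            long pid_max_limit = sizeof(long) > 4 ? 4L * 1024 * 1024
                                                  : pid_max_default;
            printf("PID_MAX_DEFAULT=%ld PID_MAX_LIMIT=%ld\n",
                   pid_max_default, pid_max_limit);
            return 0;
    }
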
    77.1 --- a/xen/include/asm-ia64/mm.h	Thu Mar 02 10:59:34 2006 +0100
    77.2 +++ b/xen/include/asm-ia64/mm.h	Thu Mar 02 11:00:49 2006 +0100
    77.3 @@ -134,6 +134,8 @@ extern void __init init_frametable(void)
    77.4  #endif
    77.5  void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
    77.6  
    77.7 +extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
    77.8 +
    77.9  static inline void put_page(struct page_info *page)
   77.10  {
   77.11  #ifdef VALIDATE_VT	// doesn't work with non-VTI in grant tables yet
   77.12 @@ -215,8 +217,8 @@ void memguard_unguard_range(void *p, uns
   77.13  #endif
   77.14  
   77.15  // prototype of misc memory stuff
   77.16 -unsigned long __get_free_pages(unsigned int mask, unsigned int order);
   77.17 -void __free_pages(struct page *page, unsigned int order);
   77.18 +//unsigned long __get_free_pages(unsigned int mask, unsigned int order);
   77.19 +//void __free_pages(struct page *page, unsigned int order);
   77.20  void *pgtable_quicklist_alloc(void);
   77.21  void pgtable_quicklist_free(void *pgtable_entry);
   77.22  
   77.23 @@ -407,6 +409,7 @@ extern unsigned long totalram_pages;
   77.24  extern int nr_swap_pages;
   77.25  
   77.26  extern unsigned long *mpt_table;
   77.27 +extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
   77.28  extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
   77.29  #undef machine_to_phys_mapping
   77.30  #define machine_to_phys_mapping	mpt_table
   77.31 @@ -435,12 +438,22 @@ extern unsigned long lookup_domain_mpa(s
   77.32  
   77.33  /* Return I/O type if true */
   77.34  #define __gpfn_is_io(_d, gpfn)				\
   77.35 -	(__gmfn_valid(_d, gpfn) ? 			\
   77.36 -	(lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_IO_MASK) : 0)
   77.37 +({                                          \
   77.38 +    u64 pte, ret=0;                                \
   77.39 +    pte=lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT));      \
   77.40 +    if(!(pte&GPFN_INV_MASK))        \
   77.41 +        ret = pte & GPFN_IO_MASK;        \
   77.42 +    ret;                \
   77.43 +})
   77.44  
   77.45  #define __gpfn_is_mem(_d, gpfn)				\
   77.46 -	(__gmfn_valid(_d, gpfn) ?			\
   77.47 -	((lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_IO_MASK) == GPFN_MEM) : 0)
   77.48 +({                                          \
   77.49 +    u64 pte, ret=0;                                \
   77.50 +    pte=lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT));      \
   77.51 +    if((!(pte&GPFN_INV_MASK))&&((pte & GPFN_IO_MASK)==GPFN_MEM))   \
   77.52 +        ret = 1;             \
   77.53 +    ret;                \
   77.54 +})
   77.55  
   77.56  
   77.57  #define __gpa_to_mpa(_d, gpa)   \
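
The rewritten __gpfn_is_io()/__gpfn_is_mem() above replace the __gmfn_valid() guard with a GPFN_INV_MASK test on the pte returned by lookup_domain_mpa(). The same logic as a plain function; a sketch assuming the GPFN_* masks and lookup_domain_mpa() from this tree, not a stand-alone program:

    /* Returns the I/O space type bits, or 0 for an invalid entry;
     * a valid entry whose type bits equal GPFN_MEM is ordinary RAM,
     * which is exactly the __gpfn_is_mem() test above. */
    static unsigned long gpfn_io_type(struct domain *d, unsigned long gpfn)
    {
            u64 pte = lookup_domain_mpa(d, gpfn << PAGE_SHIFT);

            if (pte & GPFN_INV_MASK)
                    return 0;
            return pte & GPFN_IO_MASK;
    }
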
    78.1 --- a/xen/include/asm-ia64/multicall.h	Thu Mar 02 10:59:34 2006 +0100
    78.2 +++ b/xen/include/asm-ia64/multicall.h	Thu Mar 02 11:00:49 2006 +0100
    78.3 @@ -1,5 +1,27 @@
    78.4  #ifndef __ASM_IA64_MULTICALL_H__
    78.5  #define __ASM_IA64_MULTICALL_H__
    78.6  
    78.7 -#define do_multicall_call(_call) BUG()
    78.8 +#include <public/xen.h>
    78.9 +
   78.10 +typedef unsigned long (*hypercall_t)(
   78.11 +			unsigned long arg0,
   78.12 +			unsigned long arg1,
   78.13 +			unsigned long arg2,
   78.14 +			unsigned long arg3,
   78.15 +			unsigned long arg4,
   78.16 +			unsigned long arg5);
   78.17 +
   78.18 +extern hypercall_t ia64_hypercall_table[];
   78.19 +
   78.20 +static inline void do_multicall_call(multicall_entry_t *call)
   78.21 +{
   78.22 +	call->result = (*ia64_hypercall_table[call->op])(
   78.23 +			call->args[0],
   78.24 +			call->args[1],
   78.25 +			call->args[2],
   78.26 +			call->args[3],
   78.27 +			call->args[4],
   78.28 +			call->args[5]);
   78.29 +}
   78.30 +
   78.31  #endif /* __ASM_IA64_MULTICALL_H__ */
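
do_multicall_call() now really dispatches instead of hitting BUG(): call->op indexes ia64_hypercall_table and the return value lands in call->result. An illustrative batch loop, assuming the multicall_entry_t layout (op, result, args[6]) from public/xen.h and that the caller has already validated each op against the table size:

    /* Hypothetical driver of the inline above, one entry at a time. */
    static void run_multicall_batch(multicall_entry_t *calls, unsigned int n)
    {
            unsigned int i;

            for (i = 0; i < n; i++)
                    do_multicall_call(&calls[i]);
    }
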
    79.1 --- a/xen/include/asm-ia64/privop.h	Thu Mar 02 10:59:34 2006 +0100
    79.2 +++ b/xen/include/asm-ia64/privop.h	Thu Mar 02 11:00:49 2006 +0100
    79.3 @@ -209,4 +209,6 @@ typedef union U_INST64 {
    79.4  
    79.5  extern void privify_memory(void *start, UINT64 len);
    79.6  
    79.7 +extern int ia64_hyperprivop(unsigned long iim, REGS *regs);
    79.8 +
    79.9  #endif
    80.1 --- a/xen/include/asm-ia64/vcpu.h	Thu Mar 02 10:59:34 2006 +0100
    80.2 +++ b/xen/include/asm-ia64/vcpu.h	Thu Mar 02 11:00:49 2006 +0100
    80.3 @@ -34,11 +34,13 @@ struct privop_addr_count {
    80.4  #endif
    80.5  
    80.6  /* general registers */
    80.7 -extern UINT64 vcpu_get_gr(VCPU *vcpu, unsigned reg);
    80.8 -extern IA64FAULT vcpu_get_gr_nat(VCPU *vcpu, unsigned reg, UINT64 *val);
    80.9 -extern IA64FAULT vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value, int nat);
   80.10 +extern UINT64 vcpu_get_gr(VCPU *vcpu, unsigned long reg);
   80.11 +extern IA64FAULT vcpu_get_gr_nat(VCPU *vcpu, unsigned long reg, UINT64 *val);
   80.12 +extern IA64FAULT vcpu_set_gr(VCPU *vcpu, unsigned long reg, UINT64 value, int nat);
   80.13  /* application registers */
   80.14 +extern void vcpu_load_kernel_regs(VCPU *vcpu);
   80.15  extern IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val);
   80.16 +extern IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val);
   80.17  /* psr */
   80.18  extern BOOLEAN vcpu_get_psr_ic(VCPU *vcpu);
   80.19  extern UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr);
   80.20 @@ -46,6 +48,9 @@ extern IA64FAULT vcpu_get_psr(VCPU *vcpu
   80.21  extern IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm);
   80.22  extern IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm);
   80.23  extern IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
   80.24 +extern IA64FAULT vcpu_set_psr_i(VCPU *vcpu);
   80.25 +extern IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu);
   80.26 +extern IA64FAULT vcpu_set_psr_dt(VCPU *vcpu);
   80.27  /* control registers */
   80.28  extern IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val);
   80.29  extern IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val);
   80.30 @@ -89,6 +94,8 @@ extern IA64FAULT vcpu_get_irr3(VCPU *vcp
   80.31  extern IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval);
   80.32  extern IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval);
   80.33  /* interrupt registers */
   80.34 +extern void vcpu_pend_unspecified_interrupt(VCPU *vcpu);
   80.35 +extern UINT64 vcpu_check_pending_interrupts(VCPU *vcpu);
   80.36  extern IA64FAULT vcpu_get_itv(VCPU *vcpu,UINT64 *pval);
   80.37  extern IA64FAULT vcpu_get_pmv(VCPU *vcpu,UINT64 *pval);
   80.38  extern IA64FAULT vcpu_get_cmcv(VCPU *vcpu,UINT64 *pval);
   80.39 @@ -97,8 +104,8 @@ extern IA64FAULT vcpu_set_itv(VCPU *vcpu
   80.40  extern IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val);
   80.41  extern IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val);
   80.42  /* interval timer registers */
   80.43 -extern IA64FAULT vcpu_set_itm(VCPU *vcpu,UINT64 val);
   80.44  extern IA64FAULT vcpu_set_itc(VCPU *vcpu,UINT64 val);
   80.45 +extern UINT64 vcpu_timer_pending_early(VCPU *vcpu);
   80.46  /* debug breakpoint registers */
   80.47  extern IA64FAULT vcpu_set_ibr(VCPU *vcpu,UINT64 reg,UINT64 val);
   80.48  extern IA64FAULT vcpu_set_dbr(VCPU *vcpu,UINT64 reg,UINT64 val);
   80.49 @@ -135,9 +142,14 @@ extern IA64FAULT vcpu_ptr_d(VCPU *vcpu,U
   80.50  extern IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr, UINT64 addr_range);
   80.51  extern IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pteval, UINT64 *itir, UINT64 *iha);
   80.52  extern IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
   80.53 +extern IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa);
   80.54 +extern IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr);
   80.55  /* misc */
   80.56  extern IA64FAULT vcpu_rfi(VCPU *vcpu);
   80.57  extern IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
   80.58 +extern IA64FAULT vcpu_cover(VCPU *vcpu);
   80.59 +extern IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
   80.60 +extern IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval);
   80.61  
   80.62  extern void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector);
   80.63  extern void vcpu_pend_timer(VCPU *vcpu);
   80.64 @@ -149,4 +161,16 @@ extern void vcpu_itc_no_srlz(VCPU *vcpu,
   80.65  extern UINT64 vcpu_get_tmp(VCPU *, UINT64);
   80.66  extern void vcpu_set_tmp(VCPU *, UINT64, UINT64);
   80.67  
   80.68 +static inline UINT64
   80.69 +itir_ps(UINT64 itir)
   80.70 +{
   80.71 +    return ((itir >> 2) & 0x3f);
   80.72 +}
   80.73 +
   80.74 +static inline UINT64
   80.75 +itir_mask(UINT64 itir)
   80.76 +{
   80.77 +    return (~((1UL << itir_ps(itir)) - 1));
   80.78 +}
   80.79 +
   80.80  #endif
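
The new itir_ps()/itir_mask() helpers rely on itir bits 2-7 holding the page-size exponent. A stand-alone worked example (ps=14, i.e. a 16KB page, is only an illustration):

    /* itir.ps = 14 -> page size 1<<14; the mask clears the low 14
     * bits, isolating the translation's base address. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t itir = 14ULL << 2;             /* ps field only  */
            uint64_t ps   = (itir >> 2) & 0x3f;     /* itir_ps(): 14  */
            uint64_t mask = ~((1ULL << ps) - 1);    /* itir_mask()    */
            uint64_t vadr = 0x4000000000004abcULL;

            printf("ps=%llu base=0x%llx\n", (unsigned long long)ps,
                   (unsigned long long)(vadr & mask));
            return 0;
    }
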
    81.1 --- a/xen/include/asm-ia64/vhpt.h	Thu Mar 02 10:59:34 2006 +0100
    81.2 +++ b/xen/include/asm-ia64/vhpt.h	Thu Mar 02 11:00:49 2006 +0100
    81.3 @@ -121,6 +121,11 @@ extern void vhpt_multiple_insert(unsigne
    81.4  extern void vhpt_insert (unsigned long vadr, unsigned long ptr,
    81.5  			 unsigned logps);
    81.6  extern void vhpt_flush(void);
    81.7 +
    81.8 +/* Currently the VHPT is allocated per CPU.  */
    81.9 +DECLARE_PER_CPU (unsigned long, vhpt_paddr);
   81.10 +DECLARE_PER_CPU (unsigned long, vhpt_pend);
   81.11 +
   81.12  #endif /* !__ASSEMBLY */
   81.13  
   81.14  #if !VHPT_ENABLED
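
The DECLARE_PER_CPU lines above pair with definitions elsewhere in the tree. A sketch of the assumed usage, following Xen's per-CPU helpers of this period and reading vhpt_pend as the end address of this CPU's VHPT (an inference from the name):

    /* Hypothetical companion code in a .c file. */
    DEFINE_PER_CPU(unsigned long, vhpt_paddr);
    DEFINE_PER_CPU(unsigned long, vhpt_pend);

    static unsigned long this_cpu_vhpt_bytes(void)
    {
            return __get_cpu_var(vhpt_pend) - __get_cpu_var(vhpt_paddr);
    }
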
    82.1 --- a/xen/include/asm-ia64/vmmu.h	Thu Mar 02 10:59:34 2006 +0100
    82.2 +++ b/xen/include/asm-ia64/vmmu.h	Thu Mar 02 11:00:49 2006 +0100
    82.3 @@ -23,12 +23,22 @@
    82.4  #ifndef XEN_TLBthash_H
    82.5  #define XEN_TLBthash_H
    82.6  
    82.7 +#define         MAX_CCN_DEPTH           15       // collision chain depth
    82.8 +#define         VCPU_VTLB_SHIFT          (20)    // 1M for VTLB
    82.9 +#define         VCPU_VTLB_SIZE           (1UL<<VCPU_VTLB_SHIFT)
   82.10 +#define         VCPU_VTLB_ORDER          (VCPU_VTLB_SHIFT - PAGE_SHIFT)
   82.11 +#define         VCPU_VHPT_SHIFT          (24)    // 16M for VHPT
   82.12 +#define         VCPU_VHPT_SIZE           (1UL<<VCPU_VHPT_SHIFT)
   82.13 +#define         VCPU_VHPT_ORDER          (VCPU_VHPT_SHIFT - PAGE_SHIFT)
   82.14 +
   82.15 +#ifndef __ASSEMBLY__
   82.16 +
   82.17  #include <xen/config.h>
   82.18  #include <xen/types.h>
   82.19  #include <public/xen.h>
   82.20  #include <asm/tlb.h>
   82.21  #include <asm/regionreg.h>
   82.22 -
   82.23 +#include <asm/vmx_mm_def.h>
   82.24  //#define         THASH_TLB_TR            0
   82.25  //#define         THASH_TLB_TC            1
   82.26  
   82.27 @@ -39,7 +49,15 @@
   82.28  
   82.29  /*
   82.30   * Next bit definition must be same with THASH_TLB_XX
   82.31 +#define         PTA_BASE_SHIFT          (15)
   82.32   */
   82.33 +
   82.34 +
   82.35 +
   82.36 +
   82.37 +#define HIGH_32BITS(x)  bits(x,32,63)
   82.38 +#define LOW_32BITS(x)   bits(x,0,31)
   82.39 +
   82.40  typedef union search_section {
   82.41          struct {
   82.42                  u32 tr : 1;
   82.43 @@ -49,15 +67,6 @@ typedef union search_section {
   82.44          u32     v;
   82.45  } search_section_t;
   82.46  
   82.47 -#define         MAX_CCN_DEPTH           4       // collision chain depth
   82.48 -#define         VCPU_TLB_SHIFT          (22)
   82.49 -#define         VCPU_TLB_SIZE           (1UL<<VCPU_TLB_SHIFT)
   82.50 -#define         VCPU_TLB_ORDER          VCPU_TLB_SHIFT - PAGE_SHIFT
   82.51 -#define         PTA_BASE_SHIFT          (15)
   82.52 -
   82.53 -#ifndef __ASSEMBLY__
   82.54 -#define HIGH_32BITS(x)  bits(x,32,63)
   82.55 -#define LOW_32BITS(x)   bits(x,0,31)
   82.56  
   82.57  typedef enum {
   82.58          ISIDE_TLB=0,
   82.59 @@ -77,18 +86,21 @@ typedef struct thash_data {
   82.60              u64 ppn  : 38; // 12-49
   82.61              u64 rv2  :  2; // 50-51
   82.62              u64 ed   :  1; // 52
   82.63 -            u64 ig1  :  11; //53-63
   82.64 +            u64 ig1  :  3; // 53-55
   82.65 +            u64 len  :  4; // 56-59
   82.66 +            u64 ig2  :  3; // 60-63
   82.67          };
   82.68          struct {
   82.69              u64 __rv1 : 53;	// 0-52
   82.70 +            u64 contiguous : 1; //53
   82.71 +            u64 tc : 1;     // 54 TR or TC
   82.72 +            CACHE_LINE_TYPE cl : 1; // 55 I side or D side cache line
   82.73              // next extension to ig1, only for TLB instance
   82.74 -            u64 tc : 1;     // 53 TR or TC
   82.75 -            u64 locked  : 1;	// 54 entry locked or not
   82.76 -            CACHE_LINE_TYPE cl : 1; // I side or D side cache line
   82.77 -            u64 nomap : 1;   // entry can't be inserted into machine TLB.
   82.78 -            u64 __ig1  :  5; // 56-61
   82.79 -            u64 checked : 1; // for VTLB/VHPT sanity check
   82.80 -            u64 invalid : 1; // invalid entry
   82.81 +            u64 __ig1  :  4; // 56-59
   82.82 +            u64 locked  : 1;	// 60 entry locked or not
   82.83 +            u64 nomap : 1;   // 61 entry can't be inserted into machine TLB.
   82.84 +            u64 checked : 1; // 62 for VTLB/VHPT sanity check
   82.85 +            u64 invalid : 1; // 63 invalid entry
   82.86          };
   82.87          u64 page_flags;
   82.88      };                  // same for VHPT and TLB
   82.89 @@ -128,10 +140,37 @@ typedef struct thash_data {
   82.90      };
   82.91  } thash_data_t;
   82.92  
   82.93 +#define INVALIDATE_VHPT_HEADER(hdata)   \
   82.94 +{	((hdata)->page_flags)=0;	\
   82.95 +	((hdata)->ti)=1;	\
   82.96 +	((hdata)->next)=0; }
   82.97 +
   82.98 +#define INVALIDATE_TLB_HEADER(hdata)   \
   82.99 +{	((hdata)->page_flags)=0;	\
  82.100 +	((hdata)->ti)=1;		\
  82.101 +	((hdata)->next)=0; }
  82.102 +
  82.103  #define INVALID_VHPT(hdata)     ((hdata)->ti)
  82.104 -#define INVALID_TLB(hdata)      ((hdata)->invalid)
  82.105 -#define INVALID_ENTRY(hcb, hdata)                       \
  82.106 -        ((hcb)->ht==THASH_TLB ? INVALID_TLB(hdata) : INVALID_VHPT(hdata))
  82.107 +#define INVALID_TLB(hdata)      ((hdata)->ti)
  82.108 +#define INVALID_TR(hdata)      ((hdata)->invalid)
  82.109 +#define INVALID_ENTRY(hcb, hdata)       INVALID_VHPT(hdata)
  82.110 +
  82.111 +/*        ((hcb)->ht==THASH_TLB ? INVALID_TLB(hdata) : INVALID_VHPT(hdata)) */
  82.112 +
  82.113 +
  82.114 +/*
  82.115 + * Architected ppn is in 4KB units, while a Xen
  82.116 + * page may be larger (1<<PAGE_SHIFT).
  82.117 + */
  82.118 +static inline u64 arch_to_xen_ppn(u64 appn)
  82.119 +{
  82.120 +    return (appn >>(PAGE_SHIFT-ARCH_PAGE_SHIFT));
  82.121 +}
  82.122 +
  82.123 +static inline u64 xen_to_arch_ppn(u64 xppn)
  82.124 +{
  82.125 +    return (xppn <<(PAGE_SHIFT- ARCH_PAGE_SHIFT));
  82.126 +}
  82.127  
  82.128  typedef enum {
  82.129          THASH_TLB=0,
  82.130 @@ -166,11 +205,11 @@ typedef struct tlb_special {
  82.131          struct thash_cb  *vhpt;
  82.132  } tlb_special_t;
  82.133  
  82.134 -typedef struct vhpt_cb {
  82.135 +//typedef struct vhpt_cb {
  82.136          //u64     pta;    // pta value.
  82.137 -        GET_MFN_FN      *get_mfn;
  82.138 -        TTAG_FN         *tag_func;
  82.139 -} vhpt_special;
  82.140 +//        GET_MFN_FN      *get_mfn;
  82.141 +//        TTAG_FN         *tag_func;
  82.142 +//} vhpt_special;
  82.143  
  82.144  typedef struct thash_internal {
  82.145          thash_data_t *hash_base;
  82.146 @@ -198,36 +237,38 @@ typedef struct thash_cb {
  82.147          u64     hash_sz;        // size of above data.
  82.148          void    *cch_buf;       // base address of collision chain.
  82.149          u64     cch_sz;         // size of above data.
  82.150 -        THASH_FN        *hash_func;
  82.151 -        GET_RR_FN       *get_rr_fn;
  82.152 -        RECYCLE_FN      *recycle_notifier;
  82.153 +//        THASH_FN        *hash_func;
  82.154 +//        GET_RR_FN       *get_rr_fn;
  82.155 +//        RECYCLE_FN      *recycle_notifier;
  82.156          thash_cch_mem_t *cch_freelist;
  82.157          struct vcpu *vcpu;
  82.158          PTA     pta;
  82.159          /* VTLB/VHPT common information */
  82.160 -        FIND_OVERLAP_FN *find_overlap;
  82.161 -        FIND_NEXT_OVL_FN *next_overlap;
  82.162 -        REM_THASH_FN    *rem_hash; // remove hash entry.
  82.163 -        INS_THASH_FN    *ins_hash; // insert hash entry.
  82.164 -        REM_NOTIFIER_FN *remove_notifier;
  82.165 +//        FIND_OVERLAP_FN *find_overlap;
  82.166 +//        FIND_NEXT_OVL_FN *next_overlap;
  82.167 +//        REM_THASH_FN    *rem_hash; // remove hash entry.
  82.168 +//        INS_THASH_FN    *ins_hash; // insert hash entry.
  82.169 +//        REM_NOTIFIER_FN *remove_notifier;
  82.170          /* private information */
  82.171 -        thash_internal_t  priv;
  82.172 +//        thash_internal_t  priv;
  82.173          union {
  82.174                  tlb_special_t  *ts;
  82.175 -                vhpt_special   *vs;
  82.176 +//                vhpt_special   *vs;
  82.177          };
  82.178          // Internal position information, buffer and storage etc. TBD
  82.179  } thash_cb_t;
  82.180  
  82.181  #define ITR(hcb,id)             ((hcb)->ts->itr[id])
  82.182  #define DTR(hcb,id)             ((hcb)->ts->dtr[id])
  82.183 -#define INVALIDATE_HASH(hcb,hash)           {   \
  82.184 -           if ((hcb)->ht==THASH_TLB)            \
  82.185 -             INVALID_TLB(hash) = 1;             \
  82.186 -           else                                 \
  82.187 -             INVALID_VHPT(hash) = 1;            \
  82.188 -           hash->next = NULL; }
  82.189 -
  82.190 +#define INVALIDATE_HASH_HEADER(hcb,hash)    INVALIDATE_TLB_HEADER(hash)
  82.191 +/*              \
  82.192 +{           if ((hcb)->ht==THASH_TLB){            \
  82.193 +            INVALIDATE_TLB_HEADER(hash);             \
  82.194 +           }else{                                 \
  82.195 +             INVALIDATE_VHPT_HEADER(hash);            \
  82.196 +            }                                       \
  82.197 +}
  82.198 + */
  82.199  #define PURGABLE_ENTRY(hcb,en)  1
  82.200  //		((hcb)->ht == THASH_VHPT || ( (en)->tc && !(en->locked)) )
  82.201  
  82.202 @@ -242,18 +283,20 @@ extern void thash_init(thash_cb_t *hcb, 
  82.203   *    NOTES:
  82.204   *      1: TLB entry may be TR, TC or Foreign Map. For TR entry,
  82.205   *         itr[]/dtr[] need to be updated too.
  82.206 - *      2: Inserting to collision chain may trigger recycling if 
  82.207 + *      2: Inserting to collision chain may trigger recycling if
  82.208   *         the buffer for collision chain is empty.
  82.209   *      3: The new entry is inserted at the hash table.
  82.210   *         (I.e. head of the collision chain)
  82.211   *      4: Return the entry in hash table or collision chain.
  82.212   *
  82.213   */
  82.214 -extern void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
  82.215 +extern void thash_vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
  82.216 +//extern void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va);
  82.217  extern void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx);
  82.218 -
  82.219 +extern thash_data_t *vtr_find_overlap(thash_cb_t *hcb, thash_data_t *data, char cl);
  82.220 +extern u64 get_mfn(struct domain *d, u64 gpfn);
  82.221  /*
  82.222 - * Force to delete a found entry no matter TR or foreign map for TLB. 
  82.223 + * Force to delete a found entry no matter TR or foreign map for TLB.
  82.224   *    NOTES:
  82.225   *      1: TLB entry may be TR, TC or Foreign Map. For TR entry,
  82.226   *         itr[]/dtr[] need to be updated too.
  82.227 @@ -307,7 +350,7 @@ extern void thash_purge_entries_ex(thash
  82.228                          u64 rid, u64 va, u64 sz, 
  82.229                          search_section_t p_sect, 
  82.230                          CACHE_LINE_TYPE cl);
  82.231 -extern void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in);
  82.232 +extern void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in, u64 va);
  82.233  
  82.234  /*
  82.235   * Purge all TCs or VHPT entries including those in Hash table.
  82.236 @@ -335,8 +378,11 @@ extern void purge_machine_tc_by_domid(do
  82.237  extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb);
  82.238  extern ia64_rr vmmu_get_rr(struct vcpu *vcpu, u64 va);
  82.239  extern thash_cb_t *init_domain_tlb(struct vcpu *d);
  82.240 +extern thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag);
  82.241 +extern thash_data_t * vhpt_lookup(u64 va);
  82.242 +extern void machine_tlb_purge(u64 va, u64 ps);
  82.243  
  82.244 -#define   VTLB_DEBUG
  82.245 +//#define   VTLB_DEBUG
  82.246  #ifdef   VTLB_DEBUG
  82.247  extern void check_vtlb_sanity(thash_cb_t *vtlb);
  82.248  extern void dump_vtlb(thash_cb_t *vtlb);
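
arch_to_xen_ppn()/xen_to_arch_ppn() added above convert between the architected 4KB ppn unit and Xen frame numbers. A stand-alone worked example assuming a 16KB Xen page (PAGE_SHIFT=14); note the round trip only preserves ppns aligned to the Xen frame size:

    /* Shift distance is PAGE_SHIFT-ARCH_PAGE_SHIFT = 2: one 16KB Xen
     * frame spans four architected 4KB ppns. */
    #include <stdio.h>
    #include <stdint.h>

    #define ARCH_PAGE_SHIFT 12
    #define PAGE_SHIFT      14      /* example Xen configuration */

    int main(void)
    {
            uint64_t appn = 0x1000;
            uint64_t xppn = appn >> (PAGE_SHIFT - ARCH_PAGE_SHIFT);
            uint64_t back = xppn << (PAGE_SHIFT - ARCH_PAGE_SHIFT);

            printf("appn=%#llx xppn=%#llx back=%#llx\n",
                   (unsigned long long)appn, (unsigned long long)xppn,
                   (unsigned long long)back);
            return 0;
    }
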
    83.1 --- a/xen/include/asm-ia64/vmx.h	Thu Mar 02 10:59:34 2006 +0100
    83.2 +++ b/xen/include/asm-ia64/vmx.h	Thu Mar 02 11:00:49 2006 +0100
    83.3 @@ -34,7 +34,24 @@ extern void vmx_load_state(struct vcpu *
    83.4  extern void vmx_setup_platform(struct domain *d, struct vcpu_guest_context *c);
    83.5  extern void vmx_wait_io(void);
    83.6  extern void vmx_io_assist(struct vcpu *v);
    83.7 -
    83.8 +extern void vmx_load_all_rr(struct vcpu *vcpu);
    83.9 +extern void panic_domain(struct pt_regs *regs, const char *fmt, ...);
   83.10 +extern int ia64_hypercall (struct pt_regs *regs);
   83.11 +extern void vmx_save_state(struct vcpu *v);
   83.12 +extern void vmx_load_state(struct vcpu *v);
   83.13 +extern void show_registers(struct pt_regs *regs);
   83.14 +extern int vmx_alloc_contig_pages(struct domain *d);
   83.15 +extern unsigned long __gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
   83.16 +extern void sync_split_caches(void);
   83.17 +extern void vmx_virq_line_assist(struct vcpu *v);
   83.18 +extern void set_privileged_operation_isr (struct vcpu *vcpu,int inst);
   83.19 +extern void privilege_op (struct vcpu *vcpu);
   83.20 +extern void set_ifa_itir_iha (struct vcpu *vcpu, u64 vadr,
   83.21 +          int set_ifa, int set_itir, int set_iha);
   83.22 +extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
   83.23 +extern void vmx_intr_assist(struct vcpu *v);
   83.24 +extern void set_illegal_op_isr (struct vcpu *vcpu);
   83.25 +extern  void illegal_op (struct vcpu *vcpu);
   83.26  static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
   83.27  {
   83.28      return &((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];
    84.1 --- a/xen/include/asm-ia64/vmx_mm_def.h	Thu Mar 02 10:59:34 2006 +0100
    84.2 +++ b/xen/include/asm-ia64/vmx_mm_def.h	Thu Mar 02 11:00:49 2006 +0100
    84.3 @@ -27,8 +27,6 @@
    84.4  //#define VHPT_SIZE   (1 << VHPT_SIZE_PS)
    84.5  #define ARCH_PAGE_SHIFT   12
    84.6  #define ARCH_PAGE_SIZE    PSIZE(ARCH_PAGE_SHIFT)
    84.7 -#define INVALID_MFN	(-1)
    84.8 -
    84.9  #define MAX_PHYS_ADDR_BITS  50
   84.10  #define PMASK(size)         (~((size) - 1))
   84.11  #define PSIZE(size)         (1UL<<(size))
   84.12 @@ -36,7 +34,7 @@
   84.13  #define POFFSET(vaddr, ps)  ((vaddr) & (PSIZE(ps) - 1))
   84.14  #define PPN_2_PA(ppn)       ((ppn)<<12)
   84.15  #define CLEARLSB(ppn, nbits)    ((((uint64_t)ppn) >> (nbits)) << (nbits))
   84.16 -#define PAGEALIGN(va, ps)	(va & ~(PSIZE(ps)-1))
   84.17 +#define PAGEALIGN(va, ps)	CLEARLSB(va, ps)
   84.18  
   84.19  #define TLB_AR_R        0
   84.20  #define TLB_AR_RX       1
   84.21 @@ -87,9 +85,6 @@
   84.22  #define STLB_TC         0
   84.23  #define STLB_TR         1
   84.24  
   84.25 -#define VMM_RR_MASK     0xfffff
   84.26 -#define VMM_RR_SHIFT        20
   84.27 -
   84.28  #define IA64_RR_SHIFT       61
   84.29  
   84.30  #define PHYS_PAGE_SHIFT     PPN_SHIFT
   84.31 @@ -145,6 +140,7 @@ bits_v(uint64_t v, uint32_t bs, uint32_t
   84.32      uint64_t    result;
   84.33      __asm __volatile("shl %0=%1, %2;; shr.u %0=%0, %3;;"
   84.34          : "=r" (result): "r"(v), "r"(63-be), "r" (bs+63-be) );
   84.35 +    return result;
   84.36  }
   84.37  
   84.38  #define bits(val, bs, be)                                         \
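
The hunk above fixes bits_v() to actually return its result, which the inline asm computed but then discarded. For reference, a portable C equivalent of the extraction the shl/shr.u pair performs:

    /* Extracts bits bs..be inclusive: the left shift by 63-be drops
     * everything above be, the right shift then drops everything
     * below bs and moves the field down to bit 0. */
    #include <stdint.h>

    static inline uint64_t bits_c(uint64_t v, uint32_t bs, uint32_t be)
    {
            return (v << (63 - be)) >> (bs + 63 - be);
    }
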
    85.1 --- a/xen/include/asm-ia64/vmx_pal.h	Thu Mar 02 10:59:34 2006 +0100
    85.2 +++ b/xen/include/asm-ia64/vmx_pal.h	Thu Mar 02 11:00:49 2006 +0100
    85.3 @@ -114,7 +114,7 @@ ia64_pal_vp_save (u64 *vpd, u64 pal_proc
    85.4  	PAL_CALL_STK(iprv, PAL_VP_SAVE, (u64)vpd, pal_proc_vector, 0);
    85.5  	return iprv.status;
    85.6  }
    85.7 -
    85.8 +extern void pal_emul(struct vcpu *vcpu);
    85.9  #define PAL_PROC_VM_BIT		(1UL << 40)
   85.10  #define PAL_PROC_VMSW_BIT	(1UL << 54)
   85.11  #endif /* _ASM_IA64_VT_PAL_H */
    86.1 --- a/xen/include/asm-ia64/vmx_phy_mode.h	Thu Mar 02 10:59:34 2006 +0100
    86.2 +++ b/xen/include/asm-ia64/vmx_phy_mode.h	Thu Mar 02 11:00:49 2006 +0100
    86.3 @@ -96,6 +96,8 @@ extern void prepare_if_physical_mode(VCP
    86.4  extern void recover_if_physical_mode(VCPU *vcpu);
    86.5  extern void vmx_init_all_rr(VCPU *vcpu);
    86.6  extern void vmx_load_all_rr(VCPU *vcpu);
    86.7 +extern void physical_itlb_miss(VCPU *vcpu, u64 vadr);
    86.8 +extern void physical_dtlb_miss(VCPU *vcpu, u64 vadr);
    86.9  /*
   86.10   * No sanity check here, since all psr changes have been
   86.11   * checked in switch_mm_mode().
    87.1 --- a/xen/include/asm-ia64/vmx_platform.h	Thu Mar 02 10:59:34 2006 +0100
    87.2 +++ b/xen/include/asm-ia64/vmx_platform.h	Thu Mar 02 11:00:49 2006 +0100
    87.3 @@ -54,7 +54,7 @@ extern uint64_t dummy_tmr[];
    87.4  #define VCPU(_v,_x)	_v->arch.privregs->_x
    87.5  #define VLAPIC_ID(l) (uint16_t)(VCPU((l)->vcpu, lid) >> 16)
    87.6  #define VLAPIC_IRR(l) VCPU((l)->vcpu, irr[0])
    87.7 -
    87.8 +struct vlapic* apic_round_robin(struct domain *d, uint8_t dest_mode, uint8_t vector, uint32_t bitmap);
    87.9  extern int vmx_vcpu_pend_interrupt(struct vcpu *vcpu, uint8_t vector);
   87.10  static inline int vlapic_set_irq(struct vlapic *t, uint8_t vec, uint8_t trig)
   87.11  {
    88.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Thu Mar 02 10:59:34 2006 +0100
    88.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Thu Mar 02 11:00:49 2006 +0100
    88.3 @@ -51,8 +51,7 @@
    88.4  
    88.5  #define VMM_RR_SHIFT    20
    88.6  #define VMM_RR_MASK     ((1UL<<VMM_RR_SHIFT)-1)
    88.7 -//#define VRID_2_MRID(vcpu,rid)  ((rid) & VMM_RR_MASK) | \
    88.8 -                ((vcpu->domain->domain_id) << VMM_RR_SHIFT)
    88.9 +
   88.10  extern u64 indirect_reg_igfld_MASK ( int type, int index, u64 value);
   88.11  extern u64 cr_igfld_mask (int index, u64 value);
   88.12  extern int check_indirect_reg_rsv_fields ( int type, int index, u64 value );
   88.13 @@ -118,7 +117,16 @@ extern void memread_p(VCPU *vcpu, u64 *s
   88.14  extern void memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
   88.15  extern void memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
   88.16  extern void memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
   88.17 +extern void vcpu_load_kernel_regs(VCPU *vcpu);
   88.18 +extern IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu);
   88.19 +extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *);
   88.20  
   88.21 +extern void dtlb_fault (VCPU *vcpu, u64 vadr);
   88.22 +extern void nested_dtlb (VCPU *vcpu);
   88.23 +extern void alt_dtlb (VCPU *vcpu, u64 vadr);
   88.24 +extern void dvhpt_fault (VCPU *vcpu, u64 vadr);
   88.25 +extern void dnat_page_consumption (VCPU *vcpu, uint64_t vadr);
   88.26 +extern void page_not_present(VCPU *vcpu, u64 vadr);
   88.27  
   88.28  /**************************************************************************
   88.29   VCPU control register access routines
   88.30 @@ -461,10 +469,10 @@ static inline unsigned long
   88.31  vmx_vrrtomrr(VCPU *v, unsigned long val)
   88.32  {
   88.33      ia64_rr rr;
   88.34 -    u64	  rid;
   88.35  
   88.36      rr.rrval=val;
   88.37      rr.rid = rr.rid + v->arch.starting_rid;
   88.38 +    rr.ps = PAGE_SHIFT;
   88.39      rr.ve = 1;
   88.40      return  vmMangleRID(rr.rrval);
   88.41  /* Disable this rid allocation algorithm for now */
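
vmx_vrrtomrr() above now pins rr.ps to Xen's PAGE_SHIFT in addition to offsetting the guest rid into the domain's machine rid window. A stand-alone sketch of the field arithmetic using the architected region-register layout (ve in bit 0, ps in bits 2-7, rid in bits 8-31); the real code additionally passes the result through vmMangleRID(), which is not modeled here:

    /* Guest rr image in, machine rr image out (before mangling). */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 14           /* example Xen configuration */

    int main(void)
    {
            uint64_t val = (7ULL << 8) | (16ULL << 2); /* guest rid=7, ps=16 */
            uint64_t starting_rid = 0x100;             /* hypothetical */
            uint64_t rid = ((val >> 8) & 0xffffff) + starting_rid;
            uint64_t mrr = (rid << 8) | ((uint64_t)PAGE_SHIFT << 2) | 1;

            printf("machine rr image = %#llx\n", (unsigned long long)mrr);
            return 0;
    }
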
    89.1 --- a/xen/include/asm-ia64/xenkregs.h	Thu Mar 02 10:59:34 2006 +0100
    89.2 +++ b/xen/include/asm-ia64/xenkregs.h	Thu Mar 02 11:00:49 2006 +0100
    89.3 @@ -8,7 +8,8 @@
    89.4  #define	IA64_TR_VHPT		4	/* dtr4: vhpt */
    89.5  #define IA64_TR_ARCH_INFO	5
    89.6  #define IA64_TR_PERVP_VHPT	6
    89.7 -
    89.8 +#define IA64_DTR_GUEST_KERNEL   7
    89.9 +#define IA64_ITR_GUEST_KERNEL   2
   89.10  /* Processor status register bits: */
   89.11  #define IA64_PSR_VM_BIT		46
   89.12  #define IA64_PSR_VM	(__IA64_UL(1) << IA64_PSR_VM_BIT)
    90.1 --- a/xen/include/asm-ia64/xensystem.h	Thu Mar 02 10:59:34 2006 +0100
    90.2 +++ b/xen/include/asm-ia64/xensystem.h	Thu Mar 02 11:00:49 2006 +0100
    90.3 @@ -78,7 +78,6 @@ extern struct task_struct *vmx_ia64_swit
    90.4  #define __cmpxchg_user(ptr, new, old, _size)				\
    90.5  ({									\
    90.6  	register long __gu_r8 asm ("r8");				\
    90.7 -	register long __gu_r9 asm ("r9");				\
    90.8  	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
    90.9  	asm volatile ("mov %1=r0;;\n"					\
   90.10  		"[1:]\tcmpxchg"_size".acq %0=[%2],%3,ar.ccv\n"		\
    91.1 --- a/xen/include/public/arch-ia64.h	Thu Mar 02 10:59:34 2006 +0100
    91.2 +++ b/xen/include/public/arch-ia64.h	Thu Mar 02 11:00:49 2006 +0100
    91.3 @@ -249,7 +249,7 @@ typedef struct {
    91.4              int interrupt_delivery_enabled; // virtual psr.i
    91.5              int pending_interruption;
    91.6              int incomplete_regframe; // see SDM vol2 6.8
    91.7 -            unsigned long delivery_mask[4];
    91.8 +            unsigned long reserved5_1[4];
    91.9              int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual
   91.10              int banknum; // 0 or 1, which virtual register bank is active
   91.11              unsigned long rrs[8]; // region registers