ia64/xen-unstable

changeset 16579:35b2c54f59d5

[IA64] Fix LOAD_PHYSICAL for INIT handler

Xen's virtual address space moves to 0xf400000004000000, which breaks
LOAD_PHYSICAL: the old Xen-only macro converted the virtual address to
physical at run time and no longer yields the real load address.  Fix
LOAD_PHYSICAL by switching to the ia64 boot-time patching mechanism
(ia64_patch_vtop); this also fixes the INIT handler.
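
For illustration, the standalone C sketch below (not part of the changeset)
shows why the run-time conversion stops working.  The removed Xen-only
LOAD_PHYSICAL variant in asmmacro.h cleared the top four region bits with a
shl/shr.u pair; applied to the new virtual base, that arithmetic produces a
value which is not the physical load address, so physical-mode code such as
the INIT handler would follow a bogus pointer.  The printf scaffolding is
only for the example and is not Xen code.

/* Sketch only: what the removed Xen-specific LOAD_PHYSICAL computed. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t xen_virt = 0xf400000004000000ULL;	/* new virtual base */

	/* shl reg,4 ;; shr.u reg,4 == clear the top four region bits */
	uint64_t old_result = (xen_virt << 4) >> 4;

	printf("old LOAD_PHYSICAL result: 0x%llx\n",
	       (unsigned long long)old_result);
	return 0;
}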

Signed-off-by: Akio Takebe <takebe_akio@jp.fujitsu.com>
author Alex Williamson <alex.williamson@hp.com>
date Tue Dec 11 10:14:11 2007 -0700 (2007-12-11)
parents 4054cd60895b
children 124f7a7f5f84
files xen/arch/ia64/linux-xen/setup.c xen/arch/ia64/xen/xenpatch.c xen/include/asm-ia64/linux-xen/asm/asmmacro.h
line diff
     1.1 --- a/xen/arch/ia64/linux-xen/setup.c	Mon Dec 10 13:49:22 2007 +0000
     1.2 +++ b/xen/arch/ia64/linux-xen/setup.c	Tue Dec 11 10:14:11 2007 -0700
     1.3 @@ -500,9 +500,7 @@ setup_arch (char **cmdline_p)
     1.4  {
     1.5  	unw_init();
     1.6  
     1.7 -#ifndef XEN
     1.8  	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
     1.9 -#endif
    1.10  
    1.11  	*cmdline_p = __va(ia64_boot_param->command_line);
    1.12  #ifndef XEN
     2.1 --- a/xen/arch/ia64/xen/xenpatch.c	Mon Dec 10 13:49:22 2007 +0000
     2.2 +++ b/xen/arch/ia64/xen/xenpatch.c	Tue Dec 11 10:14:11 2007 -0700
     2.3 @@ -106,6 +106,30 @@ static void __init xen_patch_frametable_
     2.4  #endif
     2.5  }
     2.6  
     2.7 +/*
      2.8 + * We sometimes need to load the physical address of a kernel
      2.9 + * object.  Often we can convert the virtual address to physical
     2.10 + * at execution time, but sometimes (either for performance reasons
     2.11 + * or during error recovery) we cannot do this.  Patch the marked
    2.12 + * bundles to load the physical address.
    2.13 + */
    2.14 +void __init
    2.15 +ia64_patch_vtop (unsigned long start, unsigned long end)
    2.16 +{
    2.17 +	s32 *offp = (s32 *)start;
    2.18 +	u64 ip;
    2.19 +
    2.20 +	while (offp < (s32 *)end) {
    2.21 +		ip = (u64)offp + *offp;
    2.22 +
    2.23 +		/* replace virtual address with corresponding physical address */
    2.24 +		ia64_patch_imm64(ip, ia64_tpa(get_imm64(ip)));
    2.25 +		ia64_fc((void *)ip);
    2.26 +		++offp;
    2.27 +	}
    2.28 +	ia64_sync_i();
    2.29 +	ia64_srlz_i();
    2.30 +}
    2.31  
    2.32  void __init xen_patch_kernel(void)
    2.33  {
     3.1 --- a/xen/include/asm-ia64/linux-xen/asm/asmmacro.h	Mon Dec 10 13:49:22 2007 +0000
     3.2 +++ b/xen/include/asm-ia64/linux-xen/asm/asmmacro.h	Tue Dec 11 10:14:11 2007 -0700
     3.3 @@ -59,17 +59,9 @@ name:
     3.4  	.section ".data.patch.vtop", "a"	// declare section & section attributes
     3.5  	.previous
     3.6  
     3.7 -#ifdef XEN
     3.8 -#define	LOAD_PHYSICAL(pr, reg, obj)		\
     3.9 -[1:](pr)movl reg = obj;;			\
    3.10 -	shl reg = reg,4;;			\
    3.11 -	shr.u reg = reg,4;;			\
    3.12 -	.xdata4 ".data.patch.vtop", 1b-.
    3.13 -#else
    3.14  #define	LOAD_PHYSICAL(pr, reg, obj)		\
    3.15  [1:](pr)movl reg = obj;				\
    3.16  	.xdata4 ".data.patch.vtop", 1b-.
    3.17 -#endif
    3.18  
    3.19  /*
    3.20   * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
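
As a reading aid (not part of the changeset), the host-side C sketch below
models the patch walk that the new ia64_patch_vtop() performs on the entries
emitted by LOAD_PHYSICAL's ".xdata4" directive: each entry is a 32-bit
self-relative offset back to the movl bundle, the loop recovers the bundle
address with ip = (u64)offp + *offp, and the 64-bit immediate is replaced
with its physical counterpart.  The names mock_tpa, XEN_PHYS_BASE, slot and
patchlist are invented for this illustration; in the real code the
translation is done by ia64_tpa() and the bundle is rewritten with
ia64_patch_imm64().

/*
 * Hypothetical host-side model of the walk in ia64_patch_vtop() above.
 * Plain 64-bit slots stand in for ia64 instruction bundles.
 */
#include <stdint.h>
#include <stdio.h>

#define XEN_VIRT_BASE 0xf400000004000000ULL	/* new virtual base      */
#define XEN_PHYS_BASE 0x0000000004000000ULL	/* assumed physical base */

static uint64_t mock_tpa(uint64_t va)		/* stand-in for ia64_tpa() */
{
	return va - XEN_VIRT_BASE + XEN_PHYS_BASE;
}

/* Two "bundles" whose immediates initially hold virtual addresses. */
static uint64_t slot[2] = { XEN_VIRT_BASE + 0x1000, XEN_VIRT_BASE + 0x2000 };

/* The ".data.patch.vtop" list: self-relative offsets, like "1b-." above. */
static int32_t patchlist[2];

int main(void)
{
	for (int i = 0; i < 2; i++)
		patchlist[i] = (int32_t)((uintptr_t)&slot[i] -
					 (uintptr_t)&patchlist[i]);

	/* Mirrors the loop in ia64_patch_vtop(): ip = (u64)offp + *offp. */
	for (int32_t *offp = patchlist; offp < patchlist + 2; ++offp) {
		uint64_t *ip = (uint64_t *)((uintptr_t)offp + (intptr_t)*offp);

		*ip = mock_tpa(*ip);		/* virtual -> physical */
	}

	printf("patched immediates: 0x%llx 0x%llx\n",
	       (unsigned long long)slot[0], (unsigned long long)slot[1]);
	return 0;
}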