ia64/xen-unstable

view xen/include/asm-ia64/xensystem.h @ 5797:ca44d2dbb273

Intel's pre-bk->hg transition patches
Signed-off-by Eddie Dong <Eddie.dong@intel.com>
Signed-off-by Anthony Xu <Anthony.xu@intel.com>
Signed-off-by Kevin Tian <Kevin.tian@intel.com>
author djm@kirby.fc.hp.com
date Sat Jul 09 07:58:56 2005 -0700 (2005-07-09)
parents da2752f150a0
children a83ac0806d6b
line source
1 #ifndef _ASM_IA64_XENSYSTEM_H
2 #define _ASM_IA64_XENSYSTEM_H
3 /*
4 * xen specific context definition
5 *
6 * Copyright (C) 2005 Hewlett-Packard Co.
7 * Dan Magenheimer (dan.magenheimer@hp.com)
8 *
9 * Copyright (C) 2005 Intel Co.
10 * Kun Tian (Kevin Tian) <kevin.tian@intel.com>
11 *
12 */
13 #include <asm/config.h>
14 #include <linux/kernel.h>
16 /* Define HV space hierarchy */
17 #define XEN_VIRT_SPACE_LOW 0xe800000000000000
18 #define XEN_VIRT_SPACE_HIGH 0xf800000000000000
19 /* This is address to mapping rr7 switch stub, in region 5 */
20 #ifdef CONFIG_VTI
21 #define XEN_RR7_SWITCH_STUB 0xb700000000000000
22 #endif // CONFIG_VTI
24 #define KERNEL_START 0xf000000004000000
25 #define PERCPU_ADDR 0xf100000000000000-PERCPU_PAGE_SIZE
26 #define SHAREDINFO_ADDR 0xf100000000000000
27 #define VHPT_ADDR 0xf200000000000000
29 #ifndef __ASSEMBLY__
31 #define IA64_HAS_EXTRA_STATE(t) 0
33 #ifdef CONFIG_VTI
34 extern struct task_struct *vmx_ia64_switch_to (void *next_task);
35 #define __switch_to(prev,next,last) do { \
36 ia64_save_fpu(prev->arch._thread.fph); \
37 ia64_load_fpu(next->arch._thread.fph); \
38 if (VMX_DOMAIN(prev)) \
39 vmx_save_state(prev); \
40 else { \
41 if (IA64_HAS_EXTRA_STATE(prev)) \
42 ia64_save_extra(prev); \
43 } \
44 if (VMX_DOMAIN(next)) \
45 vmx_load_state(next); \
46 else { \
47 if (IA64_HAS_EXTRA_STATE(next)) \
48 ia64_save_extra(next); \
49 } \
50 ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
51 (last) = vmx_ia64_switch_to((next)); \
52 } while (0)
53 #else // CONFIG_VTI
54 #define __switch_to(prev,next,last) do { \
55 ia64_save_fpu(prev->arch._thread.fph); \
56 ia64_load_fpu(next->arch._thread.fph); \
57 if (IA64_HAS_EXTRA_STATE(prev)) \
58 ia64_save_extra(prev); \
59 if (IA64_HAS_EXTRA_STATE(next)) \
60 ia64_load_extra(next); \
61 ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
62 (last) = ia64_switch_to((next)); \
63 } while (0)
64 #endif // CONFIG_VTI
/*
 * __cmpxchg_user(ptr, new, old, _size): compare-and-exchange against a
 * possibly-faulting (user) pointer.  ar.ccv is primed with 'old', then a
 * single cmpxchg<_size>.acq is issued.  The .xdata4 __ex_table entry
 * covers the access: a fault resumes at the trailing [1:] tag.  r8
 * (__gu_r8) is zeroed up front and is presumably set non-zero by the
 * exception fixup on fault — TODO confirm against the __ex_table
 * handler.  r9 is pinned but unused here; assumed reserved for the
 * fixup path — verify.  The statement expression evaluates to r8.
 */
#define __cmpxchg_user(ptr, new, old, _size)				\
({									\
	register long __gu_r8 asm ("r8");				\
	register long __gu_r9 asm ("r9");				\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("mov %1=r0;;\n"					\
		"[1:]\tcmpxchg"_size".acq %0=[%2],%3,ar.ccv\n"		\
		"\t.xdata4 \"__ex_table\", 1b-., 1f-.\n"		\
		"[1:]"							\
		: "=r"(old), "=r"(__gu_r8) :				\
		"r"(ptr), "r"(new) : "memory");				\
	__gu_r8;							\
})
81 // NOTE: Xen defines args as pointer,old,new whereas ia64 uses pointer,new,old
82 // so reverse them here
83 #define cmpxchg_user(_p,_o,_n) \
84 ({ \
85 register long _rc; \
86 ia64_mf(); \
87 switch ( sizeof(*(_p)) ) { \
88 case 1: _rc = __cmpxchg_user(_p,_n,_o,"1"); break; \
89 case 2: _rc = __cmpxchg_user(_p,_n,_o,"2"); break; \
90 case 4: _rc = __cmpxchg_user(_p,_n,_o,"4"); break; \
91 case 8: _rc = __cmpxchg_user(_p,_n,_o,"8"); break; \
92 } \
93 ia64_mf(); \
94 _rc; \
95 })
97 #endif // __ASSEMBLY__
98 #endif // _ASM_IA64_XENSYSTEM_H