ia64/xen-unstable
xen/include/asm-ia64/xensystem.h @ 6538:84ee014ebd41

Merge xen-vtx-unstable.hg

author    adsharma@los-vmm.sc.intel.com
date      Wed Aug 17 12:34:38 2005 -0800 (2005-08-17)
parents   23979fb12c49 f294acb25858
children  99914b54f7bf
#ifndef _ASM_IA64_XENSYSTEM_H
#define _ASM_IA64_XENSYSTEM_H
/*
 * xen specific context definition
 *
 * Copyright (C) 2005 Hewlett-Packard Co.
 *	Dan Magenheimer (dan.magenheimer@hp.com)
 *
 * Copyright (C) 2005 Intel Co.
 *	Kun Tian (Kevin Tian) <kevin.tian@intel.com>
 *
 */
#include <asm/config.h>
#include <linux/kernel.h>

/* Define HV space hierarchy */
#define XEN_VIRT_SPACE_LOW	0xe800000000000000
#define XEN_VIRT_SPACE_HIGH	0xf800000000000000
/* This is the address at which the rr7 switch stub is mapped, in region 5 */
#ifdef CONFIG_VTI
#define XEN_RR7_SWITCH_STUB	0xb700000000000000
#endif // CONFIG_VTI

#define XEN_START_ADDR		0xf000000000000000
#define KERNEL_START		0xf000000004000000
#define PERCPU_ADDR		(0xf100000000000000 - PERCPU_PAGE_SIZE)
#define SHAREDINFO_ADDR		0xf100000000000000
#define VHPT_ADDR		0xf200000000000000
#define SHARED_ARCHINFO_ADDR	0xf300000000000000
#define XEN_END_ADDR		0xf400000000000000
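
/*
 * Illustrative sketch: a minimal helper built on the layout above to
 * test whether a virtual address falls within Xen's own range.  The
 * in_xen_virt_space() name is an assumption for illustration and is
 * not defined elsewhere in this header.
 */
#if 0
static inline int in_xen_virt_space(unsigned long va)
{
	/* Xen's fixed mappings live between XEN_START_ADDR (inclusive)
	 * and XEN_END_ADDR (exclusive). */
	return va >= XEN_START_ADDR && va < XEN_END_ADDR;
}
#endif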

#ifndef __ASSEMBLY__

#define IA64_HAS_EXTRA_STATE(t) 0

#ifdef CONFIG_VTI
extern struct task_struct *vmx_ia64_switch_to (void *next_task);
#define __switch_to(prev,next,last) do { \
	ia64_save_fpu(prev->arch._thread.fph); \
	ia64_load_fpu(next->arch._thread.fph); \
	if (VMX_DOMAIN(prev)) \
		vmx_save_state(prev); \
	else { \
		if (IA64_HAS_EXTRA_STATE(prev)) \
			ia64_save_extra(prev); \
	} \
	if (VMX_DOMAIN(next)) \
		vmx_load_state(next); \
	else { \
		if (IA64_HAS_EXTRA_STATE(next)) \
			ia64_load_extra(next); \
	} \
	ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
	(last) = vmx_ia64_switch_to((next)); \
} while (0)
#else // CONFIG_VTI
#define __switch_to(prev,next,last) do { \
	ia64_save_fpu(prev->arch._thread.fph); \
	ia64_load_fpu(next->arch._thread.fph); \
	if (IA64_HAS_EXTRA_STATE(prev)) \
		ia64_save_extra(prev); \
	if (IA64_HAS_EXTRA_STATE(next)) \
		ia64_load_extra(next); \
	/*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/ \
	(last) = ia64_switch_to((next)); \
	vcpu_set_next_timer(current); \
} while (0)
#endif // CONFIG_VTI
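
/*
 * Illustrative sketch: how a context-switch path might invoke the
 * __switch_to() macro defined above.  The example_context_switch()
 * name and the struct vcpu typing of prev/next/last are assumptions
 * for illustration; the real callers live in Xen's scheduler code.
 */
#if 0
static inline void example_context_switch(struct vcpu *prev, struct vcpu *next)
{
	struct vcpu *last;

	/* Saves prev's FPU (and VMX or extra) state, loads next's, and
	 * resumes next's saved context; when prev is eventually run
	 * again, 'last' identifies the vcpu that was running just
	 * before prev resumed. */
	__switch_to(prev, next, last);
}
#endif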

#define __cmpxchg_user(ptr, new, old, _size) \
({ \
	register long __gu_r8 asm ("r8"); \
	register long __gu_r9 asm ("r9"); \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("mov %1=r0;;\n" \
		"[1:]\tcmpxchg"_size".acq %0=[%2],%3,ar.ccv\n" \
		"\t.xdata4 \"__ex_table\", 1b-., 1f-.\n" \
		"[1:]" \
		: "=r"(old), "=r"(__gu_r8) : \
		"r"(ptr), "r"(new) : "memory"); \
	__gu_r8; \
})

// NOTE: Xen defines args as pointer,old,new whereas ia64 uses pointer,new,old
// so reverse them here
#define cmpxchg_user(_p,_o,_n) \
({ \
	register long _rc; \
	ia64_mf(); \
	switch ( sizeof(*(_p)) ) { \
	case 1: _rc = __cmpxchg_user(_p,_n,_o,"1"); break; \
	case 2: _rc = __cmpxchg_user(_p,_n,_o,"2"); break; \
	case 4: _rc = __cmpxchg_user(_p,_n,_o,"4"); break; \
	case 8: _rc = __cmpxchg_user(_p,_n,_o,"8"); break; \
	} \
	ia64_mf(); \
	_rc; \
})
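
/*
 * Illustrative sketch: using cmpxchg_user() with Xen's pointer,old,new
 * argument order.  The set_guest_flag() name and the gptr parameter
 * are assumptions for illustration only.
 */
#if 0
static inline int set_guest_flag(unsigned long *gptr)
{
	unsigned long oldval = 0;	/* value we expect to find */
	unsigned long newval = 1;	/* value we want to store  */
	long fault;

	/* The macro swaps the old/new operands before handing them to
	 * the ia64 cmpxchg8.acq primitive; on return, oldval holds the
	 * value previously in guest memory, and the result in r8 is
	 * zero on success and non-zero if the guest access faulted. */
	fault = cmpxchg_user(gptr, oldval, newval);
	return fault ? -1 : 0;
}
#endif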

#endif // __ASSEMBLY__
#endif // _ASM_IA64_XENSYSTEM_H