ia64/xen-unstable

view xen/include/asm-ia64/xensystem.h @ 5704:9b73afea874e

Certain types of event channel are now auto-bound to vcpu0 by Xen.
Make sure that xenolinux agrees with this.
author sos22@douglas.cl.cam.ac.uk
date Fri Jul 08 15:35:43 2005 +0000 (2005-07-08)
parents da2752f150a0
children ca44d2dbb273 a83ac0806d6b
line source
1 #ifndef _ASM_IA64_XENSYSTEM_H
2 #define _ASM_IA64_XENSYSTEM_H
3 /*
4 * xen specific context definition
5 *
6 * Copyright (C) 2005 Hewlett-Packard Co.
7 * Dan Magenheimer (dan.magenheimer@hp.com)
8 *
9 * Copyright (C) 2005 Intel Co.
10 * Kun Tian (Kevin Tian) <kevin.tian@intel.com>
11 *
12 */
13 #include <asm/config.h>
14 #include <linux/kernel.h>
/* Define HV space hierarchy */
#define XEN_VIRT_SPACE_LOW	0xe800000000000000
#define XEN_VIRT_SPACE_HIGH	0xf800000000000000
/* This is address to mapping rr7 switch stub, in region 5 */
#ifdef CONFIG_VTI
#define XEN_RR7_SWITCH_STUB	0xb700000000000000
#endif // CONFIG_VTI

#define KERNEL_START		0xf000000004000000
/* Per-cpu area sits immediately below the shared-info page (same base
 * address as SHAREDINFO_ADDR).  Parenthesized: the previous bare
 * `base-PERCPU_PAGE_SIZE` expansion miscomputes under operator
 * precedence at any arithmetic use site (e.g. `2*PERCPU_ADDR`). */
#define PERCPU_ADDR		(0xf100000000000000 - PERCPU_PAGE_SIZE)
#define SHAREDINFO_ADDR		0xf100000000000000
#define VHPT_ADDR		0xf200000000000000
29 #ifndef __ASSEMBLY__
/* Xen's ia64 port carries no per-task "extra" (debug/perfmon-style) state,
 * so this predicate is constant 0; the argument `t` is never evaluated,
 * making the save/load-extra branches in __switch_to dead code here. */
#define IA64_HAS_EXTRA_STATE(t)	0
33 #ifdef CONFIG_VTI
34 extern struct task_struct *vmx_ia64_switch_to (void *next_task);
/*
 * VT-i context switch: a VMX (fully-virtualized) domain has its state
 * saved/loaded through vmx_save_state()/vmx_load_state(); a non-VMX task
 * falls back to the ia64 extra-state hooks.  Then psr.dfh is set for the
 * incoming task unless it already owns the local FPU (so its first fp-high
 * access faults and triggers lazy fph restore -- TODO confirm against the
 * fph fault handler), and the stack switch is done via vmx_ia64_switch_to,
 * which yields the previously-running task in (last).
 */
#define __switch_to(prev,next,last) do { \
	if (VMX_DOMAIN(prev)) \
		vmx_save_state(prev); \
	else { \
		if (IA64_HAS_EXTRA_STATE(prev)) \
			ia64_save_extra(prev); \
	} \
	if (VMX_DOMAIN(next)) \
		vmx_load_state(next); \
	else { \
		/* BUG FIX: was ia64_save_extra(next) -- the incoming task's \
		 * extra state must be LOADED, mirroring the non-VTI variant \
		 * of this macro below. */ \
		if (IA64_HAS_EXTRA_STATE(next)) \
			ia64_load_extra(next); \
	} \
	ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
	(last) = vmx_ia64_switch_to((next)); \
} while (0)
51 #else // CONFIG_VTI
/*
 * Non-VTI context switch: eagerly save the outgoing task's fp-high
 * partition and load the incoming task's (prev/next's arch._thread.fph),
 * swap any "extra" per-task state via the IA64_HAS_EXTRA_STATE hooks,
 * then set psr.dfh in next's saved regs when next is not the local FPU
 * owner (presumably so a later fp-high access faults for lazy handling --
 * confirm against the fph fault path), and finally switch stacks with
 * ia64_switch_to, which returns the previously-running task into (last).
 */
52 #define __switch_to(prev,next,last) do { \
53 ia64_save_fpu(prev->arch._thread.fph); \
54 ia64_load_fpu(next->arch._thread.fph); \
55 if (IA64_HAS_EXTRA_STATE(prev)) \
56 ia64_save_extra(prev); \
57 if (IA64_HAS_EXTRA_STATE(next)) \
58 ia64_load_extra(next); \
59 ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
60 (last) = ia64_switch_to((next)); \
61 } while (0)
62 #endif // CONFIG_VTI
/*
 * __cmpxchg_user(ptr, new, old, _size): compare-and-exchange on a user
 * address.  ar.ccv is first loaded with 'old'; the cmpxchg<_size>.acq then
 * stores 'new' to *ptr only if *ptr equals ar.ccv, and the prior value of
 * *ptr is written back into the caller's 'old' (it is the "=r" %0 output).
 * r8 is zeroed before the access, and the cmpxchg instruction is entered
 * into the __ex_table via .xdata4 (1b = the cmpxchg, 1f = the continuation
 * label -- GAS numeric local labels, so the repeated "[1:]" is intentional),
 * presumably so that a fault on the user access resumes past it with r8
 * set non-zero by the exception fixup -- confirm against the ia64 uaccess
 * fixup code.  The macro's value is r8: 0 indicates success.
 * NOTE(review): __gu_r9 is declared but never used in the asm; it may
 * exist only to reserve r9 -- verify before removing.
 */
64 #define __cmpxchg_user(ptr, new, old, _size) \
65 ({ \
66 register long __gu_r8 asm ("r8"); \
67 register long __gu_r9 asm ("r9"); \
68 asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
69 asm volatile ("mov %1=r0;;\n" \
70 "[1:]\tcmpxchg"_size".acq %0=[%2],%3,ar.ccv\n" \
71 "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n" \
72 "[1:]" \
73 : "=r"(old), "=r"(__gu_r8) : \
74 "r"(ptr), "r"(new) : "memory"); \
75 __gu_r8; \
76 })
79 // NOTE: Xen defines args as pointer,old,new whereas ia64 uses pointer,new,old
80 // so reverse them here
/*
 * cmpxchg_user(_p, _o, _n): Xen-argument-order wrapper (pointer, old, new)
 * around __cmpxchg_user, which takes (pointer, new, old) -- hence _n and _o
 * are swapped at each call site below.  Dispatches on sizeof(*(_p)) to the
 * 1/2/4/8-byte cmpxchg variant, bracketed by ia64_mf() memory fences, and
 * evaluates to __cmpxchg_user's result (r8: 0 on success).
 * NOTE(review): there is no default: case -- for any other operand size
 * _rc is returned uninitialized (undefined behavior).  A compile/link-time
 * trap for bad sizes, as Linux's cmpxchg family uses, would be safer.
 */
81 #define cmpxchg_user(_p,_o,_n) \
82 ({ \
83 register long _rc; \
84 ia64_mf(); \
85 switch ( sizeof(*(_p)) ) { \
86 case 1: _rc = __cmpxchg_user(_p,_n,_o,"1"); break; \
87 case 2: _rc = __cmpxchg_user(_p,_n,_o,"2"); break; \
88 case 4: _rc = __cmpxchg_user(_p,_n,_o,"4"); break; \
89 case 8: _rc = __cmpxchg_user(_p,_n,_o,"8"); break; \
90 } \
91 ia64_mf(); \
92 _rc; \
93 })
95 #endif // __ASSEMBLY__
96 #endif // _ASM_IA64_XENSYSTEM_H