ia64/xen-unstable

view xen/include/asm-ia64/vmx_phy_mode.h @ 5704:9b73afea874e

Certain types of event channel are now auto-bound to vcpu0 by Xen.
Make sure that xenolinux agrees with this.
author sos22@douglas.cl.cam.ac.uk
date Fri Jul 08 15:35:43 2005 +0000 (2005-07-08)
parents c91f74efda05
children ca44d2dbb273 a83ac0806d6b
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
2 /*
3 * vmx_phy_mode.h:
4 * Copyright (c) 2004, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 *
19 */
21 #ifndef _PHY_MODE_H_
22 #define _PHY_MODE_H_
24 /*
25 * Guest Physical Mode is emulated by GVMM, which is actually running
26 * in virtual mode.
27 *
28 * For all combinations of (it,dt,rt), only three were taken into
29 * account:
30 * (0,0,0): some firmware and kernel start code execute in this mode;
31 * (1,1,1): most kernel C code executes in this mode;
32 * (1,0,1): some low level TLB miss handler code executes in this mode;
33 * Till now, no other kind of combinations were found.
34 *
35 * Because all physical addresses fall into two categories:
36 * 0x0xxxxxxxxxxxxxxx, which is cacheable, and 0x8xxxxxxxxxxxxxxx, which
37 * is uncacheable. These two kinds of addresses reside in region 0 and 4
38 * of the virtual mode. Therefore, we load two different Region IDs
39 * (A, B) into RR0 and RR4, respectively, when guest is entering physical
40 * mode. These two RIDs are totally different from the RIDs used in
41 * virtual mode. So, the aliasing between physical addresses and virtual
42 * addresses can be disambiguated by different RIDs.
43 *
44 * RID A and B are stolen from the cpu ulm region id. In linux, each
45 * process is allocated 8 RIDs:
46 * mmu_context << 3 + 0
47 * mmu_context << 3 + 1
48 * mmu_context << 3 + 2
49 * mmu_context << 3 + 3
50 * mmu_context << 3 + 4
51 * mmu_context << 3 + 5
52 * mmu_context << 3 + 6
53 * mmu_context << 3 + 7
54 * Because all processes share region 5~7, the last 3 are left untouched.
55 * So, we steal "mmu_context << 3 + 5" and "mmu_context << 3 + 6" from
56 * ulm and use them as RID A and RID B.
57 *
58 * When guest is running in (1,0,1) mode, the instructions being accessed
59 * reside in region 5~7, not in region 0 or 4. So, instruction can be
60 * accessed in virtual mode without interfering with physical data access.
61 *
62 * When dt!=rt, it is rare to perform "load/store" and "RSE" operations
63 * at the same time. No need to consider such a case. We consider (0,1)
64 * as (0,0).
65 *
66 */
69 #include <asm/vmx_vcpu.h>
70 #include <asm/regionreg.h>
71 #include <asm/gcc_intrin.h>
72 #include <asm/pgtable.h>
73 /* Due to change of ia64_set_rr interface */
75 #define PHY_PAGE_UC (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_UC|_PAGE_AR_RWX)
76 #define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX)
78 #ifdef PHY_16M /* 16M: large granule for test*/
79 #define EMUL_PHY_PAGE_SHIFT 24
80 #else /* 4K: emulated physical page granule */
81 #define EMUL_PHY_PAGE_SHIFT 12
82 #endif
83 #define IA64_RSC_MODE 0x0000000000000003
84 #define XEN_RR7_RID (0xf00010)
85 #define GUEST_IN_PHY 0x1
86 extern int valid_mm_mode[];
87 extern int mm_switch_table[][8];
88 extern void physical_mode_init(VCPU *);
89 extern void switch_to_physical_rid(VCPU *);
90 extern void switch_to_virtual_rid(VCPU *vcpu);
91 extern void switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr);
92 extern void stlb_phys_lookup(VCPU *vcpu, UINT64 paddr, UINT64 type);
93 extern void check_mm_mode_switch (VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr);
94 extern void prepare_if_physical_mode(VCPU *vcpu);
95 extern void recover_if_physical_mode(VCPU *vcpu);
96 extern void vmx_init_all_rr(VCPU *vcpu);
97 extern void vmx_load_all_rr(VCPU *vcpu);
98 /*
99 * No sanity check here, since all psr changes have been
100 * checked in switch_mm_mode().
101 */
102 #define is_physical_mode(v) \
103 ((v->arch.mode_flags) & GUEST_IN_PHY)
105 #define is_virtual_mode(v) \
106 (!is_physical_mode(v))
108 #define MODE_IND(psr) \
109 (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
111 #define SW_BAD 0 /* Bad mode transition */
112 #define SW_V2P 1 /* Physical emulation is activated */
113 #define SW_P2V 2 /* Exit physical mode emulation */
114 #define SW_SELF 3 /* No mode transition */
115 #define SW_NOP 4 /* Mode transition, but without action required */
117 #define INV_MODE 0 /* Invalid mode */
118 #define GUEST_VIRT 1 /* Guest in virtual mode */
119 #define GUEST_PHYS 2 /* Guest in physical mode, requiring emulation */
123 #endif /* _PHY_MODE_H_ */