ia64/xen-unstable

view tools/libxc/xc_pagetab.c @ 9488:0a6f5527ca4b

[IA64] set itv handoff as masked and enable reading irr[0-3]

Set initial vcpu itv handoff state to mask the timer vector.
This seems to match hardware and makes logical sense from a
spurious interrupt perspective. Enable vcpu_get_irr[0-3]
functions as they seem to work and have the proper backing.
This enables the check_sal_cache_flush() in arch/ia64/kernel/sal.c
to work unmodified, allowing us to remove the Xen changes from
the file (and thus the file from the sparse tree).

Signed-off-by: Alex Williamson <alex.williamson@hp.com>
author awilliam@xenbuild.aw
date Tue Apr 04 09:39:45 2006 -0600 (2006-04-04)
parents dd5649730b32
children fdc26ec44145
line source
/******************************************************************************
 * xc_pagetab.c
 *
 * Function to translate virtual to physical addresses.
 */
6 #include "xc_private.h"
#if defined(__i386__)

/* Bit positions of each page-table level's index within a virtual
 * address, PAE (3-level) layout. */
#define L1_PAGETABLE_SHIFT_PAE  12
#define L2_PAGETABLE_SHIFT_PAE  21
#define L3_PAGETABLE_SHIFT_PAE  30

/* Same, for the classic 2-level (non-PAE) layout. */
#define L1_PAGETABLE_SHIFT      12
#define L2_PAGETABLE_SHIFT      22

/* L0 mask extracts the frame address from a PTE; the L1..L3 masks
 * extract each level's table index after the matching shift. */
#define L0_PAGETABLE_MASK_PAE   0x0000000ffffff000ULL
#define L1_PAGETABLE_MASK_PAE   0x1ffULL
#define L2_PAGETABLE_MASK_PAE   0x1ffULL
#define L3_PAGETABLE_MASK_PAE   0x3ULL

#define L0_PAGETABLE_MASK       0xfffff000ULL
#define L1_PAGETABLE_MASK       0x3ffULL
#define L2_PAGETABLE_MASK       0x3ffULL

#elif defined(__x86_64__)

/* x86_64 always uses the 4-level PAE-style format. */
#define L1_PAGETABLE_SHIFT_PAE  12
#define L2_PAGETABLE_SHIFT_PAE  21
#define L3_PAGETABLE_SHIFT_PAE  30
#define L4_PAGETABLE_SHIFT_PAE  39

#define L1_PAGETABLE_SHIFT      L1_PAGETABLE_SHIFT_PAE
#define L2_PAGETABLE_SHIFT      L2_PAGETABLE_SHIFT_PAE

/* L0 mask extracts the frame address from a PTE; each table level
 * uses a 9-bit index. */
#define L0_PAGETABLE_MASK_PAE   0x000000fffffff000ULL
#define L1_PAGETABLE_MASK_PAE   0x1ffULL
#define L2_PAGETABLE_MASK_PAE   0x1ffULL
#define L3_PAGETABLE_MASK_PAE   0x1ffULL
#define L4_PAGETABLE_MASK_PAE   0x1ffULL

#define L0_PAGETABLE_MASK       L0_PAGETABLE_MASK_PAE
#define L1_PAGETABLE_MASK       L1_PAGETABLE_MASK_PAE
#define L2_PAGETABLE_MASK       L2_PAGETABLE_MASK_PAE

#endif
48 unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
49 int vcpu, unsigned long long virt )
50 {
51 vcpu_guest_context_t ctx;
52 unsigned long long cr3;
53 void *pd, *pt, *pdppage = NULL, *pdp, *pml = NULL;
54 unsigned long long pde, pte, pdpe, pmle;
55 unsigned long mfn = 0;
56 #if defined (__i386__)
57 static int pt_levels = 0;
59 if (pt_levels == 0) {
60 xen_capabilities_info_t xen_caps = "";
62 if (xc_version(xc_handle, XENVER_capabilities, &xen_caps) != 0)
63 goto out;
64 if (strstr(xen_caps, "xen-3.0-x86_64"))
65 pt_levels = 4;
66 else if (strstr(xen_caps, "xen-3.0-x86_32p"))
67 pt_levels = 3;
68 else if (strstr(xen_caps, "xen-3.0-x86_32"))
69 pt_levels = 2;
70 else
71 goto out;
72 }
73 #elif defined (__x86_64__)
74 #define pt_levels 4
75 #endif
77 if (xc_vcpu_getcontext(xc_handle, dom, vcpu, &ctx) != 0) {
78 fprintf(stderr, "failed to retreive vcpu context\n");
79 goto out;
80 }
81 cr3 = ctx.ctrlreg[3];
83 /* Page Map Level 4 */
85 #if defined(__i386__)
86 pmle = cr3;
87 #elif defined(__x86_64__)
88 pml = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ, cr3 >> PAGE_SHIFT);
89 if (pml == NULL) {
90 fprintf(stderr, "failed to map PML4\n");
91 goto out;
92 }
93 pmle = *(unsigned long long *)(pml + 8 * ((virt >> L4_PAGETABLE_SHIFT_PAE) & L4_PAGETABLE_MASK_PAE));
94 if((pmle & 1) == 0) {
95 fprintf(stderr, "page entry not present in PML4\n");
96 goto out_unmap_pml;
97 }
98 #endif
100 /* Page Directory Pointer Table */
102 if (pt_levels >= 3) {
103 pdppage = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ, pmle >> PAGE_SHIFT);
104 if (pdppage == NULL) {
105 fprintf(stderr, "failed to map PDP\n");
106 goto out_unmap_pml;
107 }
108 if (pt_levels >= 4)
109 pdp = pdppage;
110 else
111 /* PDP is only 32 bit aligned with 3 level pts */
112 pdp = pdppage + (pmle & ~(XC_PAGE_MASK | 0x1f));
114 pdpe = *(unsigned long long *)(pdp + 8 * ((virt >> L3_PAGETABLE_SHIFT_PAE) & L3_PAGETABLE_MASK_PAE));
116 if((pdpe & 1) == 0) {
117 fprintf(stderr, "page entry not present in PDP\n");
118 goto out_unmap_pdp;
119 }
120 } else {
121 pdpe = pmle;
122 }
124 /* Page Directory */
126 pd = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ, pdpe >> PAGE_SHIFT);
127 if (pd == NULL) {
128 fprintf(stderr, "failed to map PD\n");
129 goto out_unmap_pdp;
130 }
132 if (pt_levels >= 3)
133 pde = *(unsigned long long *)(pd + 8 * ((virt >> L2_PAGETABLE_SHIFT_PAE) & L2_PAGETABLE_MASK_PAE));
134 else
135 pde = *(unsigned long long *)(pd + 4 * ((virt >> L2_PAGETABLE_SHIFT) & L2_PAGETABLE_MASK));
137 if ((pde & 1) == 0) {
138 fprintf(stderr, "page entry not present in PD\n");
139 goto out_unmap_pd;
140 }
142 /* Page Table */
144 if (pde & 0x00000008) { /* 4M page (or 2M in PAE mode) */
145 fprintf(stderr, "Cannot currently cope with 2/4M pages\n");
146 exit(-1);
147 } else { /* 4k page */
148 pt = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ,
149 pde >> PAGE_SHIFT);
151 if (pt == NULL) {
152 fprintf(stderr, "failed to map PT\n");
153 goto out_unmap_pd;
154 }
156 if (pt_levels >= 3)
157 pte = *(unsigned long long *)(pt + 8 * ((virt >> L1_PAGETABLE_SHIFT_PAE) & L1_PAGETABLE_MASK_PAE));
158 else
159 pte = *(unsigned long long *)(pt + 4 * ((virt >> L1_PAGETABLE_SHIFT) & L1_PAGETABLE_MASK));
161 if ((pte & 0x00000001) == 0) {
162 fprintf(stderr, "page entry not present in PT\n");
163 goto out_unmap_pt;
164 }
166 if (pt_levels >= 3)
167 mfn = (pte & L0_PAGETABLE_MASK_PAE) >> PAGE_SHIFT;
168 else
169 mfn = (pte & L0_PAGETABLE_MASK) >> PAGE_SHIFT;
170 }
172 out_unmap_pt:
173 munmap(pt, PAGE_SIZE);
174 out_unmap_pd:
175 munmap(pd, PAGE_SIZE);
176 out_unmap_pdp:
177 munmap(pdppage, PAGE_SIZE);
178 out_unmap_pml:
179 munmap(pml, PAGE_SIZE);
180 out:
181 return mfn;
182 }
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */