ia64/xen-unstable

view tools/libxc/xc_dom_x86.c @ 18893:628b3a76dbf4

libxc: Fix gcc 4.3 build failure

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Dec 09 12:42:18 2008 +0000 (2008-12-09)
parents e5c9c8e6e726
children f0d033f0a319
line source
1 /*
2 * Xen domain builder -- i386 and x86_64 bits.
3 *
4 * Most architecture-specific code for x86 goes here.
5 * - prepare page tables.
6 * - fill architecture-specific structs.
7 *
8 * This code is licensed under the GPL.
9 * written 2006 by Gerd Hoffmann <kraxel@suse.de>.
10 *
11 */
12 #include <stdio.h>
13 #include <stdlib.h>
14 #include <string.h>
15 #include <inttypes.h>
17 #include <xen/xen.h>
18 #include <xen/foreign/x86_32.h>
19 #include <xen/foreign/x86_64.h>
20 #include <xen/hvm/hvm_info_table.h>
21 #include <xen/io/protocols.h>
23 #include "xg_private.h"
24 #include "xc_dom.h"
25 #include "xenctrl.h"
27 /* ------------------------------------------------------------------------ */
29 #define bits_to_mask(bits) (((xen_vaddr_t)1 << (bits))-1)
30 #define round_down(addr, mask) ((addr) & ~(mask))
31 #define round_up(addr, mask) ((addr) | (mask))
33 static unsigned long
34 nr_page_tables(xen_vaddr_t start, xen_vaddr_t end, unsigned long bits)
35 {
36 xen_vaddr_t mask = bits_to_mask(bits);
37 int tables;
39 if ( bits == 0 )
40 return 0; /* unused */
42 if ( bits == (8 * sizeof(unsigned long)) )
43 {
44 /* must be pgd, need one */
45 start = 0;
46 end = -1;
47 tables = 1;
48 }
49 else
50 {
51 start = round_down(start, mask);
52 end = round_up(end, mask);
53 tables = ((end - start) >> bits) + 1;
54 }
56 xc_dom_printf("%s: 0x%016" PRIx64 "/%ld: 0x%016" PRIx64
57 " -> 0x%016" PRIx64 ", %d table(s)\n",
58 __FUNCTION__, mask, bits, start, end, tables);
59 return tables;
60 }
/*
 * Size the page-table segment for the guest.  Adding page tables
 * extends the virtual region that must be mapped, which in turn may
 * require more page tables, so iterate to a fixed point.  The *_bits
 * arguments give the address bits covered below each level (0 means
 * the level is unused on this paging mode).
 */
static int count_pgtables(struct xc_dom_image *dom, int pae,
                          int l4_bits, int l3_bits, int l2_bits, int l1_bits)
{
    int pages, extra_pages;
    xen_vaddr_t try_virt_end;

    extra_pages = dom->alloc_bootstack ? 1 : 0;
    extra_pages += dom->extra_pages;
    extra_pages += 128; /* 512kB padding */
    pages = extra_pages;
    for ( ; ; )
    {
        /* Candidate end of the mapped area, rounded up to 4MB. */
        try_virt_end = round_up(dom->virt_alloc_end + pages * PAGE_SIZE_X86,
                                bits_to_mask(22)); /* 4MB alignment */
        dom->pg_l4 =
            nr_page_tables(dom->parms.virt_base, try_virt_end, l4_bits);
        dom->pg_l3 =
            nr_page_tables(dom->parms.virt_base, try_virt_end, l3_bits);
        dom->pg_l2 =
            nr_page_tables(dom->parms.virt_base, try_virt_end, l2_bits);
        dom->pg_l1 =
            nr_page_tables(dom->parms.virt_base, try_virt_end, l1_bits);
        if (pae && try_virt_end < 0xc0000000)
        {
            /* Reserve a spare l2 page for l3 slot #3; it is wired up
             * in setup_pgtables_x86_32_pae(). */
            xc_dom_printf("%s: PAE: extra l2 page table for l3#3\n",
                          __FUNCTION__);
            dom->pg_l2++;
        }
        dom->pgtables = dom->pg_l4 + dom->pg_l3 + dom->pg_l2 + dom->pg_l1;
        pages = dom->pgtables + extra_pages;
        /* Fixed point: the tables themselves fit inside the candidate
         * mapped area. */
        if ( dom->virt_alloc_end + pages * PAGE_SIZE_X86 <= try_virt_end + 1 )
            break;
    }
    dom->virt_pgtab_end = try_virt_end + 1;
    return 0;
}
99 /* ------------------------------------------------------------------------ */
100 /* i386 pagetables */
102 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
103 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
104 #define L3_PROT (_PAGE_PRESENT)
106 static int count_pgtables_x86_32(struct xc_dom_image *dom)
107 {
108 return count_pgtables(dom, 0, 0, 0, 32, L2_PAGETABLE_SHIFT_I386);
109 }
111 static int count_pgtables_x86_32_pae(struct xc_dom_image *dom)
112 {
113 return count_pgtables(dom, 1, 0, 32,
114 L3_PAGETABLE_SHIFT_PAE, L2_PAGETABLE_SHIFT_PAE);
115 }
117 #define pfn_to_paddr(pfn) ((xen_paddr_t)(pfn) << PAGE_SHIFT_X86)
/*
 * Build the two-level (non-PAE) i386 page tables: one L2 entry per L1
 * table consumed, one L1 entry per mapped page.  The page-table pages
 * themselves are mapped read-only (PV pagetable requirement, see the
 * pinning in arch_setup_bootlate()).
 */
static int setup_pgtables_x86_32(struct xc_dom_image *dom)
{
    xen_pfn_t l2pfn = dom->pgtables_seg.pfn;
    /* L1 tables follow the L2 tables inside the pgtables segment. */
    xen_pfn_t l1pfn = dom->pgtables_seg.pfn + dom->pg_l2;
    l2_pgentry_32_t *l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
    l1_pgentry_32_t *l1tab = NULL;
    unsigned long l2off, l1off;
    xen_vaddr_t addr;
    xen_pfn_t pgpfn;

    for ( addr = dom->parms.virt_base; addr < dom->virt_pgtab_end;
          addr += PAGE_SIZE_X86 )
    {
        if ( l1tab == NULL )
        {
            /* get L1 tab, make L2 entry */
            l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
            l2off = l2_table_offset_i386(addr);
            l2tab[l2off] =
                pfn_to_paddr(xc_dom_p2m_guest(dom, l1pfn)) | L2_PROT;
            l1pfn++;
        }

        /* make L1 entry */
        l1off = l1_table_offset_i386(addr);
        pgpfn = (addr - dom->parms.virt_base) >> PAGE_SHIFT_X86;
        l1tab[l1off] =
            pfn_to_paddr(xc_dom_p2m_guest(dom, pgpfn)) | L1_PROT;
        if ( (addr >= dom->pgtables_seg.vstart) &&
             (addr < dom->pgtables_seg.vend) )
            l1tab[l1off] &= ~_PAGE_RW; /* page tables are r/o */
        /* Last slot filled: drop the pointer so the next iteration
         * advances to the next L1 table. */
        if ( l1off == (L1_PAGETABLE_ENTRIES_I386 - 1) )
            l1tab = NULL;
    }
    return 0;
}
/*
 * Move the l3 page table page below 4G for guests which do not
 * support the extended-cr3 format. The l3 is currently empty so we
 * do not need to preserve the current contents.
 *
 * Returns the new L3 mfn on success, or the original l3mfn if any
 * step fails (the caller re-checks the below-4G condition).
 */
static xen_pfn_t move_l3_below_4G(struct xc_dom_image *dom,
                                  xen_pfn_t l3pfn,
                                  xen_pfn_t l3mfn)
{
    xen_pfn_t new_l3mfn;
    struct xc_mmu *mmu;
    void *l3tab;
    int xc = dom->guest_xc;

    mmu = xc_alloc_mmu_updates(xc, dom->guest_domid);
    if ( mmu == NULL )
    {
        xc_dom_printf("%s: failed at %d\n", __FUNCTION__, __LINE__);
        return l3mfn;
    }

    /* Drop our mapping of the old frame before replacing it. */
    xc_dom_unmap_one(dom, l3pfn);

    new_l3mfn = xc_make_page_below_4G(dom->guest_xc, dom->guest_domid, l3mfn);
    if ( !new_l3mfn )
        goto out;

    /* Record the replacement frame in the p2m and push it out.
     * NOTE(review): if a later step fails we still return the old
     * l3mfn although p2m_host has already been updated — confirm
     * callers treat that as fatal anyway. */
    dom->p2m_host[l3pfn] = new_l3mfn;
    if ( xc_dom_update_guest_p2m(dom) != 0 )
        goto out;

    /* Keep the m2p in sync for the new frame. */
    if ( xc_add_mmu_update(xc, mmu,
                           (((unsigned long long)new_l3mfn)
                            << XC_DOM_PAGE_SHIFT(dom)) |
                           MMU_MACHPHYS_UPDATE, l3pfn) )
        goto out;

    if ( xc_flush_mmu_updates(xc, mmu) )
        goto out;

    /*
     * This ensures that the entire pgtables_seg is mapped by a single
     * mmap region. arch_setup_bootlate() relies on this to be able to
     * unmap and pin the pagetables.
     */
    if ( xc_dom_seg_to_ptr(dom, &dom->pgtables_seg) == NULL )
        goto out;

    l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);
    memset(l3tab, 0, XC_DOM_PAGE_SIZE(dom)); /* new frame starts out empty */

    xc_dom_printf("%s: successfully relocated L3 below 4G. "
                  "(L3 PFN %#"PRIpfn" MFN %#"PRIpfn"=>%#"PRIpfn")\n",
                  __FUNCTION__, l3pfn, l3mfn, new_l3mfn);

    l3mfn = new_l3mfn;

 out:
    free(mmu);

    return l3mfn;
}
/*
 * Build the three-level PAE page tables.  For guests that only
 * understand the classic cr3 format (parms.pae == 1) the L3 page must
 * live below 4G, so relocate it first if needed.
 */
static int setup_pgtables_x86_32_pae(struct xc_dom_image *dom)
{
    /* Segment layout: L3 page(s), then L2 tables, then L1 tables. */
    xen_pfn_t l3pfn = dom->pgtables_seg.pfn;
    xen_pfn_t l2pfn = dom->pgtables_seg.pfn + dom->pg_l3;
    xen_pfn_t l1pfn = dom->pgtables_seg.pfn + dom->pg_l3 + dom->pg_l2;
    l3_pgentry_64_t *l3tab;
    l2_pgentry_64_t *l2tab = NULL;
    l1_pgentry_64_t *l1tab = NULL;
    unsigned long l3off, l2off, l1off;
    xen_vaddr_t addr;
    xen_pfn_t pgpfn;
    xen_pfn_t l3mfn = xc_dom_p2m_guest(dom, l3pfn);

    if ( dom->parms.pae == 1 )
    {
        /* 0x100000 frames == the 4G boundary. */
        if ( l3mfn >= 0x100000 )
            l3mfn = move_l3_below_4G(dom, l3pfn, l3mfn);

        if ( l3mfn >= 0x100000 )
        {
            xc_dom_panic(XC_INTERNAL_ERROR,"%s: cannot move L3 below 4G. "
                         "extended-cr3 not supported by guest. "
                         "(L3 PFN %#"PRIpfn" MFN %#"PRIpfn")\n",
                         __FUNCTION__, l3pfn, l3mfn);
            return -EINVAL;
        }
    }

    l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);

    for ( addr = dom->parms.virt_base; addr < dom->virt_pgtab_end;
          addr += PAGE_SIZE_X86 )
    {
        if ( l2tab == NULL )
        {
            /* get L2 tab, make L3 entry */
            l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
            l3off = l3_table_offset_pae(addr);
            l3tab[l3off] =
                pfn_to_paddr(xc_dom_p2m_guest(dom, l2pfn)) | L3_PROT;
            l2pfn++;
        }

        if ( l1tab == NULL )
        {
            /* get L1 tab, make L2 entry */
            l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
            l2off = l2_table_offset_pae(addr);
            l2tab[l2off] =
                pfn_to_paddr(xc_dom_p2m_guest(dom, l1pfn)) | L2_PROT;
            if ( l2off == (L2_PAGETABLE_ENTRIES_PAE - 1) )
                l2tab = NULL;
            l1pfn++;
        }

        /* make L1 entry */
        l1off = l1_table_offset_pae(addr);
        pgpfn = (addr - dom->parms.virt_base) >> PAGE_SHIFT_X86;
        l1tab[l1off] =
            pfn_to_paddr(xc_dom_p2m_guest(dom, pgpfn)) | L1_PROT;
        if ( (addr >= dom->pgtables_seg.vstart) &&
             (addr < dom->pgtables_seg.vend) )
            l1tab[l1off] &= ~_PAGE_RW; /* page tables are r/o */
        if ( l1off == (L1_PAGETABLE_ENTRIES_PAE - 1) )
            l1tab = NULL;
    }

    if ( dom->virt_pgtab_end <= 0xc0000000 )
    {
        /* Wire the spare l2 page reserved by count_pgtables() into l3
         * slot #3 (presumably required by Xen for PAE guests — see the
         * matching reservation in count_pgtables()). */
        xc_dom_printf("%s: PAE: extra l2 page table for l3#3\n", __FUNCTION__);
        l3tab[3] = pfn_to_paddr(xc_dom_p2m_guest(dom, l2pfn)) | L3_PROT;
    }
    return 0;
}
294 #undef L1_PROT
295 #undef L2_PROT
296 #undef L3_PROT
298 /* ------------------------------------------------------------------------ */
299 /* x86_64 pagetables */
301 static int count_pgtables_x86_64(struct xc_dom_image *dom)
302 {
303 return count_pgtables(dom, 0,
304 L4_PAGETABLE_SHIFT_X86_64 + 9,
305 L4_PAGETABLE_SHIFT_X86_64,
306 L3_PAGETABLE_SHIFT_X86_64,
307 L2_PAGETABLE_SHIFT_X86_64);
308 }
310 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
311 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
312 #define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
313 #define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
/*
 * Build the four-level x86_64 page tables.  The walk is the same
 * cursor pattern as the 32-bit variants: a NULL table pointer means
 * "allocate/enter the next table at this level and link it into the
 * parent".
 */
static int setup_pgtables_x86_64(struct xc_dom_image *dom)
{
    /* Segment layout: L4, then L3, then L2, then L1 tables. */
    xen_pfn_t l4pfn = dom->pgtables_seg.pfn;
    xen_pfn_t l3pfn = dom->pgtables_seg.pfn + dom->pg_l4;
    xen_pfn_t l2pfn = dom->pgtables_seg.pfn + dom->pg_l4 + dom->pg_l3;
    xen_pfn_t l1pfn =
        dom->pgtables_seg.pfn + dom->pg_l4 + dom->pg_l3 + dom->pg_l2;
    l4_pgentry_64_t *l4tab = xc_dom_pfn_to_ptr(dom, l4pfn, 1);
    l3_pgentry_64_t *l3tab = NULL;
    l2_pgentry_64_t *l2tab = NULL;
    l1_pgentry_64_t *l1tab = NULL;
    uint64_t l4off, l3off, l2off, l1off;
    uint64_t addr;
    xen_pfn_t pgpfn;

    for ( addr = dom->parms.virt_base; addr < dom->virt_pgtab_end;
          addr += PAGE_SIZE_X86 )
    {
        if ( l3tab == NULL )
        {
            /* get L3 tab, make L4 entry */
            l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);
            l4off = l4_table_offset_x86_64(addr);
            l4tab[l4off] =
                pfn_to_paddr(xc_dom_p2m_guest(dom, l3pfn)) | L4_PROT;
            l3pfn++;
        }

        if ( l2tab == NULL )
        {
            /* get L2 tab, make L3 entry */
            l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
            l3off = l3_table_offset_x86_64(addr);
            l3tab[l3off] =
                pfn_to_paddr(xc_dom_p2m_guest(dom, l2pfn)) | L3_PROT;
            if ( l3off == (L3_PAGETABLE_ENTRIES_X86_64 - 1) )
                l3tab = NULL;
            l2pfn++;
        }

        if ( l1tab == NULL )
        {
            /* get L1 tab, make L2 entry */
            l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
            l2off = l2_table_offset_x86_64(addr);
            l2tab[l2off] =
                pfn_to_paddr(xc_dom_p2m_guest(dom, l1pfn)) | L2_PROT;
            if ( l2off == (L2_PAGETABLE_ENTRIES_X86_64 - 1) )
                l2tab = NULL;
            l1pfn++;
        }

        /* make L1 entry */
        l1off = l1_table_offset_x86_64(addr);
        pgpfn = (addr - dom->parms.virt_base) >> PAGE_SHIFT_X86;
        l1tab[l1off] =
            pfn_to_paddr(xc_dom_p2m_guest(dom, pgpfn)) | L1_PROT;
        if ( (addr >= dom->pgtables_seg.vstart) &&
             (addr < dom->pgtables_seg.vend) )
            l1tab[l1off] &= ~_PAGE_RW; /* page tables are r/o */
        if ( l1off == (L1_PAGETABLE_ENTRIES_X86_64 - 1) )
            l1tab = NULL;
    }
    return 0;
}
381 #undef L1_PROT
382 #undef L2_PROT
383 #undef L3_PROT
384 #undef L4_PROT
386 /* ------------------------------------------------------------------------ */
/*
 * Allocate the phys-to-machine table segment plus the "magic" single
 * pages (start_info, xenstore ring, console ring, and — for
 * auto-translated guests — shared_info) inside the guest physmap.
 */
static int alloc_magic_pages(struct xc_dom_image *dom)
{
    size_t p2m_size = dom->total_pages * dom->arch_hooks->sizeof_pfn;

    /* allocate phys2mach table */
    if ( xc_dom_alloc_segment(dom, &dom->p2m_seg, "phys2mach", 0, p2m_size) )
        return -1;
    dom->p2m_guest = xc_dom_seg_to_ptr(dom, &dom->p2m_seg);

    /* allocate special pages */
    /* NOTE(review): the xc_dom_alloc_page() return values are not
     * checked here — confirm failures are caught when the pfns are
     * later consumed. */
    dom->start_info_pfn = xc_dom_alloc_page(dom, "start info");
    dom->xenstore_pfn = xc_dom_alloc_page(dom, "xenstore");
    dom->console_pfn = xc_dom_alloc_page(dom, "console");
    if ( xc_dom_feature_translated(dom) )
        dom->shared_info_pfn = xc_dom_alloc_page(dom, "shared info");
    /* Ask count_pgtables() to reserve one boot-stack page as well. */
    dom->alloc_bootstack = 1;

    return 0;
}
408 /* ------------------------------------------------------------------------ */
410 static int start_info_x86_32(struct xc_dom_image *dom)
411 {
412 start_info_x86_32_t *start_info =
413 xc_dom_pfn_to_ptr(dom, dom->start_info_pfn, 1);
414 xen_pfn_t shinfo =
415 xc_dom_feature_translated(dom) ? dom->shared_info_pfn : dom->
416 shared_info_mfn;
418 xc_dom_printf("%s: called\n", __FUNCTION__);
420 memset(start_info, 0, sizeof(*start_info));
421 strncpy(start_info->magic, dom->guest_type, sizeof(start_info->magic));
422 start_info->magic[sizeof(start_info->magic) - 1] = '\0';
423 start_info->nr_pages = dom->total_pages;
424 start_info->shared_info = shinfo << PAGE_SHIFT_X86;
425 start_info->pt_base = dom->pgtables_seg.vstart;
426 start_info->nr_pt_frames = dom->pgtables;
427 start_info->mfn_list = dom->p2m_seg.vstart;
429 start_info->flags = dom->flags;
430 start_info->store_mfn = xc_dom_p2m_guest(dom, dom->xenstore_pfn);
431 start_info->store_evtchn = dom->xenstore_evtchn;
432 start_info->console.domU.mfn = xc_dom_p2m_guest(dom, dom->console_pfn);
433 start_info->console.domU.evtchn = dom->console_evtchn;
435 if ( dom->ramdisk_blob )
436 {
437 start_info->mod_start = dom->ramdisk_seg.vstart;
438 start_info->mod_len = dom->ramdisk_seg.vend - dom->ramdisk_seg.vstart;
439 }
441 if ( dom->cmdline )
442 {
443 strncpy((char *)start_info->cmd_line, dom->cmdline, MAX_GUEST_CMDLINE);
444 start_info->cmd_line[MAX_GUEST_CMDLINE - 1] = '\0';
445 }
447 return 0;
448 }
450 static int start_info_x86_64(struct xc_dom_image *dom)
451 {
452 start_info_x86_64_t *start_info =
453 xc_dom_pfn_to_ptr(dom, dom->start_info_pfn, 1);
454 xen_pfn_t shinfo =
455 xc_dom_feature_translated(dom) ? dom->shared_info_pfn : dom->
456 shared_info_mfn;
458 xc_dom_printf("%s: called\n", __FUNCTION__);
460 memset(start_info, 0, sizeof(*start_info));
461 strncpy(start_info->magic, dom->guest_type, sizeof(start_info->magic));
462 start_info->magic[sizeof(start_info->magic) - 1] = '\0';
463 start_info->nr_pages = dom->total_pages;
464 start_info->shared_info = shinfo << PAGE_SHIFT_X86;
465 start_info->pt_base = dom->pgtables_seg.vstart;
466 start_info->nr_pt_frames = dom->pgtables;
467 start_info->mfn_list = dom->p2m_seg.vstart;
469 start_info->flags = dom->flags;
470 start_info->store_mfn = xc_dom_p2m_guest(dom, dom->xenstore_pfn);
471 start_info->store_evtchn = dom->xenstore_evtchn;
472 start_info->console.domU.mfn = xc_dom_p2m_guest(dom, dom->console_pfn);
473 start_info->console.domU.evtchn = dom->console_evtchn;
475 if ( dom->ramdisk_blob )
476 {
477 start_info->mod_start = dom->ramdisk_seg.vstart;
478 start_info->mod_len = dom->ramdisk_seg.vend - dom->ramdisk_seg.vstart;
479 }
481 if ( dom->cmdline )
482 {
483 strncpy((char *)start_info->cmd_line, dom->cmdline, MAX_GUEST_CMDLINE);
484 start_info->cmd_line[MAX_GUEST_CMDLINE - 1] = '\0';
485 }
487 return 0;
488 }
490 static int shared_info_x86_32(struct xc_dom_image *dom, void *ptr)
491 {
492 shared_info_x86_32_t *shared_info = ptr;
493 int i;
495 xc_dom_printf("%s: called\n", __FUNCTION__);
497 memset(shared_info, 0, sizeof(*shared_info));
498 for ( i = 0; i < MAX_VIRT_CPUS; i++ )
499 shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
500 return 0;
501 }
503 static int shared_info_x86_64(struct xc_dom_image *dom, void *ptr)
504 {
505 shared_info_x86_64_t *shared_info = ptr;
506 int i;
508 xc_dom_printf("%s: called\n", __FUNCTION__);
510 memset(shared_info, 0, sizeof(*shared_info));
511 for ( i = 0; i < MAX_VIRT_CPUS; i++ )
512 shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
513 return 0;
514 }
516 /* ------------------------------------------------------------------------ */
/*
 * Build the initial vcpu context for a 32-bit PV guest: flat kernel
 * segments, entry point / boot stack / start_info registers, and cr3
 * pointing at the top-level page table.
 */
static int vcpu_x86_32(struct xc_dom_image *dom, void *ptr)
{
    vcpu_guest_context_x86_32_t *ctxt = ptr;
    xen_pfn_t cr3_pfn;

    xc_dom_printf("%s: called\n", __FUNCTION__);

    /* clear everything */
    memset(ctxt, 0, sizeof(*ctxt));

    ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_32;
    ctxt->user_regs.es = FLAT_KERNEL_DS_X86_32;
    ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_32;
    ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_32;
    ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_32;
    ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_32;
    ctxt->user_regs.eip = dom->parms.virt_entry;
    /* Stack grows down from the top of the boot-stack page. */
    ctxt->user_regs.esp =
        dom->parms.virt_base + (dom->bootstack_pfn + 1) * PAGE_SIZE_X86;
    /* %esi carries the start_info virtual address to the guest. */
    ctxt->user_regs.esi =
        dom->parms.virt_base + (dom->start_info_pfn) * PAGE_SIZE_X86;
    ctxt->user_regs.eflags = 1 << 9; /* Interrupt Enable */

    ctxt->kernel_ss = ctxt->user_regs.ss;
    ctxt->kernel_sp = ctxt->user_regs.esp;

    ctxt->flags = VGCF_in_kernel_X86_32 | VGCF_online_X86_32;
    /* Modes 2 (extended_cr3) and 3 (bimodal) accept the extended cr3
     * format, so advertise it via vm_assist. */
    if ( dom->parms.pae == 2 /* extended_cr3 */ ||
         dom->parms.pae == 3 /* bimodal */ )
        ctxt->vm_assist |= (1UL << VMASST_TYPE_pae_extended_cr3);

    cr3_pfn = xc_dom_p2m_guest(dom, dom->pgtables_seg.pfn);
    ctxt->ctrlreg[3] = xen_pfn_to_cr3_x86_32(cr3_pfn);
    xc_dom_printf("%s: cr3: pfn 0x%" PRIpfn " mfn 0x%" PRIpfn "\n",
                  __FUNCTION__, dom->pgtables_seg.pfn, cr3_pfn);

    return 0;
}
/*
 * Build the initial vcpu context for a 64-bit PV guest: flat kernel
 * segments, entry point / boot stack / start_info registers, and cr3
 * pointing at the L4 page table.
 */
static int vcpu_x86_64(struct xc_dom_image *dom, void *ptr)
{
    vcpu_guest_context_x86_64_t *ctxt = ptr;
    xen_pfn_t cr3_pfn;

    xc_dom_printf("%s: called\n", __FUNCTION__);

    /* clear everything */
    memset(ctxt, 0, sizeof(*ctxt));

    ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_64;
    ctxt->user_regs.es = FLAT_KERNEL_DS_X86_64;
    ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_64;
    ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_64;
    ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_64;
    ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_64;
    ctxt->user_regs.rip = dom->parms.virt_entry;
    /* Stack grows down from the top of the boot-stack page. */
    ctxt->user_regs.rsp =
        dom->parms.virt_base + (dom->bootstack_pfn + 1) * PAGE_SIZE_X86;
    /* %rsi carries the start_info virtual address to the guest. */
    ctxt->user_regs.rsi =
        dom->parms.virt_base + (dom->start_info_pfn) * PAGE_SIZE_X86;
    ctxt->user_regs.rflags = 1 << 9; /* Interrupt Enable */

    ctxt->kernel_ss = ctxt->user_regs.ss;
    /* NOTE(review): .esp in a 64-bit context — presumably aliases rsp
     * via the register union in Xen's public headers; confirm. */
    ctxt->kernel_sp = ctxt->user_regs.esp;

    ctxt->flags = VGCF_in_kernel_X86_64 | VGCF_online_X86_64;
    cr3_pfn = xc_dom_p2m_guest(dom, dom->pgtables_seg.pfn);
    ctxt->ctrlreg[3] = xen_pfn_to_cr3_x86_64(cr3_pfn);
    xc_dom_printf("%s: cr3: pfn 0x%" PRIpfn " mfn 0x%" PRIpfn "\n",
                  __FUNCTION__, dom->pgtables_seg.pfn, cr3_pfn);

    return 0;
}
592 /* ------------------------------------------------------------------------ */
/* Arch hooks for plain (non-PAE) 32-bit PV guests. */
static struct xc_dom_arch xc_dom_32 = {
    .guest_type = "xen-3.0-x86_32",
    .native_protocol = XEN_IO_PROTO_ABI_X86_32,
    .page_shift = PAGE_SHIFT_X86,
    .sizeof_pfn = 4,            /* 32-bit entries in the p2m table */
    .alloc_magic_pages = alloc_magic_pages,
    .count_pgtables = count_pgtables_x86_32,
    .setup_pgtables = setup_pgtables_x86_32,
    .start_info = start_info_x86_32,
    .shared_info = shared_info_x86_32,
    .vcpu = vcpu_x86_32,
};
/* Arch hooks for PAE 32-bit PV guests (shares start/shared-info and
 * vcpu hooks with the non-PAE variant; pagetable hooks differ). */
static struct xc_dom_arch xc_dom_32_pae = {
    .guest_type = "xen-3.0-x86_32p",
    .native_protocol = XEN_IO_PROTO_ABI_X86_32,
    .page_shift = PAGE_SHIFT_X86,
    .sizeof_pfn = 4,            /* 32-bit entries in the p2m table */
    .alloc_magic_pages = alloc_magic_pages,
    .count_pgtables = count_pgtables_x86_32_pae,
    .setup_pgtables = setup_pgtables_x86_32_pae,
    .start_info = start_info_x86_32,
    .shared_info = shared_info_x86_32,
    .vcpu = vcpu_x86_32,
};
/* Arch hooks for 64-bit PV guests. */
static struct xc_dom_arch xc_dom_64 = {
    .guest_type = "xen-3.0-x86_64",
    .native_protocol = XEN_IO_PROTO_ABI_X86_64,
    .page_shift = PAGE_SHIFT_X86,
    .sizeof_pfn = 8,            /* 64-bit entries in the p2m table */
    .alloc_magic_pages = alloc_magic_pages,
    .count_pgtables = count_pgtables_x86_64,
    .setup_pgtables = setup_pgtables_x86_64,
    .start_info = start_info_x86_64,
    .shared_info = shared_info_x86_64,
    .vcpu = vcpu_x86_64,
};
/* Register all three x86 guest ABIs with the generic domain builder.
 * (__init presumably marks this to run automatically at library load
 * — check its definition in xg_private.h / xc_dom.h.) */
static void __init register_arch_hooks(void)
{
    xc_dom_register_arch_hooks(&xc_dom_32);
    xc_dom_register_arch_hooks(&xc_dom_32_pae);
    xc_dom_register_arch_hooks(&xc_dom_64);
}
639 static int x86_compat(int xc, domid_t domid, char *guest_type)
640 {
641 static const struct {
642 char *guest;
643 uint32_t size;
644 } types[] = {
645 { "xen-3.0-x86_32p", 32 },
646 { "xen-3.0-x86_64", 64 },
647 };
648 DECLARE_DOMCTL;
649 int i,rc;
651 memset(&domctl, 0, sizeof(domctl));
652 domctl.domain = domid;
653 domctl.cmd = XEN_DOMCTL_set_address_size;
654 for ( i = 0; i < sizeof(types)/sizeof(types[0]); i++ )
655 if ( !strcmp(types[i].guest, guest_type) )
656 domctl.u.address_size.size = types[i].size;
657 if ( domctl.u.address_size.size == 0 )
658 /* nothing to do */
659 return 0;
661 xc_dom_printf("%s: guest %s, address size %" PRId32 "\n", __FUNCTION__,
662 guest_type, domctl.u.address_size.size);
663 rc = do_domctl(xc, &domctl);
664 if ( rc != 0 )
665 xc_dom_printf("%s: warning: failed (rc=%d)\n",
666 __FUNCTION__, rc);
667 return rc;
668 }
671 static int x86_shadow(int xc, domid_t domid)
672 {
673 int rc, mode;
675 xc_dom_printf("%s: called\n", __FUNCTION__);
677 mode = XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT |
678 XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE;
680 rc = xc_shadow_control(xc, domid,
681 XEN_DOMCTL_SHADOW_OP_ENABLE,
682 NULL, 0, NULL, mode, NULL);
683 if ( rc != 0 )
684 {
685 xc_dom_panic(XC_INTERNAL_ERROR,
686 "%s: SHADOW_OP_ENABLE (mode=0x%x) failed (rc=%d)\n",
687 __FUNCTION__, mode, rc);
688 return rc;
689 }
690 xc_dom_printf("%s: shadow enabled (mode=0x%x)\n", __FUNCTION__, mode);
691 return rc;
692 }
694 int arch_setup_meminit(struct xc_dom_image *dom)
695 {
696 int rc;
697 xen_pfn_t pfn;
699 rc = x86_compat(dom->guest_xc, dom->guest_domid, dom->guest_type);
700 if ( rc )
701 return rc;
702 if ( xc_dom_feature_translated(dom) )
703 {
704 dom->shadow_enabled = 1;
705 rc = x86_shadow(dom->guest_xc, dom->guest_domid);
706 if ( rc )
707 return rc;
708 }
710 /* setup initial p2m */
711 dom->p2m_host = xc_dom_malloc(dom, sizeof(xen_pfn_t) * dom->total_pages);
712 for ( pfn = 0; pfn < dom->total_pages; pfn++ )
713 dom->p2m_host[pfn] = pfn;
715 /* allocate guest memory */
716 rc = xc_domain_memory_populate_physmap(dom->guest_xc, dom->guest_domid,
717 dom->total_pages, 0, 0,
718 dom->p2m_host);
719 return rc;
720 }
/* x86 needs no arch-specific work before the image is loaded. */
int arch_setup_bootearly(struct xc_dom_image *dom)
{
    xc_dom_printf("%s: doing nothing\n", __FUNCTION__);
    return 0;
}
/*
 * Final arch-specific setup after the image and page tables are built:
 * - PV guests: pin the top-level page table with the ABI-appropriate
 *   MMUEXT_PIN_Ln_TABLE operation.
 * - Auto-translated guests: map the shared_info and grant-table
 *   frames into the guest physmap instead.
 * In both cases, map the shared_info page locally and let the arch
 * hook initialize it.
 */
int arch_setup_bootlate(struct xc_dom_image *dom)
{
    /* Guest ABI -> pin type for the top-level page table. */
    static const struct {
        char *guest;
        unsigned long pgd_type;
    } types[] = {
        { "xen-3.0-x86_32", MMUEXT_PIN_L2_TABLE},
        { "xen-3.0-x86_32p", MMUEXT_PIN_L3_TABLE},
        { "xen-3.0-x86_64", MMUEXT_PIN_L4_TABLE},
    };
    unsigned long pgd_type = 0;
    shared_info_t *shared_info;
    xen_pfn_t shinfo;
    int i, rc;

    for ( i = 0; i < sizeof(types) / sizeof(types[0]); i++ )
        if ( !strcmp(types[i].guest, dom->guest_type) )
            pgd_type = types[i].pgd_type;

    if ( !xc_dom_feature_translated(dom) )
    {
        /* paravirtualized guest */

        /* Drop our local mapping of the pgd before pinning it
         * (move_l3_below_4G()'s comment notes pinning relies on the
         * pgtables mapping state). */
        xc_dom_unmap_one(dom, dom->pgtables_seg.pfn);
        rc = pin_table(dom->guest_xc, pgd_type,
                       xc_dom_p2m_host(dom, dom->pgtables_seg.pfn),
                       dom->guest_domid);
        if ( rc != 0 )
        {
            xc_dom_panic(XC_INTERNAL_ERROR,
                         "%s: pin_table failed (pfn 0x%" PRIpfn ", rc=%d)\n",
                         __FUNCTION__, dom->pgtables_seg.pfn, rc);
            return rc;
        }
        shinfo = dom->shared_info_mfn;
    }
    else
    {
        /* paravirtualized guest with auto-translation */
        struct xen_add_to_physmap xatp;
        int i;

        /* Map shared info frame into guest physmap. */
        xatp.domid = dom->guest_domid;
        xatp.space = XENMAPSPACE_shared_info;
        xatp.idx = 0;
        xatp.gpfn = dom->shared_info_pfn;
        rc = xc_memory_op(dom->guest_xc, XENMEM_add_to_physmap, &xatp);
        if ( rc != 0 )
        {
            xc_dom_panic(XC_INTERNAL_ERROR, "%s: mapping shared_info failed "
                         "(pfn=0x%" PRIpfn ", rc=%d)\n",
                         __FUNCTION__, xatp.gpfn, rc);
            return rc;
        }

        /* Map grant table frames into guest physmap, right after the
         * end of RAM; keep going until the hypercall fails with
         * EINVAL, which is treated as "no more frames". */
        for ( i = 0; ; i++ )
        {
            xatp.domid = dom->guest_domid;
            xatp.space = XENMAPSPACE_grant_table;
            xatp.idx = i;
            xatp.gpfn = dom->total_pages + i;
            rc = xc_memory_op(dom->guest_xc, XENMEM_add_to_physmap, &xatp);
            if ( rc != 0 )
            {
                if ( (i > 0) && (errno == EINVAL) )
                {
                    xc_dom_printf("%s: %d grant tables mapped\n", __FUNCTION__,
                                  i);
                    break;
                }
                xc_dom_panic(XC_INTERNAL_ERROR,
                             "%s: mapping grant tables failed " "(pfn=0x%"
                             PRIpfn ", rc=%d)\n", __FUNCTION__, xatp.gpfn, rc);
                return rc;
            }
        }
        shinfo = dom->shared_info_pfn;
    }

    /* setup shared_info page */
    xc_dom_printf("%s: shared_info: pfn 0x%" PRIpfn ", mfn 0x%" PRIpfn "\n",
                  __FUNCTION__, dom->shared_info_pfn, dom->shared_info_mfn);
    shared_info = xc_map_foreign_range(dom->guest_xc, dom->guest_domid,
                                       PAGE_SIZE_X86,
                                       PROT_READ | PROT_WRITE,
                                       shinfo);
    if ( shared_info == NULL )
        return -1;
    dom->arch_hooks->shared_info(dom, shared_info);
    munmap(shared_info, PAGE_SIZE_X86);

    return 0;
}
823 /*
824 * Local variables:
825 * mode: C
826 * c-set-style: "BSD"
827 * c-basic-offset: 4
828 * tab-width: 4
829 * indent-tabs-mode: nil
830 * End:
831 */