ia64/xen-unstable

view tools/libxc/xc_dom_x86.c @ 17838:e5c9c8e6e726

tools: replace sprintf with snprintf where applicable

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 12 15:41:15 2008 +0100 (2008-06-12)
/*
 * Xen domain builder -- i386 and x86_64 bits.
 *
 * Most architecture-specific code for x86 goes here.
 *   - prepare page tables.
 *   - fill architecture-specific structs.
 *
 * This code is licenced under the GPL.
 * written 2006 by Gerd Hoffmann <kraxel@suse.de>.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>

#include <xen/xen.h>
#include <xen/foreign/x86_32.h>
#include <xen/foreign/x86_64.h>
#include <xen/hvm/hvm_info_table.h>
#include <xen/io/protocols.h>

#include "xg_private.h"
#include "xc_dom.h"
#include "xenctrl.h"

/* ------------------------------------------------------------------------ */

#define bits_to_mask(bits)     (((xen_vaddr_t)1 << (bits))-1)
#define round_down(addr, mask) ((addr) & ~(mask))
#define round_up(addr, mask)   ((addr) | (mask))

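/*
 * Count how many page tables at the level identified by its shift
 * ("bits") are needed to cover the virtual range [start, end].
 * Purely illustrative example: with bits = 22 (each i386 L1 table
 * maps 4MB), the range 0xc0000000..0xc07fffff rounds to itself and
 * yields ((end - start) >> 22) + 1 = 2 tables.  bits == 0 marks an
 * unused level; bits equal to the word size marks the top-level
 * directory, of which exactly one is needed.
 */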
static unsigned long
nr_page_tables(xen_vaddr_t start, xen_vaddr_t end, unsigned long bits)
{
    xen_vaddr_t mask = bits_to_mask(bits);
    int tables;

    if ( bits == 0 )
        return 0;  /* unused */

    if ( bits == (8 * sizeof(unsigned long)) )
    {
        /* must be pgd, need one */
        start = 0;
        end = -1;
        tables = 1;
    }
    else
    {
        start = round_down(start, mask);
        end = round_up(end, mask);
        tables = ((end - start) >> bits) + 1;
    }

    xc_dom_printf("%s: 0x%016" PRIx64 "/%ld: 0x%016" PRIx64
                  " -> 0x%016" PRIx64 ", %d table(s)\n",
                  __FUNCTION__, mask, bits, start, end, tables);
    return tables;
}

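/*
 * Iterate until the page-table count converges: adding page tables
 * grows the virtual allocation, which in turn may require more page
 * tables.  The loop stops once the tables (plus the boot stack and
 * 512kB of padding) fit below the 4MB-aligned try_virt_end.  Results
 * land in dom->pg_l1..pg_l4, dom->pgtables and dom->virt_pgtab_end.
 */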
static int count_pgtables(struct xc_dom_image *dom, int pae,
                          int l4_bits, int l3_bits, int l2_bits, int l1_bits)
{
    int pages, extra_pages;
    xen_vaddr_t try_virt_end;

    extra_pages = dom->alloc_bootstack ? 1 : 0;
    extra_pages += dom->extra_pages;
    extra_pages += 128; /* 512kB padding */
    pages = extra_pages;
    for ( ; ; )
    {
        try_virt_end = round_up(dom->virt_alloc_end + pages * PAGE_SIZE_X86,
                                bits_to_mask(22)); /* 4MB alignment */
        dom->pg_l4 =
            nr_page_tables(dom->parms.virt_base, try_virt_end, l4_bits);
        dom->pg_l3 =
            nr_page_tables(dom->parms.virt_base, try_virt_end, l3_bits);
        dom->pg_l2 =
            nr_page_tables(dom->parms.virt_base, try_virt_end, l2_bits);
        dom->pg_l1 =
            nr_page_tables(dom->parms.virt_base, try_virt_end, l1_bits);
        if ( pae && try_virt_end < 0xc0000000 )
        {
            xc_dom_printf("%s: PAE: extra l2 page table for l3#3\n",
                          __FUNCTION__);
            dom->pg_l2++;
        }
        dom->pgtables = dom->pg_l4 + dom->pg_l3 + dom->pg_l2 + dom->pg_l1;
        pages = dom->pgtables + extra_pages;
        if ( dom->virt_alloc_end + pages * PAGE_SIZE_X86 <= try_virt_end + 1 )
            break;
    }
    dom->virt_pgtab_end = try_virt_end + 1;
    return 0;
}

/* ------------------------------------------------------------------------ */
/* i386 pagetables */

#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L3_PROT (_PAGE_PRESENT)

static int count_pgtables_x86_32(struct xc_dom_image *dom)
{
    return count_pgtables(dom, 0, 0, 0, 32, L2_PAGETABLE_SHIFT_I386);
}

static int count_pgtables_x86_32_pae(struct xc_dom_image *dom)
{
    return count_pgtables(dom, 1, 0, 32,
                          L3_PAGETABLE_SHIFT_PAE, L2_PAGETABLE_SHIFT_PAE);
}

#define pfn_to_paddr(pfn) ((xen_paddr_t)(pfn) << PAGE_SHIFT_X86)

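/*
 * Build the two-level (non-PAE) i386 page tables: one L2 entry per L1
 * table, one L1 entry per page, mapping the guest linearly from
 * virt_base.  Entries covering the page-table pages themselves are
 * made read-only, as PV guests require.
 */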
static int setup_pgtables_x86_32(struct xc_dom_image *dom)
{
    xen_pfn_t l2pfn = dom->pgtables_seg.pfn;
    xen_pfn_t l1pfn = dom->pgtables_seg.pfn + dom->pg_l2;
    l2_pgentry_32_t *l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
    l1_pgentry_32_t *l1tab = NULL;
    unsigned long l2off, l1off;
    xen_vaddr_t addr;
    xen_pfn_t pgpfn;

    for ( addr = dom->parms.virt_base; addr < dom->virt_pgtab_end;
          addr += PAGE_SIZE_X86 )
    {
        if ( l1tab == NULL )
        {
            /* get L1 tab, make L2 entry */
            l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
            l2off = l2_table_offset_i386(addr);
            l2tab[l2off] =
                pfn_to_paddr(xc_dom_p2m_guest(dom, l1pfn)) | L2_PROT;
            l1pfn++;
        }

        /* make L1 entry */
        l1off = l1_table_offset_i386(addr);
        pgpfn = (addr - dom->parms.virt_base) >> PAGE_SHIFT_X86;
        l1tab[l1off] =
            pfn_to_paddr(xc_dom_p2m_guest(dom, pgpfn)) | L1_PROT;
        if ( (addr >= dom->pgtables_seg.vstart) &&
             (addr < dom->pgtables_seg.vend) )
            l1tab[l1off] &= ~_PAGE_RW; /* page tables are r/o */
        if ( l1off == (L1_PAGETABLE_ENTRIES_I386 - 1) )
            l1tab = NULL;
    }
    return 0;
}

/*
 * Move the l3 page table page below 4G for guests which do not
 * support the extended-cr3 format. The l3 is currently empty so we
 * do not need to preserve the current contents.
 */
static xen_pfn_t move_l3_below_4G(struct xc_dom_image *dom,
                                  xen_pfn_t l3pfn,
                                  xen_pfn_t l3mfn)
{
    xen_pfn_t new_l3mfn;
    struct xc_mmu *mmu;
    void *l3tab;
    int xc = dom->guest_xc;

    mmu = xc_alloc_mmu_updates(xc, dom->guest_domid);
    if ( mmu == NULL )
    {
        xc_dom_printf("%s: failed at %d\n", __FUNCTION__, __LINE__);
        return l3mfn;
    }

    xc_dom_unmap_one(dom, l3pfn);

    new_l3mfn = xc_make_page_below_4G(dom->guest_xc, dom->guest_domid, l3mfn);
    if ( !new_l3mfn )
        goto out;

    dom->p2m_host[l3pfn] = new_l3mfn;
    if ( xc_dom_update_guest_p2m(dom) != 0 )
        goto out;

    if ( xc_add_mmu_update(xc, mmu,
                           (((unsigned long long)new_l3mfn)
                            << XC_DOM_PAGE_SHIFT(dom)) |
                           MMU_MACHPHYS_UPDATE, l3pfn) )
        goto out;

    if ( xc_flush_mmu_updates(xc, mmu) )
        goto out;

    /*
     * This ensures that the entire pgtables_seg is mapped by a single
     * mmap region. arch_setup_bootlate() relies on this to be able to
     * unmap and pin the pagetables.
     */
    if ( xc_dom_seg_to_ptr(dom, &dom->pgtables_seg) == NULL )
        goto out;

    l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);
    memset(l3tab, 0, XC_DOM_PAGE_SIZE(dom));

    xc_dom_printf("%s: successfully relocated L3 below 4G. "
                  "(L3 PFN %#"PRIpfn" MFN %#"PRIpfn"=>%#"PRIpfn")\n",
                  __FUNCTION__, l3pfn, l3mfn, new_l3mfn);

    l3mfn = new_l3mfn;

 out:
    free(mmu);

    return l3mfn;
}

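/*
 * Build the three-level PAE page tables.  Guests without extended-cr3
 * support (dom->parms.pae == 1) need the L3 page below 4G, so it may
 * first have to be relocated via move_l3_below_4G().  As in the
 * non-PAE variant, mappings of the page-table pages themselves are
 * made read-only.
 */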
static int setup_pgtables_x86_32_pae(struct xc_dom_image *dom)
{
    xen_pfn_t l3pfn = dom->pgtables_seg.pfn;
    xen_pfn_t l2pfn = dom->pgtables_seg.pfn + dom->pg_l3;
    xen_pfn_t l1pfn = dom->pgtables_seg.pfn + dom->pg_l3 + dom->pg_l2;
    l3_pgentry_64_t *l3tab;
    l2_pgentry_64_t *l2tab = NULL;
    l1_pgentry_64_t *l1tab = NULL;
    unsigned long l3off, l2off, l1off;
    xen_vaddr_t addr;
    xen_pfn_t pgpfn;
    xen_pfn_t l3mfn = xc_dom_p2m_guest(dom, l3pfn);

    if ( dom->parms.pae == 1 )
    {
        if ( l3mfn >= 0x100000 )
            l3mfn = move_l3_below_4G(dom, l3pfn, l3mfn);

        if ( l3mfn >= 0x100000 )
        {
            xc_dom_panic(XC_INTERNAL_ERROR, "%s: cannot move L3 below 4G. "
                         "extended-cr3 not supported by guest. "
                         "(L3 PFN %#"PRIpfn" MFN %#"PRIpfn")\n",
                         __FUNCTION__, l3pfn, l3mfn);
            return -EINVAL;
        }
    }

    l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);

    for ( addr = dom->parms.virt_base; addr < dom->virt_pgtab_end;
          addr += PAGE_SIZE_X86 )
    {
        if ( l2tab == NULL )
        {
            /* get L2 tab, make L3 entry */
            l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
            l3off = l3_table_offset_pae(addr);
            l3tab[l3off] =
                pfn_to_paddr(xc_dom_p2m_guest(dom, l2pfn)) | L3_PROT;
            l2pfn++;
        }

        if ( l1tab == NULL )
        {
            /* get L1 tab, make L2 entry */
            l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
            l2off = l2_table_offset_pae(addr);
            l2tab[l2off] =
                pfn_to_paddr(xc_dom_p2m_guest(dom, l1pfn)) | L2_PROT;
            if ( l2off == (L2_PAGETABLE_ENTRIES_PAE - 1) )
                l2tab = NULL;
            l1pfn++;
        }

        /* make L1 entry */
        l1off = l1_table_offset_pae(addr);
        pgpfn = (addr - dom->parms.virt_base) >> PAGE_SHIFT_X86;
        l1tab[l1off] =
            pfn_to_paddr(xc_dom_p2m_guest(dom, pgpfn)) | L1_PROT;
        if ( (addr >= dom->pgtables_seg.vstart) &&
             (addr < dom->pgtables_seg.vend) )
            l1tab[l1off] &= ~_PAGE_RW; /* page tables are r/o */
        if ( l1off == (L1_PAGETABLE_ENTRIES_PAE - 1) )
            l1tab = NULL;
    }

    if ( dom->virt_pgtab_end <= 0xc0000000 )
    {
        xc_dom_printf("%s: PAE: extra l2 page table for l3#3\n", __FUNCTION__);
        l3tab[3] = pfn_to_paddr(xc_dom_p2m_guest(dom, l2pfn)) | L3_PROT;
    }
    return 0;
}

#undef L1_PROT
#undef L2_PROT
#undef L3_PROT

/* ------------------------------------------------------------------------ */
/* x86_64 pagetables */

static int count_pgtables_x86_64(struct xc_dom_image *dom)
{
    return count_pgtables(dom, 0,
                          L4_PAGETABLE_SHIFT_X86_64 + 9,
                          L4_PAGETABLE_SHIFT_X86_64,
                          L3_PAGETABLE_SHIFT_X86_64,
                          L2_PAGETABLE_SHIFT_X86_64);
}

#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)

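/*
 * Build the four-level x86_64 page tables (L4 -> L3 -> L2 -> L1),
 * again mapping the guest linearly from virt_base and marking the
 * page-table pages themselves read-only.
 */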
static int setup_pgtables_x86_64(struct xc_dom_image *dom)
{
    xen_pfn_t l4pfn = dom->pgtables_seg.pfn;
    xen_pfn_t l3pfn = dom->pgtables_seg.pfn + dom->pg_l4;
    xen_pfn_t l2pfn = dom->pgtables_seg.pfn + dom->pg_l4 + dom->pg_l3;
    xen_pfn_t l1pfn =
        dom->pgtables_seg.pfn + dom->pg_l4 + dom->pg_l3 + dom->pg_l2;
    l4_pgentry_64_t *l4tab = xc_dom_pfn_to_ptr(dom, l4pfn, 1);
    l3_pgentry_64_t *l3tab = NULL;
    l2_pgentry_64_t *l2tab = NULL;
    l1_pgentry_64_t *l1tab = NULL;
    uint64_t l4off, l3off, l2off, l1off;
    uint64_t addr;
    xen_pfn_t pgpfn;

    for ( addr = dom->parms.virt_base; addr < dom->virt_pgtab_end;
          addr += PAGE_SIZE_X86 )
    {
        if ( l3tab == NULL )
        {
            /* get L3 tab, make L4 entry */
            l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);
            l4off = l4_table_offset_x86_64(addr);
            l4tab[l4off] =
                pfn_to_paddr(xc_dom_p2m_guest(dom, l3pfn)) | L4_PROT;
            l3pfn++;
        }

        if ( l2tab == NULL )
        {
            /* get L2 tab, make L3 entry */
            l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
            l3off = l3_table_offset_x86_64(addr);
            l3tab[l3off] =
                pfn_to_paddr(xc_dom_p2m_guest(dom, l2pfn)) | L3_PROT;
            if ( l3off == (L3_PAGETABLE_ENTRIES_X86_64 - 1) )
                l3tab = NULL;
            l2pfn++;
        }

        if ( l1tab == NULL )
        {
            /* get L1 tab, make L2 entry */
            l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
            l2off = l2_table_offset_x86_64(addr);
            l2tab[l2off] =
                pfn_to_paddr(xc_dom_p2m_guest(dom, l1pfn)) | L2_PROT;
            if ( l2off == (L2_PAGETABLE_ENTRIES_X86_64 - 1) )
                l2tab = NULL;
            l1pfn++;
        }

        /* make L1 entry */
        l1off = l1_table_offset_x86_64(addr);
        pgpfn = (addr - dom->parms.virt_base) >> PAGE_SHIFT_X86;
        l1tab[l1off] =
            pfn_to_paddr(xc_dom_p2m_guest(dom, pgpfn)) | L1_PROT;
        if ( (addr >= dom->pgtables_seg.vstart) &&
             (addr < dom->pgtables_seg.vend) )
            l1tab[l1off] &= ~_PAGE_RW; /* page tables are r/o */
        if ( l1off == (L1_PAGETABLE_ENTRIES_X86_64 - 1) )
            l1tab = NULL;
    }
    return 0;
}

#undef L1_PROT
#undef L2_PROT
#undef L3_PROT
#undef L4_PROT

/* ------------------------------------------------------------------------ */

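/*
 * Allocate the guest pages the builder fills in directly: the
 * phys-to-machine table segment, the start_info, xenstore and console
 * pages, and (for auto-translated guests) a shared_info page.  Also
 * flag that a boot stack page is needed.
 */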
static int alloc_magic_pages(struct xc_dom_image *dom)
{
    size_t p2m_size = dom->total_pages * dom->arch_hooks->sizeof_pfn;

    /* allocate phys2mach table */
    if ( xc_dom_alloc_segment(dom, &dom->p2m_seg, "phys2mach", 0, p2m_size) )
        return -1;
    dom->p2m_guest = xc_dom_seg_to_ptr(dom, &dom->p2m_seg);

    /* allocate special pages */
    dom->start_info_pfn = xc_dom_alloc_page(dom, "start info");
    dom->xenstore_pfn = xc_dom_alloc_page(dom, "xenstore");
    dom->console_pfn = xc_dom_alloc_page(dom, "console");
    if ( xc_dom_feature_translated(dom) )
        dom->shared_info_pfn = xc_dom_alloc_page(dom, "shared info");
    dom->alloc_bootstack = 1;

    return 0;
}

/* ------------------------------------------------------------------------ */

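/*
 * Fill in the start_info page handed to the guest kernel at boot:
 * memory size, shared_info location, page-table base, p2m list,
 * xenstore/console channels, optional ramdisk module and command
 * line.  The 32-bit and 64-bit variants differ only in the struct
 * layout they write.
 */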
static int start_info_x86_32(struct xc_dom_image *dom)
{
    start_info_x86_32_t *start_info =
        xc_dom_pfn_to_ptr(dom, dom->start_info_pfn, 1);
    xen_pfn_t shinfo = xc_dom_feature_translated(dom) ?
        dom->shared_info_pfn : dom->shared_info_mfn;

    xc_dom_printf("%s: called\n", __FUNCTION__);

    memset(start_info, 0, sizeof(*start_info));
    snprintf(start_info->magic, sizeof(start_info->magic), "%s",
             dom->guest_type);
    start_info->nr_pages = dom->total_pages;
    start_info->shared_info = shinfo << PAGE_SHIFT_X86;
    start_info->pt_base = dom->pgtables_seg.vstart;
    start_info->nr_pt_frames = dom->pgtables;
    start_info->mfn_list = dom->p2m_seg.vstart;

    start_info->flags = dom->flags;
    start_info->store_mfn = xc_dom_p2m_guest(dom, dom->xenstore_pfn);
    start_info->store_evtchn = dom->xenstore_evtchn;
    start_info->console.domU.mfn = xc_dom_p2m_guest(dom, dom->console_pfn);
    start_info->console.domU.evtchn = dom->console_evtchn;

    if ( dom->ramdisk_blob )
    {
        start_info->mod_start = dom->ramdisk_seg.vstart;
        start_info->mod_len = dom->ramdisk_seg.vend - dom->ramdisk_seg.vstart;
    }

    if ( dom->cmdline )
    {
        strncpy((char *)start_info->cmd_line, dom->cmdline, MAX_GUEST_CMDLINE);
        start_info->cmd_line[MAX_GUEST_CMDLINE - 1] = '\0';
    }

    return 0;
}

static int start_info_x86_64(struct xc_dom_image *dom)
{
    start_info_x86_64_t *start_info =
        xc_dom_pfn_to_ptr(dom, dom->start_info_pfn, 1);
    xen_pfn_t shinfo = xc_dom_feature_translated(dom) ?
        dom->shared_info_pfn : dom->shared_info_mfn;

    xc_dom_printf("%s: called\n", __FUNCTION__);

    memset(start_info, 0, sizeof(*start_info));
    snprintf(start_info->magic, sizeof(start_info->magic), "%s",
             dom->guest_type);
    start_info->nr_pages = dom->total_pages;
    start_info->shared_info = shinfo << PAGE_SHIFT_X86;
    start_info->pt_base = dom->pgtables_seg.vstart;
    start_info->nr_pt_frames = dom->pgtables;
    start_info->mfn_list = dom->p2m_seg.vstart;

    start_info->flags = dom->flags;
    start_info->store_mfn = xc_dom_p2m_guest(dom, dom->xenstore_pfn);
    start_info->store_evtchn = dom->xenstore_evtchn;
    start_info->console.domU.mfn = xc_dom_p2m_guest(dom, dom->console_pfn);
    start_info->console.domU.evtchn = dom->console_evtchn;

    if ( dom->ramdisk_blob )
    {
        start_info->mod_start = dom->ramdisk_seg.vstart;
        start_info->mod_len = dom->ramdisk_seg.vend - dom->ramdisk_seg.vstart;
    }

    if ( dom->cmdline )
    {
        strncpy((char *)start_info->cmd_line, dom->cmdline, MAX_GUEST_CMDLINE);
        start_info->cmd_line[MAX_GUEST_CMDLINE - 1] = '\0';
    }

    return 0;
}

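/*
 * Initialise the shared_info page: zero it and mask event-channel
 * upcalls on every vCPU until the guest sets up its event handling.
 */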
static int shared_info_x86_32(struct xc_dom_image *dom, void *ptr)
{
    shared_info_x86_32_t *shared_info = ptr;
    int i;

    xc_dom_printf("%s: called\n", __FUNCTION__);

    memset(shared_info, 0, sizeof(*shared_info));
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
    return 0;
}

static int shared_info_x86_64(struct xc_dom_image *dom, void *ptr)
{
    shared_info_x86_64_t *shared_info = ptr;
    int i;

    xc_dom_printf("%s: called\n", __FUNCTION__);

    memset(shared_info, 0, sizeof(*shared_info));
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
    return 0;
}

/* ------------------------------------------------------------------------ */

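/*
 * Build the initial vCPU context: flat kernel segments, instruction
 * pointer at the image entry point, stack pointer just above the boot
 * stack page, ESI/RSI pointing at the start_info page, interrupts
 * enabled, and CR3 pointing at the page-table segment.  32-bit guests
 * announcing extended-cr3 or bimodal PAE support additionally get the
 * pae_extended_cr3 vm_assist.
 */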
static int vcpu_x86_32(struct xc_dom_image *dom, void *ptr)
{
    vcpu_guest_context_x86_32_t *ctxt = ptr;
    xen_pfn_t cr3_pfn;

    xc_dom_printf("%s: called\n", __FUNCTION__);

    /* clear everything */
    memset(ctxt, 0, sizeof(*ctxt));

    ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_32;
    ctxt->user_regs.es = FLAT_KERNEL_DS_X86_32;
    ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_32;
    ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_32;
    ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_32;
    ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_32;
    ctxt->user_regs.eip = dom->parms.virt_entry;
    ctxt->user_regs.esp =
        dom->parms.virt_base + (dom->bootstack_pfn + 1) * PAGE_SIZE_X86;
    ctxt->user_regs.esi =
        dom->parms.virt_base + (dom->start_info_pfn) * PAGE_SIZE_X86;
    ctxt->user_regs.eflags = 1 << 9; /* Interrupt Enable */

    ctxt->kernel_ss = ctxt->user_regs.ss;
    ctxt->kernel_sp = ctxt->user_regs.esp;

    ctxt->flags = VGCF_in_kernel_X86_32 | VGCF_online_X86_32;
    if ( dom->parms.pae == 2 /* extended_cr3 */ ||
         dom->parms.pae == 3 /* bimodal */ )
        ctxt->vm_assist |= (1UL << VMASST_TYPE_pae_extended_cr3);

    cr3_pfn = xc_dom_p2m_guest(dom, dom->pgtables_seg.pfn);
    ctxt->ctrlreg[3] = xen_pfn_to_cr3_x86_32(cr3_pfn);
    xc_dom_printf("%s: cr3: pfn 0x%" PRIpfn " mfn 0x%" PRIpfn "\n",
                  __FUNCTION__, dom->pgtables_seg.pfn, cr3_pfn);

    return 0;
}

static int vcpu_x86_64(struct xc_dom_image *dom, void *ptr)
{
    vcpu_guest_context_x86_64_t *ctxt = ptr;
    xen_pfn_t cr3_pfn;

    xc_dom_printf("%s: called\n", __FUNCTION__);

    /* clear everything */
    memset(ctxt, 0, sizeof(*ctxt));

    ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_64;
    ctxt->user_regs.es = FLAT_KERNEL_DS_X86_64;
    ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_64;
    ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_64;
    ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_64;
    ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_64;
    ctxt->user_regs.rip = dom->parms.virt_entry;
    ctxt->user_regs.rsp =
        dom->parms.virt_base + (dom->bootstack_pfn + 1) * PAGE_SIZE_X86;
    ctxt->user_regs.rsi =
        dom->parms.virt_base + (dom->start_info_pfn) * PAGE_SIZE_X86;
    ctxt->user_regs.rflags = 1 << 9; /* Interrupt Enable */

    ctxt->kernel_ss = ctxt->user_regs.ss;
    ctxt->kernel_sp = ctxt->user_regs.esp;

    ctxt->flags = VGCF_in_kernel_X86_64 | VGCF_online_X86_64;
    cr3_pfn = xc_dom_p2m_guest(dom, dom->pgtables_seg.pfn);
    ctxt->ctrlreg[3] = xen_pfn_to_cr3_x86_64(cr3_pfn);
    xc_dom_printf("%s: cr3: pfn 0x%" PRIpfn " mfn 0x%" PRIpfn "\n",
                  __FUNCTION__, dom->pgtables_seg.pfn, cr3_pfn);

    return 0;
}

/* ------------------------------------------------------------------------ */

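/*
 * Per-guest-type hook tables tying the functions above together.  The
 * generic domain-builder code selects one of these by matching the
 * guest_type string and calls through it for every arch-specific
 * build step.
 */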
static struct xc_dom_arch xc_dom_32 = {
    .guest_type = "xen-3.0-x86_32",
    .native_protocol = XEN_IO_PROTO_ABI_X86_32,
    .page_shift = PAGE_SHIFT_X86,
    .sizeof_pfn = 4,
    .alloc_magic_pages = alloc_magic_pages,
    .count_pgtables = count_pgtables_x86_32,
    .setup_pgtables = setup_pgtables_x86_32,
    .start_info = start_info_x86_32,
    .shared_info = shared_info_x86_32,
    .vcpu = vcpu_x86_32,
};
static struct xc_dom_arch xc_dom_32_pae = {
    .guest_type = "xen-3.0-x86_32p",
    .native_protocol = XEN_IO_PROTO_ABI_X86_32,
    .page_shift = PAGE_SHIFT_X86,
    .sizeof_pfn = 4,
    .alloc_magic_pages = alloc_magic_pages,
    .count_pgtables = count_pgtables_x86_32_pae,
    .setup_pgtables = setup_pgtables_x86_32_pae,
    .start_info = start_info_x86_32,
    .shared_info = shared_info_x86_32,
    .vcpu = vcpu_x86_32,
};

static struct xc_dom_arch xc_dom_64 = {
    .guest_type = "xen-3.0-x86_64",
    .native_protocol = XEN_IO_PROTO_ABI_X86_64,
    .page_shift = PAGE_SHIFT_X86,
    .sizeof_pfn = 8,
    .alloc_magic_pages = alloc_magic_pages,
    .count_pgtables = count_pgtables_x86_64,
    .setup_pgtables = setup_pgtables_x86_64,
    .start_info = start_info_x86_64,
    .shared_info = shared_info_x86_64,
    .vcpu = vcpu_x86_64,
};

static void __init register_arch_hooks(void)
{
    xc_dom_register_arch_hooks(&xc_dom_32);
    xc_dom_register_arch_hooks(&xc_dom_32_pae);
    xc_dom_register_arch_hooks(&xc_dom_64);
}

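/*
 * Tell the hypervisor which address size the new domain uses
 * (XEN_DOMCTL_set_address_size).  Plain non-PAE 32-bit guests are not
 * in the table, so the size stays 0 and no domctl is issued.
 */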
static int x86_compat(int xc, domid_t domid, char *guest_type)
{
    static const struct {
        char *guest;
        uint32_t size;
    } types[] = {
        { "xen-3.0-x86_32p", 32 },
        { "xen-3.0-x86_64",  64 },
    };
    DECLARE_DOMCTL;
    int i, rc;

    memset(&domctl, 0, sizeof(domctl));
    domctl.domain = domid;
    domctl.cmd = XEN_DOMCTL_set_address_size;
    for ( i = 0; i < sizeof(types)/sizeof(types[0]); i++ )
        if ( !strcmp(types[i].guest, guest_type) )
            domctl.u.address_size.size = types[i].size;
    if ( domctl.u.address_size.size == 0 )
        /* nothing to do */
        return 0;

    xc_dom_printf("%s: guest %s, address size %" PRId32 "\n", __FUNCTION__,
                  guest_type, domctl.u.address_size.size);
    rc = do_domctl(xc, &domctl);
    if ( rc != 0 )
        xc_dom_printf("%s: warning: failed (rc=%d)\n",
                      __FUNCTION__, rc);
    return rc;
}

static int x86_shadow(int xc, domid_t domid)
{
    int rc, mode;

    xc_dom_printf("%s: called\n", __FUNCTION__);

    mode = XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT |
        XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE;

    rc = xc_shadow_control(xc, domid,
                           XEN_DOMCTL_SHADOW_OP_ENABLE,
                           NULL, 0, NULL, mode, NULL);
    if ( rc != 0 )
    {
        xc_dom_panic(XC_INTERNAL_ERROR,
                     "%s: SHADOW_OP_ENABLE (mode=0x%x) failed (rc=%d)\n",
                     __FUNCTION__, mode, rc);
        return rc;
    }
    xc_dom_printf("%s: shadow enabled (mode=0x%x)\n", __FUNCTION__, mode);
    return rc;
}

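/*
 * First stage of memory setup: set the guest address size, enable
 * shadow/translate mode for auto-translated guests, build an identity
 * p2m table and populate the guest's physmap with total_pages pages.
 */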
int arch_setup_meminit(struct xc_dom_image *dom)
{
    int rc;
    xen_pfn_t pfn;

    rc = x86_compat(dom->guest_xc, dom->guest_domid, dom->guest_type);
    if ( rc )
        return rc;
    if ( xc_dom_feature_translated(dom) )
    {
        dom->shadow_enabled = 1;
        rc = x86_shadow(dom->guest_xc, dom->guest_domid);
        if ( rc )
            return rc;
    }

    /* setup initial p2m */
    dom->p2m_host = xc_dom_malloc(dom, sizeof(xen_pfn_t) * dom->total_pages);
    for ( pfn = 0; pfn < dom->total_pages; pfn++ )
        dom->p2m_host[pfn] = pfn;

    /* allocate guest memory */
    rc = xc_domain_memory_populate_physmap(dom->guest_xc, dom->guest_domid,
                                           dom->total_pages, 0, 0,
                                           dom->p2m_host);
    return rc;
}

int arch_setup_bootearly(struct xc_dom_image *dom)
{
    xc_dom_printf("%s: doing nothing\n", __FUNCTION__);
    return 0;
}

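/*
 * Late boot setup.  For plain PV guests the top-level page directory
 * is unmapped and pinned (L2/L3/L4 depending on guest type).  For
 * auto-translated guests the shared_info and grant-table frames are
 * added to the physmap instead.  In both cases the shared_info page
 * is then mapped and initialised via the arch hook.
 */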
int arch_setup_bootlate(struct xc_dom_image *dom)
{
    static const struct {
        char *guest;
        unsigned long pgd_type;
    } types[] = {
        { "xen-3.0-x86_32",  MMUEXT_PIN_L2_TABLE},
        { "xen-3.0-x86_32p", MMUEXT_PIN_L3_TABLE},
        { "xen-3.0-x86_64",  MMUEXT_PIN_L4_TABLE},
    };
    unsigned long pgd_type = 0;
    shared_info_t *shared_info;
    xen_pfn_t shinfo;
    int i, rc;

    for ( i = 0; i < sizeof(types) / sizeof(types[0]); i++ )
        if ( !strcmp(types[i].guest, dom->guest_type) )
            pgd_type = types[i].pgd_type;

    if ( !xc_dom_feature_translated(dom) )
    {
        /* paravirtualized guest */
        xc_dom_unmap_one(dom, dom->pgtables_seg.pfn);
        rc = pin_table(dom->guest_xc, pgd_type,
                       xc_dom_p2m_host(dom, dom->pgtables_seg.pfn),
                       dom->guest_domid);
        if ( rc != 0 )
        {
            xc_dom_panic(XC_INTERNAL_ERROR,
                         "%s: pin_table failed (pfn 0x%" PRIpfn ", rc=%d)\n",
                         __FUNCTION__, dom->pgtables_seg.pfn, rc);
            return rc;
        }
        shinfo = dom->shared_info_mfn;
    }
    else
    {
        /* paravirtualized guest with auto-translation */
        struct xen_add_to_physmap xatp;
        int i;

        /* Map shared info frame into guest physmap. */
        xatp.domid = dom->guest_domid;
        xatp.space = XENMAPSPACE_shared_info;
        xatp.idx = 0;
        xatp.gpfn = dom->shared_info_pfn;
        rc = xc_memory_op(dom->guest_xc, XENMEM_add_to_physmap, &xatp);
        if ( rc != 0 )
        {
            xc_dom_panic(XC_INTERNAL_ERROR, "%s: mapping shared_info failed "
                         "(pfn=0x%" PRIpfn ", rc=%d)\n",
                         __FUNCTION__, xatp.gpfn, rc);
            return rc;
        }

        /* Map grant table frames into guest physmap. */
        for ( i = 0; ; i++ )
        {
            xatp.domid = dom->guest_domid;
            xatp.space = XENMAPSPACE_grant_table;
            xatp.idx = i;
            xatp.gpfn = dom->total_pages + i;
            rc = xc_memory_op(dom->guest_xc, XENMEM_add_to_physmap, &xatp);
            if ( rc != 0 )
            {
                if ( (i > 0) && (errno == EINVAL) )
                {
                    xc_dom_printf("%s: %d grant tables mapped\n", __FUNCTION__,
                                  i);
                    break;
                }
                xc_dom_panic(XC_INTERNAL_ERROR,
                             "%s: mapping grant tables failed (pfn=0x%"
                             PRIpfn ", rc=%d)\n", __FUNCTION__, xatp.gpfn, rc);
                return rc;
            }
        }
        shinfo = dom->shared_info_pfn;
    }

    /* setup shared_info page */
    xc_dom_printf("%s: shared_info: pfn 0x%" PRIpfn ", mfn 0x%" PRIpfn "\n",
                  __FUNCTION__, dom->shared_info_pfn, dom->shared_info_mfn);
    shared_info = xc_map_foreign_range(dom->guest_xc, dom->guest_domid,
                                       PAGE_SIZE_X86,
                                       PROT_READ | PROT_WRITE,
                                       shinfo);
    if ( shared_info == NULL )
        return -1;
    dom->arch_hooks->shared_info(dom, shared_info);
    munmap(shared_info, PAGE_SIZE_X86);

    return 0;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */