ia64/xen-unstable

view tools/libxc/xc_linux_build.c @ 6489:1f46fafb7221

Fix build failure due to missing unistd.h
Signed-off-by: Michal Ostrowski <mostrows@watson.ibm.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Aug 30 13:16:05 2005 +0000 (2005-08-30)
parents 9312a3e8a6f8
children b043928b0873
/******************************************************************************
 * xc_linux_build.c
 */

#include "xg_private.h"
#include <xenctrl.h>

#if defined(__i386__)
#define ELFSIZE 32
#endif

#if defined(__x86_64__) || defined(__ia64__)
#define ELFSIZE 64
#endif

#include "xc_elf.h"
#include "xc_aout9.h"
#include <stdlib.h>
#include <unistd.h>
#include <zlib.h>
#if defined(__i386__)
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L3_PROT (_PAGE_PRESENT)
#endif

#if defined(__x86_64__)
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#endif
#define round_pgup(_p)   (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
#define round_pgdown(_p) ((_p)&PAGE_MASK)
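
/*
 * Illustrative note (not in the original source): with 4kB pages,
 * PAGE_SIZE == 0x1000 and PAGE_MASK == ~0xfffUL, so
 * round_pgup(0x1234) == 0x2000 and round_pgdown(0x1234) == 0x1000.
 */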
#ifdef __ia64__
#define probe_aout9(image,image_size,load_funcs) 1
#endif
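
/*
 * Editorial note: each probe_* routine returns 0 when it recognizes the
 * image and fills in *load_funcs, so stubbing probe_aout9 to the constant
 * 1 simply rules out a.out9 images on ia64 in probeimageformat() below.
 */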
static int probeimageformat(char *image,
                            unsigned long image_size,
                            struct load_funcs *load_funcs)
{
    if ( probe_elf(image, image_size, load_funcs) &&
         probe_bin(image, image_size, load_funcs) &&
         probe_aout9(image, image_size, load_funcs) )
    {
        ERROR( "Unrecognized image format" );
        return -EINVAL;
    }

    return 0;
}
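
/*
 * Editorial note: alloc_pt() takes the next unused bootstrap frame
 * (page_array[ppt_alloc], a machine frame number) for use as a page-table
 * page. It records the table's machine address in ltab, unmaps the
 * previously mapped table page (if any), then maps the new frame at vltab
 * and zeroes it. On mapping failure it jumps to the caller's error_out
 * label.
 */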
#define alloc_pt(ltab, vltab)                                           \
    ltab = page_array[ppt_alloc++] << PAGE_SHIFT;                       \
    if (vltab != NULL) {                                                \
        munmap(vltab, PAGE_SIZE);                                       \
    }                                                                   \
    if ((vltab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,        \
                                      PROT_READ|PROT_WRITE,             \
                                      ltab >> PAGE_SHIFT)) == NULL) {   \
        goto error_out;                                                 \
    }                                                                   \
    memset(vltab, 0, PAGE_SIZE);
#if defined(__i386__)

static int setup_pg_tables(int xc_handle, u32 dom,
                           vcpu_guest_context_t *ctxt,
                           unsigned long dsi_v_start,
                           unsigned long v_end,
                           unsigned long *page_array,
                           unsigned long vpt_start,
                           unsigned long vpt_end)
{
    l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
    l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
    unsigned long l1tab = 0;
    unsigned long l2tab = 0;
    unsigned long ppt_alloc;
    unsigned long count;

    ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
    alloc_pt(l2tab, vl2tab);
    vl2e = &vl2tab[l2_table_offset(dsi_v_start)];
    ctxt->ctrlreg[3] = l2tab;

    for ( count = 0; count < ((v_end-dsi_v_start)>>PAGE_SHIFT); count++ )
    {
        if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
        {
            alloc_pt(l1tab, vl1tab);
            vl1e = &vl1tab[l1_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
            *vl2e++ = l1tab | L2_PROT;
        }

        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
        if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
             (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) )
            *vl1e &= ~_PAGE_RW;
        vl1e++;
    }
    munmap(vl1tab, PAGE_SIZE);
    munmap(vl2tab, PAGE_SIZE);
    return 0;

 error_out:
    if (vl1tab)
        munmap(vl1tab, PAGE_SIZE);
    if (vl2tab)
        munmap(vl2tab, PAGE_SIZE);
    return -1;
}
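
/*
 * Editorial note: page_array[] maps each pseudo-physical page of the
 * bootstrap region to the machine frame backing it, so L1 entries are
 * built directly from it. The entries covering [vpt_start, vpt_end) are
 * stripped of _PAGE_RW because they map the page tables themselves;
 * Xen refuses to let a guest hold writable mappings of its own
 * page-table pages.
 */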
static int setup_pg_tables_pae(int xc_handle, u32 dom,
                               vcpu_guest_context_t *ctxt,
                               unsigned long dsi_v_start,
                               unsigned long v_end,
                               unsigned long *page_array,
                               unsigned long vpt_start,
                               unsigned long vpt_end)
{
    l1_pgentry_64_t *vl1tab=NULL, *vl1e=NULL;
    l2_pgentry_64_t *vl2tab=NULL, *vl2e=NULL;
    l3_pgentry_64_t *vl3tab=NULL, *vl3e=NULL;
    unsigned long l1tab = 0;
    unsigned long l2tab = 0;
    unsigned long l3tab = 0;
    unsigned long ppt_alloc;
    unsigned long count;

    /* First allocate page for page dir. */
    ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
    alloc_pt(l3tab, vl3tab);
    vl3e = &vl3tab[l3_table_offset_pae(dsi_v_start)];
    ctxt->ctrlreg[3] = l3tab;

    for ( count = 0; count < ((v_end-dsi_v_start)>>PAGE_SHIFT); count++)
    {
        if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
        {
            alloc_pt(l1tab, vl1tab);

            if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
            {
                alloc_pt(l2tab, vl2tab);
                vl2e = &vl2tab[l2_table_offset_pae(dsi_v_start + (count<<PAGE_SHIFT))];
                *vl3e = l2tab | L3_PROT;
                vl3e++;
            }
            vl1e = &vl1tab[l1_table_offset_pae(dsi_v_start + (count<<PAGE_SHIFT))];
            *vl2e = l1tab | L2_PROT;
            vl2e++;
        }

        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
        if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
             (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) )
        {
            *vl1e &= ~_PAGE_RW;
        }
        vl1e++;
    }

    munmap(vl1tab, PAGE_SIZE);
    munmap(vl2tab, PAGE_SIZE);
    munmap(vl3tab, PAGE_SIZE);
    return 0;

 error_out:
    if (vl1tab)
        munmap(vl1tab, PAGE_SIZE);
    if (vl2tab)
        munmap(vl2tab, PAGE_SIZE);
    if (vl3tab)
        munmap(vl3tab, PAGE_SIZE);
    return -1;
}

#endif
#if defined(__x86_64__)

static int setup_pg_tables_64(int xc_handle, u32 dom,
                              vcpu_guest_context_t *ctxt,
                              unsigned long dsi_v_start,
                              unsigned long v_end,
                              unsigned long *page_array,
                              unsigned long vpt_start,
                              unsigned long vpt_end)
{
    l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
    l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
    l3_pgentry_t *vl3tab=NULL, *vl3e=NULL;
    l4_pgentry_t *vl4tab=NULL, *vl4e=NULL;
    unsigned long l2tab = 0;
    unsigned long l1tab = 0;
    unsigned long l3tab = 0;
    unsigned long l4tab = 0;
    unsigned long ppt_alloc;
    unsigned long count;

    /* First allocate page for page dir. */
    ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
    alloc_pt(l4tab, vl4tab);
    vl4e = &vl4tab[l4_table_offset(dsi_v_start)];
    ctxt->ctrlreg[3] = l4tab;

    for ( count = 0; count < ((v_end-dsi_v_start)>>PAGE_SHIFT); count++)
    {
        if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
        {
            alloc_pt(l1tab, vl1tab);

            if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
            {
                alloc_pt(l2tab, vl2tab);
                if ( !((unsigned long)vl3e & (PAGE_SIZE-1)) )
                {
                    alloc_pt(l3tab, vl3tab);
                    vl3e = &vl3tab[l3_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
                    *vl4e = l3tab | L4_PROT;
                    vl4e++;
                }
                vl2e = &vl2tab[l2_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
                *vl3e = l2tab | L3_PROT;
                vl3e++;
            }
            vl1e = &vl1tab[l1_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
            *vl2e = l1tab | L2_PROT;
            vl2e++;
        }

        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
        if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
             (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) )
        {
            *vl1e &= ~_PAGE_RW;
        }
        vl1e++;
    }

    munmap(vl1tab, PAGE_SIZE);
    munmap(vl2tab, PAGE_SIZE);
    munmap(vl3tab, PAGE_SIZE);
    munmap(vl4tab, PAGE_SIZE);
    return 0;

 error_out:
    if (vl1tab)
        munmap(vl1tab, PAGE_SIZE);
    if (vl2tab)
        munmap(vl2tab, PAGE_SIZE);
    if (vl3tab)
        munmap(vl3tab, PAGE_SIZE);
    if (vl4tab)
        munmap(vl4tab, PAGE_SIZE);
    return -1;
}
#endif
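
/*
 * Editorial note: the PAE and 64-bit variants above extend the same walk
 * as setup_pg_tables(): whenever an entry pointer (vl1e, vl2e, vl3e)
 * advances past a page boundary, a fresh lower-level table is allocated
 * with alloc_pt() and linked into the level above it.
 */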
#ifdef __ia64__
#include <asm/fpu.h>  /* for FPSR_DEFAULT */
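
/*
 * Editorial note: the ia64 builder is far simpler than the x86 one below.
 * It only rounds the image bounds to page granularity, looks up the
 * machine frames backing the image, loads the kernel into them, and
 * reports the kernel entry point; the remaining layout is handled by the
 * hypervisor.
 */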
static int setup_guest(int xc_handle,
                       u32 dom,
                       char *image, unsigned long image_size,
                       gzFile initrd_gfd, unsigned long initrd_len,
                       unsigned long nr_pages,
                       unsigned long *pvsi, unsigned long *pvke,
                       unsigned long *pvss, vcpu_guest_context_t *ctxt,
                       const char *cmdline,
                       unsigned long shared_info_frame,
                       unsigned int control_evtchn,
                       unsigned long flags,
                       unsigned int vcpus,
                       unsigned int store_evtchn, unsigned long *store_mfn)
{
    unsigned long *page_array = NULL;
    struct load_funcs load_funcs;
    struct domain_setup_info dsi;
    unsigned long start_page;
    int rc;

    rc = probeimageformat(image, image_size, &load_funcs);
    if ( rc != 0 )
        goto error_out;

    memset(&dsi, 0, sizeof(struct domain_setup_info));

    rc = (load_funcs.parseimage)(image, image_size, &dsi);
    if ( rc != 0 )
        goto error_out;

    dsi.v_start = round_pgdown(dsi.v_start);
    dsi.v_end   = round_pgup(dsi.v_end);

    start_page = dsi.v_start >> PAGE_SHIFT;
    nr_pages   = (dsi.v_end - dsi.v_start) >> PAGE_SHIFT;
    if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
    {
        PERROR("Could not allocate memory");
        goto error_out;
    }

    if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array,
                              start_page, nr_pages) != nr_pages )
    {
        PERROR("Could not get the page frame list");
        goto error_out;
    }

    (load_funcs.loadimage)(image, image_size, xc_handle, dom, page_array,
                           &dsi);

    *pvke = dsi.v_kernentry;
    return 0;

 error_out:
    free(page_array);
    return -1;
}
#else /* x86 */
static int setup_guest(int xc_handle,
                       u32 dom,
                       char *image, unsigned long image_size,
                       gzFile initrd_gfd, unsigned long initrd_len,
                       unsigned long nr_pages,
                       unsigned long *pvsi, unsigned long *pvke,
                       unsigned long *pvss, vcpu_guest_context_t *ctxt,
                       const char *cmdline,
                       unsigned long shared_info_frame,
                       unsigned int control_evtchn,
                       unsigned long flags,
                       unsigned int vcpus,
                       unsigned int store_evtchn, unsigned long *store_mfn)
{
    unsigned long *page_array = NULL;
    unsigned long count, i;
    start_info_t *start_info;
    shared_info_t *shared_info;
    xc_mmu_t *mmu = NULL;
    int rc;

    unsigned long nr_pt_pages;
    unsigned long physmap_pfn;
    u32 *physmap, *physmap_e;

    struct load_funcs load_funcs;
    struct domain_setup_info dsi;
    unsigned long vinitrd_start;
    unsigned long vinitrd_end;
    unsigned long vphysmap_start;
    unsigned long vphysmap_end;
    unsigned long vstartinfo_start;
    unsigned long vstartinfo_end;
    unsigned long vstoreinfo_start;
    unsigned long vstoreinfo_end;
    unsigned long vstack_start;
    unsigned long vstack_end;
    unsigned long vpt_start;
    unsigned long vpt_end;
    unsigned long v_end;
    rc = probeimageformat(image, image_size, &load_funcs);
    if ( rc != 0 )
        goto error_out;

    memset(&dsi, 0, sizeof(struct domain_setup_info));

    rc = (load_funcs.parseimage)(image, image_size, &dsi);
    if ( rc != 0 )
        goto error_out;

    if ( (dsi.v_start & (PAGE_SIZE-1)) != 0 )
    {
        PERROR("Guest OS must load to a page boundary.\n");
        goto error_out;
    }
    /*
     * Why do we need this? The number of page-table frames depends on the
     * size of the bootstrap address space. But the size of the address
     * space depends on the number of page-table frames (since each one is
     * mapped read-only). We have a pair of simultaneous equations in two
     * unknowns, which we solve by exhaustive search. (A worked example
     * follows the search loop below.)
     */
    vinitrd_start    = round_pgup(dsi.v_end);
    vinitrd_end      = vinitrd_start + initrd_len;
    vphysmap_start   = round_pgup(vinitrd_end);
    vphysmap_end     = vphysmap_start + (nr_pages * sizeof(unsigned long));
    vstoreinfo_start = round_pgup(vphysmap_end);
    vstoreinfo_end   = vstoreinfo_start + PAGE_SIZE;
    vpt_start        = vstoreinfo_end;
    for ( nr_pt_pages = 2; ; nr_pt_pages++ )
    {
        vpt_end          = vpt_start + (nr_pt_pages * PAGE_SIZE);
        vstartinfo_start = vpt_end;
        vstartinfo_end   = vstartinfo_start + PAGE_SIZE;
        vstack_start     = vstartinfo_end;
        vstack_end       = vstack_start + PAGE_SIZE;
        v_end            = (vstack_end + (1UL<<22)-1) & ~((1UL<<22)-1);
        if ( (v_end - vstack_end) < (512UL << 10) )
            v_end += 1UL << 22; /* Add extra 4MB to get >= 512kB padding. */
#if defined(__i386__)
        if (dsi.pae_kernel) {
            /* FIXME: assumes one L2 pgtable @ 0xc0000000 */
            if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT_PAE)-1)) >>
                   L2_PAGETABLE_SHIFT_PAE) + 2) <= nr_pt_pages )
                break;
        } else {
            if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT)-1)) >>
                   L2_PAGETABLE_SHIFT) + 1) <= nr_pt_pages )
                break;
        }
#endif
#if defined(__x86_64__)
#define NR(_l,_h,_s) \
    (((((_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
      ((_l) & ~((1UL<<(_s))-1))) >> (_s))
        if ( (1 + /* # L4 */
              NR(dsi.v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
              NR(dsi.v_start, v_end, L3_PAGETABLE_SHIFT) + /* # L2 */
              NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT))  /* # L1 */
             <= nr_pt_pages )
            break;
#endif
    }
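
    /*
     * Worked example (editorial, not in the original source): on non-PAE
     * i386, L2_PAGETABLE_SHIFT is 22, so each L1 table maps 4MB. For a
     * 16MB bootstrap region the break condition needs
     * ceil(16MB / 4MB) + 1 = 5 frames (four L1 tables plus the L2), so the
     * loop runs until nr_pt_pages reaches 5, assuming v_end stays at 16MB
     * while nr_pt_pages grows.
     */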
#define _p(a) ((void *) (a))
    printf("VIRTUAL MEMORY ARRANGEMENT:\n"
           " Loaded kernel: %p->%p\n"
           " Init. ramdisk: %p->%p\n"
           " Phys-Mach map: %p->%p\n"
           " Store page:    %p->%p\n"
           " Page tables:   %p->%p\n"
           " Start info:    %p->%p\n"
           " Boot stack:    %p->%p\n"
           " TOTAL:         %p->%p\n",
           _p(dsi.v_kernstart), _p(dsi.v_kernend),
           _p(vinitrd_start), _p(vinitrd_end),
           _p(vphysmap_start), _p(vphysmap_end),
           _p(vstoreinfo_start), _p(vstoreinfo_end),
           _p(vpt_start), _p(vpt_end),
           _p(vstartinfo_start), _p(vstartinfo_end),
           _p(vstack_start), _p(vstack_end),
           _p(dsi.v_start), _p(v_end));
    printf(" ENTRY ADDRESS: %p\n", _p(dsi.v_kernentry));
    if ( (v_end - dsi.v_start) > (nr_pages * PAGE_SIZE) )
    {
        printf("Initial guest OS requires too much space\n"
               "(%luMB is greater than %luMB limit)\n",
               (v_end-dsi.v_start)>>20, (nr_pages<<PAGE_SHIFT)>>20);
        goto error_out;
    }
    if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
    {
        PERROR("Could not allocate memory");
        goto error_out;
    }

    if ( xc_get_pfn_list(xc_handle, dom, page_array, nr_pages) != nr_pages )
    {
        PERROR("Could not get the page frame list");
        goto error_out;
    }

    (load_funcs.loadimage)(image, image_size, xc_handle, dom, page_array,
                           &dsi);
    /* Load the initial ramdisk image. */
    if ( initrd_len != 0 )
    {
        for ( i = (vinitrd_start - dsi.v_start);
              i < (vinitrd_end - dsi.v_start); i += PAGE_SIZE )
        {
            char page[PAGE_SIZE];
            if ( gzread(initrd_gfd, page, PAGE_SIZE) == -1 )
            {
                PERROR("Error reading initrd image");
                goto error_out;
            }
            xc_copy_to_domain_page(xc_handle, dom,
                                   page_array[i>>PAGE_SHIFT], page);
        }
    }
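
    /*
     * Editorial note: initrd_gfd comes from gzdopen(), and zlib's gzread()
     * transparently returns decompressed bytes for gzip data or the raw
     * bytes for an uncompressed file, so both ramdisk formats are handled
     * by the loop above.
     */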
    if ( (mmu = xc_init_mmu_updates(xc_handle, dom)) == NULL )
        goto error_out;

    /* Set up the page tables. */
#if defined(__i386__)
    if (dsi.pae_kernel)
        rc = setup_pg_tables_pae(xc_handle, dom, ctxt,
                                 dsi.v_start, v_end,
                                 page_array, vpt_start, vpt_end);
    else {
        rc = setup_pg_tables(xc_handle, dom, ctxt,
                             dsi.v_start, v_end,
                             page_array, vpt_start, vpt_end);
    }
#endif
#if defined(__x86_64__)
    rc = setup_pg_tables_64(xc_handle, dom, ctxt,
                            dsi.v_start, v_end,
                            page_array, vpt_start, vpt_end);
#endif
    if (0 != rc)
        goto error_out;
    /* Write the phys->machine and machine->phys table entries. */
    physmap_pfn = (vphysmap_start - dsi.v_start) >> PAGE_SHIFT;
    physmap = physmap_e = xc_map_foreign_range(
        xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
        page_array[physmap_pfn++]);
    for ( count = 0; count < nr_pages; count++ )
    {
        if ( xc_add_mmu_update(xc_handle, mmu,
                               (page_array[count] << PAGE_SHIFT) |
                               MMU_MACHPHYS_UPDATE, count) )
        {
            munmap(physmap, PAGE_SIZE);
            goto error_out;
        }
        *physmap_e++ = page_array[count];
        if ( ((unsigned long)physmap_e & (PAGE_SIZE-1)) == 0 )
        {
            munmap(physmap, PAGE_SIZE);
            physmap = physmap_e = xc_map_foreign_range(
                xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
                page_array[physmap_pfn++]);
        }
    }
    munmap(physmap, PAGE_SIZE);
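
    /*
     * Editorial note: the loop above fills both translation directions.
     * Each MMU_MACHPHYS_UPDATE request asks the hypervisor to set the
     * machine-to-phys entry M2P[mfn] = pfn, while *physmap_e++ writes the
     * guest's own phys-to-machine array at vphysmap_start (remapped one
     * page at a time as physmap_e crosses a page boundary).
     */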
#if defined(__i386__)
    /*
     * Pin the top-level page directory (L2, or L3 for a PAE kernel) as a
     * page-table page - this causes the hypervisor to enforce the correct
     * protection for it.
     */
    if (dsi.pae_kernel) {
        if ( pin_table(xc_handle, MMUEXT_PIN_L3_TABLE,
                       ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
            goto error_out;
    } else {
        if ( pin_table(xc_handle, MMUEXT_PIN_L2_TABLE,
                       ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
            goto error_out;
    }
#endif
#if defined(__x86_64__)
    /*
     * Pin the L4 table as a page directory page - this causes the
     * hypervisor to enforce the correct protection for it.
     */
    if ( pin_table(xc_handle, MMUEXT_PIN_L4_TABLE,
                   ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
        goto error_out;
#endif
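
    /*
     * Illustrative sketch (an assumption, not from this file): pin_table()
     * is defined elsewhere in libxc. It presumably wraps a single MMUEXT
     * pin request, along the lines of:
     *
     *     struct mmuext_op op;
     *     op.cmd = type;            /+ e.g. MMUEXT_PIN_L2_TABLE +/
     *     op.arg1.mfn = mfn;        /+ frame to pin, as passed above +/
     *     return do_mmuext_op(xc_handle, &op, 1, dom);
     *
     * where do_mmuext_op stands in for the libxc hypercall wrapper
     * (hypothetical name here).
     */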
    *store_mfn = page_array[(vstoreinfo_start-dsi.v_start) >> PAGE_SHIFT];

    start_info = xc_map_foreign_range(
        xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
        page_array[(vstartinfo_start-dsi.v_start)>>PAGE_SHIFT]);
    memset(start_info, 0, sizeof(*start_info));
    start_info->nr_pages     = nr_pages;
    start_info->shared_info  = shared_info_frame << PAGE_SHIFT;
    start_info->flags        = flags;
    start_info->pt_base      = vpt_start;
    start_info->nr_pt_frames = nr_pt_pages;
    start_info->mfn_list     = vphysmap_start;
    start_info->domain_controller_evtchn = control_evtchn;
    start_info->store_mfn    = *store_mfn;
    start_info->store_evtchn = store_evtchn;
    if ( initrd_len != 0 )
    {
        start_info->mod_start = vinitrd_start;
        start_info->mod_len   = initrd_len;
    }
    strncpy((char *)start_info->cmd_line, cmdline, MAX_GUEST_CMDLINE);
    start_info->cmd_line[MAX_GUEST_CMDLINE-1] = '\0';
    munmap(start_info, PAGE_SIZE);
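
    /*
     * Editorial note: the start_info page is the guest kernel's source of
     * boot-time layout information - it now records where the page tables,
     * phys-to-machine map, xenstore page, initrd and command line live.
     */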
    /* shared_info page starts its life empty. */
    shared_info = xc_map_foreign_range(
        xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, shared_info_frame);
    memset(shared_info, 0, sizeof(shared_info_t));
    /* Mask all upcalls... */
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        shared_info->vcpu_data[i].evtchn_upcall_mask = 1;

    shared_info->n_vcpu = vcpus;
    printf(" VCPUS:         %d\n", shared_info->n_vcpu);

    munmap(shared_info, PAGE_SIZE);
    /* Send the page update requests down to the hypervisor. */
    if ( xc_finish_mmu_updates(xc_handle, mmu) )
        goto error_out;

    free(mmu);
    free(page_array);

    *pvsi = vstartinfo_start;
    *pvss = vstack_start;
    *pvke = dsi.v_kernentry;

    return 0;

 error_out:
    free(mmu);
    free(page_array);
    return -1;
}
#endif
int xc_linux_build(int xc_handle,
                   u32 domid,
                   const char *image_name,
                   const char *ramdisk_name,
                   const char *cmdline,
                   unsigned int control_evtchn,
                   unsigned long flags,
                   unsigned int vcpus,
                   unsigned int store_evtchn,
                   unsigned long *store_mfn)
{
    dom0_op_t launch_op, op;
    int initrd_fd = -1;
    gzFile initrd_gfd = NULL;
    int rc, i;
    vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
    long nr_pages; /* signed, so the error check below can see a failure */
    char *image = NULL;
    unsigned long image_size, initrd_size=0;
    unsigned long vstartinfo_start, vkern_entry, vstack_start;
    if ( (nr_pages = xc_get_tot_pages(xc_handle, domid)) < 0 )
    {
        PERROR("Could not find total pages for domain");
        goto error_out;
    }
    if ( (image = xc_read_kernel_image(image_name, &image_size)) == NULL )
        goto error_out;

    if ( (ramdisk_name != NULL) && (strlen(ramdisk_name) != 0) )
    {
        if ( (initrd_fd = open(ramdisk_name, O_RDONLY)) < 0 )
        {
            PERROR("Could not open the initial ramdisk image");
            goto error_out;
        }

        initrd_size = xc_get_filesz(initrd_fd);

        if ( (initrd_gfd = gzdopen(initrd_fd, "rb")) == NULL )
        {
            PERROR("Could not allocate decompression state for initrd");
            goto error_out;
        }
    }
    if ( mlock(&st_ctxt, sizeof(st_ctxt)) )
    {
        PERROR("xc_linux_build: ctxt mlock failed");
        return 1;
    }
    op.cmd = DOM0_GETDOMAININFO;
    op.u.getdomaininfo.domain = (domid_t)domid;
    if ( (xc_dom0_op(xc_handle, &op) < 0) ||
         ((u16)op.u.getdomaininfo.domain != domid) )
    {
        PERROR("Could not get info on domain");
        goto error_out;
    }

    if ( xc_domain_get_vcpu_context(xc_handle, domid, 0, ctxt) )
    {
        PERROR("Could not get vcpu context");
        goto error_out;
    }

    if ( !(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED) ||
#ifdef __ia64__
         0 )
#else
         (ctxt->ctrlreg[3] != 0) )
#endif
    {
        ERROR("Domain is already constructed");
        goto error_out;
    }
    if ( setup_guest(xc_handle, domid, image, image_size,
                     initrd_gfd, initrd_size, nr_pages,
                     &vstartinfo_start, &vkern_entry,
                     &vstack_start, ctxt, cmdline,
                     op.u.getdomaininfo.shared_info_frame,
                     control_evtchn, flags, vcpus,
                     store_evtchn, store_mfn) < 0 )
    {
        ERROR("Error constructing guest OS");
        goto error_out;
    }

    if ( initrd_fd >= 0 )
        close(initrd_fd);
    if ( initrd_gfd )
        gzclose(initrd_gfd);
    free(image);
#ifdef __ia64__
    /* Based on new_thread in xen/arch/ia64/domain.c */
    ctxt->regs.cr_ipsr = 0; /* all necessary bits filled by hypervisor */
    ctxt->regs.cr_iip  = vkern_entry;
    ctxt->regs.cr_ifs  = 1UL << 63;
    ctxt->regs.ar_fpsr = FPSR_DEFAULT;
    /* ctxt->regs.r28 = dom_fw_setup(); currently done by hypervisor, should move here */
    ctxt->vcpu.privregs = 0;
    ctxt->shared.domain_controller_evtchn = control_evtchn;
    ctxt->shared.flags = flags;
    i = 0; /* silence unused-variable warning */
#else /* x86 */
    /*
     * Initial register values:
     *  DS,ES,FS,GS = FLAT_KERNEL_DS
     *  CS:EIP      = FLAT_KERNEL_CS:start_pc
     *  SS:ESP      = FLAT_KERNEL_DS:start_stack
     *  ESI         = start_info
     *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
     *  EFLAGS      = IF | 2 (bit 1 is reserved and should always be 1)
     */
    ctxt->user_regs.ds = FLAT_KERNEL_DS;
    ctxt->user_regs.es = FLAT_KERNEL_DS;
    ctxt->user_regs.fs = FLAT_KERNEL_DS;
    ctxt->user_regs.gs = FLAT_KERNEL_DS;
    ctxt->user_regs.ss = FLAT_KERNEL_SS;
    ctxt->user_regs.cs = FLAT_KERNEL_CS;
    ctxt->user_regs.eip = vkern_entry;
    ctxt->user_regs.esp = vstack_start + PAGE_SIZE;
    ctxt->user_regs.esi = vstartinfo_start;
    ctxt->user_regs.eflags = 1 << 9; /* Interrupt Enable */

    /* FPU is set up to default initial state. */
    memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

    /* Virtual IDT is empty at start-of-day. */
    for ( i = 0; i < 256; i++ )
    {
        ctxt->trap_ctxt[i].vector = i;
        ctxt->trap_ctxt[i].cs     = FLAT_KERNEL_CS;
    }

    /* No LDT. */
    ctxt->ldt_ents = 0;

    /* Use the default Xen-provided GDT. */
    ctxt->gdt_ents = 0;

    /* Ring 1 stack is the initial stack. */
    ctxt->kernel_ss = FLAT_KERNEL_SS;
    ctxt->kernel_sp = vstack_start + PAGE_SIZE;

    /* No debugging. */
    memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));

    /* No callback handlers. */
#if defined(__i386__)
    ctxt->event_callback_cs     = FLAT_KERNEL_CS;
    ctxt->event_callback_eip    = 0;
    ctxt->failsafe_callback_cs  = FLAT_KERNEL_CS;
    ctxt->failsafe_callback_eip = 0;
#elif defined(__x86_64__)
    ctxt->event_callback_eip    = 0;
    ctxt->failsafe_callback_eip = 0;
    ctxt->syscall_callback_eip  = 0;
#endif
#endif /* x86 */
    memset( &launch_op, 0, sizeof(launch_op) );

    launch_op.u.setdomaininfo.domain = (domid_t)domid;
    launch_op.u.setdomaininfo.vcpu   = 0;
    launch_op.u.setdomaininfo.ctxt   = ctxt;

    launch_op.cmd = DOM0_SETDOMAININFO;
    rc = xc_dom0_op(xc_handle, &launch_op);

    return rc;

 error_out:
    if ( initrd_gfd != NULL )
        gzclose(initrd_gfd);
    else if ( initrd_fd >= 0 )
        close(initrd_fd);
    free(image);

    return -1;
}