direct-io.hg

view tools/libxc/xc_linux_build.c @ 12005:cf05aabe6e65

[LIBXC][IA64] fix build warning

With warnings becoming errors, this fixes the libxc build on ia64

Signed-off-by: Alex Williamson <alex.williamson@hp.com>
author awilliam@xenbuild.aw
date Wed Oct 25 15:29:00 2006 -0600 (2006-10-25)
parents 21905d2497d6
children 4a320d26fc24
line source
1 /******************************************************************************
2 * xc_linux_build.c
3 */
5 #include <stddef.h>
6 #include "xg_private.h"
7 #include "xc_private.h"
8 #include <xenctrl.h>
10 #include "xc_elf.h"
11 #include <stdlib.h>
12 #include <unistd.h>
13 #include <inttypes.h>
14 #include <zlib.h>
#include <string.h> /* memset(), strncmp(), strstr(), strncpy() */
16 /* Handy for printing out '0' prepended values at native pointer size */
17 #define _p(a) ((void *) ((ulong)a))
19 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
20 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
21 #if defined(__i386__)
22 #define L3_PROT (_PAGE_PRESENT)
23 #elif defined(__x86_64__)
24 #define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
25 #define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
26 #endif
28 #ifdef __ia64__
29 #define get_tot_pages xc_get_max_pages
30 #else
31 #define get_tot_pages xc_get_tot_pages
32 #endif
34 #define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
35 #define round_pgdown(_p) ((_p)&PAGE_MASK)
37 struct initrd_info {
38 enum { INITRD_none, INITRD_file, INITRD_mem } type;
39 unsigned long len;
40 union {
41 gzFile file_handle;
42 char *mem_addr;
43 } u;
44 };
46 static const char *feature_names[XENFEAT_NR_SUBMAPS*32] = {
47 [XENFEAT_writable_page_tables] = "writable_page_tables",
48 [XENFEAT_writable_descriptor_tables] = "writable_descriptor_tables",
49 [XENFEAT_auto_translated_physmap] = "auto_translated_physmap",
50 [XENFEAT_supervisor_mode_kernel] = "supervisor_mode_kernel",
51 [XENFEAT_pae_pgdir_above_4gb] = "pae_pgdir_above_4gb"
52 };
/*
 * Set feature bit @nr in the feature bitmap @addr.
 *
 * The bitmap is an array of 32-bit submaps: bit @nr lives in word
 * nr/32, bit position nr%32.  The shifted constant must be unsigned:
 * "1 << 31" left-shifts into the sign bit of a signed int, which is
 * undefined behaviour in C.
 */
static inline void set_feature_bit (int nr, uint32_t *addr)
{
    addr[nr>>5] |= (1u << (nr & 31));
}
/*
 * Test feature bit @nr in the feature bitmap @addr.
 * Returns 1 if the bit is set, 0 otherwise.
 *
 * As in set_feature_bit(), use an unsigned constant so that testing
 * bit 31 of a submap does not left-shift into the sign bit (UB).
 */
static inline int test_feature_bit(int nr, uint32_t *addr)
{
    return !!(addr[nr>>5] & (1u << (nr & 31)));
}
64 static int parse_features(
65 const char *feats,
66 uint32_t supported[XENFEAT_NR_SUBMAPS],
67 uint32_t required[XENFEAT_NR_SUBMAPS])
68 {
69 const char *end, *p;
70 int i, req;
72 if ( (end = strchr(feats, ',')) == NULL )
73 end = feats + strlen(feats);
75 while ( feats < end )
76 {
77 p = strchr(feats, '|');
78 if ( (p == NULL) || (p > end) )
79 p = end;
81 req = (*feats == '!');
82 if ( req )
83 feats++;
85 for ( i = 0; i < XENFEAT_NR_SUBMAPS*32; i++ )
86 {
87 if ( feature_names[i] == NULL )
88 continue;
90 if ( strncmp(feature_names[i], feats, p-feats) == 0 )
91 {
92 set_feature_bit(i, supported);
93 if ( required && req )
94 set_feature_bit(i, required);
95 break;
96 }
97 }
99 if ( i == XENFEAT_NR_SUBMAPS*32 )
100 {
101 ERROR("Unknown feature \"%.*s\".", (int)(p-feats), feats);
102 if ( req )
103 {
104 ERROR("Kernel requires an unknown hypervisor feature.");
105 return -EINVAL;
106 }
107 }
109 feats = p;
110 if ( *feats == '|' )
111 feats++;
112 }
114 return -EINVAL;
115 }
117 static int probeimageformat(const char *image,
118 unsigned long image_size,
119 struct load_funcs *load_funcs)
120 {
121 if ( probe_elf(image, image_size, load_funcs) &&
122 probe_bin(image, image_size, load_funcs) )
123 {
124 ERROR( "Unrecognized image format" );
125 return -EINVAL;
126 }
128 return 0;
129 }
131 static int load_initrd(int xc_handle, domid_t dom,
132 struct initrd_info *initrd,
133 unsigned long physbase,
134 xen_pfn_t *phys_to_mach)
135 {
136 char page[PAGE_SIZE];
137 unsigned long pfn_start, pfn, nr_pages;
139 if ( initrd->type == INITRD_none )
140 return 0;
142 pfn_start = physbase >> PAGE_SHIFT;
143 nr_pages = (initrd->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
145 for ( pfn = pfn_start; pfn < (pfn_start + nr_pages); pfn++ )
146 {
147 if ( initrd->type == INITRD_mem )
148 {
149 xc_copy_to_domain_page(
150 xc_handle, dom, phys_to_mach[pfn],
151 &initrd->u.mem_addr[(pfn - pfn_start) << PAGE_SHIFT]);
152 }
153 else
154 {
155 if ( gzread(initrd->u.file_handle, page, PAGE_SIZE) == -1 )
156 {
157 PERROR("Error reading initrd image, could not");
158 return -EINVAL;
159 }
160 xc_copy_to_domain_page(xc_handle, dom, phys_to_mach[pfn], page);
161 }
162 }
164 return 0;
165 }
167 #define alloc_pt(ltab, vltab, pltab) \
168 do { \
169 pltab = ppt_alloc++; \
170 ltab = (uint64_t)page_array[pltab] << PAGE_SHIFT; \
171 pltab <<= PAGE_SHIFT; \
172 if ( vltab != NULL ) \
173 munmap(vltab, PAGE_SIZE); \
174 if ( (vltab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, \
175 PROT_READ|PROT_WRITE, \
176 ltab >> PAGE_SHIFT)) == NULL ) \
177 goto error_out; \
178 memset(vltab, 0x0, PAGE_SIZE); \
179 } while ( 0 )
181 #if defined(__i386__)
183 static int setup_pg_tables(int xc_handle, uint32_t dom,
184 vcpu_guest_context_t *ctxt,
185 unsigned long dsi_v_start,
186 unsigned long v_end,
187 xen_pfn_t *page_array,
188 unsigned long vpt_start,
189 unsigned long vpt_end,
190 unsigned shadow_mode_enabled)
191 {
192 l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
193 l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
194 unsigned long l1tab = 0, pl1tab;
195 unsigned long l2tab = 0, pl2tab;
196 unsigned long ppt_alloc;
197 unsigned long count;
199 ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
200 alloc_pt(l2tab, vl2tab, pl2tab);
201 vl2e = &vl2tab[l2_table_offset(dsi_v_start)];
202 if (shadow_mode_enabled)
203 ctxt->ctrlreg[3] = xen_pfn_to_cr3(pl2tab >> PAGE_SHIFT);
204 else
205 ctxt->ctrlreg[3] = xen_pfn_to_cr3(l2tab >> PAGE_SHIFT);
207 for ( count = 0; count < ((v_end - dsi_v_start) >> PAGE_SHIFT); count++ )
208 {
209 if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
210 {
211 alloc_pt(l1tab, vl1tab, pl1tab);
212 vl1e = &vl1tab[l1_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
213 if (shadow_mode_enabled)
214 *vl2e = pl1tab | L2_PROT;
215 else
216 *vl2e = l1tab | L2_PROT;
217 vl2e++;
218 }
220 if ( shadow_mode_enabled )
221 {
222 *vl1e = (count << PAGE_SHIFT) | L1_PROT;
223 }
224 else
225 {
226 *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
227 if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
228 (count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
229 *vl1e &= ~_PAGE_RW;
230 }
231 vl1e++;
232 }
233 munmap(vl1tab, PAGE_SIZE);
234 munmap(vl2tab, PAGE_SIZE);
235 return 0;
237 error_out:
238 if (vl1tab)
239 munmap(vl1tab, PAGE_SIZE);
240 if (vl2tab)
241 munmap(vl2tab, PAGE_SIZE);
242 return -1;
243 }
245 static int setup_pg_tables_pae(int xc_handle, uint32_t dom,
246 vcpu_guest_context_t *ctxt,
247 unsigned long dsi_v_start,
248 unsigned long v_end,
249 xen_pfn_t *page_array,
250 unsigned long vpt_start,
251 unsigned long vpt_end,
252 unsigned shadow_mode_enabled,
253 unsigned pae_mode)
254 {
255 l1_pgentry_64_t *vl1tab = NULL, *vl1e = NULL;
256 l2_pgentry_64_t *vl2tab = NULL, *vl2e = NULL;
257 l3_pgentry_64_t *vl3tab = NULL, *vl3e = NULL;
258 uint64_t l1tab, l2tab, l3tab, pl1tab, pl2tab, pl3tab;
259 unsigned long ppt_alloc, count, nmfn;
261 /* First allocate page for page dir. */
262 ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
264 if ( pae_mode == PAEKERN_extended_cr3 )
265 {
266 ctxt->vm_assist |= (1UL << VMASST_TYPE_pae_extended_cr3);
267 }
268 else if ( page_array[ppt_alloc] > 0xfffff )
269 {
270 nmfn = xc_make_page_below_4G(xc_handle, dom, page_array[ppt_alloc]);
271 if ( nmfn == 0 )
272 {
273 DPRINTF("Couldn't get a page below 4GB :-(\n");
274 goto error_out;
275 }
276 page_array[ppt_alloc] = nmfn;
277 }
279 alloc_pt(l3tab, vl3tab, pl3tab);
280 vl3e = &vl3tab[l3_table_offset_pae(dsi_v_start)];
281 if (shadow_mode_enabled)
282 ctxt->ctrlreg[3] = xen_pfn_to_cr3(pl3tab >> PAGE_SHIFT);
283 else
284 ctxt->ctrlreg[3] = xen_pfn_to_cr3(l3tab >> PAGE_SHIFT);
286 for ( count = 0; count < ((v_end - dsi_v_start) >> PAGE_SHIFT); count++)
287 {
288 if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
289 {
290 if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
291 {
292 alloc_pt(l2tab, vl2tab, pl2tab);
293 vl2e = &vl2tab[l2_table_offset_pae(
294 dsi_v_start + (count << PAGE_SHIFT))];
295 if (shadow_mode_enabled)
296 *vl3e = pl2tab | L3_PROT;
297 else
298 *vl3e++ = l2tab | L3_PROT;
299 }
301 alloc_pt(l1tab, vl1tab, pl1tab);
302 vl1e = &vl1tab[l1_table_offset_pae(
303 dsi_v_start + (count << PAGE_SHIFT))];
304 if (shadow_mode_enabled)
305 *vl2e = pl1tab | L2_PROT;
306 else
307 *vl2e++ = l1tab | L2_PROT;
308 }
310 if ( shadow_mode_enabled )
311 {
312 *vl1e = (count << PAGE_SHIFT) | L1_PROT;
313 }
314 else
315 {
316 *vl1e = ((uint64_t)page_array[count] << PAGE_SHIFT) | L1_PROT;
317 if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
318 (count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
319 *vl1e &= ~_PAGE_RW;
320 }
321 vl1e++;
322 }
324 /* Xen requires a mid-level pgdir mapping 0xC0000000 region. */
325 if ( (vl3tab[3] & _PAGE_PRESENT) == 0 )
326 {
327 alloc_pt(l2tab, vl2tab, pl2tab);
328 vl3tab[3] = l2tab | L3_PROT;
329 }
331 munmap(vl1tab, PAGE_SIZE);
332 munmap(vl2tab, PAGE_SIZE);
333 munmap(vl3tab, PAGE_SIZE);
334 return 0;
336 error_out:
337 if (vl1tab)
338 munmap(vl1tab, PAGE_SIZE);
339 if (vl2tab)
340 munmap(vl2tab, PAGE_SIZE);
341 if (vl3tab)
342 munmap(vl3tab, PAGE_SIZE);
343 return -1;
344 }
346 #endif
348 #if defined(__x86_64__)
350 static int setup_pg_tables_64(int xc_handle, uint32_t dom,
351 vcpu_guest_context_t *ctxt,
352 unsigned long dsi_v_start,
353 unsigned long v_end,
354 xen_pfn_t *page_array,
355 unsigned long vpt_start,
356 unsigned long vpt_end,
357 int shadow_mode_enabled)
358 {
359 l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
360 l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
361 l3_pgentry_t *vl3tab=NULL, *vl3e=NULL;
362 l4_pgentry_t *vl4tab=NULL, *vl4e=NULL;
363 unsigned long l2tab = 0, pl2tab;
364 unsigned long l1tab = 0, pl1tab;
365 unsigned long l3tab = 0, pl3tab;
366 unsigned long l4tab = 0, pl4tab;
367 unsigned long ppt_alloc;
368 unsigned long count;
370 /* First allocate page for page dir. */
371 ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
372 alloc_pt(l4tab, vl4tab, pl4tab);
373 vl4e = &vl4tab[l4_table_offset(dsi_v_start)];
374 if (shadow_mode_enabled)
375 ctxt->ctrlreg[3] = xen_pfn_to_cr3(pl4tab >> PAGE_SHIFT);
376 else
377 ctxt->ctrlreg[3] = xen_pfn_to_cr3(l4tab >> PAGE_SHIFT);
379 for ( count = 0; count < ((v_end-dsi_v_start)>>PAGE_SHIFT); count++)
380 {
381 if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
382 {
383 alloc_pt(l1tab, vl1tab, pl1tab);
385 if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
386 {
387 alloc_pt(l2tab, vl2tab, pl2tab);
388 if ( !((unsigned long)vl3e & (PAGE_SIZE-1)) )
389 {
390 alloc_pt(l3tab, vl3tab, pl3tab);
391 vl3e = &vl3tab[l3_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
392 if (shadow_mode_enabled)
393 *vl4e = pl3tab | L4_PROT;
394 else
395 *vl4e = l3tab | L4_PROT;
396 vl4e++;
397 }
398 vl2e = &vl2tab[l2_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
399 if (shadow_mode_enabled)
400 *vl3e = pl2tab | L3_PROT;
401 else
402 *vl3e = l2tab | L3_PROT;
403 vl3e++;
404 }
405 vl1e = &vl1tab[l1_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
406 if (shadow_mode_enabled)
407 *vl2e = pl1tab | L2_PROT;
408 else
409 *vl2e = l1tab | L2_PROT;
410 vl2e++;
411 }
413 if ( shadow_mode_enabled )
414 {
415 *vl1e = (count << PAGE_SHIFT) | L1_PROT;
416 }
417 else
418 {
419 *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
420 if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
421 (count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
422 {
423 *vl1e &= ~_PAGE_RW;
424 }
425 }
426 vl1e++;
427 }
429 munmap(vl1tab, PAGE_SIZE);
430 munmap(vl2tab, PAGE_SIZE);
431 munmap(vl3tab, PAGE_SIZE);
432 munmap(vl4tab, PAGE_SIZE);
433 return 0;
435 error_out:
436 if (vl1tab)
437 munmap(vl1tab, PAGE_SIZE);
438 if (vl2tab)
439 munmap(vl2tab, PAGE_SIZE);
440 if (vl3tab)
441 munmap(vl3tab, PAGE_SIZE);
442 if (vl4tab)
443 munmap(vl4tab, PAGE_SIZE);
444 return -1;
445 }
446 #endif
448 #ifdef __ia64__
449 static int setup_guest(int xc_handle,
450 uint32_t dom,
451 const char *image, unsigned long image_size,
452 struct initrd_info *initrd,
453 unsigned long nr_pages,
454 unsigned long *pvsi, unsigned long *pvke,
455 unsigned long *pvss, vcpu_guest_context_t *ctxt,
456 const char *cmdline,
457 unsigned long shared_info_frame,
458 unsigned long flags,
459 unsigned int store_evtchn, unsigned long *store_mfn,
460 unsigned int console_evtchn, unsigned long *console_mfn,
461 uint32_t required_features[XENFEAT_NR_SUBMAPS])
462 {
463 xen_pfn_t *page_array = NULL;
464 struct load_funcs load_funcs;
465 struct domain_setup_info dsi;
466 unsigned long vinitrd_start;
467 unsigned long vinitrd_end;
468 unsigned long v_end;
469 unsigned long start_page, pgnr;
470 start_info_t *start_info;
471 unsigned long start_info_mpa;
472 struct xen_ia64_boot_param *bp;
473 shared_info_t *shared_info;
474 int i;
475 DECLARE_DOMCTL;
476 int rc;
478 rc = probeimageformat(image, image_size, &load_funcs);
479 if ( rc != 0 )
480 goto error_out;
482 memset(&dsi, 0, sizeof(struct domain_setup_info));
484 rc = (load_funcs.parseimage)(image, image_size, &dsi);
485 if ( rc != 0 )
486 goto error_out;
488 dsi.v_start = round_pgdown(dsi.v_start);
489 vinitrd_start = round_pgup(dsi.v_end);
490 vinitrd_end = vinitrd_start + initrd->len;
491 v_end = round_pgup(vinitrd_end);
492 start_info_mpa = (nr_pages - 3) << PAGE_SHIFT;
494 /* Build firmware. */
495 memset(&domctl.u.arch_setup, 0, sizeof(domctl.u.arch_setup));
496 domctl.u.arch_setup.flags = 0;
497 domctl.u.arch_setup.bp = start_info_mpa + sizeof (start_info_t);
498 domctl.u.arch_setup.maxmem = (nr_pages - 3) << PAGE_SHIFT;
499 domctl.cmd = XEN_DOMCTL_arch_setup;
500 domctl.domain = (domid_t)dom;
501 if ( xc_domctl(xc_handle, &domctl) )
502 goto error_out;
504 start_page = dsi.v_start >> PAGE_SHIFT;
505 pgnr = (v_end - dsi.v_start) >> PAGE_SHIFT;
506 if ( (page_array = malloc(pgnr * sizeof(xen_pfn_t))) == NULL )
507 {
508 PERROR("Could not allocate memory");
509 goto error_out;
510 }
512 if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array,
513 start_page, pgnr) != pgnr )
514 {
515 PERROR("Could not get the page frame list");
516 goto error_out;
517 }
519 IPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n"
520 " Loaded kernel: %p->%p\n"
521 " Init. ramdisk: %p->%p\n"
522 " TOTAL: %p->%p\n",
523 _p(dsi.v_kernstart), _p(dsi.v_kernend),
524 _p(vinitrd_start), _p(vinitrd_end),
525 _p(dsi.v_start), _p(v_end));
526 IPRINTF(" ENTRY ADDRESS: %p\n", _p(dsi.v_kernentry));
528 (load_funcs.loadimage)(image, image_size, xc_handle, dom, page_array,
529 &dsi);
531 if ( load_initrd(xc_handle, dom, initrd,
532 vinitrd_start - dsi.v_start, page_array) )
533 goto error_out;
535 *pvke = dsi.v_kernentry;
537 /* Now need to retrieve machine pfn for system pages:
538 * start_info/store/console
539 */
540 pgnr = 3;
541 if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array,
542 nr_pages - 3, pgnr) != pgnr )
543 {
544 PERROR("Could not get page frame for xenstore");
545 goto error_out;
546 }
548 *store_mfn = page_array[1];
549 *console_mfn = page_array[2];
550 IPRINTF("start_info: 0x%lx at 0x%lx, "
551 "store_mfn: 0x%lx at 0x%lx, "
552 "console_mfn: 0x%lx at 0x%lx\n",
553 page_array[0], nr_pages - 3,
554 *store_mfn, nr_pages - 2,
555 *console_mfn, nr_pages - 1);
557 start_info = xc_map_foreign_range(
558 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, page_array[0]);
559 memset(start_info, 0, sizeof(*start_info));
560 rc = xc_version(xc_handle, XENVER_version, NULL);
561 sprintf(start_info->magic, "xen-%i.%i-ia64", rc >> 16, rc & (0xFFFF));
562 start_info->flags = flags;
563 start_info->store_mfn = nr_pages - 2;
564 start_info->store_evtchn = store_evtchn;
565 start_info->console.domU.mfn = nr_pages - 1;
566 start_info->console.domU.evtchn = console_evtchn;
567 start_info->nr_pages = nr_pages; // FIXME?: nr_pages - 2 ????
569 bp = (struct xen_ia64_boot_param *)(start_info + 1);
570 bp->command_line = start_info_mpa + offsetof(start_info_t, cmd_line);
571 if ( cmdline != NULL )
572 {
573 strncpy((char *)start_info->cmd_line, cmdline, MAX_GUEST_CMDLINE);
574 start_info->cmd_line[MAX_GUEST_CMDLINE - 1] = 0;
575 }
576 if ( initrd->len != 0 )
577 {
578 bp->initrd_start = vinitrd_start;
579 bp->initrd_size = initrd->len;
580 }
581 ctxt->user_regs.r28 = start_info_mpa + sizeof (start_info_t);
582 munmap(start_info, PAGE_SIZE);
584 /* shared_info page starts its life empty. */
585 shared_info = xc_map_foreign_range(
586 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, shared_info_frame);
587 printf("shared_info = %p, err=%s frame=%lx\n",
588 shared_info, strerror (errno), shared_info_frame);
589 //memset(shared_info, 0, PAGE_SIZE);
590 /* Mask all upcalls... */
591 for ( i = 0; i < MAX_VIRT_CPUS; i++ )
592 shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
593 shared_info->arch.start_info_pfn = nr_pages - 3;
595 munmap(shared_info, PAGE_SIZE);
597 free(page_array);
598 return 0;
600 error_out:
601 free(page_array);
602 return -1;
603 }
604 #else /* x86 */
606 /* Check if the platform supports the guest kernel format */
607 static int compat_check(int xc_handle, struct domain_setup_info *dsi)
608 {
609 xen_capabilities_info_t xen_caps = "";
611 if (xc_version(xc_handle, XENVER_capabilities, &xen_caps) != 0) {
612 ERROR("Cannot determine host capabilities.");
613 return 0;
614 }
616 if (strstr(xen_caps, "xen-3.0-x86_32p")) {
617 if (dsi->pae_kernel == PAEKERN_no) {
618 ERROR("Non PAE-kernel on PAE host.");
619 return 0;
620 }
621 } else if (dsi->pae_kernel != PAEKERN_no) {
622 ERROR("PAE-kernel on non-PAE host.");
623 return 0;
624 }
626 return 1;
627 }
/*
 * Add @inc to *@pval, failing if the unsigned addition would wrap past
 * (or land exactly on) zero.  Returns 1 on success, 0 on overflow.
 *
 * For *pval != 0, "inc >= -*pval" is the standard wrap test: it is
 * equivalent to inc > ULONG_MAX - *pval, and also flags a sum of
 * exactly zero.  The unguarded form misfires when *pval == 0, because
 * -0 == 0 makes the test true for every inc even though an addition
 * starting from zero can never wrap; guard that case explicitly.
 */
static inline int increment_ulong(unsigned long *pval, unsigned long inc)
{
    if ( (*pval != 0) && (inc >= -*pval) )
    {
        ERROR("Value wrapped to zero: image too large?");
        return 0;
    }
    *pval += inc;
    return 1;
}
640 static int setup_guest(int xc_handle,
641 uint32_t dom,
642 const char *image, unsigned long image_size,
643 struct initrd_info *initrd,
644 unsigned long nr_pages,
645 unsigned long *pvsi, unsigned long *pvke,
646 unsigned long *pvss, vcpu_guest_context_t *ctxt,
647 const char *cmdline,
648 unsigned long shared_info_frame,
649 unsigned long flags,
650 unsigned int store_evtchn, unsigned long *store_mfn,
651 unsigned int console_evtchn, unsigned long *console_mfn,
652 uint32_t required_features[XENFEAT_NR_SUBMAPS])
653 {
654 xen_pfn_t *page_array = NULL;
655 unsigned long count, i;
656 unsigned long long hypercall_page;
657 int hypercall_page_defined;
658 start_info_t *start_info;
659 shared_info_t *shared_info;
660 xc_mmu_t *mmu = NULL;
661 const char *p;
662 DECLARE_DOMCTL;
663 int rc;
665 unsigned long nr_pt_pages;
666 unsigned long physmap_pfn;
667 xen_pfn_t *physmap, *physmap_e;
669 struct load_funcs load_funcs;
670 struct domain_setup_info dsi;
671 unsigned long vinitrd_start;
672 unsigned long vphysmap_start;
673 unsigned long vstartinfo_start;
674 unsigned long vstoreinfo_start;
675 unsigned long vconsole_start;
676 unsigned long vsharedinfo_start = 0; /* XXX gcc */
677 unsigned long vstack_start;
678 unsigned long vstack_end;
679 unsigned long vpt_start;
680 unsigned long vpt_end;
681 unsigned long v_end;
682 unsigned long guest_store_mfn, guest_console_mfn, guest_shared_info_mfn;
683 unsigned long shadow_mode_enabled;
684 uint32_t supported_features[XENFEAT_NR_SUBMAPS] = { 0, };
686 rc = probeimageformat(image, image_size, &load_funcs);
687 if ( rc != 0 )
688 goto error_out;
690 memset(&dsi, 0, sizeof(struct domain_setup_info));
692 rc = (load_funcs.parseimage)(image, image_size, &dsi);
693 if ( rc != 0 )
694 goto error_out;
696 if ( (dsi.v_start & (PAGE_SIZE-1)) != 0 )
697 {
698 PERROR("Guest OS must load to a page boundary.");
699 goto error_out;
700 }
702 if (!compat_check(xc_handle, &dsi))
703 goto error_out;
705 /* Parse and validate kernel features. */
706 if ( (p = xen_elfnote_string(&dsi, XEN_ELFNOTE_FEATURES)) != NULL )
707 {
708 if ( !parse_features(p, supported_features, required_features) )
709 {
710 ERROR("Failed to parse guest kernel features.");
711 goto error_out;
712 }
714 IPRINTF("Supported features = { %08x }.\n", supported_features[0]);
715 IPRINTF("Required features = { %08x }.\n", required_features[0]);
716 }
718 for ( i = 0; i < XENFEAT_NR_SUBMAPS; i++ )
719 {
720 if ( (supported_features[i] & required_features[i]) !=
721 required_features[i] )
722 {
723 ERROR("Guest kernel does not support a required feature.");
724 goto error_out;
725 }
726 }
728 shadow_mode_enabled = test_feature_bit(XENFEAT_auto_translated_physmap,
729 required_features);
731 /*
732 * Why do we need this? The number of page-table frames depends on the
733 * size of the bootstrap address space. But the size of the address space
734 * depends on the number of page-table frames (since each one is mapped
735 * read-only). We have a pair of simultaneous equations in two unknowns,
736 * which we solve by exhaustive search.
737 */
738 v_end = round_pgup(dsi.v_end);
739 if ( v_end == 0 )
740 {
741 ERROR("End of mapped kernel image too close to end of memory");
742 goto error_out;
743 }
744 vinitrd_start = v_end;
745 if ( !increment_ulong(&v_end, round_pgup(initrd->len)) )
746 goto error_out;
747 vphysmap_start = v_end;
748 if ( !increment_ulong(&v_end, round_pgup(nr_pages * sizeof(long))) )
749 goto error_out;
750 vstartinfo_start = v_end;
751 if ( !increment_ulong(&v_end, PAGE_SIZE) )
752 goto error_out;
753 vstoreinfo_start = v_end;
754 if ( !increment_ulong(&v_end, PAGE_SIZE) )
755 goto error_out;
756 vconsole_start = v_end;
757 if ( !increment_ulong(&v_end, PAGE_SIZE) )
758 goto error_out;
759 if ( shadow_mode_enabled ) {
760 vsharedinfo_start = v_end;
761 if ( !increment_ulong(&v_end, PAGE_SIZE) )
762 goto error_out;
763 }
764 vpt_start = v_end;
766 for ( nr_pt_pages = 2; ; nr_pt_pages++ )
767 {
768 /* vpt_end = vpt_staret + (nr_pt_pages * PAGE_SIZE); */
769 vpt_end = vpt_start;
770 if ( !increment_ulong(&vpt_end, nr_pt_pages * PAGE_SIZE) )
771 goto error_out;
773 vstack_start = vpt_end;
774 /* vstack_end = vstack_start + PAGE_SIZE; */
775 vstack_end = vstack_start;
776 if ( !increment_ulong(&vstack_end, PAGE_SIZE) )
777 goto error_out;
779 /* v_end = (vstack_end + (1UL<<22)-1) & ~((1UL<<22)-1); */
780 v_end = vstack_end;
781 if ( !increment_ulong(&v_end, (1UL<<22)-1) )
782 goto error_out;
783 v_end &= ~((1UL<<22)-1);
785 if ( (v_end - vstack_end) < (512UL << 10) )
786 {
787 /* Add extra 4MB to get >= 512kB padding. */
788 if ( !increment_ulong(&v_end, 1UL << 22) )
789 goto error_out;
790 }
792 #define NR(_l,_h,_s) \
793 (((((unsigned long)(_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
794 ((unsigned long)(_l) & ~((1UL<<(_s))-1))) >> (_s))
795 #if defined(__i386__)
796 if ( dsi.pae_kernel != PAEKERN_no )
797 {
798 if ( (1 + /* # L3 */
799 NR(dsi.v_start, v_end, L3_PAGETABLE_SHIFT_PAE) + /* # L2 */
800 NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT_PAE) + /* # L1 */
801 /* Include a fourth mid-level page directory for Xen. */
802 (v_end <= (3 << L3_PAGETABLE_SHIFT_PAE)))
803 <= nr_pt_pages )
804 break;
805 }
806 else
807 {
808 if ( (1 + /* # L2 */
809 NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT)) /* # L1 */
810 <= nr_pt_pages )
811 break;
812 }
813 #elif defined(__x86_64__)
814 if ( (1 + /* # L4 */
815 NR(dsi.v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
816 NR(dsi.v_start, v_end, L3_PAGETABLE_SHIFT) + /* # L2 */
817 NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT)) /* # L1 */
818 <= nr_pt_pages )
819 break;
820 #endif
821 }
823 IPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n");
824 IPRINTF(" Loaded kernel: %p->%p\n", _p(dsi.v_kernstart),
825 _p(dsi.v_kernend));
826 if ( initrd->len )
827 IPRINTF(" Initial ramdisk: %p->%p\n", _p(vinitrd_start),
828 _p(vinitrd_start + initrd->len));
829 IPRINTF(" Phys-Mach map: %p\n", _p(vphysmap_start));
830 IPRINTF(" Start info: %p\n", _p(vstartinfo_start));
831 IPRINTF(" Store page: %p\n", _p(vstoreinfo_start));
832 IPRINTF(" Console page: %p\n", _p(vconsole_start));
833 if ( shadow_mode_enabled )
834 IPRINTF(" Shared Info page: %p\n", _p(vsharedinfo_start));
835 IPRINTF(" Page tables: %p\n", _p(vpt_start));
836 IPRINTF(" Boot stack: %p\n", _p(vstack_start));
837 IPRINTF(" TOTAL: %p->%p\n", _p(dsi.v_start), _p(v_end));
838 IPRINTF(" ENTRY ADDRESS: %p\n", _p(dsi.v_kernentry));
840 if ( ((v_end - dsi.v_start)>>PAGE_SHIFT) > nr_pages )
841 {
842 PERROR("Initial guest OS requires too much space\n"
843 "(%pMB is greater than %luMB limit)\n",
844 _p((v_end-dsi.v_start)>>20), nr_pages>>(20-PAGE_SHIFT));
845 goto error_out;
846 }
848 if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
849 {
850 PERROR("Could not allocate memory");
851 goto error_out;
852 }
854 if ( xc_get_pfn_list(xc_handle, dom, page_array, nr_pages) != nr_pages )
855 {
856 PERROR("Could not get the page frame list");
857 goto error_out;
858 }
860 rc = (load_funcs.loadimage)(image, image_size,
861 xc_handle, dom, page_array,
862 &dsi);
863 if ( rc != 0 )
864 goto error_out;
866 if ( load_initrd(xc_handle, dom, initrd,
867 vinitrd_start - dsi.v_start, page_array) )
868 goto error_out;
870 /* setup page tables */
871 #if defined(__i386__)
872 if (dsi.pae_kernel != PAEKERN_no)
873 rc = setup_pg_tables_pae(xc_handle, dom, ctxt,
874 dsi.v_start, v_end,
875 page_array, vpt_start, vpt_end,
876 shadow_mode_enabled, dsi.pae_kernel);
877 else
878 rc = setup_pg_tables(xc_handle, dom, ctxt,
879 dsi.v_start, v_end,
880 page_array, vpt_start, vpt_end,
881 shadow_mode_enabled);
882 #endif
883 #if defined(__x86_64__)
884 rc = setup_pg_tables_64(xc_handle, dom, ctxt,
885 dsi.v_start, v_end,
886 page_array, vpt_start, vpt_end,
887 shadow_mode_enabled);
888 #endif
889 if (0 != rc)
890 goto error_out;
892 #if defined(__i386__)
893 /*
894 * Pin down l2tab addr as page dir page - causes hypervisor to provide
895 * correct protection for the page
896 */
897 if ( !shadow_mode_enabled )
898 {
899 if ( dsi.pae_kernel != PAEKERN_no )
900 {
901 if ( pin_table(xc_handle, MMUEXT_PIN_L3_TABLE,
902 xen_cr3_to_pfn(ctxt->ctrlreg[3]), dom) )
903 goto error_out;
904 }
905 else
906 {
907 if ( pin_table(xc_handle, MMUEXT_PIN_L2_TABLE,
908 xen_cr3_to_pfn(ctxt->ctrlreg[3]), dom) )
909 goto error_out;
910 }
911 }
912 #endif
914 #if defined(__x86_64__)
915 /*
916 * Pin down l4tab addr as page dir page - causes hypervisor to provide
917 * correct protection for the page
918 */
919 if ( pin_table(xc_handle, MMUEXT_PIN_L4_TABLE,
920 xen_cr3_to_pfn(ctxt->ctrlreg[3]), dom) )
921 goto error_out;
922 #endif
924 if ( (mmu = xc_init_mmu_updates(xc_handle, dom)) == NULL )
925 goto error_out;
927 /* Write the phys->machine and machine->phys table entries. */
928 physmap_pfn = (vphysmap_start - dsi.v_start) >> PAGE_SHIFT;
929 physmap = physmap_e = xc_map_foreign_range(
930 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
931 page_array[physmap_pfn++]);
933 for ( count = 0; count < nr_pages; count++ )
934 {
935 if ( xc_add_mmu_update(
936 xc_handle, mmu,
937 ((uint64_t)page_array[count] << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
938 count) )
939 {
940 DPRINTF("m2p update failure p=%lx m=%"PRIx64"\n",
941 count, (uint64_t)page_array[count]);
942 munmap(physmap, PAGE_SIZE);
943 goto error_out;
944 }
945 *physmap_e++ = page_array[count];
946 if ( ((unsigned long)physmap_e & (PAGE_SIZE-1)) == 0 )
947 {
948 munmap(physmap, PAGE_SIZE);
949 physmap = physmap_e = xc_map_foreign_range(
950 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
951 page_array[physmap_pfn++]);
952 }
953 }
954 munmap(physmap, PAGE_SIZE);
956 /* Send the page update requests down to the hypervisor. */
957 if ( xc_finish_mmu_updates(xc_handle, mmu) )
958 goto error_out;
960 if ( shadow_mode_enabled )
961 {
962 struct xen_add_to_physmap xatp;
964 /* Enable shadow translate mode */
965 if ( xc_shadow_control(xc_handle, dom,
966 XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE,
967 NULL, 0, NULL, 0, NULL) < 0 )
968 {
969 PERROR("Could not enable translation mode");
970 goto error_out;
971 }
973 guest_shared_info_mfn = (vsharedinfo_start-dsi.v_start) >> PAGE_SHIFT;
975 /* Map shared info frame into guest physmap. */
976 xatp.domid = dom;
977 xatp.space = XENMAPSPACE_shared_info;
978 xatp.idx = 0;
979 xatp.gpfn = guest_shared_info_mfn;
980 rc = xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp);
981 if ( rc != 0 )
982 {
983 PERROR("Cannot map shared info pfn");
984 goto error_out;
985 }
987 /* Map grant table frames into guest physmap. */
988 for ( i = 0; ; i++ )
989 {
990 xatp.domid = dom;
991 xatp.space = XENMAPSPACE_grant_table;
992 xatp.idx = i;
993 xatp.gpfn = nr_pages + i;
994 rc = xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp);
995 if ( rc != 0 )
996 {
997 if ( errno == EINVAL )
998 break; /* done all grant tables */
999 PERROR("Cannot map grant table pfn");
1000 goto error_out;
1004 else
1006 guest_shared_info_mfn = shared_info_frame;
1009 *store_mfn = page_array[(vstoreinfo_start-dsi.v_start) >> PAGE_SHIFT];
1010 *console_mfn = page_array[(vconsole_start-dsi.v_start) >> PAGE_SHIFT];
1011 if ( xc_clear_domain_page(xc_handle, dom, *store_mfn) ||
1012 xc_clear_domain_page(xc_handle, dom, *console_mfn) )
1013 goto error_out;
1014 if ( shadow_mode_enabled )
1016 guest_store_mfn = (vstoreinfo_start-dsi.v_start) >> PAGE_SHIFT;
1017 guest_console_mfn = (vconsole_start-dsi.v_start) >> PAGE_SHIFT;
1019 else
1021 guest_store_mfn = *store_mfn;
1022 guest_console_mfn = *console_mfn;
1025 start_info = xc_map_foreign_range(
1026 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
1027 page_array[(vstartinfo_start-dsi.v_start)>>PAGE_SHIFT]);
1028 /*shared_info, start_info */
1029 memset(start_info, 0, sizeof(*start_info));
1030 rc = xc_version(xc_handle, XENVER_version, NULL);
1031 sprintf(start_info->magic, "xen-%i.%i-x86_%d%s",
1032 rc >> 16, rc & (0xFFFF), (unsigned int)sizeof(long)*8,
1033 (dsi.pae_kernel != PAEKERN_no) ? "p" : "");
1034 start_info->nr_pages = nr_pages;
1035 start_info->shared_info = guest_shared_info_mfn << PAGE_SHIFT;
1036 start_info->flags = flags;
1037 start_info->pt_base = vpt_start;
1038 start_info->nr_pt_frames = nr_pt_pages;
1039 start_info->mfn_list = vphysmap_start;
1040 start_info->store_mfn = guest_store_mfn;
1041 start_info->store_evtchn = store_evtchn;
1042 start_info->console.domU.mfn = guest_console_mfn;
1043 start_info->console.domU.evtchn = console_evtchn;
1044 if ( initrd->len != 0 )
1046 start_info->mod_start = vinitrd_start;
1047 start_info->mod_len = initrd->len;
1049 if ( cmdline != NULL )
1051 strncpy((char *)start_info->cmd_line, cmdline, MAX_GUEST_CMDLINE);
1052 start_info->cmd_line[MAX_GUEST_CMDLINE-1] = '\0';
1054 munmap(start_info, PAGE_SIZE);
1056 /* shared_info page starts its life empty. */
1057 shared_info = xc_map_foreign_range(
1058 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, shared_info_frame);
1059 memset(shared_info, 0, PAGE_SIZE);
1060 /* Mask all upcalls... */
1061 for ( i = 0; i < MAX_VIRT_CPUS; i++ )
1062 shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
1064 munmap(shared_info, PAGE_SIZE);
1066 /* Send the page update requests down to the hypervisor. */
1067 if ( xc_finish_mmu_updates(xc_handle, mmu) )
1068 goto error_out;
1070 hypercall_page = xen_elfnote_numeric(&dsi, XEN_ELFNOTE_HYPERCALL_PAGE,
1071 &hypercall_page_defined);
1072 if ( hypercall_page_defined )
1074 unsigned long long pfn = (hypercall_page - dsi.v_start) >> PAGE_SHIFT;
1075 if ( pfn >= nr_pages )
1076 goto error_out;
1077 domctl.domain = (domid_t)dom;
1078 domctl.u.hypercall_init.gmfn = shadow_mode_enabled ?
1079 pfn : page_array[pfn];
1080 domctl.cmd = XEN_DOMCTL_hypercall_init;
1081 if ( xc_domctl(xc_handle, &domctl) )
1082 goto error_out;
1085 free(mmu);
1086 free(page_array);
1088 *pvsi = vstartinfo_start;
1089 *pvss = vstack_start;
1090 *pvke = dsi.v_kernentry;
1092 return 0;
1094 error_out:
1095 free(mmu);
1096 free(page_array);
1097 return -1;
1099 #endif
1101 static int xc_linux_build_internal(int xc_handle,
1102 uint32_t domid,
1103 char *image,
1104 unsigned long image_size,
1105 struct initrd_info *initrd,
1106 const char *cmdline,
1107 const char *features,
1108 unsigned long flags,
1109 unsigned int store_evtchn,
1110 unsigned long *store_mfn,
1111 unsigned int console_evtchn,
1112 unsigned long *console_mfn)
1114 struct xen_domctl launch_domctl;
1115 DECLARE_DOMCTL;
1116 int rc, i;
1117 vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
1118 unsigned long nr_pages;
1119 unsigned long vstartinfo_start, vkern_entry, vstack_start;
1120 uint32_t features_bitmap[XENFEAT_NR_SUBMAPS] = { 0, };
1122 if ( features != NULL )
1124 if ( !parse_features(features, features_bitmap, NULL) )
1126 PERROR("Failed to parse configured features\n");
1127 goto error_out;
1131 if ( (nr_pages = get_tot_pages(xc_handle, domid)) < 0 )
1133 PERROR("Could not find total pages for domain");
1134 goto error_out;
1137 #ifdef VALGRIND
1138 memset(&st_ctxt, 0, sizeof(st_ctxt));
1139 #endif
1141 if ( lock_pages(&st_ctxt, sizeof(st_ctxt) ) )
1143 PERROR("%s: ctxt lock failed", __func__);
1144 return 1;
1147 domctl.cmd = XEN_DOMCTL_getdomaininfo;
1148 domctl.domain = (domid_t)domid;
1149 if ( (xc_domctl(xc_handle, &domctl) < 0) ||
1150 ((uint16_t)domctl.domain != domid) )
1152 PERROR("Could not get info on domain");
1153 goto error_out;
1156 memset(ctxt, 0, sizeof(*ctxt));
1158 if ( setup_guest(xc_handle, domid, image, image_size,
1159 initrd,
1160 nr_pages,
1161 &vstartinfo_start, &vkern_entry,
1162 &vstack_start, ctxt, cmdline,
1163 domctl.u.getdomaininfo.shared_info_frame,
1164 flags, store_evtchn, store_mfn,
1165 console_evtchn, console_mfn,
1166 features_bitmap) < 0 )
1168 ERROR("Error constructing guest OS");
1169 goto error_out;
1172 #ifdef __ia64__
1173 /* based on new_thread in xen/arch/ia64/domain.c */
1174 ctxt->flags = 0;
1175 ctxt->user_regs.cr_ipsr = 0; /* all necessary bits filled by hypervisor */
1176 ctxt->user_regs.cr_iip = vkern_entry;
1177 ctxt->user_regs.cr_ifs = 1UL << 63;
1178 ctxt->user_regs.ar_fpsr = xc_ia64_fpsr_default();
1179 i = 0; /* silence unused variable warning */
1180 #else /* x86 */
1181 /*
1182 * Initial register values:
1183 * DS,ES,FS,GS = FLAT_KERNEL_DS
1184 * CS:EIP = FLAT_KERNEL_CS:start_pc
1185 * SS:ESP = FLAT_KERNEL_DS:start_stack
1186 * ESI = start_info
1187 * [EAX,EBX,ECX,EDX,EDI,EBP are zero]
1188 * EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
1189 */
1190 ctxt->user_regs.ds = FLAT_KERNEL_DS;
1191 ctxt->user_regs.es = FLAT_KERNEL_DS;
1192 ctxt->user_regs.fs = FLAT_KERNEL_DS;
1193 ctxt->user_regs.gs = FLAT_KERNEL_DS;
1194 ctxt->user_regs.ss = FLAT_KERNEL_SS;
1195 ctxt->user_regs.cs = FLAT_KERNEL_CS;
1196 ctxt->user_regs.eip = vkern_entry;
1197 ctxt->user_regs.esp = vstack_start + PAGE_SIZE;
1198 ctxt->user_regs.esi = vstartinfo_start;
1199 ctxt->user_regs.eflags = 1 << 9; /* Interrupt Enable */
1201 ctxt->flags = VGCF_IN_KERNEL;
1203 /* FPU is set up to default initial state. */
1204 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
1206 /* Virtual IDT is empty at start-of-day. */
1207 for ( i = 0; i < 256; i++ )
1209 ctxt->trap_ctxt[i].vector = i;
1210 ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS;
1213 /* No LDT. */
1214 ctxt->ldt_ents = 0;
1216 /* Use the default Xen-provided GDT. */
1217 ctxt->gdt_ents = 0;
1219 /* Ring 1 stack is the initial stack. */
1220 ctxt->kernel_ss = FLAT_KERNEL_SS;
1221 ctxt->kernel_sp = vstack_start + PAGE_SIZE;
1223 /* No debugging. */
1224 memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));
1226 /* No callback handlers. */
1227 #if defined(__i386__)
1228 ctxt->event_callback_cs = FLAT_KERNEL_CS;
1229 ctxt->event_callback_eip = 0;
1230 ctxt->failsafe_callback_cs = FLAT_KERNEL_CS;
1231 ctxt->failsafe_callback_eip = 0;
1232 #elif defined(__x86_64__)
1233 ctxt->event_callback_eip = 0;
1234 ctxt->failsafe_callback_eip = 0;
1235 ctxt->syscall_callback_eip = 0;
1236 #endif
1237 #endif /* x86 */
1239 memset( &launch_domctl, 0, sizeof(launch_domctl) );
1241 launch_domctl.domain = (domid_t)domid;
1242 launch_domctl.u.vcpucontext.vcpu = 0;
1243 set_xen_guest_handle(launch_domctl.u.vcpucontext.ctxt, ctxt);
1245 launch_domctl.cmd = XEN_DOMCTL_setvcpucontext;
1246 rc = xc_domctl(xc_handle, &launch_domctl);
1248 return rc;
1250 error_out:
1251 return -1;
1254 int xc_linux_build_mem(int xc_handle,
1255 uint32_t domid,
1256 const char *image_buffer,
1257 unsigned long image_size,
1258 const char *initrd,
1259 unsigned long initrd_len,
1260 const char *cmdline,
1261 const char *features,
1262 unsigned long flags,
1263 unsigned int store_evtchn,
1264 unsigned long *store_mfn,
1265 unsigned int console_evtchn,
1266 unsigned long *console_mfn)
1268 int sts;
1269 char *img_buf;
1270 unsigned long img_len;
1271 struct initrd_info initrd_info = { .type = INITRD_none };
1273 /* A kernel buffer is required */
1274 if ( (image_buffer == NULL) || (image_size == 0) )
1276 ERROR("kernel image buffer not present");
1277 return -1;
1280 /* If it's gzipped, inflate it; otherwise, use as is */
1281 /* xc_inflate_buffer may return the same buffer pointer if */
1282 /* the buffer is already inflated */
1283 img_buf = xc_inflate_buffer(image_buffer, image_size, &img_len);
1284 if ( img_buf == NULL )
1286 ERROR("unable to inflate kernel image buffer");
1287 return -1;
1290 /* RAM disks are optional; if we get one, inflate it */
1291 if ( initrd != NULL )
1293 initrd_info.type = INITRD_mem;
1294 initrd_info.u.mem_addr = xc_inflate_buffer(
1295 initrd, initrd_len, &initrd_info.len);
1296 if ( initrd_info.u.mem_addr == NULL )
1298 ERROR("unable to inflate ram disk buffer");
1299 sts = -1;
1300 goto out;
1304 sts = xc_linux_build_internal(xc_handle, domid, img_buf, img_len,
1305 &initrd_info, cmdline, features, flags,
1306 store_evtchn, store_mfn,
1307 console_evtchn, console_mfn);
1309 out:
1310 /* The inflation routines may pass back the same buffer so be */
1311 /* sure that we have a buffer and that it's not the one passed in. */
1312 /* Don't unnecessarily annoy/surprise/confound the caller */
1313 if ( (img_buf != NULL) && (img_buf != image_buffer) )
1314 free(img_buf);
1315 if ( (initrd_info.u.mem_addr != NULL) &&
1316 (initrd_info.u.mem_addr != initrd) )
1317 free(initrd_info.u.mem_addr);
1319 return sts;
1322 int xc_linux_build(int xc_handle,
1323 uint32_t domid,
1324 const char *image_name,
1325 const char *initrd_name,
1326 const char *cmdline,
1327 const char *features,
1328 unsigned long flags,
1329 unsigned int store_evtchn,
1330 unsigned long *store_mfn,
1331 unsigned int console_evtchn,
1332 unsigned long *console_mfn)
1334 char *image = NULL;
1335 unsigned long image_size;
1336 struct initrd_info initrd_info = { .type = INITRD_none };
1337 int fd = -1, sts = -1;
1339 if ( (image_name == NULL) ||
1340 ((image = xc_read_image(image_name, &image_size)) == NULL ))
1341 return -1;
1343 if ( (initrd_name != NULL) && (strlen(initrd_name) != 0) )
1345 initrd_info.type = INITRD_file;
1347 if ( (fd = open(initrd_name, O_RDONLY)) < 0 )
1349 PERROR("Could not open the initial ramdisk image");
1350 goto error_out;
1353 initrd_info.len = xc_get_filesz(fd);
1354 if ( (initrd_info.u.file_handle = gzdopen(fd, "rb")) == NULL )
1356 PERROR("Could not allocate decompression state for initrd");
1357 goto error_out;
1361 sts = xc_linux_build_internal(xc_handle, domid, image, image_size,
1362 &initrd_info, cmdline, features, flags,
1363 store_evtchn, store_mfn,
1364 console_evtchn, console_mfn);
1366 error_out:
1367 free(image);
1368 if ( initrd_info.type == INITRD_file && initrd_info.u.file_handle )
1369 gzclose(initrd_info.u.file_handle);
1370 else if ( fd >= 0 )
1371 close(fd);
1373 return sts;
1376 /*
1377 * Local variables:
1378 * mode: C
1379 * c-set-style: "BSD"
1380 * c-basic-offset: 4
1381 * tab-width: 4
1382 * indent-tabs-mode: nil
1383 * End:
1384 */