direct-io.hg

view tools/libxc/xc_linux_build.c @ 12988:e080700efa56

[TOOLS] Fix the build. Clearly demarcate PPC-specific stuff.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Dec 13 10:23:53 2006 +0000 (2006-12-13)
parents 6edf8b33e7d1
children ac51e8f37108
line source
1 /******************************************************************************
2 * xc_linux_build.c
3 */
5 #include <stddef.h>
6 #include "xg_private.h"
7 #include "xc_private.h"
8 #include <xenctrl.h>
10 #include "xc_elf.h"
11 #include <stdlib.h>
12 #include <unistd.h>
13 #include <inttypes.h>
14 #include <zlib.h>
16 /* Handy for printing out '0' prepended values at native pointer size */
17 #define _p(a) ((void *) ((ulong)a))
/* Protection bits applied to guest page-table entries at each level. */
19 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
20 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
21 #if defined(__i386__)
/* On i386 PAE, L3 entries carry only the present bit. */
22 #define L3_PROT (_PAGE_PRESENT)
23 #elif defined(__x86_64__)
24 #define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
25 #define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
26 #endif
/* Round an address up/down to the nearest page boundary. */
28 #define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
29 #define round_pgdown(_p) ((_p)&PAGE_MASK)
/*
 * Description of the initial ramdisk to load into the new domain:
 * none at all, a gzipped file (read via zlib), or an in-memory image.
 */
31 struct initrd_info {
32 enum { INITRD_none, INITRD_file, INITRD_mem } type;
33 /*
34 * .len must be filled in by the user for type==INITRD_mem. It is
35 * filled in by load_initrd() for INITRD_file and unused for
36 * INITRD_none.
37 */
38 unsigned long len;
39 union {
40 gzFile file_handle;
41 char *mem_addr;
42 } u;
43 };
/*
 * Human-readable names for the Xen feature bits, indexed by bit number.
 * Unnamed bits stay NULL; parse_features() uses this table to translate
 * the kernel's feature string into bitmaps.
 */
45 static const char *feature_names[XENFEAT_NR_SUBMAPS*32] = {
46 [XENFEAT_writable_page_tables] = "writable_page_tables",
47 [XENFEAT_writable_descriptor_tables] = "writable_descriptor_tables",
48 [XENFEAT_auto_translated_physmap] = "auto_translated_physmap",
49 [XENFEAT_supervisor_mode_kernel] = "supervisor_mode_kernel",
50 [XENFEAT_pae_pgdir_above_4gb] = "pae_pgdir_above_4gb"
51 };
/*
 * Set bit 'nr' in the feature bitmap 'addr' (array of 32-bit submaps).
 * The shifted constant is unsigned: left-shifting signed 1 into bit 31
 * (nr & 31 == 31) is undefined behaviour in C.
 */
static inline void set_feature_bit (int nr, uint32_t *addr)
{
    addr[nr>>5] |= (1u<<(nr&31));
}
/*
 * Return 1 if bit 'nr' is set in the feature bitmap 'addr', else 0.
 * Unsigned shift constant avoids undefined behaviour when nr & 31 == 31
 * (signed 1 << 31 overflows int).
 */
static inline int test_feature_bit(int nr, uint32_t *addr)
{
    return !!(addr[nr>>5] & (1u<<(nr&31)));
}
63 static int parse_features(
64 const char *feats,
65 uint32_t supported[XENFEAT_NR_SUBMAPS],
66 uint32_t required[XENFEAT_NR_SUBMAPS])
67 {
68 const char *end, *p;
69 int i, req;
71 if ( (end = strchr(feats, ',')) == NULL )
72 end = feats + strlen(feats);
74 while ( feats < end )
75 {
76 p = strchr(feats, '|');
77 if ( (p == NULL) || (p > end) )
78 p = end;
80 req = (*feats == '!');
81 if ( req )
82 feats++;
84 for ( i = 0; i < XENFEAT_NR_SUBMAPS*32; i++ )
85 {
86 if ( feature_names[i] == NULL )
87 continue;
89 if ( strncmp(feature_names[i], feats, p-feats) == 0 )
90 {
91 set_feature_bit(i, supported);
92 if ( required && req )
93 set_feature_bit(i, required);
94 break;
95 }
96 }
98 if ( i == XENFEAT_NR_SUBMAPS*32 )
99 {
100 ERROR("Unknown feature \"%.*s\".", (int)(p-feats), feats);
101 if ( req )
102 {
103 ERROR("Kernel requires an unknown hypervisor feature.");
104 return -EINVAL;
105 }
106 }
108 feats = p;
109 if ( *feats == '|' )
110 feats++;
111 }
113 return -EINVAL;
114 }
116 static int probeimageformat(const char *image,
117 unsigned long image_size,
118 struct load_funcs *load_funcs)
119 {
120 if ( probe_elf(image, image_size, load_funcs) &&
121 probe_bin(image, image_size, load_funcs) )
122 {
123 xc_set_error(XC_INVALID_KERNEL, "Not a valid ELF or raw kernel image");
124 return -EINVAL;
125 }
127 return 0;
128 }
130 static int load_initrd(int xc_handle, domid_t dom,
131 struct initrd_info *initrd,
132 unsigned long physbase,
133 xen_pfn_t *phys_to_mach)
134 {
135 char page[PAGE_SIZE];
136 unsigned long pfn_start, pfn;
138 if ( initrd->type == INITRD_none )
139 return 0;
141 pfn_start = physbase >> PAGE_SHIFT;
143 if ( initrd->type == INITRD_mem )
144 {
145 unsigned long nr_pages = (initrd->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
147 for ( pfn = pfn_start; pfn < (pfn_start + nr_pages); pfn++ )
148 {
149 xc_copy_to_domain_page(
150 xc_handle, dom, phys_to_mach[pfn],
151 &initrd->u.mem_addr[(pfn - pfn_start) << PAGE_SHIFT]);
152 }
153 }
154 else
155 {
156 int readlen;
158 pfn = pfn_start;
159 initrd->len = 0;
161 /* gzread returns 0 on EOF */
162 while ( (readlen = gzread(initrd->u.file_handle, page, PAGE_SIZE)) )
163 {
164 if ( readlen < 0 )
165 {
166 PERROR("Error reading initrd image, could not");
167 return -EINVAL;
168 }
170 initrd->len += readlen;
171 xc_copy_to_domain_page(xc_handle, dom, phys_to_mach[pfn++], page);
172 }
173 }
175 return 0;
176 }
/*
 * Claim the next free pseudo-phys page (ppt_alloc is advanced), set
 * 'ltab' to that page's machine address, unmap any previous 'vltab'
 * mapping and remap 'vltab' over the new page, then zero it.
 * On mapping failure this jumps to the enclosing function's error_out
 * label; it also relies on xc_handle, dom and page_array being in scope.
 */
178 #define alloc_pt(ltab, vltab) \
179 do { \
180 ltab = ppt_alloc++; \
181 ltab = (uint64_t)page_array[ltab] << PAGE_SHIFT; \
182 if ( vltab != NULL ) \
183 munmap(vltab, PAGE_SIZE); \
184 if ( (vltab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, \
185 PROT_READ|PROT_WRITE, \
186 ltab >> PAGE_SHIFT)) == NULL ) \
187 goto error_out; \
188 memset(vltab, 0x0, PAGE_SIZE); \
189 } while ( 0 )
191 #if defined(__i386__)
/*
 * Build the 2-level (non-PAE i386) bootstrap page tables for the new
 * domain, mapping [dsi_v_start, v_end).  Page-table frames are taken
 * from the pseudo-phys range starting at vpt_start; unless shadow mode
 * is enabled, the frames holding the page tables themselves are mapped
 * read-only as Xen requires.  CR3 is stored into ctxt->ctrlreg[3].
 * Returns 0 on success, -1 on failure (partial mappings unwound).
 */
193 static int setup_pg_tables(int xc_handle, uint32_t dom,
194 vcpu_guest_context_t *ctxt,
195 unsigned long dsi_v_start,
196 unsigned long v_end,
197 xen_pfn_t *page_array,
198 unsigned long vpt_start,
199 unsigned long vpt_end,
200 unsigned shadow_mode_enabled)
201 {
202 l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
203 l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
204 unsigned long l1tab = 0;
205 unsigned long l2tab = 0;
206 unsigned long ppt_alloc;
207 unsigned long count;
/* First PT frame is the pfn at vpt_start; allocate the L2 (page dir). */
209 ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
210 alloc_pt(l2tab, vl2tab);
211 vl2e = &vl2tab[l2_table_offset(dsi_v_start)];
212 ctxt->ctrlreg[3] = xen_pfn_to_cr3(l2tab >> PAGE_SHIFT);
/* One L1 entry per page of the bootstrap region. */
214 for ( count = 0; count < ((v_end - dsi_v_start) >> PAGE_SHIFT); count++ )
215 {
/* Crossed into a new L1 page: allocate it and hook it into the L2. */
216 if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
217 {
218 alloc_pt(l1tab, vl1tab);
219 vl1e = &vl1tab[l1_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
220 *vl2e++ = l1tab | L2_PROT;
221 }
223 *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
/* Page-table frames themselves must be read-only (unless shadowed). */
225 if ( !shadow_mode_enabled )
226 if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
227 (count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
228 *vl1e &= ~_PAGE_RW;
230 vl1e++;
231 }
232 munmap(vl1tab, PAGE_SIZE);
233 munmap(vl2tab, PAGE_SIZE);
234 return 0;
/* alloc_pt jumps here on mapping failure; drop any live mappings. */
236 error_out:
237 if (vl1tab)
238 munmap(vl1tab, PAGE_SIZE);
239 if (vl2tab)
240 munmap(vl2tab, PAGE_SIZE);
241 return -1;
242 }
/*
 * Build the 3-level PAE bootstrap page tables for the new domain,
 * mapping [dsi_v_start, v_end).  Unless the kernel supports extended
 * CR3, the L3 page directory must live below 4GB, so it is swapped for
 * a low machine frame if necessary.  PT frames are mapped read-only
 * unless shadow mode is enabled.  Returns 0 on success, -1 on failure.
 */
244 static int setup_pg_tables_pae(int xc_handle, uint32_t dom,
245 vcpu_guest_context_t *ctxt,
246 unsigned long dsi_v_start,
247 unsigned long v_end,
248 xen_pfn_t *page_array,
249 unsigned long vpt_start,
250 unsigned long vpt_end,
251 unsigned shadow_mode_enabled,
252 unsigned pae_mode)
253 {
254 l1_pgentry_64_t *vl1tab = NULL, *vl1e = NULL;
255 l2_pgentry_64_t *vl2tab = NULL, *vl2e = NULL;
256 l3_pgentry_64_t *vl3tab = NULL, *vl3e = NULL;
257 uint64_t l1tab, l2tab, l3tab;
258 unsigned long ppt_alloc, count, nmfn;
260 /* First allocate page for page dir. */
261 ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
/* Old-style PAE kernels need the L3 below 4GB; extended-cr3 ones do not. */
263 if ( pae_mode == PAEKERN_extended_cr3 )
264 {
265 ctxt->vm_assist |= (1UL << VMASST_TYPE_pae_extended_cr3);
266 }
267 else if ( page_array[ppt_alloc] > 0xfffff )
268 {
269 nmfn = xc_make_page_below_4G(xc_handle, dom, page_array[ppt_alloc]);
270 if ( nmfn == 0 )
271 {
272 DPRINTF("Couldn't get a page below 4GB :-(\n");
273 goto error_out;
274 }
275 page_array[ppt_alloc] = nmfn;
276 }
278 alloc_pt(l3tab, vl3tab);
279 vl3e = &vl3tab[l3_table_offset_pae(dsi_v_start)];
280 ctxt->ctrlreg[3] = xen_pfn_to_cr3(l3tab >> PAGE_SHIFT);
282 for ( count = 0; count < ((v_end - dsi_v_start) >> PAGE_SHIFT); count++)
283 {
/* Crossed into a new L1 page: allocate L2 first if that also wrapped. */
284 if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
285 {
286 if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
287 {
288 alloc_pt(l2tab, vl2tab);
289 vl2e = &vl2tab[l2_table_offset_pae(
290 dsi_v_start + (count << PAGE_SHIFT))];
291 *vl3e++ = l2tab | L3_PROT;
292 }
294 alloc_pt(l1tab, vl1tab);
295 vl1e = &vl1tab[l1_table_offset_pae(
296 dsi_v_start + (count << PAGE_SHIFT))];
297 *vl2e++ = l1tab | L2_PROT;
299 }
301 *vl1e = ((uint64_t)page_array[count] << PAGE_SHIFT) | L1_PROT;
/* Page-table frames themselves must be read-only (unless shadowed). */
303 if ( !shadow_mode_enabled )
304 if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
305 (count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
306 *vl1e &= ~_PAGE_RW;
308 vl1e++;
309 }
311 /* Xen requires a mid-level pgdir mapping 0xC0000000 region. */
312 if ( (vl3tab[3] & _PAGE_PRESENT) == 0 )
313 {
314 alloc_pt(l2tab, vl2tab);
315 vl3tab[3] = l2tab | L3_PROT;
316 }
318 munmap(vl1tab, PAGE_SIZE);
319 munmap(vl2tab, PAGE_SIZE);
320 munmap(vl3tab, PAGE_SIZE);
321 return 0;
/* alloc_pt jumps here on mapping failure; drop any live mappings. */
323 error_out:
324 if (vl1tab)
325 munmap(vl1tab, PAGE_SIZE);
326 if (vl2tab)
327 munmap(vl2tab, PAGE_SIZE);
328 if (vl3tab)
329 munmap(vl3tab, PAGE_SIZE);
330 return -1;
331 }
333 #endif
335 #if defined(__x86_64__)
/*
 * Build the 4-level x86-64 bootstrap page tables for the new domain,
 * mapping [dsi_v_start, v_end).  PT frames come from the pseudo-phys
 * range at vpt_start and are mapped read-only unless shadow mode is
 * enabled.  CR3 is stored into ctxt->ctrlreg[3].
 * Returns 0 on success, -1 on failure (partial mappings unwound).
 */
337 static int setup_pg_tables_64(int xc_handle, uint32_t dom,
338 vcpu_guest_context_t *ctxt,
339 unsigned long dsi_v_start,
340 unsigned long v_end,
341 xen_pfn_t *page_array,
342 unsigned long vpt_start,
343 unsigned long vpt_end,
344 int shadow_mode_enabled)
345 {
346 l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
347 l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
348 l3_pgentry_t *vl3tab=NULL, *vl3e=NULL;
349 l4_pgentry_t *vl4tab=NULL, *vl4e=NULL;
350 unsigned long l2tab = 0;
351 unsigned long l1tab = 0;
352 unsigned long l3tab = 0;
353 unsigned long l4tab = 0;
354 unsigned long ppt_alloc;
355 unsigned long count;
357 /* First allocate page for page dir. */
358 ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
359 alloc_pt(l4tab, vl4tab);
360 vl4e = &vl4tab[l4_table_offset(dsi_v_start)];
361 ctxt->ctrlreg[3] = xen_pfn_to_cr3(l4tab >> PAGE_SHIFT);
363 for ( count = 0; count < ((v_end-dsi_v_start)>>PAGE_SHIFT); count++)
364 {
/* On each L1 page boundary, refill the upper levels as they wrap too. */
365 if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
366 {
367 alloc_pt(l1tab, vl1tab);
369 if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
370 {
371 alloc_pt(l2tab, vl2tab);
372 if ( !((unsigned long)vl3e & (PAGE_SIZE-1)) )
373 {
374 alloc_pt(l3tab, vl3tab);
375 vl3e = &vl3tab[l3_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
376 *vl4e++ = l3tab | L4_PROT;
377 }
378 vl2e = &vl2tab[l2_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
379 *vl3e++ = l2tab | L3_PROT;
380 }
381 vl1e = &vl1tab[l1_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
382 *vl2e++ = l1tab | L2_PROT;
383 }
385 *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
/* Page-table frames themselves must be read-only (unless shadowed). */
387 if ( !shadow_mode_enabled )
388 if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
389 (count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
390 *vl1e &= ~_PAGE_RW;
392 vl1e++;
393 }
395 munmap(vl1tab, PAGE_SIZE);
396 munmap(vl2tab, PAGE_SIZE);
397 munmap(vl3tab, PAGE_SIZE);
398 munmap(vl4tab, PAGE_SIZE);
399 return 0;
/* alloc_pt jumps here on mapping failure; drop any live mappings. */
401 error_out:
402 if (vl1tab)
403 munmap(vl1tab, PAGE_SIZE);
404 if (vl2tab)
405 munmap(vl2tab, PAGE_SIZE);
406 if (vl3tab)
407 munmap(vl3tab, PAGE_SIZE);
408 if (vl4tab)
409 munmap(vl4tab, PAGE_SIZE);
410 return -1;
411 }
412 #endif
414 #ifdef __ia64__
/*
 * ia64 variant: populate the new domain's memory, load the kernel and
 * initrd, build the firmware via XEN_DOMCTL_arch_setup, and fill in the
 * start_info page and boot parameters.  The top three pfns are reserved
 * for start_info, the xenstore page and the console page respectively.
 * Returns 0 on success, -1 on failure.
 */
415 static int setup_guest(int xc_handle,
416 uint32_t dom,
417 const char *image, unsigned long image_size,
418 struct initrd_info *initrd,
419 unsigned long nr_pages,
420 unsigned long *pvsi, unsigned long *pvke,
421 unsigned long *pvss, vcpu_guest_context_t *ctxt,
422 const char *cmdline,
423 unsigned long shared_info_frame,
424 unsigned long flags,
425 unsigned int store_evtchn, unsigned long *store_mfn,
426 unsigned int console_evtchn, unsigned long *console_mfn,
427 uint32_t required_features[XENFEAT_NR_SUBMAPS])
428 {
429 xen_pfn_t *page_array = NULL;
430 struct load_funcs load_funcs;
431 struct domain_setup_info dsi;
432 unsigned long vinitrd_start;
433 unsigned long vinitrd_end;
434 unsigned long v_end;
435 unsigned long start_page, pgnr;
436 start_info_t *start_info;
437 unsigned long start_info_mpa;
438 struct xen_ia64_boot_param *bp;
439 shared_info_t *shared_info;
440 int i;
441 DECLARE_DOMCTL;
442 int rc;
/* Identify the image format and parse its layout into dsi. */
444 rc = probeimageformat(image, image_size, &load_funcs);
445 if ( rc != 0 )
446 goto error_out;
448 memset(&dsi, 0, sizeof(struct domain_setup_info));
450 rc = (load_funcs.parseimage)(image, image_size, &dsi);
451 if ( rc != 0 )
452 goto error_out;
/* Identity-initialise the p2m array and populate the physmap. */
454 if ( (page_array = malloc(nr_pages * sizeof(xen_pfn_t))) == NULL )
455 {
456 PERROR("Could not allocate memory");
457 goto error_out;
458 }
459 for ( i = 0; i < nr_pages; i++ )
460 page_array[i] = i;
461 if ( xc_domain_memory_populate_physmap(xc_handle, dom, nr_pages,
462 0, 0, page_array) )
463 {
464 PERROR("Could not allocate memory for PV guest.\n");
465 goto error_out;
466 }
468 dsi.v_start = round_pgdown(dsi.v_start);
469 vinitrd_start = round_pgup(dsi.v_end);
/* start_info lives in the third-from-last pfn. */
470 start_info_mpa = (nr_pages - 3) << PAGE_SHIFT;
471 *pvke = dsi.v_kernentry;
473 /* Build firmware. */
474 memset(&domctl.u.arch_setup, 0, sizeof(domctl.u.arch_setup));
475 domctl.u.arch_setup.flags = 0;
476 domctl.u.arch_setup.bp = start_info_mpa + sizeof (start_info_t);
477 domctl.u.arch_setup.maxmem = (nr_pages - 3) << PAGE_SHIFT;
478 domctl.cmd = XEN_DOMCTL_arch_setup;
479 domctl.domain = (domid_t)dom;
480 if ( xc_domctl(xc_handle, &domctl) )
481 goto error_out;
483 start_page = dsi.v_start >> PAGE_SHIFT;
484 /* in order to get initrd->len, we need to load initrd image at first */
485 if ( load_initrd(xc_handle, dom, initrd,
486 vinitrd_start - dsi.v_start, page_array + start_page) )
487 goto error_out;
489 vinitrd_end = vinitrd_start + initrd->len;
490 v_end = round_pgup(vinitrd_end);
491 pgnr = (v_end - dsi.v_start) >> PAGE_SHIFT;
/* NOTE(review): this only warns — there is no goto error_out here, so
 * loading proceeds even when the domain is too small; confirm intended. */
492 if ( pgnr > nr_pages )
493 {
494 PERROR("too small memory is specified. "
495 "At least %ld kb is necessary.\n",
496 pgnr << (PAGE_SHIFT - 10));
497 }
499 IPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n"
500 " Loaded kernel: %p->%p\n"
501 " Init. ramdisk: %p->%p\n"
502 " TOTAL: %p->%p\n",
503 _p(dsi.v_kernstart), _p(dsi.v_kernend),
504 _p(vinitrd_start), _p(vinitrd_end),
505 _p(dsi.v_start), _p(v_end));
506 IPRINTF(" ENTRY ADDRESS: %p\n", _p(dsi.v_kernentry));
508 (load_funcs.loadimage)(image, image_size, xc_handle, dom,
509 page_array + start_page, &dsi);
511 *store_mfn = page_array[nr_pages - 2]; //XXX
512 *console_mfn = page_array[nr_pages - 1]; //XXX
513 IPRINTF("start_info: 0x%lx at 0x%lx, "
514 "store_mfn: 0x%lx at 0x%lx, "
515 "console_mfn: 0x%lx at 0x%lx\n",
516 page_array[nr_pages - 3], nr_pages - 3,
517 *store_mfn, nr_pages - 2,
518 *console_mfn, nr_pages - 1);
/* Fill in the start_info page and the ia64 boot-param block after it. */
520 start_info = xc_map_foreign_range(
521 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
522 page_array[nr_pages - 3]);
523 if ( start_info == NULL )
524 goto error_out;
526 memset(start_info, 0, sizeof(*start_info));
527 rc = xc_version(xc_handle, XENVER_version, NULL);
528 sprintf(start_info->magic, "xen-%i.%i-ia64", rc >> 16, rc & (0xFFFF));
529 start_info->flags = flags;
530 start_info->store_mfn = nr_pages - 2;
531 start_info->store_evtchn = store_evtchn;
532 start_info->console.domU.mfn = nr_pages - 1;
533 start_info->console.domU.evtchn = console_evtchn;
534 start_info->nr_pages = nr_pages; // FIXME?: nr_pages - 2 ????
536 bp = (struct xen_ia64_boot_param *)(start_info + 1);
537 bp->command_line = start_info_mpa + offsetof(start_info_t, cmd_line);
538 if ( cmdline != NULL )
539 {
540 strncpy((char *)start_info->cmd_line, cmdline, MAX_GUEST_CMDLINE);
541 start_info->cmd_line[MAX_GUEST_CMDLINE - 1] = 0;
542 }
543 if ( initrd->len != 0 )
544 {
545 bp->initrd_start = vinitrd_start;
546 bp->initrd_size = initrd->len;
547 }
/* r28 carries the boot-param address into the guest. */
548 ctxt->user_regs.r28 = start_info_mpa + sizeof (start_info_t);
549 munmap(start_info, PAGE_SIZE);
551 /*
552 * shared_info is assiged into guest pseudo physical address space
553 * by XEN_DOMCTL_arch_setup. shared_info_frame is stale value until that.
554 * So passed shared_info_frame is stale. obtain the right value here.
555 */
556 domctl.cmd = XEN_DOMCTL_getdomaininfo;
557 domctl.domain = (domid_t)dom;
558 if ( (xc_domctl(xc_handle, &domctl) < 0) ||
559 ((uint16_t)domctl.domain != dom) )
560 {
561 PERROR("Could not get info on domain");
562 goto error_out;
563 }
564 shared_info_frame = domctl.u.getdomaininfo.shared_info_frame;
566 /* shared_info page starts its life empty. */
567 shared_info = xc_map_foreign_range(
568 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, shared_info_frame);
569 printf("shared_info = %p frame=%lx\n",
570 shared_info, shared_info_frame);
571 //memset(shared_info, 0, PAGE_SIZE);
572 /* Mask all upcalls... */
573 for ( i = 0; i < MAX_VIRT_CPUS; i++ )
574 shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
575 shared_info->arch.start_info_pfn = nr_pages - 3;
577 munmap(shared_info, PAGE_SIZE);
578 free(page_array);
579 return 0;
581 error_out:
582 free(page_array);
583 return -1;
584 }
585 #else /* x86 */
587 /* Check if the platform supports the guest kernel format */
588 static int compat_check(int xc_handle, struct domain_setup_info *dsi)
589 {
590 xen_capabilities_info_t xen_caps = "";
592 if (xc_version(xc_handle, XENVER_capabilities, &xen_caps) != 0) {
593 xc_set_error(XC_INVALID_KERNEL,
594 "Cannot determine host capabilities.");
595 return 0;
596 }
598 if (strstr(xen_caps, "xen-3.0-x86_32p")) {
599 if (dsi->pae_kernel == PAEKERN_no) {
600 xc_set_error(XC_INVALID_KERNEL,
601 "Non PAE-kernel on PAE host.");
602 return 0;
603 }
604 } else if (dsi->pae_kernel != PAEKERN_no) {
605 xc_set_error(XC_INVALID_KERNEL,
606 "PAE-kernel on non-PAE host.");
607 return 0;
608 }
610 return 1;
611 }
/*
 * Add 'inc' to '*pval', failing instead of wrapping around the top of
 * the unsigned long range.  Returns 1 on success, 0 (after logging) on
 * overflow.  The previous test 'inc >= -*pval' mis-fired when
 * *pval == 0 (it reported a wrap for every inc, including 0); comparing
 * the wrapped sum against an operand is correct for all inputs.
 */
static inline int increment_ulong(unsigned long *pval, unsigned long inc)
{
    /* Unsigned addition wrapped iff the sum is smaller than an operand. */
    if ( *pval + inc < inc )
    {
        ERROR("Value wrapped to zero: image too large?");
        return 0;
    }
    *pval += inc;
    return 1;
}
/*
 * x86 variant: populate the domain's memory, load kernel + initrd, lay
 * out the bootstrap virtual address space (physmap, start_info, store,
 * console, optional shared-info slot, page tables, stack), build and
 * pin the page tables, and fill in start_info and shared_info.
 * Outputs: *pvsi/*pvss/*pvke (start_info, stack, entry point) and
 * *store_mfn/*console_mfn.  Returns 0 on success, -1 on failure.
 *
 * NOTE(review): this extraction appears to have dropped several
 * brace-only lines in the second half of the function (hg line numbers
 * jump where '{'/'}' would be) — verify against the upstream revision
 * before compiling.
 */
624 static int setup_guest(int xc_handle,
625 uint32_t dom,
626 const char *image, unsigned long image_size,
627 struct initrd_info *initrd,
628 unsigned long nr_pages,
629 unsigned long *pvsi, unsigned long *pvke,
630 unsigned long *pvss, vcpu_guest_context_t *ctxt,
631 const char *cmdline,
632 unsigned long shared_info_frame,
633 unsigned long flags,
634 unsigned int store_evtchn, unsigned long *store_mfn,
635 unsigned int console_evtchn, unsigned long *console_mfn,
636 uint32_t required_features[XENFEAT_NR_SUBMAPS])
637 {
638 xen_pfn_t *page_array = NULL;
639 unsigned long count, i;
640 unsigned long long hypercall_page;
641 int hypercall_page_defined;
642 start_info_t *start_info;
643 shared_info_t *shared_info;
644 const char *p;
645 DECLARE_DOMCTL;
646 int rc;
648 unsigned long nr_pt_pages;
649 unsigned long physmap_pfn;
650 xen_pfn_t *physmap, *physmap_e;
652 struct load_funcs load_funcs;
653 struct domain_setup_info dsi;
654 unsigned long vinitrd_start;
655 unsigned long vphysmap_start;
656 unsigned long vstartinfo_start;
657 unsigned long vstoreinfo_start;
658 unsigned long vconsole_start;
659 unsigned long vsharedinfo_start = 0; /* XXX gcc */
660 unsigned long vstack_start;
661 unsigned long vstack_end;
662 unsigned long vpt_start;
663 unsigned long vpt_end;
664 unsigned long v_end;
665 unsigned long guest_store_mfn, guest_console_mfn, guest_shared_info_mfn;
666 unsigned long shadow_mode_enabled;
667 uint32_t supported_features[XENFEAT_NR_SUBMAPS] = { 0, };
/* Identify the image format and parse its layout into dsi. */
669 rc = probeimageformat(image, image_size, &load_funcs);
670 if ( rc != 0 )
671 goto error_out;
673 memset(&dsi, 0, sizeof(struct domain_setup_info));
675 rc = (load_funcs.parseimage)(image, image_size, &dsi);
676 if ( rc != 0 )
677 goto error_out;
679 if ( (dsi.v_start & (PAGE_SIZE-1)) != 0 )
680 {
681 PERROR("Guest OS must load to a page boundary.");
682 goto error_out;
683 }
685 if ( !compat_check(xc_handle, &dsi) )
686 goto error_out;
688 /* Parse and validate kernel features. */
689 if ( (p = xen_elfnote_string(&dsi, XEN_ELFNOTE_FEATURES)) != NULL )
690 {
691 if ( !parse_features(p, supported_features, required_features) )
692 {
693 ERROR("Failed to parse guest kernel features.");
694 goto error_out;
695 }
697 IPRINTF("Supported features = { %08x }.\n", supported_features[0]);
698 IPRINTF("Required features = { %08x }.\n", required_features[0]);
699 }
/* Every feature the kernel requires must also be one it supports. */
701 for ( i = 0; i < XENFEAT_NR_SUBMAPS; i++ )
702 {
703 if ( (supported_features[i] & required_features[i]) !=
704 required_features[i] )
705 {
706 ERROR("Guest kernel does not support a required feature.");
707 goto error_out;
708 }
709 }
711 shadow_mode_enabled = test_feature_bit(XENFEAT_auto_translated_physmap,
712 required_features);
/* Identity-initialise the p2m array and populate the physmap. */
714 if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
715 {
716 PERROR("Could not allocate memory");
717 goto error_out;
718 }
720 for ( i = 0; i < nr_pages; i++ )
721 page_array[i] = i;
723 if ( xc_domain_memory_populate_physmap(xc_handle, dom, nr_pages,
724 0, 0, page_array) )
725 {
726 PERROR("Could not allocate memory for PV guest.\n");
727 goto error_out;
728 }
731 if ( shadow_mode_enabled )
732 {
733 /*
734 * Enable shadow translate mode. This must happen after
735 * populate physmap because the p2m reservation is based on
736 * the domains current memory allocation.
737 */
738 if ( xc_shadow_control(xc_handle, dom,
739 XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE,
740 NULL, 0, NULL, 0, NULL) < 0 )
741 {
742 PERROR("Could not enable translation mode");
743 goto error_out;
744 }
746 /* Reinitialise the gpfn->gmfn array. */
747 for ( i = 0; i < nr_pages; i++ )
748 page_array[i] = i;
749 }
751 rc = (load_funcs.loadimage)(image, image_size,
752 xc_handle, dom, page_array,
753 &dsi);
754 if ( rc != 0 )
755 goto error_out;
757 /*
758 * Why do we need this? The number of page-table frames depends on the
759 * size of the bootstrap address space. But the size of the address space
760 * depends on the number of page-table frames (since each one is mapped
761 * read-only). We have a pair of simultaneous equations in two unknowns,
762 * which we solve by exhaustive search.
763 */
764 v_end = round_pgup(dsi.v_end);
765 if ( v_end == 0 )
766 {
767 ERROR("End of mapped kernel image too close to end of memory");
768 goto error_out;
769 }
/* Lay out initrd, physmap, start_info, store and console pages, with
 * overflow-checked additions throughout. */
771 vinitrd_start = v_end;
772 if ( load_initrd(xc_handle, dom, initrd,
773 vinitrd_start - dsi.v_start, page_array) )
774 goto error_out;
775 if ( !increment_ulong(&v_end, round_pgup(initrd->len)) )
776 goto error_out;
778 vphysmap_start = v_end;
779 if ( !increment_ulong(&v_end, round_pgup(nr_pages * sizeof(long))) )
780 goto error_out;
781 vstartinfo_start = v_end;
782 if ( !increment_ulong(&v_end, PAGE_SIZE) )
783 goto error_out;
784 vstoreinfo_start = v_end;
785 if ( !increment_ulong(&v_end, PAGE_SIZE) )
786 goto error_out;
787 vconsole_start = v_end;
788 if ( !increment_ulong(&v_end, PAGE_SIZE) )
789 goto error_out;
790 if ( shadow_mode_enabled ) {
791 vsharedinfo_start = v_end;
792 if ( !increment_ulong(&v_end, PAGE_SIZE) )
793 goto error_out;
794 }
795 vpt_start = v_end;
/* Exhaustive search for the PT page count (see comment above). */
797 for ( nr_pt_pages = 2; ; nr_pt_pages++ )
798 {
799 /* vpt_end = vpt_staret + (nr_pt_pages * PAGE_SIZE); */
800 vpt_end = vpt_start;
801 if ( !increment_ulong(&vpt_end, nr_pt_pages * PAGE_SIZE) )
802 goto error_out;
804 vstack_start = vpt_end;
805 /* vstack_end = vstack_start + PAGE_SIZE; */
806 vstack_end = vstack_start;
807 if ( !increment_ulong(&vstack_end, PAGE_SIZE) )
808 goto error_out;
810 /* v_end = (vstack_end + (1UL<<22)-1) & ~((1UL<<22)-1); */
811 v_end = vstack_end;
812 if ( !increment_ulong(&v_end, (1UL<<22)-1) )
813 goto error_out;
814 v_end &= ~((1UL<<22)-1);
816 if ( (v_end - vstack_end) < (512UL << 10) )
817 {
818 /* Add extra 4MB to get >= 512kB padding. */
819 if ( !increment_ulong(&v_end, 1UL << 22) )
820 goto error_out;
821 }
823 #define NR(_l,_h,_s) \
824 (((((unsigned long)(_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
825 ((unsigned long)(_l) & ~((1UL<<(_s))-1))) >> (_s))
826 #if defined(__i386__)
827 if ( dsi.pae_kernel != PAEKERN_no )
828 {
829 if ( (1 + /* # L3 */
830 NR(dsi.v_start, v_end, L3_PAGETABLE_SHIFT_PAE) + /* # L2 */
831 NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT_PAE) + /* # L1 */
832 /* Include a fourth mid-level page directory for Xen. */
833 (v_end <= (3 << L3_PAGETABLE_SHIFT_PAE)))
834 <= nr_pt_pages )
835 break;
836 }
837 else
838 {
839 if ( (1 + /* # L2 */
840 NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT)) /* # L1 */
841 <= nr_pt_pages )
842 break;
843 }
844 #elif defined(__x86_64__)
845 if ( (1 + /* # L4 */
846 NR(dsi.v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
847 NR(dsi.v_start, v_end, L3_PAGETABLE_SHIFT) + /* # L2 */
848 NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT)) /* # L1 */
849 <= nr_pt_pages )
850 break;
851 #endif
852 }
854 IPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n");
855 IPRINTF(" Loaded kernel: %p->%p\n", _p(dsi.v_kernstart),
856 _p(dsi.v_kernend));
857 if ( initrd->len )
858 IPRINTF(" Initial ramdisk: %p->%p\n", _p(vinitrd_start),
859 _p(vinitrd_start + initrd->len));
860 IPRINTF(" Phys-Mach map: %p\n", _p(vphysmap_start));
861 IPRINTF(" Start info: %p\n", _p(vstartinfo_start));
862 IPRINTF(" Store page: %p\n", _p(vstoreinfo_start));
863 IPRINTF(" Console page: %p\n", _p(vconsole_start));
864 if ( shadow_mode_enabled )
865 IPRINTF(" Shared Info page: %p\n", _p(vsharedinfo_start));
866 IPRINTF(" Page tables: %p\n", _p(vpt_start));
867 IPRINTF(" Boot stack: %p\n", _p(vstack_start));
868 IPRINTF(" TOTAL: %p->%p\n", _p(dsi.v_start), _p(v_end));
869 IPRINTF(" ENTRY ADDRESS: %p\n", _p(dsi.v_kernentry));
871 if ( ((v_end - dsi.v_start)>>PAGE_SHIFT) > nr_pages )
872 {
873 PERROR("Initial guest OS requires too much space\n"
874 "(%pMB is greater than %luMB limit)\n",
875 _p((v_end-dsi.v_start)>>20), nr_pages>>(20-PAGE_SHIFT));
876 goto error_out;
877 }
/* Build the bootstrap page tables for the chosen arch/paging mode. */
879 #if defined(__i386__)
880 if ( dsi.pae_kernel != PAEKERN_no )
881 rc = setup_pg_tables_pae(xc_handle, dom, ctxt,
882 dsi.v_start, v_end,
883 page_array, vpt_start, vpt_end,
884 shadow_mode_enabled, dsi.pae_kernel);
885 else
886 rc = setup_pg_tables(xc_handle, dom, ctxt,
887 dsi.v_start, v_end,
888 page_array, vpt_start, vpt_end,
889 shadow_mode_enabled);
890 #endif
891 #if defined(__x86_64__)
892 rc = setup_pg_tables_64(xc_handle, dom, ctxt,
893 dsi.v_start, v_end,
894 page_array, vpt_start, vpt_end,
895 shadow_mode_enabled);
896 #endif
897 if ( rc != 0 )
898 goto error_out;
900 /*
901 * Pin down l2tab addr as page dir page - causes hypervisor to provide
902 * correct protection for the page
903 */
904 if ( !shadow_mode_enabled )
905 {
906 #if defined(__i386__)
907 if ( dsi.pae_kernel != PAEKERN_no )
908 {
909 if ( pin_table(xc_handle, MMUEXT_PIN_L3_TABLE,
910 xen_cr3_to_pfn(ctxt->ctrlreg[3]), dom) )
911 goto error_out;
912 }
913 else
914 {
915 if ( pin_table(xc_handle, MMUEXT_PIN_L2_TABLE,
916 xen_cr3_to_pfn(ctxt->ctrlreg[3]), dom) )
917 goto error_out;
918 }
919 #elif defined(__x86_64__)
920 /*
921 * Pin down l4tab addr as page dir page - causes hypervisor to provide
922 * correct protection for the page
923 */
924 if ( pin_table(xc_handle, MMUEXT_PIN_L4_TABLE,
925 xen_cr3_to_pfn(ctxt->ctrlreg[3]), dom) )
926 goto error_out;
927 #endif
928 }
930 /* Write the phys->machine table entries (machine->phys already done). */
931 physmap_pfn = (vphysmap_start - dsi.v_start) >> PAGE_SHIFT;
932 physmap = physmap_e = xc_map_foreign_range(
933 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
934 page_array[physmap_pfn++]);
935 for ( count = 0; count < nr_pages; count++ )
936 {
937 *physmap_e++ = page_array[count];
/* Filled one physmap page: move the mapping on to the next frame. */
938 if ( ((unsigned long)physmap_e & (PAGE_SIZE-1)) == 0 )
939 {
940 munmap(physmap, PAGE_SIZE);
941 physmap = physmap_e = xc_map_foreign_range(
942 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
943 page_array[physmap_pfn++]);
944 }
945 }
946 munmap(physmap, PAGE_SIZE);
948 if ( shadow_mode_enabled )
949 {
950 struct xen_add_to_physmap xatp;
952 guest_shared_info_mfn = (vsharedinfo_start-dsi.v_start) >> PAGE_SHIFT;
954 /* Map shared info frame into guest physmap. */
955 xatp.domid = dom;
956 xatp.space = XENMAPSPACE_shared_info;
957 xatp.idx = 0;
958 xatp.gpfn = guest_shared_info_mfn;
959 rc = xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp);
960 if ( rc != 0 )
961 {
962 PERROR("Cannot map shared info pfn");
963 goto error_out;
964 }
966 /* Map grant table frames into guest physmap. */
967 for ( i = 0; ; i++ )
968 {
969 xatp.domid = dom;
970 xatp.space = XENMAPSPACE_grant_table;
971 xatp.idx = i;
972 xatp.gpfn = nr_pages + i;
973 rc = xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp);
974 if ( rc != 0 )
975 {
976 if ( errno == EINVAL )
977 break; /* done all grant tables */
978 PERROR("Cannot map grant table pfn");
979 goto error_out;
980 }
981 }
982 }
983 else
984 {
985 guest_shared_info_mfn = shared_info_frame;
986 }
988 *store_mfn = page_array[(vstoreinfo_start-dsi.v_start) >> PAGE_SHIFT];
989 *console_mfn = page_array[(vconsole_start-dsi.v_start) >> PAGE_SHIFT];
990 if ( xc_clear_domain_page(xc_handle, dom, *store_mfn) ||
991 xc_clear_domain_page(xc_handle, dom, *console_mfn) )
992 goto error_out;
993 if ( shadow_mode_enabled )
994 {
995 guest_store_mfn = (vstoreinfo_start-dsi.v_start) >> PAGE_SHIFT;
996 guest_console_mfn = (vconsole_start-dsi.v_start) >> PAGE_SHIFT;
997 }
998 else
999 {
1000 guest_store_mfn = *store_mfn;
1001 guest_console_mfn = *console_mfn;
/* NOTE(review): a '}' closing the else-block (hg line 1002) appears to
 * have been lost in extraction — the following code is unconditional in
 * the original. */
1004 start_info = xc_map_foreign_range(
1005 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
1006 page_array[(vstartinfo_start-dsi.v_start)>>PAGE_SHIFT]);
1007 /*shared_info, start_info */
1008 memset(start_info, 0, sizeof(*start_info));
1009 rc = xc_version(xc_handle, XENVER_version, NULL);
1010 sprintf(start_info->magic, "xen-%i.%i-x86_%d%s",
1011 rc >> 16, rc & (0xFFFF), (unsigned int)sizeof(long)*8,
1012 (dsi.pae_kernel != PAEKERN_no) ? "p" : "");
1013 start_info->nr_pages = nr_pages;
1014 start_info->shared_info = guest_shared_info_mfn << PAGE_SHIFT;
1015 start_info->flags = flags;
1016 start_info->pt_base = vpt_start;
1017 start_info->nr_pt_frames = nr_pt_pages;
1018 start_info->mfn_list = vphysmap_start;
1019 start_info->store_mfn = guest_store_mfn;
1020 start_info->store_evtchn = store_evtchn;
1021 start_info->console.domU.mfn = guest_console_mfn;
1022 start_info->console.domU.evtchn = console_evtchn;
/* NOTE(review): the '{'/'}' lines around the next two bodies (hg lines
 * 1024/1027 and 1029/1032) are likewise missing from this extraction. */
1023 if ( initrd->len != 0 )
1025 start_info->mod_start = vinitrd_start;
1026 start_info->mod_len = initrd->len;
1028 if ( cmdline != NULL )
1030 strncpy((char *)start_info->cmd_line, cmdline, MAX_GUEST_CMDLINE);
1031 start_info->cmd_line[MAX_GUEST_CMDLINE-1] = '\0';
1033 munmap(start_info, PAGE_SIZE);
1035 /* shared_info page starts its life empty. */
1036 shared_info = xc_map_foreign_range(
1037 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, shared_info_frame);
1038 memset(shared_info, 0, PAGE_SIZE);
1039 /* Mask all upcalls... */
1040 for ( i = 0; i < MAX_VIRT_CPUS; i++ )
1041 shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
1043 munmap(shared_info, PAGE_SIZE);
/* Install the hypercall transfer page if the ELF notes define one. */
1045 hypercall_page = xen_elfnote_numeric(&dsi, XEN_ELFNOTE_HYPERCALL_PAGE,
1046 &hypercall_page_defined);
1047 if ( hypercall_page_defined )
1049 unsigned long long pfn = (hypercall_page - dsi.v_start) >> PAGE_SHIFT;
1050 if ( pfn >= nr_pages )
1051 goto error_out;
1052 domctl.domain = (domid_t)dom;
1053 domctl.u.hypercall_init.gmfn = page_array[pfn];
1054 domctl.cmd = XEN_DOMCTL_hypercall_init;
1055 if ( xc_domctl(xc_handle, &domctl) )
1056 goto error_out;
1059 free(page_array);
1061 *pvsi = vstartinfo_start;
1062 *pvss = vstack_start;
1063 *pvke = dsi.v_kernentry;
1065 return 0;
1067 error_out:
1068 free(page_array);
1069 return -1;
1071 #endif
1073 static int xc_linux_build_internal(int xc_handle,
1074 uint32_t domid,
1075 unsigned int mem_mb,
1076 char *image,
1077 unsigned long image_size,
1078 struct initrd_info *initrd,
1079 const char *cmdline,
1080 const char *features,
1081 unsigned long flags,
1082 unsigned int store_evtchn,
1083 unsigned long *store_mfn,
1084 unsigned int console_evtchn,
1085 unsigned long *console_mfn)
1087 struct xen_domctl launch_domctl;
1088 DECLARE_DOMCTL;
1089 int rc;
1090 struct vcpu_guest_context st_ctxt, *ctxt = &st_ctxt;
1091 unsigned long vstartinfo_start, vkern_entry, vstack_start;
1092 uint32_t features_bitmap[XENFEAT_NR_SUBMAPS] = { 0, };
1094 if ( features != NULL )
1096 if ( !parse_features(features, features_bitmap, NULL) )
1098 PERROR("Failed to parse configured features\n");
1099 goto error_out;
1103 memset(ctxt, 0, sizeof(*ctxt));
1105 if ( lock_pages(ctxt, sizeof(*ctxt) ) )
1107 PERROR("%s: ctxt lock failed", __func__);
1108 return 1;
1111 domctl.cmd = XEN_DOMCTL_getdomaininfo;
1112 domctl.domain = (domid_t)domid;
1113 if ( (xc_domctl(xc_handle, &domctl) < 0) ||
1114 ((uint16_t)domctl.domain != domid) )
1116 PERROR("Could not get info on domain");
1117 goto error_out;
1120 if ( setup_guest(xc_handle, domid, image, image_size,
1121 initrd,
1122 mem_mb << (20 - PAGE_SHIFT),
1123 &vstartinfo_start, &vkern_entry,
1124 &vstack_start, ctxt, cmdline,
1125 domctl.u.getdomaininfo.shared_info_frame,
1126 flags, store_evtchn, store_mfn,
1127 console_evtchn, console_mfn,
1128 features_bitmap) < 0 )
1130 goto error_out;
1133 #ifdef __ia64__
1134 /* based on new_thread in xen/arch/ia64/domain.c */
1135 ctxt->user_regs.cr_iip = vkern_entry;
1136 ctxt->user_regs.cr_ifs = 1UL << 63;
1137 ctxt->user_regs.ar_fpsr = xc_ia64_fpsr_default();
1138 #else /* x86 */
1139 /*
1140 * Initial register values:
1141 * DS,ES,FS,GS = FLAT_KERNEL_DS
1142 * CS:EIP = FLAT_KERNEL_CS:start_pc
1143 * SS:ESP = FLAT_KERNEL_DS:start_stack
1144 * ESI = start_info
1145 * [EAX,EBX,ECX,EDX,EDI,EBP are zero]
1146 * EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
1147 */
1148 ctxt->user_regs.ds = FLAT_KERNEL_DS;
1149 ctxt->user_regs.es = FLAT_KERNEL_DS;
1150 ctxt->user_regs.fs = FLAT_KERNEL_DS;
1151 ctxt->user_regs.gs = FLAT_KERNEL_DS;
1152 ctxt->user_regs.ss = FLAT_KERNEL_SS;
1153 ctxt->user_regs.cs = FLAT_KERNEL_CS;
1154 ctxt->user_regs.eip = vkern_entry;
1155 ctxt->user_regs.esp = vstack_start + PAGE_SIZE;
1156 ctxt->user_regs.esi = vstartinfo_start;
1157 ctxt->user_regs.eflags = 1 << 9; /* Interrupt Enable */
1159 ctxt->flags = VGCF_IN_KERNEL;
1161 ctxt->kernel_ss = ctxt->user_regs.ss;
1162 ctxt->kernel_sp = ctxt->user_regs.esp;
1163 #endif /* x86 */
1165 memset(&launch_domctl, 0, sizeof(launch_domctl));
1167 launch_domctl.domain = (domid_t)domid;
1168 launch_domctl.u.vcpucontext.vcpu = 0;
1169 set_xen_guest_handle(launch_domctl.u.vcpucontext.ctxt, ctxt);
1171 launch_domctl.cmd = XEN_DOMCTL_setvcpucontext;
1172 rc = xc_domctl(xc_handle, &launch_domctl);
1174 return rc;
1176 error_out:
1177 return -1;
1180 int xc_linux_build_mem(int xc_handle,
1181 uint32_t domid,
1182 unsigned int mem_mb,
1183 const char *image_buffer,
1184 unsigned long image_size,
1185 const char *initrd,
1186 unsigned long initrd_len,
1187 const char *cmdline,
1188 const char *features,
1189 unsigned long flags,
1190 unsigned int store_evtchn,
1191 unsigned long *store_mfn,
1192 unsigned int console_evtchn,
1193 unsigned long *console_mfn)
1195 int sts;
1196 char *img_buf;
1197 unsigned long img_len;
1198 struct initrd_info initrd_info = { .type = INITRD_none };
1200 /* A kernel buffer is required */
1201 if ( (image_buffer == NULL) || (image_size == 0) )
1203 ERROR("kernel image buffer not present");
1204 return -1;
1207 /* If it's gzipped, inflate it; otherwise, use as is */
1208 /* xc_inflate_buffer may return the same buffer pointer if */
1209 /* the buffer is already inflated */
1210 img_buf = xc_inflate_buffer(image_buffer, image_size, &img_len);
1211 if ( img_buf == NULL )
1213 ERROR("unable to inflate kernel image buffer");
1214 return -1;
1217 /* RAM disks are optional; if we get one, inflate it */
1218 if ( initrd != NULL )
1220 initrd_info.type = INITRD_mem;
1221 initrd_info.u.mem_addr = xc_inflate_buffer(
1222 initrd, initrd_len, &initrd_info.len);
1223 if ( initrd_info.u.mem_addr == NULL )
1225 ERROR("unable to inflate ram disk buffer");
1226 sts = -1;
1227 goto out;
1231 sts = xc_linux_build_internal(xc_handle, domid, mem_mb, img_buf, img_len,
1232 &initrd_info, cmdline, features, flags,
1233 store_evtchn, store_mfn,
1234 console_evtchn, console_mfn);
1236 out:
1237 /* The inflation routines may pass back the same buffer so be */
1238 /* sure that we have a buffer and that it's not the one passed in. */
1239 /* Don't unnecessarily annoy/surprise/confound the caller */
1240 if ( (img_buf != NULL) && (img_buf != image_buffer) )
1241 free(img_buf);
1242 if ( (initrd_info.u.mem_addr != NULL) &&
1243 (initrd_info.u.mem_addr != initrd) )
1244 free(initrd_info.u.mem_addr);
1246 return sts;
1249 int xc_linux_build(int xc_handle,
1250 uint32_t domid,
1251 unsigned int mem_mb,
1252 const char *image_name,
1253 const char *initrd_name,
1254 const char *cmdline,
1255 const char *features,
1256 unsigned long flags,
1257 unsigned int store_evtchn,
1258 unsigned long *store_mfn,
1259 unsigned int console_evtchn,
1260 unsigned long *console_mfn)
1262 char *image = NULL;
1263 unsigned long image_size;
1264 struct initrd_info initrd_info = { .type = INITRD_none };
1265 int fd = -1, sts = -1;
1267 if ( (image_name == NULL) ||
1268 ((image = xc_read_image(image_name, &image_size)) == NULL ))
1269 return -1;
1271 if ( (initrd_name != NULL) && (strlen(initrd_name) != 0) )
1273 initrd_info.type = INITRD_file;
1275 if ( (fd = open(initrd_name, O_RDONLY)) < 0 )
1277 PERROR("Could not open the initial ramdisk image");
1278 goto error_out;
1281 if ( (initrd_info.u.file_handle = gzdopen(fd, "rb")) == NULL )
1283 PERROR("Could not allocate decompression state for initrd");
1284 goto error_out;
1288 sts = xc_linux_build_internal(xc_handle, domid, mem_mb, image, image_size,
1289 &initrd_info, cmdline, features, flags,
1290 store_evtchn, store_mfn,
1291 console_evtchn, console_mfn);
1293 error_out:
1294 free(image);
1295 if ( initrd_info.type == INITRD_file && initrd_info.u.file_handle )
1296 gzclose(initrd_info.u.file_handle);
1297 else if ( fd >= 0 )
1298 close(fd);
1300 return sts;
1303 /*
1304 * Local variables:
1305 * mode: C
1306 * c-set-style: "BSD"
1307 * c-basic-offset: 4
1308 * tab-width: 4
1309 * indent-tabs-mode: nil
1310 * End:
1311 */