direct-io.hg

view tools/libxc/xc_linux_build.c @ 10173:954f4dea9da6

[PAE] Allow pgdirs above 4GB for paravirt guests.
**NOTE**: This obviates the need for lowmem_emergency_pool.
Unpriv guests no longer need to be able to allocate memory
below 4GB for PAE PDPTs.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri May 26 17:22:30 2006 +0100 (2006-05-26)
parents 72c5d8206d48
children 414dabe82a31
line source
1 /******************************************************************************
2 * xc_linux_build.c
3 */
5 #include "xg_private.h"
6 #include "xc_private.h"
7 #include <xenctrl.h>
9 #if defined(__i386__)
10 #define ELFSIZE 32
11 #endif
13 #if defined(__x86_64__) || defined(__ia64__)
14 #define ELFSIZE 64
15 #endif
17 #include "xc_elf.h"
18 #include "xc_aout9.h"
19 #include <stdlib.h>
20 #include <unistd.h>
21 #include <zlib.h>
23 #if defined(__i386__)
24 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
25 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
26 #define L3_PROT (_PAGE_PRESENT)
27 #endif
29 #if defined(__x86_64__)
30 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
31 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
32 #define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
33 #define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
34 #endif
36 #ifdef __ia64__
37 #define get_tot_pages xc_get_max_pages
38 #else
39 #define get_tot_pages xc_get_tot_pages
40 #endif
/* Round a byte address up / down to the nearest page boundary. */
#define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
#define round_pgdown(_p) ((_p)&PAGE_MASK)

#ifdef __ia64__
/* ia64 has no a.out9 loader: always report "not recognized" (nonzero). */
#define probe_aout9(image,image_size,load_funcs) 1
#endif
/*
 * Source of the guest's initial ramdisk: none at all, a gzipped file
 * handle, or an in-memory buffer.
 */
struct initrd_info {
    enum { INITRD_none, INITRD_file, INITRD_mem } type;
    unsigned long len;          /* image length in bytes */
    union {
        gzFile file_handle;     /* valid when type == INITRD_file */
        char *mem_addr;         /* valid when type == INITRD_mem */
    } u;
};
/* Printable names for the XENFEAT_* bits, indexed by feature number.
 * Unnamed slots stay NULL and are skipped by parse_features(). */
static const char *feature_names[XENFEAT_NR_SUBMAPS*32] = {
    [XENFEAT_writable_page_tables] = "writable_page_tables",
    [XENFEAT_writable_descriptor_tables] = "writable_descriptor_tables",
    [XENFEAT_auto_translated_physmap] = "auto_translated_physmap",
    [XENFEAT_supervisor_mode_kernel] = "supervisor_mode_kernel",
    [XENFEAT_pae_pgdir_above_4gb] = "pae_pgdir_above_4gb"
};
/* Set bit 'nr' in a feature bitmap laid out as an array of 32-bit words. */
static inline void set_feature_bit (int nr, uint32_t *addr)
{
    addr[nr / 32] |= ((uint32_t)1 << (nr % 32));
}
/* Return 1 if bit 'nr' is set in the 32-bit-word feature bitmap, else 0. */
static inline int test_feature_bit(int nr, uint32_t *addr)
{
    return (int)((addr[nr / 32] >> (nr % 32)) & 1u);
}
76 static int parse_features(
77 const char *feats,
78 uint32_t supported[XENFEAT_NR_SUBMAPS],
79 uint32_t required[XENFEAT_NR_SUBMAPS])
80 {
81 const char *end, *p;
82 int i, req;
84 if ( (end = strchr(feats, ',')) == NULL )
85 end = feats + strlen(feats);
87 while ( feats < end )
88 {
89 p = strchr(feats, '|');
90 if ( (p == NULL) || (p > end) )
91 p = end;
93 req = (*feats == '!');
94 if ( req )
95 feats++;
97 for ( i = 0; i < XENFEAT_NR_SUBMAPS*32; i++ )
98 {
99 if ( feature_names[i] == NULL )
100 continue;
102 if ( strncmp(feature_names[i], feats, p-feats) == 0 )
103 {
104 set_feature_bit(i, supported);
105 if ( required && req )
106 set_feature_bit(i, required);
107 break;
108 }
109 }
111 if ( i == XENFEAT_NR_SUBMAPS*32 )
112 {
113 ERROR("Unknown feature \"%.*s\".", (int)(p-feats), feats);
114 if ( req )
115 {
116 ERROR("Kernel requires an unknown hypervisor feature.");
117 return -EINVAL;
118 }
119 }
121 feats = p;
122 if ( *feats == '|' )
123 feats++;
124 }
126 return -EINVAL;
127 }
/*
 * Identify the kernel image format and fill in 'load_funcs'.  Each
 * probe_* helper returns 0 when it recognizes the image; probes are
 * attempted in order: ELF, flat binary, a.out9.
 * Returns 0 on success, -EINVAL when no loader matches.
 */
static int probeimageformat(const char *image,
                            unsigned long image_size,
                            struct load_funcs *load_funcs)
{
    if ( !probe_elf(image, image_size, load_funcs) ||
         !probe_bin(image, image_size, load_funcs) ||
         !probe_aout9(image, image_size, load_funcs) )
        return 0;

    ERROR( "Unrecognized image format" );
    return -EINVAL;
}
/*
 * Copy the initial ramdisk into the guest's pseudo-physical memory,
 * one page at a time, starting at pseudo-physical address 'physbase'.
 * 'phys_to_mach' translates guest pfns to machine frame numbers.
 * Returns 0 on success (including when there is no initrd), -EINVAL
 * on a read error.
 */
int load_initrd(int xc_handle, domid_t dom,
                struct initrd_info *initrd,
                unsigned long physbase,
                unsigned long *phys_to_mach)
{
    char page[PAGE_SIZE];
    unsigned long pfn_start, pfn, nr_pages;

    if ( initrd->type == INITRD_none )
        return 0;

    pfn_start = physbase >> PAGE_SHIFT;
    /* Round the image length up to whole pages. */
    nr_pages = (initrd->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

    for ( pfn = pfn_start; pfn < (pfn_start + nr_pages); pfn++ )
    {
        if ( initrd->type == INITRD_mem )
        {
            /* In-memory image: copy the corresponding page directly. */
            xc_copy_to_domain_page(
                xc_handle, dom, phys_to_mach[pfn],
                &initrd->u.mem_addr[(pfn - pfn_start) << PAGE_SHIFT]);
        }
        else
        {
            /* Gzipped file: inflate one page into 'page', then copy it. */
            if ( gzread(initrd->u.file_handle, page, PAGE_SIZE) == -1 )
            {
                PERROR("Error reading initrd image, could not");
                return -EINVAL;
            }
            xc_copy_to_domain_page(xc_handle, dom, phys_to_mach[pfn], page);
        }
    }

    return 0;
}
/*
 * Allocate the next page-table frame from the bootstrap region:
 *   pltab <- pseudo-physical address of the new frame
 *   ltab  <- machine address of the new frame
 *   vltab <- fresh local mapping of the frame, zero-filled (any
 *            previous mapping in vltab is first unmapped).
 * Consumes page_array[ppt_alloc++] and jumps to 'error_out' if the
 * mapping fails, so it may only be used where 'ppt_alloc',
 * 'page_array', 'xc_handle', 'dom' and an 'error_out' label exist.
 */
#define alloc_pt(ltab, vltab, pltab)                                    \
do {                                                                    \
    pltab = ppt_alloc++;                                                \
    ltab = (uint64_t)page_array[pltab] << PAGE_SHIFT;                   \
    pltab <<= PAGE_SHIFT;                                               \
    if ( vltab != NULL )                                                \
        munmap(vltab, PAGE_SIZE);                                       \
    if ( (vltab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,       \
                                       PROT_READ|PROT_WRITE,            \
                                       ltab >> PAGE_SHIFT)) == NULL )   \
        goto error_out;                                                 \
    memset(vltab, 0x0, PAGE_SIZE);                                      \
} while ( 0 )
194 #if defined(__i386__)
/*
 * Build the bootstrap (non-PAE i386, 2-level) page tables mapping
 * [dsi_v_start, v_end) for the new domain.  Table frames come from the
 * pseudo-physical range [vpt_start, vpt_end) via alloc_pt(), and those
 * frames are entered read-only so Xen will validate them as page
 * tables.  In shadow (auto-translated) mode the entries hold guest
 * pfns and cr3 gets the pseudo-physical L2 address; otherwise entries
 * hold machine frames and cr3 gets the machine L2 address.
 * Returns 0 on success, -1 on failure (via alloc_pt's error_out).
 */
static int setup_pg_tables(int xc_handle, uint32_t dom,
                           vcpu_guest_context_t *ctxt,
                           unsigned long dsi_v_start,
                           unsigned long v_end,
                           unsigned long *page_array,
                           unsigned long vpt_start,
                           unsigned long vpt_end,
                           unsigned shadow_mode_enabled)
{
    l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
    l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
    unsigned long l1tab = 0, pl1tab;
    unsigned long l2tab = 0, pl2tab;
    unsigned long ppt_alloc;
    unsigned long count;

    /* First table frame is the one at vpt_start in the guest's space. */
    ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
    alloc_pt(l2tab, vl2tab, pl2tab);
    vl2e = &vl2tab[l2_table_offset(dsi_v_start)];
    if (shadow_mode_enabled)
        ctxt->ctrlreg[3] = pl2tab;
    else
        ctxt->ctrlreg[3] = l2tab;

    for ( count = 0; count < ((v_end - dsi_v_start) >> PAGE_SHIFT); count++ )
    {
        /* Crossed into a new L1 page?  Allocate it and hook it into L2. */
        if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
        {
            alloc_pt(l1tab, vl1tab, pl1tab);
            vl1e = &vl1tab[l1_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
            if (shadow_mode_enabled)
                *vl2e = pl1tab | L2_PROT;
            else
                *vl2e = l1tab | L2_PROT;
            vl2e++;
        }

        if ( shadow_mode_enabled )
        {
            /* Auto-translated guest: map pseudo-physical frame 'count'. */
            *vl1e = (count << PAGE_SHIFT) | L1_PROT;
        }
        else
        {
            *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
            /* The page-table frames themselves must be read-only. */
            if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
                 (count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
                *vl1e &= ~_PAGE_RW;
        }
        vl1e++;
    }
    munmap(vl1tab, PAGE_SIZE);
    munmap(vl2tab, PAGE_SIZE);
    return 0;

 error_out:
    if (vl1tab)
        munmap(vl1tab, PAGE_SIZE);
    if (vl2tab)
        munmap(vl2tab, PAGE_SIZE);
    return -1;
}
/*
 * Build the bootstrap PAE (3-level) page tables mapping
 * [dsi_v_start, v_end) for the new domain.  Table frames come from
 * [vpt_start, vpt_end) via alloc_pt() and are entered read-only so Xen
 * will validate them.  In shadow (auto-translated) mode the entries
 * hold guest pfns and cr3 gets the pseudo-physical L3 address;
 * otherwise entries hold machine addresses and cr3 gets the machine
 * L3 address.  Returns 0 on success, -1 on failure.
 */
static int setup_pg_tables_pae(int xc_handle, uint32_t dom,
                               vcpu_guest_context_t *ctxt,
                               unsigned long dsi_v_start,
                               unsigned long v_end,
                               unsigned long *page_array,
                               unsigned long vpt_start,
                               unsigned long vpt_end,
                               unsigned shadow_mode_enabled)
{
    l1_pgentry_64_t *vl1tab = NULL, *vl1e = NULL;
    l2_pgentry_64_t *vl2tab = NULL, *vl2e = NULL;
    l3_pgentry_64_t *vl3tab = NULL, *vl3e = NULL;
    uint64_t l1tab, l2tab, l3tab, pl1tab, pl2tab, pl3tab;
    unsigned long ppt_alloc, count;

    /* First allocate page for page dir. */
    ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;

    alloc_pt(l3tab, vl3tab, pl3tab);
    vl3e = &vl3tab[l3_table_offset_pae(dsi_v_start)];
    if (shadow_mode_enabled)
        ctxt->ctrlreg[3] = pl3tab;
    else
        ctxt->ctrlreg[3] = l3tab;

    for ( count = 0; count < ((v_end - dsi_v_start) >> PAGE_SHIFT); count++)
    {
        /* Starting a new L1?  Allocate it (and a new L2 if needed too). */
        if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
        {
            if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
            {
                alloc_pt(l2tab, vl2tab, pl2tab);
                vl2e = &vl2tab[l2_table_offset_pae(
                    dsi_v_start + (count << PAGE_SHIFT))];
                /* NOTE(review): in shadow mode vl3e is not advanced here,
                 * unlike the non-shadow branch - confirm intended. */
                if (shadow_mode_enabled)
                    *vl3e = pl2tab | L3_PROT;
                else
                    *vl3e++ = l2tab | L3_PROT;
            }

            alloc_pt(l1tab, vl1tab, pl1tab);
            vl1e = &vl1tab[l1_table_offset_pae(
                dsi_v_start + (count << PAGE_SHIFT))];
            if (shadow_mode_enabled)
                *vl2e = pl1tab | L2_PROT;
            else
                *vl2e++ = l1tab | L2_PROT;
        }

        if ( shadow_mode_enabled )
        {
            /* Auto-translated guest: map pseudo-physical frame 'count'. */
            *vl1e = (count << PAGE_SHIFT) | L1_PROT;
        }
        else
        {
            *vl1e = ((uint64_t)page_array[count] << PAGE_SHIFT) | L1_PROT;
            /* The page-table frames themselves must be read-only. */
            if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
                 (count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
                *vl1e &= ~_PAGE_RW;
        }
        vl1e++;
    }

    /* Xen requires a mid-level pgdir mapping 0xC0000000 region. */
    if ( (vl3tab[3] & _PAGE_PRESENT) == 0 )
    {
        alloc_pt(l2tab, vl2tab, pl2tab);
        vl3tab[3] = l2tab | L3_PROT;
    }

    munmap(vl1tab, PAGE_SIZE);
    munmap(vl2tab, PAGE_SIZE);
    munmap(vl3tab, PAGE_SIZE);
    return 0;

 error_out:
    if (vl1tab)
        munmap(vl1tab, PAGE_SIZE);
    if (vl2tab)
        munmap(vl2tab, PAGE_SIZE);
    if (vl3tab)
        munmap(vl3tab, PAGE_SIZE);
    return -1;
}
343 #endif
345 #if defined(__x86_64__)
/*
 * Build the bootstrap x86-64 (4-level) page tables mapping
 * [dsi_v_start, v_end) for the new domain.  Table frames come from
 * [vpt_start, vpt_end) via alloc_pt() and are entered read-only so Xen
 * will validate them.  In shadow (auto-translated) mode entries hold
 * guest pfns and cr3 gets the pseudo-physical L4 address; otherwise
 * entries hold machine frames and cr3 gets the machine L4 address.
 * Returns 0 on success, -1 on failure.
 */
static int setup_pg_tables_64(int xc_handle, uint32_t dom,
                              vcpu_guest_context_t *ctxt,
                              unsigned long dsi_v_start,
                              unsigned long v_end,
                              unsigned long *page_array,
                              unsigned long vpt_start,
                              unsigned long vpt_end,
                              int shadow_mode_enabled)
{
    l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
    l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
    l3_pgentry_t *vl3tab=NULL, *vl3e=NULL;
    l4_pgentry_t *vl4tab=NULL, *vl4e=NULL;
    unsigned long l2tab = 0, pl2tab;
    unsigned long l1tab = 0, pl1tab;
    unsigned long l3tab = 0, pl3tab;
    unsigned long l4tab = 0, pl4tab;
    unsigned long ppt_alloc;
    unsigned long count;

    /* First allocate page for page dir. */
    ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
    alloc_pt(l4tab, vl4tab, pl4tab);
    vl4e = &vl4tab[l4_table_offset(dsi_v_start)];
    if (shadow_mode_enabled)
        ctxt->ctrlreg[3] = pl4tab;
    else
        ctxt->ctrlreg[3] = l4tab;

    for ( count = 0; count < ((v_end-dsi_v_start)>>PAGE_SHIFT); count++)
    {
        /* Starting a new L1?  Allocate it, plus any L2/L3 it hangs off. */
        if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
        {
            alloc_pt(l1tab, vl1tab, pl1tab);

            if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
            {
                alloc_pt(l2tab, vl2tab, pl2tab);
                if ( !((unsigned long)vl3e & (PAGE_SIZE-1)) )
                {
                    alloc_pt(l3tab, vl3tab, pl3tab);
                    vl3e = &vl3tab[l3_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
                    if (shadow_mode_enabled)
                        *vl4e = pl3tab | L4_PROT;
                    else
                        *vl4e = l3tab | L4_PROT;
                    vl4e++;
                }
                vl2e = &vl2tab[l2_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
                if (shadow_mode_enabled)
                    *vl3e = pl2tab | L3_PROT;
                else
                    *vl3e = l2tab | L3_PROT;
                vl3e++;
            }
            vl1e = &vl1tab[l1_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
            if (shadow_mode_enabled)
                *vl2e = pl1tab | L2_PROT;
            else
                *vl2e = l1tab | L2_PROT;
            vl2e++;
        }

        if ( shadow_mode_enabled )
        {
            /* Auto-translated guest: map pseudo-physical frame 'count'. */
            *vl1e = (count << PAGE_SHIFT) | L1_PROT;
        }
        else
        {
            *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
            /* The page-table frames themselves must be read-only. */
            if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
                 (count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
            {
                *vl1e &= ~_PAGE_RW;
            }
        }
        vl1e++;
    }

    munmap(vl1tab, PAGE_SIZE);
    munmap(vl2tab, PAGE_SIZE);
    munmap(vl3tab, PAGE_SIZE);
    munmap(vl4tab, PAGE_SIZE);
    return 0;

 error_out:
    if (vl1tab)
        munmap(vl1tab, PAGE_SIZE);
    if (vl2tab)
        munmap(vl2tab, PAGE_SIZE);
    if (vl3tab)
        munmap(vl3tab, PAGE_SIZE);
    if (vl4tab)
        munmap(vl4tab, PAGE_SIZE);
    return -1;
}
443 #endif
445 #ifdef __ia64__
446 extern unsigned long xc_ia64_fpsr_default(void);
/*
 * ia64 variant of setup_guest(): lay out kernel + initrd in the
 * guest's pseudo-physical space, load them, and fill in the start_info
 * page.  The store/console/start-info frames are the last three pages
 * of the domain's allocation.  On success fills *pvke, *store_mfn and
 * *console_mfn and returns 0; returns -1 on failure.
 */
static int setup_guest(int xc_handle,
                       uint32_t dom,
                       const char *image, unsigned long image_size,
                       struct initrd_info *initrd,
                       unsigned long nr_pages,
                       unsigned long *pvsi, unsigned long *pvke,
                       unsigned long *pvss, vcpu_guest_context_t *ctxt,
                       const char *cmdline,
                       unsigned long shared_info_frame,
                       unsigned long flags,
                       unsigned int store_evtchn, unsigned long *store_mfn,
                       unsigned int console_evtchn, unsigned long *console_mfn,
                       uint32_t required_features[XENFEAT_NR_SUBMAPS])
{
    unsigned long *page_array = NULL;
    struct load_funcs load_funcs;
    struct domain_setup_info dsi;
    unsigned long vinitrd_start;
    unsigned long vinitrd_end;
    unsigned long v_end;
    unsigned long start_page, pgnr;
    start_info_t *start_info;
    int rc;

    rc = probeimageformat(image, image_size, &load_funcs);
    if ( rc != 0 )
        goto error_out;

    memset(&dsi, 0, sizeof(struct domain_setup_info));

    rc = (load_funcs.parseimage)(image, image_size, &dsi);
    if ( rc != 0 )
        goto error_out;

    /* Layout: page-aligned kernel followed by a page-aligned initrd. */
    dsi.v_start = round_pgdown(dsi.v_start);
    vinitrd_start = round_pgup(dsi.v_end);
    vinitrd_end = vinitrd_start + initrd->len;
    v_end = round_pgup(vinitrd_end);

    start_page = dsi.v_start >> PAGE_SHIFT;
    pgnr = (v_end - dsi.v_start) >> PAGE_SHIFT;
    if ( (page_array = malloc(pgnr * sizeof(unsigned long))) == NULL )
    {
        PERROR("Could not allocate memory");
        goto error_out;
    }

    if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array,
                              start_page, pgnr) != pgnr )
    {
        PERROR("Could not get the page frame list");
        goto error_out;
    }

#define _p(a) ((void *) (a))

    printf("VIRTUAL MEMORY ARRANGEMENT:\n"
           " Loaded kernel: %p->%p\n"
           " Init. ramdisk: %p->%p\n"
           " TOTAL: %p->%p\n",
           _p(dsi.v_kernstart), _p(dsi.v_kernend),
           _p(vinitrd_start), _p(vinitrd_end),
           _p(dsi.v_start), _p(v_end));
    printf(" ENTRY ADDRESS: %p\n", _p(dsi.v_kernentry));

    (load_funcs.loadimage)(image, image_size, xc_handle, dom, page_array,
                           &dsi);

    if ( load_initrd(xc_handle, dom, initrd,
                     vinitrd_start - dsi.v_start, page_array) )
        goto error_out;

    *pvke = dsi.v_kernentry;

    /* Now need to retrieve machine pfn for system pages:
     * start_info/store/console
     */
    pgnr = 3;
    if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array,
                              nr_pages - 3, pgnr) != pgnr )
    {
        PERROR("Could not get page frame for xenstore");
        goto error_out;
    }

    *store_mfn = page_array[1];
    *console_mfn = page_array[2];
    printf("start_info: 0x%lx at 0x%lx, "
           "store_mfn: 0x%lx at 0x%lx, "
           "console_mfn: 0x%lx at 0x%lx\n",
           page_array[0], nr_pages,
           *store_mfn, nr_pages - 2,
           *console_mfn, nr_pages - 1);

    /* Fill in the start_info page (in the third-from-last frame). */
    start_info = xc_map_foreign_range(
        xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, page_array[0]);
    memset(start_info, 0, sizeof(*start_info));
    rc = xc_version(xc_handle, XENVER_version, NULL);
    sprintf(start_info->magic, "xen-%i.%i-ia64", rc >> 16, rc & (0xFFFF));
    start_info->flags = flags;
    start_info->store_mfn = nr_pages - 2;
    start_info->store_evtchn = store_evtchn;
    start_info->console_mfn = nr_pages - 1;
    start_info->console_evtchn = console_evtchn;
    start_info->nr_pages = nr_pages; // FIXME?: nr_pages - 2 ????
    if ( initrd->len != 0 )
    {
        ctxt->initrd.start = vinitrd_start;
        ctxt->initrd.size = initrd->len;
    }
    else
    {
        ctxt->initrd.start = 0;
        ctxt->initrd.size = 0;
    }
    if ( cmdline != NULL )
    {
        /* strncpy may not terminate; force a NUL in the last byte. */
        strncpy((char *)ctxt->cmdline, cmdline, IA64_COMMAND_LINE_SIZE);
        ctxt->cmdline[IA64_COMMAND_LINE_SIZE-1] = '\0';
    }
    munmap(start_info, PAGE_SIZE);

    free(page_array);
    return 0;

 error_out:
    free(page_array);
    return -1;
}
577 #else /* x86 */
579 /* Check if the platform supports the guest kernel format */
580 static int compat_check(int xc_handle, struct domain_setup_info *dsi)
581 {
582 xen_capabilities_info_t xen_caps = "";
584 if (xc_version(xc_handle, XENVER_capabilities, &xen_caps) != 0) {
585 ERROR("Cannot determine host capabilities.");
586 return 0;
587 }
589 if (strstr(xen_caps, "xen-3.0-x86_32p")) {
590 if (!dsi->pae_kernel) {
591 ERROR("Non PAE-kernel on PAE host.");
592 return 0;
593 }
594 } else if (dsi->pae_kernel) {
595 ERROR("PAE-kernel on non-PAE host.");
596 return 0;
597 }
599 return 1;
600 }
603 static int setup_guest(int xc_handle,
604 uint32_t dom,
605 const char *image, unsigned long image_size,
606 struct initrd_info *initrd,
607 unsigned long nr_pages,
608 unsigned long *pvsi, unsigned long *pvke,
609 unsigned long *pvss, vcpu_guest_context_t *ctxt,
610 const char *cmdline,
611 unsigned long shared_info_frame,
612 unsigned long flags,
613 unsigned int store_evtchn, unsigned long *store_mfn,
614 unsigned int console_evtchn, unsigned long *console_mfn,
615 uint32_t required_features[XENFEAT_NR_SUBMAPS])
616 {
617 unsigned long *page_array = NULL;
618 unsigned long count, i, hypercall_pfn;
619 start_info_t *start_info;
620 shared_info_t *shared_info;
621 xc_mmu_t *mmu = NULL;
622 char *p;
623 DECLARE_DOM0_OP;
624 int rc;
626 unsigned long nr_pt_pages;
627 unsigned long physmap_pfn;
628 unsigned long *physmap, *physmap_e;
630 struct load_funcs load_funcs;
631 struct domain_setup_info dsi;
632 unsigned long vinitrd_start;
633 unsigned long vphysmap_start;
634 unsigned long vstartinfo_start;
635 unsigned long vstoreinfo_start;
636 unsigned long vconsole_start;
637 unsigned long vsharedinfo_start = 0; /* XXX gcc */
638 unsigned long vstack_start;
639 unsigned long vstack_end;
640 unsigned long vpt_start;
641 unsigned long vpt_end;
642 unsigned long v_end;
643 unsigned long guest_store_mfn, guest_console_mfn, guest_shared_info_mfn;
644 unsigned long shadow_mode_enabled;
645 uint32_t supported_features[XENFEAT_NR_SUBMAPS] = { 0, };
647 rc = probeimageformat(image, image_size, &load_funcs);
648 if ( rc != 0 )
649 goto error_out;
651 memset(&dsi, 0, sizeof(struct domain_setup_info));
653 rc = (load_funcs.parseimage)(image, image_size, &dsi);
654 if ( rc != 0 )
655 goto error_out;
657 if ( (dsi.v_start & (PAGE_SIZE-1)) != 0 )
658 {
659 PERROR("Guest OS must load to a page boundary.");
660 goto error_out;
661 }
663 if (!compat_check(xc_handle, &dsi))
664 goto error_out;
666 /* Parse and validate kernel features. */
667 p = strstr(dsi.xen_guest_string, "FEATURES=");
668 if ( p != NULL )
669 {
670 if ( !parse_features(p + strlen("FEATURES="),
671 supported_features,
672 required_features) )
673 {
674 ERROR("Failed to parse guest kernel features.");
675 goto error_out;
676 }
678 printf("Supported features = { %08x }.\n", supported_features[0]);
679 printf("Required features = { %08x }.\n", required_features[0]);
680 }
682 for ( i = 0; i < XENFEAT_NR_SUBMAPS; i++ )
683 {
684 if ( (supported_features[i]&required_features[i]) != required_features[i] )
685 {
686 ERROR("Guest kernel does not support a required feature.");
687 goto error_out;
688 }
689 }
691 shadow_mode_enabled = test_feature_bit(XENFEAT_auto_translated_physmap,
692 required_features);
694 /*
695 * Why do we need this? The number of page-table frames depends on the
696 * size of the bootstrap address space. But the size of the address space
697 * depends on the number of page-table frames (since each one is mapped
698 * read-only). We have a pair of simultaneous equations in two unknowns,
699 * which we solve by exhaustive search.
700 */
701 v_end = round_pgup(dsi.v_end);
702 vinitrd_start = v_end;
703 v_end += round_pgup(initrd->len);
704 vphysmap_start = v_end;
705 v_end += round_pgup(nr_pages * sizeof(unsigned long));
706 vstartinfo_start = v_end;
707 v_end += PAGE_SIZE;
708 vstoreinfo_start = v_end;
709 v_end += PAGE_SIZE;
710 vconsole_start = v_end;
711 v_end += PAGE_SIZE;
712 if ( shadow_mode_enabled ) {
713 vsharedinfo_start = v_end;
714 v_end += PAGE_SIZE;
715 }
716 vpt_start = v_end;
718 for ( nr_pt_pages = 2; ; nr_pt_pages++ )
719 {
720 vpt_end = vpt_start + (nr_pt_pages * PAGE_SIZE);
721 vstack_start = vpt_end;
722 vstack_end = vstack_start + PAGE_SIZE;
723 v_end = (vstack_end + (1UL<<22)-1) & ~((1UL<<22)-1);
724 if ( (v_end - vstack_end) < (512UL << 10) )
725 v_end += 1UL << 22; /* Add extra 4MB to get >= 512kB padding. */
726 #define NR(_l,_h,_s) \
727 (((((_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
728 ((_l) & ~((1UL<<(_s))-1))) >> (_s))
729 #if defined(__i386__)
730 if ( dsi.pae_kernel )
731 {
732 if ( (1 + /* # L3 */
733 NR(dsi.v_start, v_end, L3_PAGETABLE_SHIFT_PAE) + /* # L2 */
734 NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT_PAE) + /* # L1 */
735 /* Include a fourth mid-level page directory for Xen. */
736 (v_end <= (3 << L3_PAGETABLE_SHIFT_PAE)))
737 <= nr_pt_pages )
738 break;
739 }
740 else
741 {
742 if ( (1 + /* # L2 */
743 NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT)) /* # L1 */
744 <= nr_pt_pages )
745 break;
746 }
747 #elif defined(__x86_64__)
748 if ( (1 + /* # L4 */
749 NR(dsi.v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
750 NR(dsi.v_start, v_end, L3_PAGETABLE_SHIFT) + /* # L2 */
751 NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT)) /* # L1 */
752 <= nr_pt_pages )
753 break;
754 #endif
755 }
757 #define _p(a) ((void *) (a))
759 printf("VIRTUAL MEMORY ARRANGEMENT:\n");
760 printf(" Loaded kernel: %p->%p\n", _p(dsi.v_kernstart),
761 _p(dsi.v_kernend));
762 if ( initrd->len )
763 printf(" Initial ramdisk: %p->%p\n", _p(vinitrd_start),
764 _p(vinitrd_start + initrd->len));
765 printf(" Phys-Mach map: %p\n", _p(vphysmap_start));
766 printf(" Start info: %p\n", _p(vstartinfo_start));
767 printf(" Store page: %p\n", _p(vstoreinfo_start));
768 printf(" Console page: %p\n", _p(vconsole_start));
769 if ( shadow_mode_enabled )
770 printf(" Shared Info page: %p\n", _p(vsharedinfo_start));
771 printf(" Page tables: %p\n", _p(vpt_start));
772 printf(" Boot stack: %p\n", _p(vstack_start));
773 printf(" TOTAL: %p->%p\n", _p(dsi.v_start), _p(v_end));
774 printf(" ENTRY ADDRESS: %p\n", _p(dsi.v_kernentry));
776 if ( ((v_end - dsi.v_start)>>PAGE_SHIFT) > nr_pages )
777 {
778 PERROR("Initial guest OS requires too much space\n"
779 "(%luMB is greater than %luMB limit)\n",
780 (v_end-dsi.v_start)>>20, nr_pages>>(20-PAGE_SHIFT));
781 goto error_out;
782 }
784 if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
785 {
786 PERROR("Could not allocate memory");
787 goto error_out;
788 }
790 if ( xc_get_pfn_list(xc_handle, dom, page_array, nr_pages) != nr_pages )
791 {
792 PERROR("Could not get the page frame list");
793 goto error_out;
794 }
796 rc = (load_funcs.loadimage)(image, image_size,
797 xc_handle, dom, page_array,
798 &dsi);
799 if ( rc != 0 )
800 goto error_out;
802 if ( load_initrd(xc_handle, dom, initrd,
803 vinitrd_start - dsi.v_start, page_array) )
804 goto error_out;
806 /* setup page tables */
807 #if defined(__i386__)
808 if (dsi.pae_kernel)
809 rc = setup_pg_tables_pae(xc_handle, dom, ctxt,
810 dsi.v_start, v_end,
811 page_array, vpt_start, vpt_end,
812 shadow_mode_enabled);
813 else
814 rc = setup_pg_tables(xc_handle, dom, ctxt,
815 dsi.v_start, v_end,
816 page_array, vpt_start, vpt_end,
817 shadow_mode_enabled);
818 #endif
819 #if defined(__x86_64__)
820 rc = setup_pg_tables_64(xc_handle, dom, ctxt,
821 dsi.v_start, v_end,
822 page_array, vpt_start, vpt_end,
823 shadow_mode_enabled);
824 #endif
825 if (0 != rc)
826 goto error_out;
828 #if defined(__i386__)
829 /*
830 * Pin down l2tab addr as page dir page - causes hypervisor to provide
831 * correct protection for the page
832 */
833 if ( !shadow_mode_enabled )
834 {
835 if ( dsi.pae_kernel )
836 {
837 if ( pin_table(xc_handle, MMUEXT_PIN_L3_TABLE,
838 ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
839 goto error_out;
840 }
841 else
842 {
843 if ( pin_table(xc_handle, MMUEXT_PIN_L2_TABLE,
844 ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
845 goto error_out;
846 }
847 }
848 #endif
850 #if defined(__x86_64__)
851 /*
852 * Pin down l4tab addr as page dir page - causes hypervisor to provide
853 * correct protection for the page
854 */
855 if ( pin_table(xc_handle, MMUEXT_PIN_L4_TABLE,
856 ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
857 goto error_out;
858 #endif
860 if ( (mmu = xc_init_mmu_updates(xc_handle, dom)) == NULL )
861 goto error_out;
863 /* Write the phys->machine and machine->phys table entries. */
864 physmap_pfn = (vphysmap_start - dsi.v_start) >> PAGE_SHIFT;
865 physmap = physmap_e = xc_map_foreign_range(
866 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
867 page_array[physmap_pfn++]);
869 for ( count = 0; count < nr_pages; count++ )
870 {
871 if ( xc_add_mmu_update(
872 xc_handle, mmu,
873 ((uint64_t)page_array[count] << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
874 count) )
875 {
876 fprintf(stderr,"m2p update failure p=%lx m=%lx\n",
877 count, page_array[count]);
878 munmap(physmap, PAGE_SIZE);
879 goto error_out;
880 }
881 *physmap_e++ = page_array[count];
882 if ( ((unsigned long)physmap_e & (PAGE_SIZE-1)) == 0 )
883 {
884 munmap(physmap, PAGE_SIZE);
885 physmap = physmap_e = xc_map_foreign_range(
886 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
887 page_array[physmap_pfn++]);
888 }
889 }
890 munmap(physmap, PAGE_SIZE);
892 /* Send the page update requests down to the hypervisor. */
893 if ( xc_finish_mmu_updates(xc_handle, mmu) )
894 goto error_out;
896 if ( shadow_mode_enabled )
897 {
898 struct xen_add_to_physmap xatp;
900 /* Enable shadow translate mode */
901 if ( xc_shadow_control(xc_handle, dom,
902 DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE,
903 NULL, 0, NULL) < 0 )
904 {
905 PERROR("Could not enable translation mode");
906 goto error_out;
907 }
909 guest_shared_info_mfn = (vsharedinfo_start-dsi.v_start) >> PAGE_SHIFT;
911 /* Map shared info frame into guest physmap. */
912 xatp.domid = dom;
913 xatp.space = XENMAPSPACE_shared_info;
914 xatp.idx = 0;
915 xatp.gpfn = guest_shared_info_mfn;
916 rc = xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp);
917 if ( rc != 0 )
918 {
919 PERROR("Cannot map shared info pfn");
920 goto error_out;
921 }
923 /* Map grant table frames into guest physmap. */
924 for ( i = 0; ; i++ )
925 {
926 xatp.domid = dom;
927 xatp.space = XENMAPSPACE_grant_table;
928 xatp.idx = i;
929 xatp.gpfn = nr_pages + i;
930 rc = xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp);
931 if ( rc != 0 )
932 {
933 if ( errno == EINVAL )
934 break; /* done all grant tables */
935 PERROR("Cannot map grant table pfn");
936 goto error_out;
937 }
938 }
939 }
940 else
941 {
942 guest_shared_info_mfn = shared_info_frame;
943 }
945 *store_mfn = page_array[(vstoreinfo_start-dsi.v_start) >> PAGE_SHIFT];
946 *console_mfn = page_array[(vconsole_start-dsi.v_start) >> PAGE_SHIFT];
947 if ( xc_clear_domain_page(xc_handle, dom, *store_mfn) ||
948 xc_clear_domain_page(xc_handle, dom, *console_mfn) )
949 goto error_out;
950 if ( shadow_mode_enabled )
951 {
952 guest_store_mfn = (vstoreinfo_start-dsi.v_start) >> PAGE_SHIFT;
953 guest_console_mfn = (vconsole_start-dsi.v_start) >> PAGE_SHIFT;
954 }
955 else
956 {
957 guest_store_mfn = *store_mfn;
958 guest_console_mfn = *console_mfn;
959 }
961 start_info = xc_map_foreign_range(
962 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
963 page_array[(vstartinfo_start-dsi.v_start)>>PAGE_SHIFT]);
964 /*shared_info, start_info */
965 memset(start_info, 0, sizeof(*start_info));
966 rc = xc_version(xc_handle, XENVER_version, NULL);
967 sprintf(start_info->magic, "xen-%i.%i-x86_%d%s",
968 rc >> 16, rc & (0xFFFF), (unsigned int)sizeof(long)*8,
969 dsi.pae_kernel ? "p" : "");
970 start_info->nr_pages = nr_pages;
971 start_info->shared_info = guest_shared_info_mfn << PAGE_SHIFT;
972 start_info->flags = flags;
973 start_info->pt_base = vpt_start;
974 start_info->nr_pt_frames = nr_pt_pages;
975 start_info->mfn_list = vphysmap_start;
976 start_info->store_mfn = guest_store_mfn;
977 start_info->store_evtchn = store_evtchn;
978 start_info->console_mfn = guest_console_mfn;
979 start_info->console_evtchn = console_evtchn;
980 if ( initrd->len != 0 )
981 {
982 start_info->mod_start = vinitrd_start;
983 start_info->mod_len = initrd->len;
984 }
985 if ( cmdline != NULL )
986 {
987 strncpy((char *)start_info->cmd_line, cmdline, MAX_GUEST_CMDLINE);
988 start_info->cmd_line[MAX_GUEST_CMDLINE-1] = '\0';
989 }
990 munmap(start_info, PAGE_SIZE);
992 /* shared_info page starts its life empty. */
993 shared_info = xc_map_foreign_range(
994 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, shared_info_frame);
995 memset(shared_info, 0, sizeof(shared_info_t));
996 /* Mask all upcalls... */
997 for ( i = 0; i < MAX_VIRT_CPUS; i++ )
998 shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
1000 munmap(shared_info, PAGE_SIZE);
1002 /* Send the page update requests down to the hypervisor. */
1003 if ( xc_finish_mmu_updates(xc_handle, mmu) )
1004 goto error_out;
1006 p = strstr(dsi.xen_guest_string, "HYPERCALL_PAGE=");
1007 if ( p != NULL )
1009 p += strlen("HYPERCALL_PAGE=");
1010 hypercall_pfn = strtoul(p, NULL, 16);
1011 if ( hypercall_pfn >= nr_pages )
1012 goto error_out;
1013 op.u.hypercall_init.domain = (domid_t)dom;
1014 op.u.hypercall_init.mfn = page_array[hypercall_pfn];
1015 op.cmd = DOM0_HYPERCALL_INIT;
1016 if ( xc_dom0_op(xc_handle, &op) )
1017 goto error_out;
1020 free(mmu);
1021 free(page_array);
1023 *pvsi = vstartinfo_start;
1024 *pvss = vstack_start;
1025 *pvke = dsi.v_kernentry;
1027 return 0;
1029 error_out:
1030 free(mmu);
1031 free(page_array);
1032 return -1;
1034 #endif
1036 static int xc_linux_build_internal(int xc_handle,
1037 uint32_t domid,
1038 char *image,
1039 unsigned long image_size,
1040 struct initrd_info *initrd,
1041 const char *cmdline,
1042 const char *features,
1043 unsigned long flags,
1044 unsigned int store_evtchn,
1045 unsigned long *store_mfn,
1046 unsigned int console_evtchn,
1047 unsigned long *console_mfn)
1049 dom0_op_t launch_op;
1050 DECLARE_DOM0_OP;
1051 int rc, i;
1052 vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
1053 unsigned long nr_pages;
1054 unsigned long vstartinfo_start, vkern_entry, vstack_start;
1055 uint32_t features_bitmap[XENFEAT_NR_SUBMAPS] = { 0, };
1057 if ( features != NULL )
1059 if ( !parse_features(features, features_bitmap, NULL) )
1061 PERROR("Failed to parse configured features\n");
1062 goto error_out;
1066 if ( (nr_pages = get_tot_pages(xc_handle, domid)) < 0 )
1068 PERROR("Could not find total pages for domain");
1069 goto error_out;
1072 #ifdef VALGRIND
1073 memset(&st_ctxt, 0, sizeof(st_ctxt));
1074 #endif
1076 if ( mlock(&st_ctxt, sizeof(st_ctxt) ) )
1078 PERROR("%s: ctxt mlock failed", __func__);
1079 return 1;
1082 op.cmd = DOM0_GETDOMAININFO;
1083 op.u.getdomaininfo.domain = (domid_t)domid;
1084 if ( (xc_dom0_op(xc_handle, &op) < 0) ||
1085 ((uint16_t)op.u.getdomaininfo.domain != domid) )
1087 PERROR("Could not get info on domain");
1088 goto error_out;
1091 memset(ctxt, 0, sizeof(*ctxt));
1093 if ( setup_guest(xc_handle, domid, image, image_size,
1094 initrd,
1095 nr_pages,
1096 &vstartinfo_start, &vkern_entry,
1097 &vstack_start, ctxt, cmdline,
1098 op.u.getdomaininfo.shared_info_frame,
1099 flags, store_evtchn, store_mfn,
1100 console_evtchn, console_mfn,
1101 features_bitmap) < 0 )
1103 ERROR("Error constructing guest OS");
1104 goto error_out;
1107 #ifdef __ia64__
1108 /* based on new_thread in xen/arch/ia64/domain.c */
1109 ctxt->flags = 0;
1110 ctxt->shared.flags = flags;
1111 ctxt->shared.start_info_pfn = nr_pages - 3; /* metaphysical */
1112 ctxt->regs.cr_ipsr = 0; /* all necessary bits filled by hypervisor */
1113 ctxt->regs.cr_iip = vkern_entry;
1114 ctxt->regs.cr_ifs = 1UL << 63;
1115 ctxt->regs.ar_fpsr = xc_ia64_fpsr_default();
1116 /* currently done by hypervisor, should move here */
1117 /* ctxt->regs.r28 = dom_fw_setup(); */
1118 ctxt->privregs = 0;
1119 ctxt->sys_pgnr = 3;
1120 i = 0; /* silence unused variable warning */
1121 #else /* x86 */
1122 /*
1123 * Initial register values:
1124 * DS,ES,FS,GS = FLAT_KERNEL_DS
1125 * CS:EIP = FLAT_KERNEL_CS:start_pc
1126 * SS:ESP = FLAT_KERNEL_DS:start_stack
1127 * ESI = start_info
1128 * [EAX,EBX,ECX,EDX,EDI,EBP are zero]
1129 * EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
1130 */
1131 ctxt->user_regs.ds = FLAT_KERNEL_DS;
1132 ctxt->user_regs.es = FLAT_KERNEL_DS;
1133 ctxt->user_regs.fs = FLAT_KERNEL_DS;
1134 ctxt->user_regs.gs = FLAT_KERNEL_DS;
1135 ctxt->user_regs.ss = FLAT_KERNEL_SS;
1136 ctxt->user_regs.cs = FLAT_KERNEL_CS;
1137 ctxt->user_regs.eip = vkern_entry;
1138 ctxt->user_regs.esp = vstack_start + PAGE_SIZE;
1139 ctxt->user_regs.esi = vstartinfo_start;
1140 ctxt->user_regs.eflags = 1 << 9; /* Interrupt Enable */
1142 ctxt->flags = VGCF_IN_KERNEL;
1144 /* FPU is set up to default initial state. */
1145 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
1147 /* Virtual IDT is empty at start-of-day. */
1148 for ( i = 0; i < 256; i++ )
1150 ctxt->trap_ctxt[i].vector = i;
1151 ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS;
1154 /* No LDT. */
1155 ctxt->ldt_ents = 0;
1157 /* Use the default Xen-provided GDT. */
1158 ctxt->gdt_ents = 0;
1160 /* Ring 1 stack is the initial stack. */
1161 ctxt->kernel_ss = FLAT_KERNEL_SS;
1162 ctxt->kernel_sp = vstack_start + PAGE_SIZE;
1164 /* No debugging. */
1165 memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));
1167 /* No callback handlers. */
1168 #if defined(__i386__)
1169 ctxt->event_callback_cs = FLAT_KERNEL_CS;
1170 ctxt->event_callback_eip = 0;
1171 ctxt->failsafe_callback_cs = FLAT_KERNEL_CS;
1172 ctxt->failsafe_callback_eip = 0;
1173 #elif defined(__x86_64__)
1174 ctxt->event_callback_eip = 0;
1175 ctxt->failsafe_callback_eip = 0;
1176 ctxt->syscall_callback_eip = 0;
1177 #endif
1178 #endif /* x86 */
1180 memset( &launch_op, 0, sizeof(launch_op) );
1182 launch_op.u.setvcpucontext.domain = (domid_t)domid;
1183 launch_op.u.setvcpucontext.vcpu = 0;
1184 set_xen_guest_handle(launch_op.u.setvcpucontext.ctxt, ctxt);
1186 launch_op.cmd = DOM0_SETVCPUCONTEXT;
1187 rc = xc_dom0_op(xc_handle, &launch_op);
1189 return rc;
1191 error_out:
1192 return -1;
1195 int xc_linux_build_mem(int xc_handle,
1196 uint32_t domid,
1197 const char *image_buffer,
1198 unsigned long image_size,
1199 const char *initrd,
1200 unsigned long initrd_len,
1201 const char *cmdline,
1202 const char *features,
1203 unsigned long flags,
1204 unsigned int store_evtchn,
1205 unsigned long *store_mfn,
1206 unsigned int console_evtchn,
1207 unsigned long *console_mfn)
1209 int sts;
1210 char *img_buf;
1211 unsigned long img_len;
1212 struct initrd_info initrd_info = { .type = INITRD_none };
1214 /* A kernel buffer is required */
1215 if ( (image_buffer == NULL) || (image_size == 0) )
1217 ERROR("kernel image buffer not present");
1218 return -1;
1221 /* If it's gzipped, inflate it; otherwise, use as is */
1222 /* xc_inflate_buffer may return the same buffer pointer if */
1223 /* the buffer is already inflated */
1224 img_buf = xc_inflate_buffer(image_buffer, image_size, &img_len);
1225 if ( img_buf == NULL )
1227 ERROR("unable to inflate kernel image buffer");
1228 return -1;
1231 /* RAM disks are optional; if we get one, inflate it */
1232 if ( initrd != NULL )
1234 initrd_info.type = INITRD_mem;
1235 initrd_info.u.mem_addr = xc_inflate_buffer(
1236 initrd, initrd_len, &initrd_info.len);
1237 if ( initrd_info.u.mem_addr == NULL )
1239 ERROR("unable to inflate ram disk buffer");
1240 sts = -1;
1241 goto out;
1245 sts = xc_linux_build_internal(xc_handle, domid, img_buf, img_len,
1246 &initrd_info, cmdline, features, flags,
1247 store_evtchn, store_mfn,
1248 console_evtchn, console_mfn);
1250 out:
1251 /* The inflation routines may pass back the same buffer so be */
1252 /* sure that we have a buffer and that it's not the one passed in. */
1253 /* Don't unnecessarily annoy/surprise/confound the caller */
1254 if ( (img_buf != NULL) && (img_buf != image_buffer) )
1255 free(img_buf);
1256 if ( (initrd_info.u.mem_addr != NULL) &&
1257 (initrd_info.u.mem_addr != initrd) )
1258 free(initrd_info.u.mem_addr);
1260 return sts;
1263 int xc_linux_build(int xc_handle,
1264 uint32_t domid,
1265 const char *image_name,
1266 const char *initrd_name,
1267 const char *cmdline,
1268 const char *features,
1269 unsigned long flags,
1270 unsigned int store_evtchn,
1271 unsigned long *store_mfn,
1272 unsigned int console_evtchn,
1273 unsigned long *console_mfn)
1275 char *image = NULL;
1276 unsigned long image_size;
1277 struct initrd_info initrd_info = { .type = INITRD_none };
1278 int fd = -1, sts = -1;
1280 if ( (image_name == NULL) ||
1281 ((image = xc_read_image(image_name, &image_size)) == NULL ))
1282 return -1;
1284 if ( (initrd_name != NULL) && (strlen(initrd_name) != 0) )
1286 initrd_info.type = INITRD_file;
1288 if ( (fd = open(initrd_name, O_RDONLY)) < 0 )
1290 PERROR("Could not open the initial ramdisk image");
1291 goto error_out;
1294 initrd_info.len = xc_get_filesz(fd);
1295 if ( (initrd_info.u.file_handle = gzdopen(fd, "rb")) == NULL )
1297 PERROR("Could not allocate decompression state for initrd");
1298 goto error_out;
1302 sts = xc_linux_build_internal(xc_handle, domid, image, image_size,
1303 &initrd_info, cmdline, features, flags,
1304 store_evtchn, store_mfn,
1305 console_evtchn, console_mfn);
1307 error_out:
1308 free(image);
1309 if ( fd >= 0 )
1310 close(fd);
1311 if ( initrd_info.u.file_handle )
1312 gzclose(initrd_info.u.file_handle);
1314 return sts;
1317 /*
1318 * Local variables:
1319 * mode: C
1320 * c-set-style: "BSD"
1321 * c-basic-offset: 4
1322 * tab-width: 4
1323 * indent-tabs-mode: nil
1324 * End:
1325 */