ia64/xen-unstable

view tools/libxc/xc_linux_build.c @ 9488:0a6f5527ca4b

[IA64] set itv handoff as masked and enable reading irr[0-3]

Set initial vcpu itv handoff state to mask the timer vector.
This seems to match hardware and makes logical sense from a
spurious interrupt perspective. Enable vcpu_get_irr[0-3]
functions as they seem to work and have the proper backing.
This enables the check_sal_cache_flush() in arch/ia64/kernel/sal.c
to work unmodified, allowing us to remove the Xen changes from
the file (and thus the file from the sparse tree).

Signed-off-by: Alex Williamson <alex.williamson@hp.com>
author awilliam@xenbuild.aw
date Tue Apr 04 09:39:45 2006 -0600 (2006-04-04)
parents 0ed4a312765b
children 86db4688d514
line source
1 /******************************************************************************
2 * xc_linux_build.c
3 */
5 #include "xg_private.h"
6 #include "xc_private.h"
7 #include <xenctrl.h>
9 #if defined(__i386__)
10 #define ELFSIZE 32
11 #endif
13 #if defined(__x86_64__) || defined(__ia64__)
14 #define ELFSIZE 64
15 #endif
#include "xc_elf.h"
#include "xc_aout9.h"
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <zlib.h>
23 #if defined(__i386__)
24 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
25 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
26 #define L3_PROT (_PAGE_PRESENT)
27 #endif
29 #if defined(__x86_64__)
30 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
31 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
32 #define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
33 #define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
34 #endif
36 #ifdef __ia64__
37 #define get_tot_pages xc_get_max_pages
38 #else
39 #define get_tot_pages xc_get_tot_pages
40 #endif
42 #define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
43 #define round_pgdown(_p) ((_p)&PAGE_MASK)
45 #ifdef __ia64__
46 #define probe_aout9(image,image_size,load_funcs) 1
47 #endif
/* Description of the initial ramdisk handed to the domain builder:
 * absent (INITRD_none), streamed from a gzip file handle, or already
 * resident in a memory buffer.  'len' is the image length in bytes
 * (for the file case, presumably the uncompressed length -- confirm
 * against callers). */
49 struct initrd_info {
50 enum { INITRD_none, INITRD_file, INITRD_mem } type;
51 unsigned long len;
52 union {
53 gzFile file_handle;
54 char *mem_addr;
55 } u;
56 };
/* Printable names for Xen feature bits, indexed by XENFEAT_* bit number.
 * Slots without a name stay NULL and are skipped by parse_features(). */
58 static const char *feature_names[XENFEAT_NR_SUBMAPS*32] = {
59 [XENFEAT_writable_page_tables] = "writable_page_tables",
60 [XENFEAT_writable_descriptor_tables] = "writable_descriptor_tables",
61 [XENFEAT_auto_translated_physmap] = "auto_translated_physmap",
62 [XENFEAT_supervisor_mode_kernel] = "supervisor_mode_kernel",
63 [XENFEAT_pae_pgdir_above_4gb] = "pae_pgdir_above_4gb"
64 };
/*
 * Set bit 'nr' of the multi-word feature bitmap 'addr' (32 bits/word).
 * The shifted constant is unsigned so that bit 31 does not left-shift
 * into the sign bit of a signed int (undefined behaviour in C).
 */
static inline void set_feature_bit (int nr, uint32_t *addr)
{
    addr[nr>>5] |= (1U<<(nr&31));
}
/*
 * Test bit 'nr' of the multi-word feature bitmap 'addr'.
 * Returns 1 if set, 0 otherwise.  1U (not 1) avoids shifting into the
 * sign bit of a signed int when nr&31 == 31.
 */
static inline int test_feature_bit(int nr, uint32_t *addr)
{
    return !!(addr[nr>>5] & (1U<<(nr&31)));
}
/*
 * Parse a '|'-separated list of Xen feature names, stopping at the first
 * ',' (or end of string).  A '!' prefix marks a feature as required by
 * the kernel.  Recognised names set the matching bit in 'supported', and
 * additionally in 'required' (when non-NULL) if '!'-prefixed.
 *
 * NOTE(review): every exit path returns -EINVAL -- both the
 * unknown-required-feature error (line 117) and the normal fall-through
 * at the end (line 126) -- so this function never returns 0.  Callers in
 * this file test `!parse_features(...)` for failure, a check that can
 * therefore never fire.  The intended return convention should be
 * confirmed and made consistent.
 */
76 static int parse_features(
77 const char *feats,
78 uint32_t supported[XENFEAT_NR_SUBMAPS],
79 uint32_t required[XENFEAT_NR_SUBMAPS])
80 {
81 const char *end, *p;
82 int i, req;
84 if ( (end = strchr(feats, ',')) == NULL )
85 end = feats + strlen(feats);
87 while ( feats < end )
88 {
/* 'p' marks the end of the current '|'-delimited token. */
89 p = strchr(feats, '|');
90 if ( (p == NULL) || (p > end) )
91 p = end;
93 req = (*feats == '!');
94 if ( req )
95 feats++;
97 for ( i = 0; i < XENFEAT_NR_SUBMAPS*32; i++ )
98 {
99 if ( feature_names[i] == NULL )
100 continue;
/* NOTE(review): strncmp() with length p-feats also matches a token
 * that is a strict prefix of a feature name -- confirm this is
 * acceptable. */
102 if ( strncmp(feature_names[i], feats, p-feats) == 0 )
103 {
104 set_feature_bit(i, supported);
105 if ( required && req )
106 set_feature_bit(i, required);
107 break;
108 }
109 }
/* Loop ran off the end of the table: token was not recognised. */
111 if ( i == XENFEAT_NR_SUBMAPS*32 )
112 {
113 ERROR("Unknown feature \"%.*s\".\n", (int)(p-feats), feats);
114 if ( req )
115 {
116 ERROR("Kernel requires an unknown hypervisor feature.\n");
117 return -EINVAL;
118 }
119 }
121 feats = p;
122 if ( *feats == '|' )
123 feats++;
124 }
126 return -EINVAL;
127 }
129 static int probeimageformat(const char *image,
130 unsigned long image_size,
131 struct load_funcs *load_funcs)
132 {
133 if ( probe_elf(image, image_size, load_funcs) &&
134 probe_bin(image, image_size, load_funcs) &&
135 probe_aout9(image, image_size, load_funcs) )
136 {
137 ERROR( "Unrecognized image format" );
138 return -EINVAL;
139 }
141 return 0;
142 }
144 int load_initrd(int xc_handle, domid_t dom,
145 struct initrd_info *initrd,
146 unsigned long physbase,
147 unsigned long *phys_to_mach)
148 {
149 char page[PAGE_SIZE];
150 unsigned long pfn_start, pfn, nr_pages;
152 if ( initrd->type == INITRD_none )
153 return 0;
155 pfn_start = physbase >> PAGE_SHIFT;
156 nr_pages = (initrd->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
158 for ( pfn = pfn_start; pfn < (pfn_start + nr_pages); pfn++ )
159 {
160 if ( initrd->type == INITRD_mem )
161 {
162 xc_copy_to_domain_page(
163 xc_handle, dom, phys_to_mach[pfn],
164 &initrd->u.mem_addr[(pfn - pfn_start) << PAGE_SHIFT]);
165 }
166 else
167 {
168 if ( gzread(initrd->u.file_handle, page, PAGE_SIZE) == -1 )
169 {
170 PERROR("Error reading initrd image, could not");
171 return -EINVAL;
172 }
173 xc_copy_to_domain_page(xc_handle, dom, phys_to_mach[pfn], page);
174 }
175 }
177 return 0;
178 }
/*
 * alloc_pt(ltab, vltab, pltab): take the next free frame from the
 * bootstrap page-table allocator.  On exit, 'ltab' holds the frame's
 * machine (byte) address, 'pltab' its pseudo-physical byte address, and
 * 'vltab' a zeroed local mapping of the frame (any previous 'vltab'
 * mapping is unmapped first).  Relies on xc_handle, dom, page_array and
 * ppt_alloc being in scope at the call site, and jumps to the caller's
 * error_out label if the foreign mapping fails.
 */
180 #define alloc_pt(ltab, vltab, pltab) \
181 do { \
182 pltab = ppt_alloc++; \
183 ltab = (uint64_t)page_array[pltab] << PAGE_SHIFT; \
184 pltab <<= PAGE_SHIFT; \
185 if ( vltab != NULL ) \
186 munmap(vltab, PAGE_SIZE); \
187 if ( (vltab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, \
188 PROT_READ|PROT_WRITE, \
189 ltab >> PAGE_SHIFT)) == NULL ) \
190 goto error_out; \
191 memset(vltab, 0x0, PAGE_SIZE); \
192 } while ( 0 )
194 #if defined(__i386__)
/*
 * Build the two-level (non-PAE) bootstrap page tables for an x86-32
 * guest, mapping [dsi_v_start, v_end).  Page-table frames live at
 * [vpt_start, vpt_end) and are mapped read-only so the hypervisor can
 * validate them.  With shadow (auto-translated physmap) mode the table
 * entries hold pseudo-physical frame addresses and ctxt->ctrlreg[3]
 * gets the pseudo-physical L2 address; otherwise machine addresses are
 * used throughout.  Returns 0 on success, -1 on failure.
 */
196 static int setup_pg_tables(int xc_handle, uint32_t dom,
197 vcpu_guest_context_t *ctxt,
198 unsigned long dsi_v_start,
199 unsigned long v_end,
200 unsigned long *page_array,
201 unsigned long vpt_start,
202 unsigned long vpt_end,
203 unsigned shadow_mode_enabled)
204 {
205 l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
206 l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
207 unsigned long l1tab = 0, pl1tab;
208 unsigned long l2tab = 0, pl2tab;
209 unsigned long ppt_alloc;
210 unsigned long count;
/* Page-table frames are allocated from the region starting at vpt_start. */
212 ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
213 alloc_pt(l2tab, vl2tab, pl2tab);
214 vl2e = &vl2tab[l2_table_offset(dsi_v_start)];
215 if (shadow_mode_enabled)
216 ctxt->ctrlreg[3] = pl2tab;
217 else
218 ctxt->ctrlreg[3] = l2tab;
/* One L1 entry per page of the bootstrap region; a fresh L1 table is
 * allocated each time vl1e crosses a page boundary (including the very
 * first iteration, since vl1e starts NULL). */
220 for ( count = 0; count < ((v_end - dsi_v_start) >> PAGE_SHIFT); count++ )
221 {
222 if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
223 {
224 alloc_pt(l1tab, vl1tab, pl1tab);
225 vl1e = &vl1tab[l1_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
226 if (shadow_mode_enabled)
227 *vl2e = pl1tab | L2_PROT;
228 else
229 *vl2e = l1tab | L2_PROT;
230 vl2e++;
231 }
233 if ( shadow_mode_enabled )
234 {
235 *vl1e = (count << PAGE_SHIFT) | L1_PROT;
236 }
237 else
238 {
239 *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
/* Pages holding the page tables themselves must be read-only. */
240 if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
241 (count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
242 *vl1e &= ~_PAGE_RW;
243 }
244 vl1e++;
245 }
246 munmap(vl1tab, PAGE_SIZE);
247 munmap(vl2tab, PAGE_SIZE);
248 return 0;
250 error_out:
251 if (vl1tab)
252 munmap(vl1tab, PAGE_SIZE);
253 if (vl2tab)
254 munmap(vl2tab, PAGE_SIZE);
255 return -1;
256 }
/*
 * Build the three-level (PAE) bootstrap page tables for an x86-32p
 * guest, mapping [dsi_v_start, v_end).  As with setup_pg_tables(),
 * shadow mode stores pseudo-physical frame addresses; otherwise machine
 * addresses.  The L3 page-directory-pointer table must sit below 4GB,
 * hence the xc_make_page_below_4G() exchange.  Returns 0 on success,
 * -1 on failure.
 */
258 static int setup_pg_tables_pae(int xc_handle, uint32_t dom,
259 vcpu_guest_context_t *ctxt,
260 unsigned long dsi_v_start,
261 unsigned long v_end,
262 unsigned long *page_array,
263 unsigned long vpt_start,
264 unsigned long vpt_end,
265 unsigned shadow_mode_enabled)
266 {
267 l1_pgentry_64_t *vl1tab = NULL, *vl1e = NULL;
268 l2_pgentry_64_t *vl2tab = NULL, *vl2e = NULL;
269 l3_pgentry_64_t *vl3tab = NULL, *vl3e = NULL;
270 uint64_t l1tab, l2tab, l3tab, pl1tab, pl2tab, pl3tab;
271 unsigned long ppt_alloc, count, nmfn;
273 /* First allocate page for page dir. */
274 ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
/* PAE requires the L3 table below 4GB (frame number <= 0xfffff). */
276 if ( page_array[ppt_alloc] > 0xfffff )
277 {
278 nmfn = xc_make_page_below_4G(xc_handle, dom, page_array[ppt_alloc]);
279 if ( nmfn == 0 )
280 {
281 fprintf(stderr, "Couldn't get a page below 4GB :-(\n");
282 goto error_out;
283 }
284 page_array[ppt_alloc] = nmfn;
285 }
287 alloc_pt(l3tab, vl3tab, pl3tab);
288 vl3e = &vl3tab[l3_table_offset_pae(dsi_v_start)];
289 if (shadow_mode_enabled)
290 ctxt->ctrlreg[3] = pl3tab;
291 else
292 ctxt->ctrlreg[3] = l3tab;
294 for ( count = 0; count < ((v_end - dsi_v_start) >> PAGE_SHIFT); count++)
295 {
296 if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
297 {
298 if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
299 {
300 alloc_pt(l2tab, vl2tab, pl2tab);
301 vl2e = &vl2tab[l2_table_offset_pae(
302 dsi_v_start + (count << PAGE_SHIFT))];
/* NOTE(review): in shadow mode vl3e is NOT advanced here, while the
 * non-shadow branch post-increments it; the same asymmetry exists for
 * vl2e below.  This looks suspicious (successive L2 tables would keep
 * overwriting the same L3 slot) -- confirm intended behaviour. */
303 if (shadow_mode_enabled)
304 *vl3e = pl2tab | L3_PROT;
305 else
306 *vl3e++ = l2tab | L3_PROT;
307 }
309 alloc_pt(l1tab, vl1tab, pl1tab);
310 vl1e = &vl1tab[l1_table_offset_pae(
311 dsi_v_start + (count << PAGE_SHIFT))];
312 if (shadow_mode_enabled)
313 *vl2e = pl1tab | L2_PROT;
314 else
315 *vl2e++ = l1tab | L2_PROT;
316 }
318 if ( shadow_mode_enabled )
319 {
320 *vl1e = (count << PAGE_SHIFT) | L1_PROT;
321 }
322 else
323 {
324 *vl1e = ((uint64_t)page_array[count] << PAGE_SHIFT) | L1_PROT;
/* Page-table frames themselves are mapped read-only. */
325 if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
326 (count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
327 *vl1e &= ~_PAGE_RW;
328 }
329 vl1e++;
330 }
332 munmap(vl1tab, PAGE_SIZE);
333 munmap(vl2tab, PAGE_SIZE);
334 munmap(vl3tab, PAGE_SIZE);
335 return 0;
337 error_out:
338 if (vl1tab)
339 munmap(vl1tab, PAGE_SIZE);
340 if (vl2tab)
341 munmap(vl2tab, PAGE_SIZE);
342 if (vl3tab)
343 munmap(vl3tab, PAGE_SIZE);
344 return -1;
345 }
347 #endif
349 #if defined(__x86_64__)
/*
 * Build the four-level bootstrap page tables for an x86-64 guest,
 * mapping [dsi_v_start, v_end).  Same conventions as the 32-bit
 * variants: shadow mode stores pseudo-physical frame addresses (and
 * ctrlreg[3] gets the pseudo-physical L4 address); page-table frames in
 * [vpt_start, vpt_end) are mapped read-only.  Returns 0 on success,
 * -1 on failure.
 */
351 static int setup_pg_tables_64(int xc_handle, uint32_t dom,
352 vcpu_guest_context_t *ctxt,
353 unsigned long dsi_v_start,
354 unsigned long v_end,
355 unsigned long *page_array,
356 unsigned long vpt_start,
357 unsigned long vpt_end,
358 int shadow_mode_enabled)
359 {
360 l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
361 l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
362 l3_pgentry_t *vl3tab=NULL, *vl3e=NULL;
363 l4_pgentry_t *vl4tab=NULL, *vl4e=NULL;
364 unsigned long l2tab = 0, pl2tab;
365 unsigned long l1tab = 0, pl1tab;
366 unsigned long l3tab = 0, pl3tab;
367 unsigned long l4tab = 0, pl4tab;
368 unsigned long ppt_alloc;
369 unsigned long count;
371 /* First allocate page for page dir. */
372 ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
373 alloc_pt(l4tab, vl4tab, pl4tab);
374 vl4e = &vl4tab[l4_table_offset(dsi_v_start)];
375 if (shadow_mode_enabled)
376 ctxt->ctrlreg[3] = pl4tab;
377 else
378 ctxt->ctrlreg[3] = l4tab;
/* Whenever an entry pointer crosses a page boundary (or is still NULL,
 * as on the first iteration) the next-level table is exhausted and a
 * fresh frame is allocated, cascading L1 -> L2 -> L3 as needed. */
380 for ( count = 0; count < ((v_end-dsi_v_start)>>PAGE_SHIFT); count++)
381 {
382 if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
383 {
384 alloc_pt(l1tab, vl1tab, pl1tab);
386 if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
387 {
388 alloc_pt(l2tab, vl2tab, pl2tab);
389 if ( !((unsigned long)vl3e & (PAGE_SIZE-1)) )
390 {
391 alloc_pt(l3tab, vl3tab, pl3tab);
392 vl3e = &vl3tab[l3_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
393 if (shadow_mode_enabled)
394 *vl4e = pl3tab | L4_PROT;
395 else
396 *vl4e = l3tab | L4_PROT;
397 vl4e++;
398 }
399 vl2e = &vl2tab[l2_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
400 if (shadow_mode_enabled)
401 *vl3e = pl2tab | L3_PROT;
402 else
403 *vl3e = l2tab | L3_PROT;
404 vl3e++;
405 }
406 vl1e = &vl1tab[l1_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
407 if (shadow_mode_enabled)
408 *vl2e = pl1tab | L2_PROT;
409 else
410 *vl2e = l1tab | L2_PROT;
411 vl2e++;
412 }
414 if ( shadow_mode_enabled )
415 {
416 *vl1e = (count << PAGE_SHIFT) | L1_PROT;
417 }
418 else
419 {
420 *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
/* Page-table frames themselves are mapped read-only. */
421 if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
422 (count < ((vpt_end -dsi_v_start)>>PAGE_SHIFT)) )
423 {
424 *vl1e &= ~_PAGE_RW;
425 }
426 }
427 vl1e++;
428 }
430 munmap(vl1tab, PAGE_SIZE);
431 munmap(vl2tab, PAGE_SIZE);
432 munmap(vl3tab, PAGE_SIZE);
433 munmap(vl4tab, PAGE_SIZE);
434 return 0;
436 error_out:
437 if (vl1tab)
438 munmap(vl1tab, PAGE_SIZE);
439 if (vl2tab)
440 munmap(vl2tab, PAGE_SIZE);
441 if (vl3tab)
442 munmap(vl3tab, PAGE_SIZE);
443 if (vl4tab)
444 munmap(vl4tab, PAGE_SIZE);
445 return -1;
446 }
447 #endif
449 #ifdef __ia64__
450 extern unsigned long xc_ia64_fpsr_default(void);
/*
 * ia64 variant of setup_guest(): lay out the guest's pseudo-physical
 * memory (kernel image followed by the initrd), load both, then fill in
 * the start_info page and the initrd/cmdline fields of the vcpu context.
 * The last three frames of the domain (nr_pages-3 ..) hold start_info,
 * the xenstore page and the console page, in that order.  Outputs:
 * *pvke (kernel entry), *store_mfn, *console_mfn.  Returns 0 on
 * success, -1 on failure.  pvsi/pvss are unused on ia64.
 */
452 static int setup_guest(int xc_handle,
453 uint32_t dom,
454 const char *image, unsigned long image_size,
455 struct initrd_info *initrd,
456 unsigned long nr_pages,
457 unsigned long *pvsi, unsigned long *pvke,
458 unsigned long *pvss, vcpu_guest_context_t *ctxt,
459 const char *cmdline,
460 unsigned long shared_info_frame,
461 unsigned long flags,
462 unsigned int store_evtchn, unsigned long *store_mfn,
463 unsigned int console_evtchn, unsigned long *console_mfn,
464 uint32_t required_features[XENFEAT_NR_SUBMAPS])
465 {
466 unsigned long *page_array = NULL;
467 struct load_funcs load_funcs;
468 struct domain_setup_info dsi;
469 unsigned long vinitrd_start;
470 unsigned long vinitrd_end;
471 unsigned long v_end;
472 unsigned long start_page, pgnr;
473 start_info_t *start_info;
474 int rc;
476 rc = probeimageformat(image, image_size, &load_funcs);
477 if ( rc != 0 )
478 goto error_out;
480 memset(&dsi, 0, sizeof(struct domain_setup_info));
482 rc = (load_funcs.parseimage)(image, image_size, &dsi);
483 if ( rc != 0 )
484 goto error_out;
/* Layout: page-aligned kernel, then the initrd, rounded up to a page. */
486 dsi.v_start = round_pgdown(dsi.v_start);
487 vinitrd_start = round_pgup(dsi.v_end);
488 vinitrd_end = vinitrd_start + initrd->len;
489 v_end = round_pgup(vinitrd_end);
491 start_page = dsi.v_start >> PAGE_SHIFT;
492 pgnr = (v_end - dsi.v_start) >> PAGE_SHIFT;
493 if ( (page_array = malloc(pgnr * sizeof(unsigned long))) == NULL )
494 {
495 PERROR("Could not allocate memory");
496 goto error_out;
497 }
499 if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array,
500 start_page, pgnr) != pgnr )
501 {
502 PERROR("Could not get the page frame list");
503 goto error_out;
504 }
506 #define _p(a) ((void *) (a))
508 printf("VIRTUAL MEMORY ARRANGEMENT:\n"
509 " Loaded kernel: %p->%p\n"
510 " Init. ramdisk: %p->%p\n"
511 " TOTAL: %p->%p\n",
512 _p(dsi.v_kernstart), _p(dsi.v_kernend),
513 _p(vinitrd_start), _p(vinitrd_end),
514 _p(dsi.v_start), _p(v_end));
515 printf(" ENTRY ADDRESS: %p\n", _p(dsi.v_kernentry));
517 (load_funcs.loadimage)(image, image_size, xc_handle, dom, page_array,
518 &dsi);
520 if ( load_initrd(xc_handle, dom, initrd,
521 vinitrd_start - dsi.v_start, page_array) )
522 goto error_out;
524 *pvke = dsi.v_kernentry;
526 /* Now need to retrieve machine pfn for system pages:
527 * start_info/store/console
528 */
/* page_array is reused here: entries [0..2] become the machine frames
 * of the last three domain pages (start_info, store, console). */
529 pgnr = 3;
530 if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array,
531 nr_pages - 3, pgnr) != pgnr )
532 {
533 PERROR("Could not get page frame for xenstore");
534 goto error_out;
535 }
537 *store_mfn = page_array[1];
538 *console_mfn = page_array[2];
539 printf("start_info: 0x%lx at 0x%lx, "
540 "store_mfn: 0x%lx at 0x%lx, "
541 "console_mfn: 0x%lx at 0x%lx\n",
542 page_array[0], nr_pages,
543 *store_mfn, nr_pages - 2,
544 *console_mfn, nr_pages - 1);
/* NOTE(review): the xc_map_foreign_range() result is not checked for
 * NULL before the memset below -- a failed mapping would crash the
 * builder rather than take the error path.  Consider adding a check. */
546 start_info = xc_map_foreign_range(
547 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, page_array[0]);
548 memset(start_info, 0, sizeof(*start_info));
549 rc = xc_version(xc_handle, XENVER_version, NULL);
550 sprintf(start_info->magic, "xen-%i.%i-ia64", rc >> 16, rc & (0xFFFF));
551 start_info->flags = flags;
/* Guest sees metaphysical frame numbers for store/console, not MFNs. */
552 start_info->store_mfn = nr_pages - 2;
553 start_info->store_evtchn = store_evtchn;
554 start_info->console_mfn = nr_pages - 1;
555 start_info->console_evtchn = console_evtchn;
556 start_info->nr_pages = nr_pages; // FIXME?: nr_pages - 2 ????
557 if ( initrd->len != 0 )
558 {
559 ctxt->initrd.start = vinitrd_start;
560 ctxt->initrd.size = initrd->len;
561 }
562 else
563 {
564 ctxt->initrd.start = 0;
565 ctxt->initrd.size = 0;
566 }
567 if ( cmdline != NULL )
568 {
/* strncpy may leave no terminator; the next line forces one. */
569 strncpy((char *)ctxt->cmdline, cmdline, IA64_COMMAND_LINE_SIZE);
570 ctxt->cmdline[IA64_COMMAND_LINE_SIZE-1] = '\0';
571 }
572 munmap(start_info, PAGE_SIZE);
574 free(page_array);
575 return 0;
577 error_out:
578 free(page_array);
579 return -1;
580 }
581 #else /* x86 */
582 static int setup_guest(int xc_handle,
583 uint32_t dom,
584 const char *image, unsigned long image_size,
585 struct initrd_info *initrd,
586 unsigned long nr_pages,
587 unsigned long *pvsi, unsigned long *pvke,
588 unsigned long *pvss, vcpu_guest_context_t *ctxt,
589 const char *cmdline,
590 unsigned long shared_info_frame,
591 unsigned long flags,
592 unsigned int store_evtchn, unsigned long *store_mfn,
593 unsigned int console_evtchn, unsigned long *console_mfn,
594 uint32_t required_features[XENFEAT_NR_SUBMAPS])
595 {
596 unsigned long *page_array = NULL;
597 unsigned long count, i, hypercall_pfn;
598 start_info_t *start_info;
599 shared_info_t *shared_info;
600 xc_mmu_t *mmu = NULL;
601 char *p;
602 DECLARE_DOM0_OP;
603 int rc;
605 unsigned long nr_pt_pages;
606 unsigned long physmap_pfn;
607 unsigned long *physmap, *physmap_e;
609 struct load_funcs load_funcs;
610 struct domain_setup_info dsi;
611 unsigned long vinitrd_start;
612 unsigned long vphysmap_start;
613 unsigned long vstartinfo_start;
614 unsigned long vstoreinfo_start;
615 unsigned long vconsole_start;
616 unsigned long vsharedinfo_start = 0; /* XXX gcc */
617 unsigned long vstack_start;
618 unsigned long vstack_end;
619 unsigned long vpt_start;
620 unsigned long vpt_end;
621 unsigned long v_end;
622 unsigned long guest_store_mfn, guest_console_mfn, guest_shared_info_mfn;
623 unsigned long shadow_mode_enabled;
624 uint32_t supported_features[XENFEAT_NR_SUBMAPS] = { 0, };
626 rc = probeimageformat(image, image_size, &load_funcs);
627 if ( rc != 0 )
628 goto error_out;
630 memset(&dsi, 0, sizeof(struct domain_setup_info));
632 rc = (load_funcs.parseimage)(image, image_size, &dsi);
633 if ( rc != 0 )
634 goto error_out;
636 if ( (dsi.v_start & (PAGE_SIZE-1)) != 0 )
637 {
638 PERROR("Guest OS must load to a page boundary.\n");
639 goto error_out;
640 }
642 /* Parse and validate kernel features. */
643 p = strstr(dsi.xen_guest_string, "FEATURES=");
644 if ( p != NULL )
645 {
646 if ( !parse_features(p + strlen("FEATURES="),
647 supported_features,
648 required_features) )
649 {
650 ERROR("Failed to parse guest kernel features.\n");
651 goto error_out;
652 }
654 printf("Supported features = { %08x }.\n", supported_features[0]);
655 printf("Required features = { %08x }.\n", required_features[0]);
656 }
658 for ( i = 0; i < XENFEAT_NR_SUBMAPS; i++ )
659 {
660 if ( (supported_features[i]&required_features[i]) != required_features[i] )
661 {
662 ERROR("Guest kernel does not support a required feature.\n");
663 goto error_out;
664 }
665 }
667 shadow_mode_enabled = test_feature_bit(XENFEAT_auto_translated_physmap,
668 required_features);
670 /*
671 * Why do we need this? The number of page-table frames depends on the
672 * size of the bootstrap address space. But the size of the address space
673 * depends on the number of page-table frames (since each one is mapped
674 * read-only). We have a pair of simultaneous equations in two unknowns,
675 * which we solve by exhaustive search.
676 */
677 v_end = round_pgup(dsi.v_end);
678 vinitrd_start = v_end;
679 v_end += round_pgup(initrd->len);
680 vphysmap_start = v_end;
681 v_end += round_pgup(nr_pages * sizeof(unsigned long));
682 vstartinfo_start = v_end;
683 v_end += PAGE_SIZE;
684 vstoreinfo_start = v_end;
685 v_end += PAGE_SIZE;
686 vconsole_start = v_end;
687 v_end += PAGE_SIZE;
688 if ( shadow_mode_enabled ) {
689 vsharedinfo_start = v_end;
690 v_end += PAGE_SIZE;
691 }
692 vpt_start = v_end;
694 for ( nr_pt_pages = 2; ; nr_pt_pages++ )
695 {
696 vpt_end = vpt_start + (nr_pt_pages * PAGE_SIZE);
697 vstack_start = vpt_end;
698 vstack_end = vstack_start + PAGE_SIZE;
699 v_end = (vstack_end + (1UL<<22)-1) & ~((1UL<<22)-1);
700 if ( (v_end - vstack_end) < (512UL << 10) )
701 v_end += 1UL << 22; /* Add extra 4MB to get >= 512kB padding. */
702 #if defined(__i386__)
703 if ( dsi.pae_kernel )
704 {
705 /* FIXME: assumes one L2 pgtable @ 0xc0000000 */
706 if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT_PAE)-1)) >>
707 L2_PAGETABLE_SHIFT_PAE) + 2) <= nr_pt_pages )
708 break;
709 }
710 else
711 {
712 if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT)-1)) >>
713 L2_PAGETABLE_SHIFT) + 1) <= nr_pt_pages )
714 break;
715 }
716 #endif
717 #if defined(__x86_64__)
718 #define NR(_l,_h,_s) \
719 (((((_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
720 ((_l) & ~((1UL<<(_s))-1))) >> (_s))
721 if ( (1 + /* # L4 */
722 NR(dsi.v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
723 NR(dsi.v_start, v_end, L3_PAGETABLE_SHIFT) + /* # L2 */
724 NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT)) /* # L1 */
725 <= nr_pt_pages )
726 break;
727 #endif
728 }
730 #define _p(a) ((void *) (a))
732 printf("VIRTUAL MEMORY ARRANGEMENT:\n");
733 printf(" Loaded kernel: %p->%p\n", _p(dsi.v_kernstart),
734 _p(dsi.v_kernend));
735 if ( initrd->len )
736 printf(" Initial ramdisk: %p->%p\n", _p(vinitrd_start),
737 _p(vinitrd_start + initrd->len));
738 printf(" Phys-Mach map: %p\n", _p(vphysmap_start));
739 printf(" Start info: %p\n", _p(vstartinfo_start));
740 printf(" Store page: %p\n", _p(vstoreinfo_start));
741 printf(" Console page: %p\n", _p(vconsole_start));
742 if ( shadow_mode_enabled )
743 printf(" Shared Info page: %p\n", _p(vsharedinfo_start));
744 printf(" Page tables: %p\n", _p(vpt_start));
745 printf(" Boot stack: %p\n", _p(vstack_start));
746 printf(" TOTAL: %p->%p\n", _p(dsi.v_start), _p(v_end));
747 printf(" ENTRY ADDRESS: %p\n", _p(dsi.v_kernentry));
749 if ( ((v_end - dsi.v_start)>>PAGE_SHIFT) > nr_pages )
750 {
751 PERROR("Initial guest OS requires too much space\n"
752 "(%luMB is greater than %luMB limit)\n",
753 (v_end-dsi.v_start)>>20, nr_pages>>(20-PAGE_SHIFT));
754 goto error_out;
755 }
757 if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
758 {
759 PERROR("Could not allocate memory");
760 goto error_out;
761 }
763 if ( xc_get_pfn_list(xc_handle, dom, page_array, nr_pages) != nr_pages )
764 {
765 PERROR("Could not get the page frame list");
766 goto error_out;
767 }
769 (load_funcs.loadimage)(image, image_size,
770 xc_handle, dom, page_array,
771 &dsi);
773 if ( load_initrd(xc_handle, dom, initrd,
774 vinitrd_start - dsi.v_start, page_array) )
775 goto error_out;
777 /* setup page tables */
778 #if defined(__i386__)
779 if (dsi.pae_kernel)
780 rc = setup_pg_tables_pae(xc_handle, dom, ctxt,
781 dsi.v_start, v_end,
782 page_array, vpt_start, vpt_end,
783 shadow_mode_enabled);
784 else
785 rc = setup_pg_tables(xc_handle, dom, ctxt,
786 dsi.v_start, v_end,
787 page_array, vpt_start, vpt_end,
788 shadow_mode_enabled);
789 #endif
790 #if defined(__x86_64__)
791 rc = setup_pg_tables_64(xc_handle, dom, ctxt,
792 dsi.v_start, v_end,
793 page_array, vpt_start, vpt_end,
794 shadow_mode_enabled);
795 #endif
796 if (0 != rc)
797 goto error_out;
799 #if defined(__i386__)
800 /*
801 * Pin down l2tab addr as page dir page - causes hypervisor to provide
802 * correct protection for the page
803 */
804 if ( !shadow_mode_enabled )
805 {
806 if ( dsi.pae_kernel )
807 {
808 if ( pin_table(xc_handle, MMUEXT_PIN_L3_TABLE,
809 ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
810 goto error_out;
811 }
812 else
813 {
814 if ( pin_table(xc_handle, MMUEXT_PIN_L2_TABLE,
815 ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
816 goto error_out;
817 }
818 }
819 #endif
821 #if defined(__x86_64__)
822 /*
823 * Pin down l4tab addr as page dir page - causes hypervisor to provide
824 * correct protection for the page
825 */
826 if ( pin_table(xc_handle, MMUEXT_PIN_L4_TABLE,
827 ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
828 goto error_out;
829 #endif
831 if ( (mmu = xc_init_mmu_updates(xc_handle, dom)) == NULL )
832 goto error_out;
834 /* Write the phys->machine and machine->phys table entries. */
835 physmap_pfn = (vphysmap_start - dsi.v_start) >> PAGE_SHIFT;
836 physmap = physmap_e = xc_map_foreign_range(
837 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
838 page_array[physmap_pfn++]);
840 for ( count = 0; count < nr_pages; count++ )
841 {
842 if ( xc_add_mmu_update(
843 xc_handle, mmu,
844 ((uint64_t)page_array[count] << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
845 count) )
846 {
847 fprintf(stderr,"m2p update failure p=%lx m=%lx\n",
848 count, page_array[count]);
849 munmap(physmap, PAGE_SIZE);
850 goto error_out;
851 }
852 *physmap_e++ = page_array[count];
853 if ( ((unsigned long)physmap_e & (PAGE_SIZE-1)) == 0 )
854 {
855 munmap(physmap, PAGE_SIZE);
856 physmap = physmap_e = xc_map_foreign_range(
857 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
858 page_array[physmap_pfn++]);
859 }
860 }
861 munmap(physmap, PAGE_SIZE);
863 /* Send the page update requests down to the hypervisor. */
864 if ( xc_finish_mmu_updates(xc_handle, mmu) )
865 goto error_out;
867 if ( shadow_mode_enabled )
868 {
869 struct xen_add_to_physmap xatp;
871 /* Enable shadow translate mode */
872 if ( xc_shadow_control(xc_handle, dom,
873 DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE,
874 NULL, 0, NULL) < 0 )
875 {
876 PERROR("Could not enable translation mode");
877 goto error_out;
878 }
880 guest_shared_info_mfn = (vsharedinfo_start-dsi.v_start) >> PAGE_SHIFT;
882 /* Map shared info frame into guest physmap. */
883 xatp.domid = dom;
884 xatp.space = XENMAPSPACE_shared_info;
885 xatp.idx = 0;
886 xatp.gpfn = guest_shared_info_mfn;
887 rc = xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp);
888 if ( rc != 0 )
889 {
890 PERROR("Cannot map shared info pfn");
891 goto error_out;
892 }
894 /* Map grant table frames into guest physmap. */
895 for ( i = 0; ; i++ )
896 {
897 xatp.domid = dom;
898 xatp.space = XENMAPSPACE_grant_table;
899 xatp.idx = i;
900 xatp.gpfn = nr_pages + i;
901 rc = xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp);
902 if ( rc != 0 )
903 {
904 if ( errno == EINVAL )
905 break; /* done all grant tables */
906 PERROR("Cannot map grant table pfn");
907 goto error_out;
908 }
909 }
910 }
911 else
912 {
913 guest_shared_info_mfn = shared_info_frame;
914 }
916 *store_mfn = page_array[(vstoreinfo_start-dsi.v_start) >> PAGE_SHIFT];
917 *console_mfn = page_array[(vconsole_start-dsi.v_start) >> PAGE_SHIFT];
918 if ( xc_clear_domain_page(xc_handle, dom, *store_mfn) ||
919 xc_clear_domain_page(xc_handle, dom, *console_mfn) )
920 goto error_out;
921 if ( shadow_mode_enabled )
922 {
923 guest_store_mfn = (vstoreinfo_start-dsi.v_start) >> PAGE_SHIFT;
924 guest_console_mfn = (vconsole_start-dsi.v_start) >> PAGE_SHIFT;
925 }
926 else
927 {
928 guest_store_mfn = *store_mfn;
929 guest_console_mfn = *console_mfn;
930 }
932 start_info = xc_map_foreign_range(
933 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
934 page_array[(vstartinfo_start-dsi.v_start)>>PAGE_SHIFT]);
935 /*shared_info, start_info */
936 memset(start_info, 0, sizeof(*start_info));
937 rc = xc_version(xc_handle, XENVER_version, NULL);
938 sprintf(start_info->magic, "xen-%i.%i-x86_%d%s",
939 rc >> 16, rc & (0xFFFF), (unsigned int)sizeof(long)*8,
940 dsi.pae_kernel ? "p" : "");
941 start_info->nr_pages = nr_pages;
942 start_info->shared_info = guest_shared_info_mfn << PAGE_SHIFT;
943 start_info->flags = flags;
944 start_info->pt_base = vpt_start;
945 start_info->nr_pt_frames = nr_pt_pages;
946 start_info->mfn_list = vphysmap_start;
947 start_info->store_mfn = guest_store_mfn;
948 start_info->store_evtchn = store_evtchn;
949 start_info->console_mfn = guest_console_mfn;
950 start_info->console_evtchn = console_evtchn;
951 if ( initrd->len != 0 )
952 {
953 start_info->mod_start = vinitrd_start;
954 start_info->mod_len = initrd->len;
955 }
956 if ( cmdline != NULL )
957 {
958 strncpy((char *)start_info->cmd_line, cmdline, MAX_GUEST_CMDLINE);
959 start_info->cmd_line[MAX_GUEST_CMDLINE-1] = '\0';
960 }
961 munmap(start_info, PAGE_SIZE);
963 /* shared_info page starts its life empty. */
964 shared_info = xc_map_foreign_range(
965 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, shared_info_frame);
966 memset(shared_info, 0, sizeof(shared_info_t));
967 /* Mask all upcalls... */
968 for ( i = 0; i < MAX_VIRT_CPUS; i++ )
969 shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
971 munmap(shared_info, PAGE_SIZE);
973 /* Send the page update requests down to the hypervisor. */
974 if ( xc_finish_mmu_updates(xc_handle, mmu) )
975 goto error_out;
977 p = strstr(dsi.xen_guest_string, "HYPERCALL_PAGE=");
978 if ( p != NULL )
979 {
980 p += strlen("HYPERCALL_PAGE=");
981 hypercall_pfn = strtoul(p, NULL, 16);
982 if ( hypercall_pfn >= nr_pages )
983 goto error_out;
984 op.u.hypercall_init.domain = (domid_t)dom;
985 op.u.hypercall_init.mfn = page_array[hypercall_pfn];
986 op.cmd = DOM0_HYPERCALL_INIT;
987 if ( xc_dom0_op(xc_handle, &op) )
988 goto error_out;
989 }
991 free(mmu);
992 free(page_array);
994 *pvsi = vstartinfo_start;
995 *pvss = vstack_start;
996 *pvke = dsi.v_kernentry;
998 return 0;
1000 error_out:
1001 free(mmu);
1002 free(page_array);
1003 return -1;
1005 #endif
1007 static int xc_linux_build_internal(int xc_handle,
1008 uint32_t domid,
1009 char *image,
1010 unsigned long image_size,
1011 struct initrd_info *initrd,
1012 const char *cmdline,
1013 const char *features,
1014 unsigned long flags,
1015 unsigned int store_evtchn,
1016 unsigned long *store_mfn,
1017 unsigned int console_evtchn,
1018 unsigned long *console_mfn)
1020 dom0_op_t launch_op;
1021 DECLARE_DOM0_OP;
1022 int rc, i;
1023 vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
1024 unsigned long nr_pages;
1025 unsigned long vstartinfo_start, vkern_entry, vstack_start;
1026 uint32_t features_bitmap[XENFEAT_NR_SUBMAPS] = { 0, };
1028 if ( features != NULL )
1030 if ( !parse_features(features, features_bitmap, NULL) )
1032 PERROR("Failed to parse configured features\n");
1033 goto error_out;
1037 if ( (nr_pages = get_tot_pages(xc_handle, domid)) < 0 )
1039 PERROR("Could not find total pages for domain");
1040 goto error_out;
1043 #ifdef VALGRIND
1044 memset(&st_ctxt, 0, sizeof(st_ctxt));
1045 #endif
1047 if ( mlock(&st_ctxt, sizeof(st_ctxt) ) )
1049 PERROR("%s: ctxt mlock failed", __func__);
1050 return 1;
1053 op.cmd = DOM0_GETDOMAININFO;
1054 op.u.getdomaininfo.domain = (domid_t)domid;
1055 if ( (xc_dom0_op(xc_handle, &op) < 0) ||
1056 ((uint16_t)op.u.getdomaininfo.domain != domid) )
1058 PERROR("Could not get info on domain");
1059 goto error_out;
1062 memset(ctxt, 0, sizeof(*ctxt));
1064 if ( setup_guest(xc_handle, domid, image, image_size,
1065 initrd,
1066 nr_pages,
1067 &vstartinfo_start, &vkern_entry,
1068 &vstack_start, ctxt, cmdline,
1069 op.u.getdomaininfo.shared_info_frame,
1070 flags, store_evtchn, store_mfn,
1071 console_evtchn, console_mfn,
1072 features_bitmap) < 0 )
1074 ERROR("Error constructing guest OS");
1075 goto error_out;
1078 #ifdef __ia64__
1079 /* based on new_thread in xen/arch/ia64/domain.c */
1080 ctxt->flags = 0;
1081 ctxt->shared.flags = flags;
1082 ctxt->shared.start_info_pfn = nr_pages - 3; /* metaphysical */
1083 ctxt->regs.cr_ipsr = 0; /* all necessary bits filled by hypervisor */
1084 ctxt->regs.cr_iip = vkern_entry;
1085 ctxt->regs.cr_ifs = 1UL << 63;
1086 ctxt->regs.ar_fpsr = xc_ia64_fpsr_default();
1087 /* currently done by hypervisor, should move here */
1088 /* ctxt->regs.r28 = dom_fw_setup(); */
1089 ctxt->vcpu.privregs = 0;
1090 ctxt->sys_pgnr = 3;
1091 i = 0; /* silence unused variable warning */
1092 #else /* x86 */
1093 /*
1094 * Initial register values:
1095 * DS,ES,FS,GS = FLAT_KERNEL_DS
1096 * CS:EIP = FLAT_KERNEL_CS:start_pc
1097 * SS:ESP = FLAT_KERNEL_DS:start_stack
1098 * ESI = start_info
1099 * [EAX,EBX,ECX,EDX,EDI,EBP are zero]
1100 * EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
1101 */
1102 ctxt->user_regs.ds = FLAT_KERNEL_DS;
1103 ctxt->user_regs.es = FLAT_KERNEL_DS;
1104 ctxt->user_regs.fs = FLAT_KERNEL_DS;
1105 ctxt->user_regs.gs = FLAT_KERNEL_DS;
1106 ctxt->user_regs.ss = FLAT_KERNEL_SS;
1107 ctxt->user_regs.cs = FLAT_KERNEL_CS;
1108 ctxt->user_regs.eip = vkern_entry;
1109 ctxt->user_regs.esp = vstack_start + PAGE_SIZE;
1110 ctxt->user_regs.esi = vstartinfo_start;
1111 ctxt->user_regs.eflags = 1 << 9; /* Interrupt Enable */
1113 ctxt->flags = VGCF_IN_KERNEL;
1115 /* FPU is set up to default initial state. */
1116 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
1118 /* Virtual IDT is empty at start-of-day. */
1119 for ( i = 0; i < 256; i++ )
1121 ctxt->trap_ctxt[i].vector = i;
1122 ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS;
1125 /* No LDT. */
1126 ctxt->ldt_ents = 0;
1128 /* Use the default Xen-provided GDT. */
1129 ctxt->gdt_ents = 0;
1131 /* Ring 1 stack is the initial stack. */
1132 ctxt->kernel_ss = FLAT_KERNEL_SS;
1133 ctxt->kernel_sp = vstack_start + PAGE_SIZE;
1135 /* No debugging. */
1136 memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));
1138 /* No callback handlers. */
1139 #if defined(__i386__)
1140 ctxt->event_callback_cs = FLAT_KERNEL_CS;
1141 ctxt->event_callback_eip = 0;
1142 ctxt->failsafe_callback_cs = FLAT_KERNEL_CS;
1143 ctxt->failsafe_callback_eip = 0;
1144 #elif defined(__x86_64__)
1145 ctxt->event_callback_eip = 0;
1146 ctxt->failsafe_callback_eip = 0;
1147 ctxt->syscall_callback_eip = 0;
1148 #endif
1149 #endif /* x86 */
1151 memset( &launch_op, 0, sizeof(launch_op) );
1153 launch_op.u.setvcpucontext.domain = (domid_t)domid;
1154 launch_op.u.setvcpucontext.vcpu = 0;
1155 launch_op.u.setvcpucontext.ctxt = ctxt;
1157 launch_op.cmd = DOM0_SETVCPUCONTEXT;
1158 rc = xc_dom0_op(xc_handle, &launch_op);
1160 return rc;
1162 error_out:
1163 return -1;
1166 int xc_linux_build_mem(int xc_handle,
1167 uint32_t domid,
1168 const char *image_buffer,
1169 unsigned long image_size,
1170 const char *initrd,
1171 unsigned long initrd_len,
1172 const char *cmdline,
1173 const char *features,
1174 unsigned long flags,
1175 unsigned int store_evtchn,
1176 unsigned long *store_mfn,
1177 unsigned int console_evtchn,
1178 unsigned long *console_mfn)
1180 int sts;
1181 char *img_buf;
1182 unsigned long img_len;
1183 struct initrd_info initrd_info = { .type = INITRD_none };
1185 /* A kernel buffer is required */
1186 if ( (image_buffer == NULL) || (image_size == 0) )
1188 ERROR("kernel image buffer not present");
1189 return -1;
1192 /* If it's gzipped, inflate it; otherwise, use as is */
1193 /* xc_inflate_buffer may return the same buffer pointer if */
1194 /* the buffer is already inflated */
1195 img_buf = xc_inflate_buffer(image_buffer, image_size, &img_len);
1196 if ( img_buf == NULL )
1198 ERROR("unable to inflate kernel image buffer");
1199 return -1;
1202 /* RAM disks are optional; if we get one, inflate it */
1203 if ( initrd != NULL )
1205 initrd_info.type = INITRD_mem;
1206 initrd_info.u.mem_addr = xc_inflate_buffer(
1207 initrd, initrd_len, &initrd_info.len);
1208 if ( initrd_info.u.mem_addr == NULL )
1210 ERROR("unable to inflate ram disk buffer");
1211 sts = -1;
1212 goto out;
1216 sts = xc_linux_build_internal(xc_handle, domid, img_buf, img_len,
1217 &initrd_info, cmdline, features, flags,
1218 store_evtchn, store_mfn,
1219 console_evtchn, console_mfn);
1221 out:
1222 /* The inflation routines may pass back the same buffer so be */
1223 /* sure that we have a buffer and that it's not the one passed in. */
1224 /* Don't unnecessarily annoy/surprise/confound the caller */
1225 if ( (img_buf != NULL) && (img_buf != image_buffer) )
1226 free(img_buf);
1227 if ( (initrd_info.u.mem_addr != NULL) &&
1228 (initrd_info.u.mem_addr != initrd) )
1229 free(initrd_info.u.mem_addr);
1231 return sts;
int xc_linux_build(int xc_handle,
                   uint32_t domid,
                   const char *image_name,
                   const char *initrd_name,
                   const char *cmdline,
                   const char *features,
                   unsigned long flags,
                   unsigned int store_evtchn,
                   unsigned long *store_mfn,
                   unsigned int console_evtchn,
                   unsigned long *console_mfn)
{
    char *image = NULL;
    unsigned long image_size;
    struct initrd_info initrd_info = { .type = INITRD_none };
    int fd = -1, sts = -1;

    /* A kernel image file is mandatory; read it into memory. */
    if ( (image_name == NULL) ||
         ((image = xc_read_image(image_name, &image_size)) == NULL ))
        return -1;

    /* The ramdisk file is optional; open a decompressing handle on it. */
    if ( (initrd_name != NULL) && (strlen(initrd_name) != 0) )
    {
        initrd_info.type = INITRD_file;

        if ( (fd = open(initrd_name, O_RDONLY)) < 0 )
        {
            PERROR("Could not open the initial ramdisk image");
            goto error_out;
        }

        initrd_info.len = xc_get_filesz(fd);
        if ( (initrd_info.u.file_handle = gzdopen(fd, "rb")) == NULL )
        {
            PERROR("Could not allocate decompression state for initrd");
            goto error_out;
        }
    }

    sts = xc_linux_build_internal(xc_handle, domid, image, image_size,
                                  &initrd_info, cmdline, features, flags,
                                  store_evtchn, store_mfn,
                                  console_evtchn, console_mfn);

 error_out:
    free(image);
    /*
     * gzdopen() takes ownership of fd and gzclose() closes the underlying
     * descriptor, so only close(fd) directly when no gz handle was ever
     * created; doing both would close fd twice.
     */
    if ( initrd_info.u.file_handle != NULL )
        gzclose(initrd_info.u.file_handle);
    else if ( fd >= 0 )
        close(fd);

    return sts;
}
1288 /*
1289 * Local variables:
1290 * mode: C
1291 * c-set-style: "BSD"
1292 * c-basic-offset: 4
1293 * tab-width: 4
1294 * indent-tabs-mode: nil
1295 * End:
1296 */