ia64/xen-unstable

view tools/libxc/xc_linux_build.c @ 5688:d231efdaa66d

manual merge
author iap10@freefall.cl.cam.ac.uk
date Wed Jul 06 18:55:16 2005 +0000 (2005-07-06)
parents 0b5f09002630 43e8e30cbea7
children 32fb371cc283 707fcf42a5ae
/******************************************************************************
 * xc_linux_build.c
 */

#include "xc_private.h"

#if defined(__i386__)
#define ELFSIZE 32
#endif

#if defined(__x86_64__)
#define ELFSIZE 64
#endif

#include "xc_elf.h"
#include "xc_aout9.h"
#include <stdlib.h>
#include <zlib.h>

#if defined(__i386__)
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#endif

#if defined(__x86_64__)
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#endif

#define round_pgup(_p)   (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
#define round_pgdown(_p) ((_p)&PAGE_MASK)
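
/*
 * Each probe_* helper returns zero when it recognises the image, so the
 * chain below reports failure only if every known format rejects it.
 */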
static int probeimageformat(char *image,
                            unsigned long image_size,
                            struct load_funcs *load_funcs)
{
    if ( probe_elf(image, image_size, load_funcs) &&
         probe_bin(image, image_size, load_funcs) &&
         probe_aout9(image, image_size, load_funcs) )
    {
        ERROR("Unrecognized image format");
        return -EINVAL;
    }

    return 0;
}

static int setup_guest(int xc_handle,
                       u32 dom,
                       char *image, unsigned long image_size,
                       gzFile initrd_gfd, unsigned long initrd_len,
                       unsigned long nr_pages,
                       unsigned long *pvsi, unsigned long *pvke,
                       unsigned long *pvss, vcpu_guest_context_t *ctxt,
                       const char *cmdline,
                       unsigned long shared_info_frame,
                       unsigned int control_evtchn,
                       unsigned long flags,
                       unsigned int vcpus,
                       unsigned int store_evtchn, unsigned long *store_mfn)
{
    l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
    l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
#if defined(__x86_64__)
    l3_pgentry_t *vl3tab=NULL, *vl3e=NULL;
    l4_pgentry_t *vl4tab=NULL, *vl4e=NULL;
#endif
    unsigned long *page_array = NULL;
    unsigned long l2tab = 0;
    unsigned long l1tab = 0;
#if defined(__x86_64__)
    unsigned long l3tab = 0;
    unsigned long l4tab = 0;
#endif
    unsigned long count, i;
    start_info_t *start_info;
    shared_info_t *shared_info;
    mmu_t *mmu = NULL;
    int rc;

    unsigned long nr_pt_pages;
    unsigned long ppt_alloc, physmap_pfn;
    u32 *physmap, *physmap_e;

    struct load_funcs load_funcs;
    struct domain_setup_info dsi;
    unsigned long vinitrd_start;
    unsigned long vinitrd_end;
    unsigned long vphysmap_start;
    unsigned long vphysmap_end;
    unsigned long vstartinfo_start;
    unsigned long vstartinfo_end;
    unsigned long vstoreinfo_start;
    unsigned long vstoreinfo_end;
    unsigned long vstack_start;
    unsigned long vstack_end;
    unsigned long vpt_start;
    unsigned long vpt_end;
    unsigned long v_end;

    rc = probeimageformat(image, image_size, &load_funcs);
    if ( rc != 0 )
        goto error_out;

    memset(&dsi, 0, sizeof(struct domain_setup_info));

    rc = (load_funcs.parseimage)(image, image_size, &dsi);
    if ( rc != 0 )
        goto error_out;

    if ( (dsi.v_start & (PAGE_SIZE-1)) != 0 )
    {
        PERROR("Guest OS must load to a page boundary.\n");
        goto error_out;
    }

    /*
     * Why do we need this? The number of page-table frames depends on the
     * size of the bootstrap address space. But the size of the address space
     * depends on the number of page-table frames (since each one is mapped
     * read-only). We have a pair of simultaneous equations in two unknowns,
     * which we solve by exhaustive search.
     */
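    /*
     * For example: on non-PAE i386 one page-table page maps 4MB (1024 PTEs
     * of 4kB each), so each increment of nr_pt_pages extends the bootstrap
     * region by 4MB and the loop below converges within a few iterations.
     */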
    vinitrd_start    = round_pgup(dsi.v_end);
    vinitrd_end      = vinitrd_start + initrd_len;
    vphysmap_start   = round_pgup(vinitrd_end);
    vphysmap_end     = vphysmap_start + (nr_pages * sizeof(unsigned long));
    vstoreinfo_start = round_pgup(vphysmap_end);
    vstoreinfo_end   = vstoreinfo_start + PAGE_SIZE;
    vpt_start        = vstoreinfo_end;

    for ( nr_pt_pages = 2; ; nr_pt_pages++ )
    {
        vpt_end          = vpt_start + (nr_pt_pages * PAGE_SIZE);
        vstartinfo_start = vpt_end;
        vstartinfo_end   = vstartinfo_start + PAGE_SIZE;
        vstack_start     = vstartinfo_end;
        vstack_end       = vstack_start + PAGE_SIZE;
        v_end            = (vstack_end + (1UL<<22)-1) & ~((1UL<<22)-1);
        if ( (v_end - vstack_end) < (512UL << 10) )
            v_end += 1UL << 22; /* Add extra 4MB to get >= 512kB padding. */
#if defined(__i386__)
        if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT)-1)) >>
               L2_PAGETABLE_SHIFT) + 1) <= nr_pt_pages )
            break;
#endif
#if defined(__x86_64__)
#define NR(_l,_h,_s) \
    (((((_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
       ((_l) & ~((1UL<<(_s))-1))) >> (_s))
        if ( (1 + /* # L4 */
              NR(dsi.v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
              NR(dsi.v_start, v_end, L3_PAGETABLE_SHIFT) + /* # L2 */
              NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT)) /* # L1 */
             <= nr_pt_pages )
            break;
#endif
    }

#define _p(a) ((void *) (a))

    printf("VIRTUAL MEMORY ARRANGEMENT:\n"
           " Loaded kernel: %p->%p\n"
           " Init. ramdisk: %p->%p\n"
           " Phys-Mach map: %p->%p\n"
           " Store page:    %p->%p\n"
           " Page tables:   %p->%p\n"
           " Start info:    %p->%p\n"
           " Boot stack:    %p->%p\n"
           " TOTAL:         %p->%p\n",
           _p(dsi.v_kernstart), _p(dsi.v_kernend),
           _p(vinitrd_start), _p(vinitrd_end),
           _p(vphysmap_start), _p(vphysmap_end),
           _p(vstoreinfo_start), _p(vstoreinfo_end),
           _p(vpt_start), _p(vpt_end),
           _p(vstartinfo_start), _p(vstartinfo_end),
           _p(vstack_start), _p(vstack_end),
           _p(dsi.v_start), _p(v_end));
    printf(" ENTRY ADDRESS: %p\n", _p(dsi.v_kernentry));

    if ( (v_end - dsi.v_start) > (nr_pages * PAGE_SIZE) )
    {
        printf("Initial guest OS requires too much space\n"
               "(%luMB is greater than %luMB limit)\n",
               (v_end-dsi.v_start)>>20, (nr_pages<<PAGE_SHIFT)>>20);
        goto error_out;
    }

    if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
    {
        PERROR("Could not allocate memory");
        goto error_out;
    }

    if ( xc_get_pfn_list(xc_handle, dom, page_array, nr_pages) != nr_pages )
    {
        PERROR("Could not get the page frame list");
        goto error_out;
    }

    (load_funcs.loadimage)(image, image_size, xc_handle, dom, page_array,
                           &dsi);

    /* Load the initial ramdisk image, one decompressed page at a time. */
    if ( initrd_len != 0 )
    {
        for ( i = (vinitrd_start - dsi.v_start);
              i < (vinitrd_end - dsi.v_start); i += PAGE_SIZE )
        {
            char page[PAGE_SIZE];
            if ( gzread(initrd_gfd, page, PAGE_SIZE) == -1 )
            {
                PERROR("Error reading initrd image");
                goto error_out;
            }
            xc_copy_to_domain_page(xc_handle, dom,
                                   page_array[i>>PAGE_SHIFT], page);
        }
    }

    if ( (mmu = init_mmu_updates(xc_handle, dom)) == NULL )
        goto error_out;

#if defined(__i386__)
    /* First allocate page for page dir. */
    ppt_alloc = (vpt_start - dsi.v_start) >> PAGE_SHIFT;
    l2tab = page_array[ppt_alloc++] << PAGE_SHIFT;
    ctxt->ctrlreg[3] = l2tab; /* Guest CR3: machine address of the page dir. */

    /* Initialise the page tables. */
    if ( (vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                        PROT_READ|PROT_WRITE,
                                        l2tab >> PAGE_SHIFT)) == NULL )
        goto error_out;
    memset(vl2tab, 0, PAGE_SIZE);
    vl2e = &vl2tab[l2_table_offset(dsi.v_start)];
    for ( count = 0; count < ((v_end-dsi.v_start)>>PAGE_SHIFT); count++ )
    {
        if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
        {
            l1tab = page_array[ppt_alloc++] << PAGE_SHIFT;
            if ( vl1tab != NULL )
                munmap(vl1tab, PAGE_SIZE);
            if ( (vl1tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                                PROT_READ|PROT_WRITE,
                                                l1tab >> PAGE_SHIFT)) == NULL )
            {
                munmap(vl2tab, PAGE_SIZE);
                goto error_out;
            }
            memset(vl1tab, 0, PAGE_SIZE);
            vl1e = &vl1tab[l1_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
            *vl2e++ = l1tab | L2_PROT;
        }

        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
        /* Xen requires the guest's page-table pages to be mapped read-only. */
        if ( (count >= ((vpt_start-dsi.v_start)>>PAGE_SHIFT)) &&
             (count <  ((vpt_end  -dsi.v_start)>>PAGE_SHIFT)) )
            *vl1e &= ~_PAGE_RW;
        vl1e++;
    }
    munmap(vl1tab, PAGE_SIZE);
    munmap(vl2tab, PAGE_SIZE);
#endif
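
    /*
     * The x86_64 path below follows the same pattern with a four-level
     * hierarchy; alloc_pt() pulls the next frame from the reserved
     * page-table region, maps it with xc_map_foreign_range(), and zeroes it.
     */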
#if defined(__x86_64__)

#define alloc_pt(ltab, vltab) \
    ltab = page_array[ppt_alloc++] << PAGE_SHIFT; \
    if (vltab != NULL) { \
        munmap(vltab, PAGE_SIZE); \
    } \
    if ((vltab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, \
                                      PROT_READ|PROT_WRITE, \
                                      ltab >> PAGE_SHIFT)) == NULL) { \
        goto error_out; \
    } \
    memset(vltab, 0, PAGE_SIZE);

    /* First allocate page for page dir. */
    ppt_alloc = (vpt_start - dsi.v_start) >> PAGE_SHIFT;
    l4tab = page_array[ppt_alloc++] << PAGE_SHIFT;
    ctxt->ctrlreg[3] = l4tab;

    /* Initialise the page tables. */
    if ( (vl4tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                        PROT_READ|PROT_WRITE,
                                        l4tab >> PAGE_SHIFT)) == NULL )
        goto error_out;
    memset(vl4tab, 0, PAGE_SIZE);
    vl4e = &vl4tab[l4_table_offset(dsi.v_start)];

    for ( count = 0; count < ((v_end-dsi.v_start)>>PAGE_SHIFT); count++ )
    {
        if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
        {
            alloc_pt(l1tab, vl1tab);

            if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
            {
                alloc_pt(l2tab, vl2tab);
                if ( !((unsigned long)vl3e & (PAGE_SIZE-1)) )
                {
                    alloc_pt(l3tab, vl3tab);
                    vl3e = &vl3tab[l3_table_offset(dsi.v_start +
                                                   (count<<PAGE_SHIFT))];
                    *vl4e = l3tab | L4_PROT;
                    vl4e++;
                }
                vl2e = &vl2tab[l2_table_offset(dsi.v_start +
                                               (count<<PAGE_SHIFT))];
                *vl3e = l2tab | L3_PROT;
                vl3e++;
            }
            vl1e = &vl1tab[l1_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
            *vl2e = l1tab | L2_PROT;
            vl2e++;
        }

        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
        if ( (count >= ((vpt_start-dsi.v_start)>>PAGE_SHIFT)) &&
             (count <  ((vpt_end  -dsi.v_start)>>PAGE_SHIFT)) )
        {
            *vl1e &= ~_PAGE_RW;
        }
        vl1e++;
    }

    munmap(vl1tab, PAGE_SIZE);
    munmap(vl2tab, PAGE_SIZE);
    munmap(vl3tab, PAGE_SIZE);
    munmap(vl4tab, PAGE_SIZE);
#endif

    /* Write the phys->machine and machine->phys table entries. */
    physmap_pfn = (vphysmap_start - dsi.v_start) >> PAGE_SHIFT;
    physmap = physmap_e = xc_map_foreign_range(
        xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
        page_array[physmap_pfn++]);
    for ( count = 0; count < nr_pages; count++ )
    {
        if ( add_mmu_update(xc_handle, mmu,
                            (page_array[count] << PAGE_SHIFT) |
                            MMU_MACHPHYS_UPDATE, count) )
        {
            munmap(physmap, PAGE_SIZE);
            goto error_out;
        }
        *physmap_e++ = page_array[count];
        if ( ((unsigned long)physmap_e & (PAGE_SIZE-1)) == 0 )
        {
            munmap(physmap, PAGE_SIZE);
            physmap = physmap_e = xc_map_foreign_range(
                xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
                page_array[physmap_pfn++]);
        }
    }
    munmap(physmap, PAGE_SIZE);
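
    /*
     * The machine->phys updates queued above are not applied immediately;
     * they are flushed to the hypervisor by finish_mmu_updates() below.
     */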

#if defined(__i386__)
    /*
     * Pin down l2tab as the page-directory page: this causes the hypervisor
     * to enforce the correct protection for it.
     */
    if ( pin_table(xc_handle, MMUEXT_PIN_L2_TABLE, l2tab>>PAGE_SHIFT, dom) )
        goto error_out;
#endif

#if defined(__x86_64__)
    /*
     * Pin down l4tab as the page-directory page: this causes the hypervisor
     * to enforce the correct protection for it.
     */
    if ( pin_table(xc_handle, MMUEXT_PIN_L4_TABLE, l4tab>>PAGE_SHIFT, dom) )
        goto error_out;
#endif

    start_info = xc_map_foreign_range(
        xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
        page_array[(vstartinfo_start-dsi.v_start)>>PAGE_SHIFT]);
    memset(start_info, 0, sizeof(*start_info));
    start_info->nr_pages     = nr_pages;
    start_info->shared_info  = shared_info_frame << PAGE_SHIFT;
    start_info->flags        = flags;
    start_info->pt_base      = vpt_start;
    start_info->nr_pt_frames = nr_pt_pages;
    start_info->mfn_list     = vphysmap_start;
    start_info->domain_controller_evtchn = control_evtchn;
    start_info->store_page   = vstoreinfo_start;
    start_info->store_evtchn = store_evtchn;
    if ( initrd_len != 0 )
    {
        start_info->mod_start = vinitrd_start;
        start_info->mod_len   = initrd_len;
    }
    strncpy((char *)start_info->cmd_line, cmdline, MAX_GUEST_CMDLINE);
    start_info->cmd_line[MAX_GUEST_CMDLINE-1] = '\0';
    munmap(start_info, PAGE_SIZE);

    /* Tell our caller where the domain's store page was placed. */
    *store_mfn = page_array[(vstoreinfo_start-dsi.v_start)>>PAGE_SHIFT];

    /* shared_info page starts its life empty. */
    shared_info = xc_map_foreign_range(
        xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, shared_info_frame);
    memset(shared_info, 0, sizeof(shared_info_t));
    /* Mask all upcalls... */
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        shared_info->vcpu_data[i].evtchn_upcall_mask = 1;

    shared_info->n_vcpu = vcpus;
    printf(" VCPUS:         %d\n", shared_info->n_vcpu);

    munmap(shared_info, PAGE_SIZE);

    /* Send the page update requests down to the hypervisor. */
    if ( finish_mmu_updates(xc_handle, mmu) )
        goto error_out;

    free(mmu);
    free(page_array);

    *pvsi = vstartinfo_start;
    *pvss = vstack_start;
    *pvke = dsi.v_kernentry;

    return 0;

 error_out:
    if ( mmu != NULL )
        free(mmu);
    if ( page_array != NULL )
        free(page_array);
    return -1;
}

int xc_linux_build(int xc_handle,
                   u32 domid,
                   const char *image_name,
                   const char *ramdisk_name,
                   const char *cmdline,
                   unsigned int control_evtchn,
                   unsigned long flags,
                   unsigned int vcpus,
                   unsigned int store_evtchn,
                   unsigned long *store_mfn)
{
    dom0_op_t launch_op, op;
    int initrd_fd = -1;
    gzFile initrd_gfd = NULL;
    int rc, i;
    vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
    long nr_pages;
    char *image = NULL;
    unsigned long image_size, initrd_size=0;
    unsigned long vstartinfo_start, vkern_entry, vstack_start;

    /* nr_pages is signed so a negative return can actually be detected. */
    if ( (nr_pages = xc_get_tot_pages(xc_handle, domid)) < 0 )
    {
        PERROR("Could not find total pages for domain");
        goto error_out;
    }

    if ( (image = xc_read_kernel_image(image_name, &image_size)) == NULL )
        goto error_out;

    if ( (ramdisk_name != NULL) && (strlen(ramdisk_name) != 0) )
    {
        if ( (initrd_fd = open(ramdisk_name, O_RDONLY)) < 0 )
        {
            PERROR("Could not open the initial ramdisk image");
            goto error_out;
        }

        initrd_size = xc_get_filesz(initrd_fd);

        if ( (initrd_gfd = gzdopen(initrd_fd, "rb")) == NULL )
        {
            PERROR("Could not allocate decompression state for initrd");
            goto error_out;
        }
    }

    if ( mlock(&st_ctxt, sizeof(st_ctxt)) )
    {
        PERROR("xc_linux_build: ctxt mlock failed");
        goto error_out; /* Clean up image/initrd state rather than leak it. */
    }

    op.cmd = DOM0_GETDOMAININFO;
    op.u.getdomaininfo.domain = (domid_t)domid;
    if ( (do_dom0_op(xc_handle, &op) < 0) ||
         ((u16)op.u.getdomaininfo.domain != domid) )
    {
        PERROR("Could not get info on domain");
        goto error_out;
    }

    if ( xc_domain_get_vcpu_context(xc_handle, domid, 0, ctxt) )
    {
        PERROR("Could not get vcpu context");
        goto error_out;
    }

    if ( !(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED) ||
         (ctxt->ctrlreg[3] != 0) )
    {
        ERROR("Domain is already constructed");
        goto error_out;
    }

    if ( setup_guest(xc_handle, domid, image, image_size,
                     initrd_gfd, initrd_size, nr_pages,
                     &vstartinfo_start, &vkern_entry,
                     &vstack_start, ctxt, cmdline,
                     op.u.getdomaininfo.shared_info_frame,
                     control_evtchn, flags, vcpus,
                     store_evtchn, store_mfn) < 0 )
    {
        ERROR("Error constructing guest OS");
        goto error_out;
    }

    /* gzclose() closes the underlying fd, so don't close it separately. */
    if ( initrd_gfd )
        gzclose(initrd_gfd);
    else if ( initrd_fd >= 0 )
        close(initrd_fd);
    if ( image != NULL )
        free(image);

    /*
     * Initial register values:
     *  DS,ES,FS,GS = FLAT_KERNEL_DS
     *       CS:EIP = FLAT_KERNEL_CS:start_pc
     *       SS:ESP = FLAT_KERNEL_DS:start_stack
     *          ESI = start_info
     *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
     *       EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
     */
    ctxt->user_regs.ds = FLAT_KERNEL_DS;
    ctxt->user_regs.es = FLAT_KERNEL_DS;
    ctxt->user_regs.fs = FLAT_KERNEL_DS;
    ctxt->user_regs.gs = FLAT_KERNEL_DS;
    ctxt->user_regs.ss = FLAT_KERNEL_SS;
    ctxt->user_regs.cs = FLAT_KERNEL_CS;
    ctxt->user_regs.eip = vkern_entry;
    ctxt->user_regs.esp = vstack_start + PAGE_SIZE;
    ctxt->user_regs.esi = vstartinfo_start;
    ctxt->user_regs.eflags = (1 << 9) | 2; /* IF set, plus reserved bit 1. */

    /* FPU is set up to default initial state. */
    memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

    /* Virtual IDT is empty at start-of-day. */
    for ( i = 0; i < 256; i++ )
    {
        ctxt->trap_ctxt[i].vector = i;
        ctxt->trap_ctxt[i].cs     = FLAT_KERNEL_CS;
    }
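
    /* The guest installs its real trap handlers later, via the
     * HYPERVISOR_set_trap_table hypercall. */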

    /* No LDT. */
    ctxt->ldt_ents = 0;

    /* Use the default Xen-provided GDT. */
    ctxt->gdt_ents = 0;

    /* Ring 1 stack is the initial stack. */
    ctxt->kernel_ss = FLAT_KERNEL_SS;
    ctxt->kernel_sp = vstack_start + PAGE_SIZE;

    /* No debugging. */
    memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));

    /* No callback handlers. */
#if defined(__i386__)
    ctxt->event_callback_cs     = FLAT_KERNEL_CS;
    ctxt->event_callback_eip    = 0;
    ctxt->failsafe_callback_cs  = FLAT_KERNEL_CS;
    ctxt->failsafe_callback_eip = 0;
#elif defined(__x86_64__)
    ctxt->event_callback_eip    = 0;
    ctxt->failsafe_callback_eip = 0;
    ctxt->syscall_callback_eip  = 0;
#endif
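
    /* Likewise, the event and failsafe callbacks are registered by the
     * guest at runtime via the HYPERVISOR_set_callbacks hypercall. */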

    memset(&launch_op, 0, sizeof(launch_op));

    launch_op.u.setdomaininfo.domain = (domid_t)domid;
    launch_op.u.setdomaininfo.vcpu   = 0;
    launch_op.u.setdomaininfo.ctxt   = ctxt;

    launch_op.cmd = DOM0_SETDOMAININFO;
    rc = do_dom0_op(xc_handle, &launch_op);

    return rc;

 error_out:
    if ( initrd_gfd != NULL )
        gzclose(initrd_gfd);
    else if ( initrd_fd >= 0 )
        close(initrd_fd);
    if ( image != NULL )
        free(image);

    return -1;
}