direct-io.hg: tools/libxc/xc_hvm_build.c @ 11225:d3a9bcf61c33

[hvm] Export gpfn of store page to guest - not mfn.

Signed-off-by: Christian Limpach <Christian.Limpach@xensource.com>

author   Christian Limpach <Christian.Limpach@xensource.com>
date     Tue Aug 22 15:13:07 2006 +0100 (2006-08-22)
parents  078bfd250677
children 0ea9a824c16c
/******************************************************************************
 * xc_hvm_build.c
 */

#define ELFSIZE 32
#include <stddef.h>
#include <inttypes.h>
#include "xg_private.h"
#include "xc_private.h"
#include "xc_elf.h"
#include <stdlib.h>
#include <unistd.h>
#include <zlib.h>
#include <xen/hvm/hvm_info_table.h>
#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>
#include <xen/hvm/e820.h>

#define HVM_LOADER_ENTR_ADDR 0x00100000
static int
parseelfimage(
    char *elfbase, unsigned long elfsize, struct domain_setup_info *dsi);
static int
loadelfimage(
    char *elfbase, int xch, uint32_t dom, unsigned long *parray,
    struct domain_setup_info *dsi);

static void xc_set_hvm_param(int handle,
                             domid_t dom, int param, unsigned long value)
{
    DECLARE_HYPERCALL;
    xen_hvm_param_t arg;
    int rc;
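    /*
     * Note: the hypervisor accesses the argument struct through its
     * virtual address, so the buffer is pinned with mlock() for the
     * duration of the hypercall and unpinned again afterwards.
     */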
    hypercall.op     = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_param;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = dom;
    arg.index = param;
    arg.value = value;

    if ( mlock(&arg, sizeof(arg)) != 0 )
    {
        PERROR("Could not lock memory for set parameter");
        return;
    }

    rc = do_xen_hypercall(handle, &hypercall);

    safe_munlock(&arg, sizeof(arg));

    if ( rc < 0 )
        PERROR("set HVM parameter failed (%d)", rc);
}
static void build_e820map(void *e820_page, unsigned long long mem_size)
{
    struct e820entry *e820entry =
        (struct e820entry *)(((unsigned char *)e820_page) + E820_MAP_OFFSET);
    unsigned long long extra_mem_size = 0;
    unsigned char nr_map = 0;

    /*
     * The physical address space from HVM_BELOW_4G_RAM_END up to 4GB is
     * reserved for PCI device MMIO, so if the guest has more than
     * HVM_BELOW_4G_RAM_END of RAM, the excess is relocated above 4GB.
     */
    if ( mem_size > HVM_BELOW_4G_RAM_END ) {
        extra_mem_size = mem_size - HVM_BELOW_4G_RAM_END;
        mem_size = HVM_BELOW_4G_RAM_END;
    }
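    /*
     * Worked example (values assumed for illustration): with
     * HVM_BELOW_4G_RAM_END == 0xF0000000 (3840MB), a 4096MB guest
     * (mem_size == 0x100000000) is clamped to mem_size == 0xF0000000,
     * and the remaining extra_mem_size == 0x10000000 (256MB) is placed
     * above 4GB by the final E820_RAM entry below.
     */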
    e820entry[nr_map].addr = 0x0;
    e820entry[nr_map].size = 0x9F000;
    e820entry[nr_map].type = E820_RAM;
    nr_map++;

    e820entry[nr_map].addr = 0x9F000;
    e820entry[nr_map].size = 0x1000;
    e820entry[nr_map].type = E820_RESERVED;
    nr_map++;

    e820entry[nr_map].addr = 0xA0000;
    e820entry[nr_map].size = 0x20000;
    e820entry[nr_map].type = E820_IO;
    nr_map++;

    e820entry[nr_map].addr = 0xF0000;
    e820entry[nr_map].size = 0x10000;
    e820entry[nr_map].type = E820_RESERVED;
    nr_map++;

/* ACPI data: 10 pages. */
#define ACPI_DATA_PAGES     10
/* ACPI NVS: 3 pages. */
#define ACPI_NVS_PAGES      3
/* Buffered IO page. */
#define BUFFERED_IO_PAGES   1
/* Xenstore page. */
#define XENSTORE_PAGES      1
/* Shared IO page. */
#define SHARED_IO_PAGES     1
/* In total, 16 static pages are reserved in the E820 table. */
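    /*
     * The entries built below place these 16 pages at the very top of
     * RAM (below the 4GB boundary), in ascending address order:
     *   mem_size - 16 pages: ACPI data   (10 pages)
     *   mem_size -  6 pages: ACPI NVS    ( 3 pages)
     *   mem_size -  3 pages: buffered IO ( 1 page)
     *   mem_size -  2 pages: xenstore    ( 1 page)
     *   mem_size -  1 page : shared IO   ( 1 page)
     */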
    /* Most of the RAM goes here. */
    e820entry[nr_map].addr = 0x100000;
    e820entry[nr_map].size = mem_size - 0x100000 - PAGE_SIZE *
        (ACPI_DATA_PAGES +
         ACPI_NVS_PAGES +
         BUFFERED_IO_PAGES +
         XENSTORE_PAGES +
         SHARED_IO_PAGES);
    e820entry[nr_map].type = E820_RAM;
    nr_map++;

    /* Statically allocated special pages. */

    /* For ACPI data. */
    e820entry[nr_map].addr = mem_size - PAGE_SIZE *
        (ACPI_DATA_PAGES +
         ACPI_NVS_PAGES +
         BUFFERED_IO_PAGES +
         XENSTORE_PAGES +
         SHARED_IO_PAGES);
    e820entry[nr_map].size = PAGE_SIZE * ACPI_DATA_PAGES;
    e820entry[nr_map].type = E820_ACPI;
    nr_map++;

    /* For ACPI NVS. */
    e820entry[nr_map].addr = mem_size - PAGE_SIZE *
        (ACPI_NVS_PAGES +
         BUFFERED_IO_PAGES +
         XENSTORE_PAGES +
         SHARED_IO_PAGES);
    e820entry[nr_map].size = PAGE_SIZE * ACPI_NVS_PAGES;
    e820entry[nr_map].type = E820_NVS;
    nr_map++;

    /* For buffered IO requests. */
    e820entry[nr_map].addr = mem_size - PAGE_SIZE *
        (BUFFERED_IO_PAGES +
         XENSTORE_PAGES +
         SHARED_IO_PAGES);
    e820entry[nr_map].size = PAGE_SIZE * BUFFERED_IO_PAGES;
    e820entry[nr_map].type = E820_BUFFERED_IO;
    nr_map++;

    /* For xenstore. */
    e820entry[nr_map].addr = mem_size - PAGE_SIZE *
        (XENSTORE_PAGES +
         SHARED_IO_PAGES);
    e820entry[nr_map].size = PAGE_SIZE * XENSTORE_PAGES;
    e820entry[nr_map].type = E820_XENSTORE;
    nr_map++;

    /* Shared ioreq_t page. */
    e820entry[nr_map].addr = mem_size - PAGE_SIZE * SHARED_IO_PAGES;
    e820entry[nr_map].size = PAGE_SIZE * SHARED_IO_PAGES;
    e820entry[nr_map].type = E820_SHARED_PAGE;
    nr_map++;

    e820entry[nr_map].addr = 0xFEC00000;
    e820entry[nr_map].size = 0x1400000;
    e820entry[nr_map].type = E820_IO;
    nr_map++;

    if ( extra_mem_size ) {
        e820entry[nr_map].addr = (1ULL << 32);
        e820entry[nr_map].size = extra_mem_size;
        e820entry[nr_map].type = E820_RAM;
        nr_map++;
    }

    *(((unsigned char *)e820_page) + E820_MAP_NR_OFFSET) = nr_map;
}
static void set_hvm_info_checksum(struct hvm_info_table *t)
{
    uint8_t *ptr = (uint8_t *)t, sum = 0;
    unsigned int i;

    t->checksum = 0;

    for ( i = 0; i < t->length; i++ )
        sum += *ptr++;

    t->checksum = -sum;
}
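/*
 * With the checksum byte set to -sum, the byte sum over the whole table
 * is zero mod 256. A consumer could validate the table with a sketch
 * like the following (names hypothetical):
 *
 *   static int hvm_info_valid(const struct hvm_info_table *t)
 *   {
 *       const uint8_t *p = (const uint8_t *)t;
 *       uint8_t sum = 0;
 *       unsigned int i;
 *       for ( i = 0; i < t->length; i++ )
 *           sum += p[i];
 *       return sum == 0;
 *   }
 */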
/*
 * Use E820 reserved memory at 0x9F800 to pass HVM info to hvmloader;
 * hvmloader will use this info to set up the BIOS accordingly.
 */
static int set_hvm_info(int xc_handle, uint32_t dom,
                        xen_pfn_t *pfn_list, unsigned int vcpus,
                        unsigned int acpi)
{
    char *va_map;
    struct hvm_info_table *va_hvm;

    va_map = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                  PROT_READ | PROT_WRITE,
                                  pfn_list[HVM_INFO_PFN]);
    if ( va_map == NULL )
        return -1;

    va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET);
    memset(va_hvm, 0, sizeof(*va_hvm));

    strncpy(va_hvm->signature, "HVM INFO", 8);
    va_hvm->length = sizeof(struct hvm_info_table);
    va_hvm->acpi_enabled = acpi;
    va_hvm->nr_vcpus = vcpus;

    set_hvm_info_checksum(va_hvm);

    munmap(va_map, PAGE_SIZE);

    return 0;
}
static int setup_guest(int xc_handle,
                       uint32_t dom, int memsize,
                       char *image, unsigned long image_size,
                       unsigned long nr_pages,
                       vcpu_guest_context_t *ctxt,
                       unsigned long shared_info_frame,
                       unsigned int vcpus,
                       unsigned int pae,
                       unsigned int acpi,
                       unsigned int apic,
                       unsigned int store_evtchn,
                       unsigned long *store_mfn)
{
    xen_pfn_t *page_array = NULL;
    unsigned long count, i;
    unsigned long long ptr;
    xc_mmu_t *mmu = NULL;

    shared_info_t *shared_info;
    void *e820_page;

    struct domain_setup_info dsi;
    uint64_t v_end;

    unsigned long shared_page_nr;

    memset(&dsi, 0, sizeof(struct domain_setup_info));

    if ( (parseelfimage(image, image_size, &dsi)) != 0 )
        goto error_out;

    if ( (dsi.v_kernstart & (PAGE_SIZE - 1)) != 0 )
    {
        PERROR("Guest OS must load to a page boundary.\n");
        goto error_out;
    }

    /* memsize is in megabytes */
    v_end = (unsigned long long)memsize << 20;

    IPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n"
            "  Loaded HVM loader: %016"PRIx64"->%016"PRIx64"\n"
            "  TOTAL:             %016"PRIx64"->%016"PRIx64"\n",
            dsi.v_kernstart, dsi.v_kernend,
            dsi.v_start, v_end);
    IPRINTF("  ENTRY ADDRESS:     %016"PRIx64"\n", dsi.v_kernentry);

    if ( (v_end - dsi.v_start) > ((unsigned long long)nr_pages << PAGE_SHIFT) )
    {
        PERROR("Initial guest OS requires too much space: "
               "(%lluMB is greater than %lluMB limit)\n",
               (unsigned long long)(v_end - dsi.v_start) >> 20,
               ((unsigned long long)nr_pages << PAGE_SHIFT) >> 20);
        goto error_out;
    }

    if ( (page_array = malloc(nr_pages * sizeof(xen_pfn_t))) == NULL )
    {
        PERROR("Could not allocate memory.\n");
        goto error_out;
    }

    if ( xc_get_pfn_list(xc_handle, dom, page_array, nr_pages) != nr_pages )
    {
        PERROR("Could not get the page frame list.\n");
        goto error_out;
    }

    loadelfimage(image, xc_handle, dom, page_array, &dsi);

    if ( (mmu = xc_init_mmu_updates(xc_handle, dom)) == NULL )
        goto error_out;

    /* Write the machine->phys table entries. */
    for ( count = 0; count < nr_pages; count++ )
    {
        unsigned long gpfn_count_skip;

        ptr = (unsigned long long)page_array[count] << PAGE_SHIFT;

        gpfn_count_skip = 0;
        /*
         * The physical address space from HVM_BELOW_4G_RAM_END up to 4GB
         * is reserved for PCI device MMIO, so guest pages beyond
         * HVM_BELOW_4G_RAM_END are given gpfns above the 4GB boundary.
         */
        if ( count >= (HVM_BELOW_4G_RAM_END >> PAGE_SHIFT) )
            gpfn_count_skip = HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;

        if ( xc_add_mmu_update(xc_handle, mmu,
                               ptr | MMU_MACHPHYS_UPDATE,
                               count + gpfn_count_skip) )
            goto error_out;
    }
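    /*
     * Example of the resulting machine->phys mapping (values assumed for
     * illustration): with HVM_BELOW_4G_RAM_END == 0xF0000000 and
     * HVM_BELOW_4G_MMIO_LENGTH == 0x10000000, page count 0xEFFFF maps to
     * gpfn 0xEFFFF (1:1), while page count 0xF0000 maps to gpfn 0x100000,
     * i.e. the first pfn above 4GB.
     */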
    if ( set_hvm_info(xc_handle, dom, page_array, vcpus, acpi) )
    {
        ERROR("Couldn't set hvm info for HVM guest.\n");
        goto error_out;
    }

    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_PAE_ENABLED, pae);
    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_APIC_ENABLED, apic);

    if ( (e820_page = xc_map_foreign_range(
              xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
              page_array[E820_MAP_PAGE >> PAGE_SHIFT])) == NULL )
        goto error_out;
    memset(e820_page, 0, PAGE_SIZE);
    build_e820map(e820_page, v_end);
    munmap(e820_page, PAGE_SIZE);

    /* shared_info page starts its life empty. */
    if ( (shared_info = xc_map_foreign_range(
              xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
              shared_info_frame)) == NULL )
        goto error_out;
    memset(shared_info, 0, PAGE_SIZE);
    /* Mask all upcalls... */
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
    munmap(shared_info, PAGE_SIZE);

    if ( v_end > HVM_BELOW_4G_RAM_END )
        shared_page_nr = (HVM_BELOW_4G_RAM_END >> PAGE_SHIFT) - 1;
    else
        shared_page_nr = (v_end >> PAGE_SHIFT) - 1;
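    /*
     * Per the e820 layout built above, shared_page_nr is the topmost RAM
     * page below 4GB: the shared ioreq page. shared_page_nr - 1 is the
     * xenstore page and shared_page_nr - 2 the buffered-io page.
     */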
    *store_mfn = page_array[shared_page_nr - 1];

    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, shared_page_nr - 1);
    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);

    /* Paranoia: clean the shared IO requests page. */
    if ( xc_clear_domain_page(xc_handle, dom, page_array[shared_page_nr]) )
        goto error_out;

    /* Clean the buffered IO requests page. */
    if ( xc_clear_domain_page(xc_handle, dom, page_array[shared_page_nr - 2]) )
        goto error_out;

    /* Clean the xenstore page. */
    if ( xc_clear_domain_page(xc_handle, dom, *store_mfn) )
        goto error_out;

    /* Send the page update requests down to the hypervisor. */
    if ( xc_finish_mmu_updates(xc_handle, mmu) )
        goto error_out;

    free(mmu);
    free(page_array);

    /*
     * Initial register values:
     */
    ctxt->user_regs.eip = dsi.v_kernentry;

    return 0;

 error_out:
    free(mmu);
    free(page_array);
    return -1;
}
static int xc_hvm_build_internal(int xc_handle,
                                 uint32_t domid,
                                 int memsize,
                                 char *image,
                                 unsigned long image_size,
                                 unsigned int vcpus,
                                 unsigned int pae,
                                 unsigned int acpi,
                                 unsigned int apic,
                                 unsigned int store_evtchn,
                                 unsigned long *store_mfn)
{
    dom0_op_t launch_op, op;
    int rc, i;
    vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
    long nr_pages;
    xen_capabilities_info_t xen_caps;

    if ( (image == NULL) || (image_size == 0) )
    {
        ERROR("Image required");
        goto error_out;
    }

    if ( (rc = xc_version(xc_handle, XENVER_capabilities, &xen_caps)) != 0 )
    {
        PERROR("Failed to get xen version info");
        goto error_out;
    }

    if ( !strstr(xen_caps, "hvm") )
    {
        PERROR("CPU doesn't support HVM extensions or "
               "the extensions are not enabled");
        goto error_out;
    }

    /* nr_pages must be signed: xc_get_tot_pages() returns -1 on error. */
    if ( (nr_pages = xc_get_tot_pages(xc_handle, domid)) < 0 )
    {
        PERROR("Could not find total pages for domain");
        goto error_out;
    }

    if ( mlock(&st_ctxt, sizeof(st_ctxt)) )
    {
        PERROR("%s: ctxt mlock failed", __func__);
        return 1;
    }
    op.cmd = DOM0_GETDOMAININFO;
    op.u.getdomaininfo.domain = (domid_t)domid;
    if ( (xc_dom0_op(xc_handle, &op) < 0) ||
         ((uint16_t)op.u.getdomaininfo.domain != domid) )
    {
        PERROR("Could not get info on domain");
        goto error_out;
    }

    /* HVM domains must be put into shadow2 mode at the start of day. */
    if ( xc_shadow_control(xc_handle, domid, DOM0_SHADOW2_CONTROL_OP_ENABLE,
                           NULL, 0, NULL,
                           DOM0_SHADOW2_CONTROL_FLAG_ENABLE |
                           DOM0_SHADOW2_CONTROL_FLAG_REFCOUNT |
                           DOM0_SHADOW2_CONTROL_FLAG_TRANSLATE |
                           DOM0_SHADOW2_CONTROL_FLAG_EXTERNAL,
                           NULL) )
    {
        PERROR("Could not enable shadow paging for domain.\n");
        goto error_out;
    }
    memset(ctxt, 0, sizeof(*ctxt));

    ctxt->flags = VGCF_HVM_GUEST;
    if ( setup_guest(xc_handle, domid, memsize, image, image_size, nr_pages,
                     ctxt, op.u.getdomaininfo.shared_info_frame,
                     vcpus, pae, acpi, apic, store_evtchn, store_mfn) < 0 )
    {
        ERROR("Error constructing guest OS");
        goto error_out;
    }

    /* FPU is set up to default initial state. */
    memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

    /* Virtual IDT is empty at start-of-day. */
    for ( i = 0; i < 256; i++ )
    {
        ctxt->trap_ctxt[i].vector = i;
        ctxt->trap_ctxt[i].cs     = FLAT_KERNEL_CS;
    }

    /* No LDT. */
    ctxt->ldt_ents = 0;

    /* Use the default Xen-provided GDT. */
    ctxt->gdt_ents = 0;

    /* No debugging. */
    memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));

    /* No callback handlers. */
#if defined(__i386__)
    ctxt->event_callback_cs     = FLAT_KERNEL_CS;
    ctxt->event_callback_eip    = 0;
    ctxt->failsafe_callback_cs  = FLAT_KERNEL_CS;
    ctxt->failsafe_callback_eip = 0;
#elif defined(__x86_64__)
    ctxt->event_callback_eip    = 0;
    ctxt->failsafe_callback_eip = 0;
    ctxt->syscall_callback_eip  = 0;
#endif

    memset(&launch_op, 0, sizeof(launch_op));

    launch_op.u.setvcpucontext.domain = (domid_t)domid;
    launch_op.u.setvcpucontext.vcpu   = 0;
    set_xen_guest_handle(launch_op.u.setvcpucontext.ctxt, ctxt);

    launch_op.cmd = DOM0_SETVCPUCONTEXT;
    rc = xc_dom0_op(xc_handle, &launch_op);

    return rc;

 error_out:
    return -1;
}
static inline int is_loadable_phdr(Elf32_Phdr *phdr)
{
    return ((phdr->p_type == PT_LOAD) &&
            ((phdr->p_flags & (PF_W|PF_X)) != 0));
}
static int parseelfimage(char *elfbase,
                         unsigned long elfsize,
                         struct domain_setup_info *dsi)
{
    Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfbase;
    Elf32_Phdr *phdr;
    Elf32_Shdr *shdr;
    unsigned long kernstart = ~0UL, kernend = 0UL;
    char *shstrtab;
    int h;

    if ( !IS_ELF(*ehdr) )
    {
        ERROR("Kernel image does not have an ELF header.");
        return -EINVAL;
    }

    if ( (ehdr->e_phoff + (ehdr->e_phnum * ehdr->e_phentsize)) > elfsize )
    {
        ERROR("ELF program headers extend beyond end of image.");
        return -EINVAL;
    }

    if ( (ehdr->e_shoff + (ehdr->e_shnum * ehdr->e_shentsize)) > elfsize )
    {
        ERROR("ELF section headers extend beyond end of image.");
        return -EINVAL;
    }

    /* Find the section-header strings table. */
    if ( ehdr->e_shstrndx == SHN_UNDEF )
    {
        ERROR("ELF image has no section-header strings table (shstrtab).");
        return -EINVAL;
    }
    shdr = (Elf32_Shdr *)(elfbase + ehdr->e_shoff +
                          (ehdr->e_shstrndx * ehdr->e_shentsize));
    shstrtab = elfbase + shdr->sh_offset;

    for ( h = 0; h < ehdr->e_phnum; h++ )
    {
        phdr = (Elf32_Phdr *)(elfbase + ehdr->e_phoff + (h * ehdr->e_phentsize));
        if ( !is_loadable_phdr(phdr) )
            continue;
        if ( phdr->p_paddr < kernstart )
            kernstart = phdr->p_paddr;
        if ( (phdr->p_paddr + phdr->p_memsz) > kernend )
            kernend = phdr->p_paddr + phdr->p_memsz;
    }

    if ( (kernstart > kernend) ||
         (ehdr->e_entry < kernstart) ||
         (ehdr->e_entry > kernend) )
    {
        ERROR("Malformed ELF image.");
        return -EINVAL;
    }

    dsi->v_start = 0x00000000;

    dsi->v_kernstart = kernstart;
    dsi->v_kernend   = kernend;
    dsi->v_kernentry = HVM_LOADER_ENTR_ADDR;

    dsi->v_end       = dsi->v_kernend;

    return 0;
}
static int
loadelfimage(
    char *elfbase, int xch, uint32_t dom, unsigned long *parray,
    struct domain_setup_info *dsi)
{
    Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfbase;
    Elf32_Phdr *phdr;
    int h;

    char *va;
    unsigned long pa, done, chunksz;
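    /*
     * Two passes per loadable program header: first copy the p_filesz
     * bytes present in the image, then zero the remainder up to p_memsz
     * (the BSS), mapping one guest page at a time.
     */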
    for ( h = 0; h < ehdr->e_phnum; h++ )
    {
        phdr = (Elf32_Phdr *)(elfbase + ehdr->e_phoff + (h * ehdr->e_phentsize));
        if ( !is_loadable_phdr(phdr) )
            continue;

        for ( done = 0; done < phdr->p_filesz; done += chunksz )
        {
            pa = (phdr->p_paddr + done) - dsi->v_start;
            if ( (va = xc_map_foreign_range(
                      xch, dom, PAGE_SIZE, PROT_WRITE,
                      parray[pa >> PAGE_SHIFT])) == 0 )
                return -1;
            chunksz = phdr->p_filesz - done;
            if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
                chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
            memcpy(va + (pa & (PAGE_SIZE-1)),
                   elfbase + phdr->p_offset + done, chunksz);
            munmap(va, PAGE_SIZE);
        }

        for ( ; done < phdr->p_memsz; done += chunksz )
        {
            pa = (phdr->p_paddr + done) - dsi->v_start;
            if ( (va = xc_map_foreign_range(
                      xch, dom, PAGE_SIZE, PROT_WRITE,
                      parray[pa >> PAGE_SHIFT])) == 0 )
                return -1;
            chunksz = phdr->p_memsz - done;
            if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
                chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
            memset(va + (pa & (PAGE_SIZE-1)), 0, chunksz);
            munmap(va, PAGE_SIZE);
        }
    }

    return 0;
}
/* xc_hvm_build
 *
 * Create a domain for a virtualized Linux, using files/filenames.
 */
int xc_hvm_build(int xc_handle,
                 uint32_t domid,
                 int memsize,
                 const char *image_name,
                 unsigned int vcpus,
                 unsigned int pae,
                 unsigned int acpi,
                 unsigned int apic,
                 unsigned int store_evtchn,
                 unsigned long *store_mfn)
{
    char *image;
    int sts;
    unsigned long image_size;

    if ( (image_name == NULL) ||
         ((image = xc_read_image(image_name, &image_size)) == NULL) )
        return -1;

    sts = xc_hvm_build_internal(xc_handle, domid, memsize,
                                image, image_size,
                                vcpus, pae, acpi, apic,
                                store_evtchn, store_mfn);

    free(image);

    return sts;
}
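/*
 * A typical invocation, sketched for illustration (the domid, event
 * channel, and loader path are hypothetical):
 *
 *   unsigned long store_mfn;
 *   int rc = xc_hvm_build(xc_handle, domid, 256,
 *                         "/usr/lib/xen/boot/hvmloader",
 *                         1, 1, 1, 1,   /+ vcpus, pae, acpi, apic +/
 *                         store_evtchn, &store_mfn);
 */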
/* xc_hvm_build_mem
 *
 * Create a domain for a virtualized Linux, using buffers.
 */
int xc_hvm_build_mem(int xc_handle,
                     uint32_t domid,
                     int memsize,
                     const char *image_buffer,
                     unsigned long image_size,
                     unsigned int vcpus,
                     unsigned int pae,
                     unsigned int acpi,
                     unsigned int apic,
                     unsigned int store_evtchn,
                     unsigned long *store_mfn)
{
    int sts;
    unsigned long img_len;
    char *img;

    /* Validate that there is a kernel buffer. */
    if ( (image_buffer == NULL) || (image_size == 0) )
    {
        ERROR("kernel image buffer not present");
        return -1;
    }

    img = xc_inflate_buffer(image_buffer, image_size, &img_len);
    if ( img == NULL )
    {
        ERROR("unable to inflate ram disk buffer");
        return -1;
    }

    sts = xc_hvm_build_internal(xc_handle, domid, memsize,
                                img, img_len,
                                vcpus, pae, acpi, apic,
                                store_evtchn, store_mfn);

    /* xc_inflate_buffer may return the original buffer pointer (for
       already-inflated buffers), so exercise some care in freeing. */
    if ( (img != NULL) && (img != image_buffer) )
        free(img);

    return sts;
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */