ia64/xen-unstable

view tools/libxc/xc_linux_build.c @ 1921:24ecc060e9d7

bitkeeper revision 1.1108.21.1 (41062740xHG36OEbpVAmVX5N9WCaNw)

make vmlinuz really stripped
author cl349@freefall.cl.cam.ac.uk
date Tue Jul 27 09:58:24 2004 +0000 (2004-07-27)
parents bdafa1768678
children a989641f2755 bd1640d9d7d4 0a4b76b6b5a0
line source
1 /******************************************************************************
2 * xc_linux_build.c
3 */
5 #include "xc_private.h"
6 #define ELFSIZE 32
7 #include "xc_elf.h"
8 #include <stdlib.h>
9 #include <zlib.h>
11 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
12 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
14 #define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
15 #define round_pgdown(_p) ((_p)&PAGE_MASK)
17 static int parseelfimage(char *elfbase,
18 unsigned long elfsize,
19 unsigned long *pvirtstart,
20 unsigned long *pkernstart,
21 unsigned long *pkernend,
22 unsigned long *pkernentry);
23 static int loadelfimage(char *elfbase, void *pmh, unsigned long *parray,
24 unsigned long vstart);
26 static long get_tot_pages(int xc_handle, u32 domid)
27 {
28 dom0_op_t op;
29 op.cmd = DOM0_GETDOMAININFO;
30 op.u.getdomaininfo.domain = (domid_t)domid;
31 op.u.getdomaininfo.ctxt = NULL;
32 return (do_dom0_op(xc_handle, &op) < 0) ?
33 -1 : op.u.getdomaininfo.tot_pages;
34 }
36 static int get_pfn_list(int xc_handle,
37 u32 domid,
38 unsigned long *pfn_buf,
39 unsigned long max_pfns)
40 {
41 dom0_op_t op;
42 int ret;
43 op.cmd = DOM0_GETMEMLIST;
44 op.u.getmemlist.domain = (domid_t)domid;
45 op.u.getmemlist.max_pfns = max_pfns;
46 op.u.getmemlist.buffer = pfn_buf;
48 if ( mlock(pfn_buf, max_pfns * sizeof(unsigned long)) != 0 )
49 return -1;
51 ret = do_dom0_op(xc_handle, &op);
53 (void)munlock(pfn_buf, max_pfns * sizeof(unsigned long));
55 return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
56 }
58 static int copy_to_domain_page(void *pm_handle,
59 unsigned long dst_pfn,
60 void *src_page)
61 {
62 void *vaddr = map_pfn_writeable(pm_handle, dst_pfn);
63 if ( vaddr == NULL )
64 return -1;
65 memcpy(vaddr, src_page, PAGE_SIZE);
66 unmap_pfn(pm_handle, vaddr);
67 return 0;
68 }
/*
 * Construct the initial memory image of a new Linux guest:
 *  - lay out the bootstrap virtual address space (kernel, optional
 *    initrd, phys->machine map, bootstrap page tables, start_info page,
 *    boot stack),
 *  - load the ELF kernel and gzipped initrd into the domain's frames,
 *  - build and pin the initial page tables,
 *  - write the machphys/physmach table entries,
 *  - fill in the start_info page and zero the shared_info page.
 *
 * Outputs on success: *pvsi = virtual address of the start_info page,
 * *pvke = kernel entry point, ctxt->pt_base = machine address of the
 * page directory.  Returns 0 on success, -1 on failure (mmu state,
 * pfn mapper and page_array are released on the error path).
 */
static int setup_guestos(int xc_handle,
                         u32 dom,
                         char *image, unsigned long image_size,
                         gzFile initrd_gfd, unsigned long initrd_len,
                         unsigned long nr_pages,
                         unsigned long *pvsi, unsigned long *pvke,
                         full_execution_context_t *ctxt,
                         const char *cmdline,
                         unsigned long shared_info_frame,
                         unsigned int control_evtchn,
                         unsigned long flags)
{
    l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
    l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
    unsigned long *page_array = NULL;
    unsigned long l2tab;
    unsigned long l1tab;
    unsigned long count, i;
    extended_start_info_t *start_info;
    shared_info_t *shared_info;
    mmu_t *mmu = NULL;
    void *pm_handle=NULL;
    int rc;

    unsigned long nr_pt_pages;
    unsigned long ppt_alloc;
    unsigned long *physmap, *physmap_e, physmap_pfn;

    /* Bootstrap virtual address space layout, computed below. */
    unsigned long v_start;
    unsigned long vkern_start;
    unsigned long vkern_entry;
    unsigned long vkern_end;
    unsigned long vinitrd_start;
    unsigned long vinitrd_end;
    unsigned long vphysmap_start;
    unsigned long vphysmap_end;
    unsigned long vstartinfo_start;
    unsigned long vstartinfo_end;
    unsigned long vstack_start;
    unsigned long vstack_end;
    unsigned long vpt_start;
    unsigned long vpt_end;
    unsigned long v_end;

    rc = parseelfimage(image, image_size, &v_start,
                       &vkern_start, &vkern_end, &vkern_entry);
    if ( rc != 0 )
        goto error_out;

    if ( (v_start & (PAGE_SIZE-1)) != 0 )
    {
        PERROR("Guest OS must load to a page boundary.\n");
        goto error_out;
    }

    /*
     * Why do we need this? The number of page-table frames depends on the
     * size of the bootstrap address space. But the size of the address space
     * depends on the number of page-table frames (since each one is mapped
     * read-only). We have a pair of simultaneous equations in two unknowns,
     * which we solve by exhaustive search.
     */
    for ( nr_pt_pages = 2; ; nr_pt_pages++ )
    {
        vinitrd_start = round_pgup(vkern_end);
        vinitrd_end = vinitrd_start + initrd_len;
        vphysmap_start = round_pgup(vinitrd_end);
        vphysmap_end = vphysmap_start + (nr_pages * sizeof(unsigned long));
        vpt_start = round_pgup(vphysmap_end);
        vpt_end = vpt_start + (nr_pt_pages * PAGE_SIZE);
        vstartinfo_start = vpt_end;
        vstartinfo_end = vstartinfo_start + PAGE_SIZE;
        vstack_start = vstartinfo_end;
        vstack_end = vstack_start + PAGE_SIZE;
        /* Round the total space up to a 4MB boundary. */
        v_end = (vstack_end + (1<<22)-1) & ~((1<<22)-1);
        if ( (v_end - vstack_end) < (512 << 10) )
            v_end += 1 << 22; /* Add extra 4MB to get >= 512kB padding. */
        /* Done once nr_pt_pages L1 tables (plus one L2) map v_start..v_end. */
        if ( (((v_end - v_start + ((1<<L2_PAGETABLE_SHIFT)-1)) >>
               L2_PAGETABLE_SHIFT) + 1) <= nr_pt_pages )
            break;
    }

    printf("VIRTUAL MEMORY ARRANGEMENT:\n"
           " Loaded kernel: %08lx->%08lx\n"
           " Init. ramdisk: %08lx->%08lx\n"
           " Phys-Mach map: %08lx->%08lx\n"
           " Page tables: %08lx->%08lx\n"
           " Start info: %08lx->%08lx\n"
           " Boot stack: %08lx->%08lx\n"
           " TOTAL: %08lx->%08lx\n",
           vkern_start, vkern_end,
           vinitrd_start, vinitrd_end,
           vphysmap_start, vphysmap_end,
           vpt_start, vpt_end,
           vstartinfo_start, vstartinfo_end,
           vstack_start, vstack_end,
           v_start, v_end);
    printf(" ENTRY ADDRESS: %08lx\n", vkern_entry);

    if ( (v_end - v_start) > (nr_pages * PAGE_SIZE) )
    {
        printf("Initial guest OS requires too much space\n"
               "(%luMB is greater than %luMB limit)\n",
               (v_end-v_start)>>20, (nr_pages<<PAGE_SHIFT)>>20);
        goto error_out;
    }

    if ( (pm_handle = init_pfn_mapper((domid_t)dom)) == NULL )
        goto error_out;

    if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
    {
        PERROR("Could not allocate memory");
        goto error_out;
    }

    if ( get_pfn_list(xc_handle, dom, page_array, nr_pages) != nr_pages )
    {
        PERROR("Could not get the page frame list");
        goto error_out;
    }

    loadelfimage(image, pm_handle, page_array, v_start);

    /* Load the initial ramdisk image. */
    if ( initrd_len != 0 )
    {
        for ( i = (vinitrd_start - v_start);
              i < (vinitrd_end - v_start); i += PAGE_SIZE )
        {
            char page[PAGE_SIZE];
            /* NOTE(review): only -1 is treated as failure here; a short
             * gzread() (truncated initrd) would go undetected. */
            if ( gzread(initrd_gfd, page, PAGE_SIZE) == -1 )
            {
                PERROR("Error reading initrd image, could not");
                goto error_out;
            }
            copy_to_domain_page(pm_handle,
                                page_array[i>>PAGE_SHIFT], page);
        }
    }

    if ( (mmu = init_mmu_updates(xc_handle, dom)) == NULL )
        goto error_out;

    /* First allocate page for page dir. */
    ppt_alloc = (vpt_start - v_start) >> PAGE_SHIFT;
    l2tab = page_array[ppt_alloc++] << PAGE_SHIFT;
    ctxt->pt_base = l2tab;

    /* Initialise the page tables. */
    if ( (vl2tab = map_pfn_writeable(pm_handle, l2tab >> PAGE_SHIFT)) == NULL )
        goto error_out;
    memset(vl2tab, 0, PAGE_SIZE);
    vl2e = &vl2tab[l2_table_offset(v_start)];
    for ( count = 0; count < ((v_end-v_start)>>PAGE_SHIFT); count++ )
    {
        /* vl1e on a page boundary (including the initial NULL) means the
         * current L1 table is full: allocate and map the next one. */
        if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
        {
            l1tab = page_array[ppt_alloc++] << PAGE_SHIFT;
            if ( vl1tab != NULL )
                unmap_pfn(pm_handle, vl1tab);
            if ( (vl1tab = map_pfn_writeable(pm_handle,
                                             l1tab >> PAGE_SHIFT)) == NULL )
                goto error_out;
            memset(vl1tab, 0, PAGE_SIZE);
            vl1e = &vl1tab[l1_table_offset(v_start + (count<<PAGE_SHIFT))];
            *vl2e++ = l1tab | L2_PROT;
        }

        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
        /* The page-table frames themselves must be mapped read-only. */
        if ( (count >= ((vpt_start-v_start)>>PAGE_SHIFT)) &&
             (count < ((vpt_end -v_start)>>PAGE_SHIFT)) )
            *vl1e &= ~_PAGE_RW;
        vl1e++;
    }
    unmap_pfn(pm_handle, vl1tab);
    unmap_pfn(pm_handle, vl2tab);

    /* Write the phys->machine and machine->phys table entries. */
    physmap_pfn = (vphysmap_start - v_start) >> PAGE_SHIFT;
    physmap = physmap_e =
        map_pfn_writeable(pm_handle, page_array[physmap_pfn++]);
    for ( count = 0; count < nr_pages; count++ )
    {
        /* machine->phys entry goes via the hypervisor... */
        if ( add_mmu_update(xc_handle, mmu,
                            (page_array[count] << PAGE_SHIFT) |
                            MMU_MACHPHYS_UPDATE, count) )
            goto error_out;
        /* ...phys->machine entry is written directly into the guest table. */
        *physmap_e++ = page_array[count];
        if ( ((unsigned long)physmap_e & (PAGE_SIZE-1)) == 0 )
        {
            unmap_pfn(pm_handle, physmap);
            physmap = physmap_e =
                map_pfn_writeable(pm_handle, page_array[physmap_pfn++]);
        }
    }
    unmap_pfn(pm_handle, physmap);

    /*
     * Pin down l2tab addr as page dir page - causes hypervisor to provide
     * correct protection for the page
     */
    if ( add_mmu_update(xc_handle, mmu,
                        l2tab | MMU_EXTENDED_COMMAND, MMUEXT_PIN_L2_TABLE) )
        goto error_out;

    /* Fill in the start_info page the guest reads at boot. */
    start_info = map_pfn_writeable(
        pm_handle, page_array[(vstartinfo_start-v_start)>>PAGE_SHIFT]);
    memset(start_info, 0, sizeof(*start_info));
    start_info->nr_pages = nr_pages;
    start_info->shared_info = shared_info_frame << PAGE_SHIFT;
    start_info->flags = flags;
    start_info->pt_base = vpt_start;
    start_info->nr_pt_frames = nr_pt_pages;
    start_info->mfn_list = vphysmap_start;
    start_info->domain_controller_evtchn = control_evtchn;
    if ( initrd_len != 0 )
    {
        start_info->mod_start = vinitrd_start;
        start_info->mod_len = initrd_len;
    }
    /* strncpy may leave cmd_line unterminated; the next line forces
     * termination (truncating over-long command lines). */
    strncpy(start_info->cmd_line, cmdline, MAX_CMDLINE);
    start_info->cmd_line[MAX_CMDLINE-1] = '\0';
    unmap_pfn(pm_handle, start_info);

    /* shared_info page starts its life empty. */
    shared_info = map_pfn_writeable(pm_handle, shared_info_frame);
    memset(shared_info, 0, sizeof(shared_info_t));
    /* Mask all upcalls... */
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
    unmap_pfn(pm_handle, shared_info);

    /* Send the page update requests down to the hypervisor. */
    if ( finish_mmu_updates(xc_handle, mmu) )
        goto error_out;

    free(mmu);
    (void)close_pfn_mapper(pm_handle);
    free(page_array);

    *pvsi = vstartinfo_start;
    *pvke = vkern_entry;

    return 0;

 error_out:
    if ( mmu != NULL )
        free(mmu);
    if ( pm_handle != NULL )
        (void)close_pfn_mapper(pm_handle);
    if ( page_array != NULL )
        free(page_array);
    return -1;
}
326 static unsigned long get_filesz(int fd)
327 {
328 u16 sig;
329 u32 _sz = 0;
330 unsigned long sz;
332 lseek(fd, 0, SEEK_SET);
333 read(fd, &sig, sizeof(sig));
334 sz = lseek(fd, 0, SEEK_END);
335 if ( sig == 0x8b1f ) /* GZIP signature? */
336 {
337 lseek(fd, -4, SEEK_END);
338 read(fd, &_sz, 4);
339 sz = _sz;
340 }
341 lseek(fd, 0, SEEK_SET);
343 return sz;
344 }
346 static char *read_kernel_image(const char *filename, unsigned long *size)
347 {
348 int kernel_fd = -1;
349 gzFile kernel_gfd = NULL;
350 char *image = NULL;
351 unsigned int bytes;
353 if ( (kernel_fd = open(filename, O_RDONLY)) < 0 )
354 {
355 PERROR("Could not open kernel image");
356 goto out;
357 }
359 *size = get_filesz(kernel_fd);
361 if ( (kernel_gfd = gzdopen(kernel_fd, "rb")) == NULL )
362 {
363 PERROR("Could not allocate decompression state for state file");
364 goto out;
365 }
367 if ( (image = malloc(*size)) == NULL )
368 {
369 PERROR("Could not allocate memory for kernel image");
370 goto out;
371 }
373 if ( (bytes = gzread(kernel_gfd, image, *size)) != *size )
374 {
375 PERROR("Error reading kernel image, could not"
376 " read the whole image (%d != %ld).", bytes, *size);
377 free(image);
378 image = NULL;
379 }
381 out:
382 if ( kernel_gfd != NULL )
383 gzclose(kernel_gfd);
384 else if ( kernel_fd >= 0 )
385 close(kernel_fd);
386 return image;
387 }
389 int xc_linux_build(int xc_handle,
390 u32 domid,
391 const char *image_name,
392 const char *ramdisk_name,
393 const char *cmdline,
394 unsigned int control_evtchn,
395 unsigned long flags)
396 {
397 dom0_op_t launch_op, op;
398 int initrd_fd = -1;
399 gzFile initrd_gfd = NULL;
400 int rc, i;
401 full_execution_context_t st_ctxt, *ctxt = &st_ctxt;
402 unsigned long nr_pages;
403 char *image = NULL;
404 unsigned long image_size, initrd_size=0;
405 unsigned long vstartinfo_start, vkern_entry;
407 if ( (nr_pages = get_tot_pages(xc_handle, domid)) < 0 )
408 {
409 PERROR("Could not find total pages for domain");
410 goto error_out;
411 }
413 if ( (image = read_kernel_image(image_name, &image_size)) == NULL )
414 goto error_out;
416 if ( (ramdisk_name != NULL) && (strlen(ramdisk_name) != 0) )
417 {
418 if ( (initrd_fd = open(ramdisk_name, O_RDONLY)) < 0 )
419 {
420 PERROR("Could not open the initial ramdisk image");
421 goto error_out;
422 }
424 initrd_size = get_filesz(initrd_fd);
426 if ( (initrd_gfd = gzdopen(initrd_fd, "rb")) == NULL )
427 {
428 PERROR("Could not allocate decompression state for initrd");
429 goto error_out;
430 }
431 }
433 if ( mlock(&st_ctxt, sizeof(st_ctxt) ) )
434 {
435 PERROR("Unable to mlock ctxt");
436 return 1;
437 }
439 op.cmd = DOM0_GETDOMAININFO;
440 op.u.getdomaininfo.domain = (domid_t)domid;
441 op.u.getdomaininfo.ctxt = ctxt;
442 if ( (do_dom0_op(xc_handle, &op) < 0) ||
443 ((u32)op.u.getdomaininfo.domain != domid) )
444 {
445 PERROR("Could not get info on domain");
446 goto error_out;
447 }
448 if ( !(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED) ||
449 (ctxt->pt_base != 0) )
450 {
451 ERROR("Domain is already constructed");
452 goto error_out;
453 }
455 if ( setup_guestos(xc_handle, domid, image, image_size,
456 initrd_gfd, initrd_size, nr_pages,
457 &vstartinfo_start, &vkern_entry,
458 ctxt, cmdline,
459 op.u.getdomaininfo.shared_info_frame,
460 control_evtchn, flags) < 0 )
461 {
462 ERROR("Error constructing guest OS");
463 goto error_out;
464 }
466 if ( initrd_fd >= 0 )
467 close(initrd_fd);
468 if ( initrd_gfd )
469 gzclose(initrd_gfd);
470 if ( image != NULL )
471 free(image);
473 ctxt->flags = 0;
475 /*
476 * Initial register values:
477 * DS,ES,FS,GS = FLAT_GUESTOS_DS
478 * CS:EIP = FLAT_GUESTOS_CS:start_pc
479 * SS:ESP = FLAT_GUESTOS_DS:start_stack
480 * ESI = start_info
481 * [EAX,EBX,ECX,EDX,EDI,EBP are zero]
482 * EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
483 */
484 ctxt->cpu_ctxt.ds = FLAT_GUESTOS_DS;
485 ctxt->cpu_ctxt.es = FLAT_GUESTOS_DS;
486 ctxt->cpu_ctxt.fs = FLAT_GUESTOS_DS;
487 ctxt->cpu_ctxt.gs = FLAT_GUESTOS_DS;
488 ctxt->cpu_ctxt.ss = FLAT_GUESTOS_DS;
489 ctxt->cpu_ctxt.cs = FLAT_GUESTOS_CS;
490 ctxt->cpu_ctxt.eip = vkern_entry;
491 ctxt->cpu_ctxt.esp = vstartinfo_start;
492 ctxt->cpu_ctxt.esi = vstartinfo_start;
493 ctxt->cpu_ctxt.eflags = (1<<9) | (1<<2);
495 /* FPU is set up to default initial state. */
496 memset(ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
498 /* Virtual IDT is empty at start-of-day. */
499 for ( i = 0; i < 256; i++ )
500 {
501 ctxt->trap_ctxt[i].vector = i;
502 ctxt->trap_ctxt[i].cs = FLAT_GUESTOS_CS;
503 }
504 ctxt->fast_trap_idx = 0;
506 /* No LDT. */
507 ctxt->ldt_ents = 0;
509 /* Use the default Xen-provided GDT. */
510 ctxt->gdt_ents = 0;
512 /* Ring 1 stack is the initial stack. */
513 ctxt->guestos_ss = FLAT_GUESTOS_DS;
514 ctxt->guestos_esp = vstartinfo_start;
516 /* No debugging. */
517 memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));
519 /* No callback handlers. */
520 ctxt->event_callback_cs = FLAT_GUESTOS_CS;
521 ctxt->event_callback_eip = 0;
522 ctxt->failsafe_callback_cs = FLAT_GUESTOS_CS;
523 ctxt->failsafe_callback_eip = 0;
525 memset( &launch_op, 0, sizeof(launch_op) );
527 launch_op.u.builddomain.domain = (domid_t)domid;
528 launch_op.u.builddomain.ctxt = ctxt;
530 launch_op.cmd = DOM0_BUILDDOMAIN;
531 rc = do_dom0_op(xc_handle, &launch_op);
533 return rc;
535 error_out:
536 if ( initrd_gfd != NULL )
537 gzclose(initrd_gfd);
538 else if ( initrd_fd >= 0 )
539 close(initrd_fd);
540 if ( image != NULL )
541 free(image);
543 return -1;
544 }
546 static inline int is_loadable_phdr(Elf_Phdr *phdr)
547 {
548 return ((phdr->p_type == PT_LOAD) &&
549 ((phdr->p_flags & (PF_W|PF_X)) != 0));
550 }
/*
 * Validate a Xen ELF kernel image and extract its load parameters.
 *
 * On success fills in:
 *   *pvirtstart - virtual base of the bootstrap address space: the
 *                 kernel's lowest load address, unless the image's
 *                 __xen_guest section overrides it via "VIRT_BASE=",
 *   *pkernstart - lowest virtual address of any loadable segment,
 *   *pkernend   - highest virtual address covered by a loadable segment,
 *   *pkernentry - ELF entry point.
 * Returns 0 on success, -EINVAL on any validation failure.
 */
static int parseelfimage(char *elfbase,
                         unsigned long elfsize,
                         unsigned long *pvirtstart,
                         unsigned long *pkernstart,
                         unsigned long *pkernend,
                         unsigned long *pkernentry)
{
    Elf_Ehdr *ehdr = (Elf_Ehdr *)elfbase;
    Elf_Phdr *phdr;
    Elf_Shdr *shdr;
    unsigned long kernstart = ~0UL, kernend=0UL;
    char *shstrtab, *guestinfo=NULL, *p;
    int h;

    if ( !IS_ELF(*ehdr) )
    {
        ERROR("Kernel image does not have an ELF header.");
        return -EINVAL;
    }

    /* NOTE(review): the offset arithmetic below can wrap for a hostile
     * image with huge e_phoff/e_shoff; tolerated since the image comes
     * from the trusted domain-building tools. */
    if ( (ehdr->e_phoff + (ehdr->e_phnum * ehdr->e_phentsize)) > elfsize )
    {
        ERROR("ELF program headers extend beyond end of image.");
        return -EINVAL;
    }

    if ( (ehdr->e_shoff + (ehdr->e_shnum * ehdr->e_shentsize)) > elfsize )
    {
        ERROR("ELF section headers extend beyond end of image.");
        return -EINVAL;
    }

    /* Find the section-header strings table. */
    if ( ehdr->e_shstrndx == SHN_UNDEF )
    {
        ERROR("ELF image has no section-header strings table (shstrtab).");
        return -EINVAL;
    }
    shdr = (Elf_Shdr *)(elfbase + ehdr->e_shoff +
                        (ehdr->e_shstrndx*ehdr->e_shentsize));
    shstrtab = elfbase + shdr->sh_offset;

    /* Find the special '__xen_guest' section and check its contents. */
    for ( h = 0; h < ehdr->e_shnum; h++ )
    {
        shdr = (Elf_Shdr *)(elfbase + ehdr->e_shoff + (h*ehdr->e_shentsize));
        if ( strcmp(&shstrtab[shdr->sh_name], "__xen_guest") != 0 )
            continue;

        guestinfo = elfbase + shdr->sh_offset;

        if ( (strstr(guestinfo, "GUEST_OS=linux") == NULL) ||
             (strstr(guestinfo, "XEN_VER=1.3") == NULL) )
        {
            ERROR("Will only load Linux images built for Xen v1.3");
            ERROR("Actually saw: '%s'", guestinfo);
            return -EINVAL;
        }

        break;
    }
    if ( guestinfo == NULL )
    {
        ERROR("Not a Xen-ELF image: '__xen_guest' section not found.");
        return -EINVAL;
    }

    /* The span of all loadable segments gives the kernel's footprint. */
    for ( h = 0; h < ehdr->e_phnum; h++ )
    {
        phdr = (Elf_Phdr *)(elfbase + ehdr->e_phoff + (h*ehdr->e_phentsize));
        if ( !is_loadable_phdr(phdr) )
            continue;
        if ( phdr->p_vaddr < kernstart )
            kernstart = phdr->p_vaddr;
        if ( (phdr->p_vaddr + phdr->p_memsz) > kernend )
            kernend = phdr->p_vaddr + phdr->p_memsz;
    }

    /* kernstart > kernend also catches the no-loadable-segments case
     * (kernstart is still ~0UL then). */
    if ( (kernstart > kernend) ||
         (ehdr->e_entry < kernstart) ||
         (ehdr->e_entry > kernend) )
    {
        ERROR("Malformed ELF image.");
        return -EINVAL;
    }

    *pvirtstart = kernstart;
    if ( (p = strstr(guestinfo, "VIRT_BASE=")) != NULL )
        *pvirtstart = strtoul(p+10, &p, 0);

    *pkernstart = kernstart;
    *pkernend = kernend;
    *pkernentry = ehdr->e_entry;

    return 0;
}
649 static int loadelfimage(char *elfbase, void *pmh, unsigned long *parray,
650 unsigned long vstart)
651 {
652 Elf_Ehdr *ehdr = (Elf_Ehdr *)elfbase;
653 Elf_Phdr *phdr;
654 int h;
656 char *va;
657 unsigned long pa, done, chunksz;
659 for ( h = 0; h < ehdr->e_phnum; h++ )
660 {
661 phdr = (Elf_Phdr *)(elfbase + ehdr->e_phoff + (h*ehdr->e_phentsize));
662 if ( !is_loadable_phdr(phdr) )
663 continue;
665 for ( done = 0; done < phdr->p_filesz; done += chunksz )
666 {
667 pa = (phdr->p_vaddr + done) - vstart;
668 va = map_pfn_writeable(pmh, parray[pa>>PAGE_SHIFT]);
669 chunksz = phdr->p_filesz - done;
670 if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
671 chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
672 memcpy(va + (pa & (PAGE_SIZE-1)),
673 elfbase + phdr->p_offset + done, chunksz);
674 unmap_pfn(pmh, va);
675 }
677 for ( ; done < phdr->p_memsz; done += chunksz )
678 {
679 pa = (phdr->p_vaddr + done) - vstart;
680 va = map_pfn_writeable(pmh, parray[pa>>PAGE_SHIFT]);
681 chunksz = phdr->p_memsz - done;
682 if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
683 chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
684 memset(va + (pa & (PAGE_SIZE-1)), 0, chunksz);
685 unmap_pfn(pmh, va);
686 }
687 }
689 return 0;
690 }