ia64/xen-unstable

view tools/libxc/xc_netbsd_build.c @ 1820:3d4f8eb89670

bitkeeper revision 1.1106.1.2 (40faa780dekT3E5arFwcbQDu1MbX6g)

Cleaned up Xen's instruction emulator.

author      kaf24@scramble.cl.cam.ac.uk
date        Sun Jul 18 16:38:24 2004 +0000 (2004-07-18)
parents     7ee821f4caea
children    a989641f2755 bd1640d9d7d4
line source
/******************************************************************************
 * xc_netbsd_build.c
 */

#include "xc_private.h"
#define ELFSIZE 32 /* XXX */
#include "xc_elf.h"
#include <zlib.h>

#ifdef DEBUG
#define DPRINTF(x) printf x
#else
#define DPRINTF(x)
#endif
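
/*
 * The double parentheses let a single macro parameter carry a full printf
 * argument list. Illustrative use (not part of the original source):
 *
 *     DPRINTF(("mapped page %lu of %lu\n", count, tot_pages));
 *
 * expands to printf("mapped page %lu of %lu\n", count, tot_pages) when DEBUG
 * is defined, and to nothing otherwise.
 */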
static int loadelfimage(gzFile, void *, unsigned long *, unsigned long,
                        unsigned long *, unsigned long *,
                        unsigned long *, unsigned long *);

#define ELFROUND (ELFSIZE / 8)
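/* ELFROUND is the natural alignment of the ELF class, in bytes (4 for ELF32). */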
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)

static long get_tot_pages(int xc_handle, u32 domid)
{
    dom0_op_t op;
    op.cmd = DOM0_GETDOMAININFO;
    op.u.getdomaininfo.domain = (domid_t)domid;
    op.u.getdomaininfo.ctxt = NULL;
    return (do_dom0_op(xc_handle, &op) < 0) ?
        -1 : op.u.getdomaininfo.tot_pages;
}
static int get_pfn_list(int xc_handle,
                        u32 domid,
                        unsigned long *pfn_buf,
                        unsigned long max_pfns)
{
    dom0_op_t op;
    int ret;
    op.cmd = DOM0_GETMEMLIST;
    op.u.getmemlist.domain = (domid_t)domid;
    op.u.getmemlist.max_pfns = max_pfns;
    op.u.getmemlist.buffer = pfn_buf;

    if ( mlock(pfn_buf, max_pfns * sizeof(unsigned long)) != 0 )
        return -1;

    ret = do_dom0_op(xc_handle, &op);

    (void)munlock(pfn_buf, max_pfns * sizeof(unsigned long));

    return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
}
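
/*
 * Illustrative use (a sketch only; setup_guestos() below is the real caller):
 *
 *     unsigned long *pfns = malloc(tot_pages * sizeof(*pfns));
 *     if ( (pfns != NULL) &&
 *          (get_pfn_list(xc_handle, domid, pfns, tot_pages) == tot_pages) )
 *         ... pfns[i] is the machine frame backing pseudo-phys page i ...
 *
 * The buffer is mlock()ed for the duration of the hypercall because the
 * hypervisor writes the frame list directly into it.
 */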
static int setup_guestos(int xc_handle,
                         u32 dom,
                         gzFile kernel_gfd,
                         unsigned long tot_pages,
                         unsigned long *virt_startinfo_addr,
                         unsigned long *virt_load_addr,
                         full_execution_context_t *ctxt,
                         const char *cmdline,
                         unsigned long shared_info_frame,
                         unsigned int control_evtchn)
{
    l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
    l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
    unsigned long *page_array = NULL;
    int alloc_index, num_pt_pages;
    unsigned long l2tab;
    unsigned long l1tab;
    unsigned long count, pt_start;
    unsigned long symtab_addr = 0, symtab_len = 0;
    extended_start_info_t *start_info;
    shared_info_t *shared_info;
    unsigned long ksize;
    mmu_t *mmu = NULL;
    void *pm_handle = NULL;
    int i;

    if ( (pm_handle = init_pfn_mapper((domid_t)dom)) == NULL )
        goto error_out;

    if ( (page_array = malloc(tot_pages * sizeof(unsigned long))) == NULL )
    {
        PERROR("Could not allocate memory");
        goto error_out;
    }

    if ( get_pfn_list(xc_handle, dom, page_array, tot_pages) != tot_pages )
    {
        PERROR("Could not get the page frame list");
        goto error_out;
    }

    if ( loadelfimage(kernel_gfd, pm_handle, page_array, tot_pages,
                      virt_load_addr, &ksize, &symtab_addr, &symtab_len) )
        goto error_out;
    /* ksize is the kernel-image size in pages (i.e. rounded up to a whole page). */
    alloc_index = tot_pages - 1;

    /* Count bottom-level PTs, rounding up. */
    num_pt_pages = (l1_table_offset(*virt_load_addr) + tot_pages + 1023)
                   / 1024;

    /* We must also count the page directory. */
    num_pt_pages++;
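
    /*
     * Worked example (illustrative values): with *virt_load_addr =
     * 0xc0100000, l1_table_offset() yields 0x100 = 256, so for
     * tot_pages = 4096 we need (256 + 4096 + 1023) / 1024 = 5 L1 pages,
     * plus the page directory: num_pt_pages = 6.
     */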
    /* Index of first PT page. */
    pt_start = tot_pages - num_pt_pages;

    /*
     * First allocate page for page dir. Allocation goes backwards from the
     * end of the allocated physical address space.
     */
    l2tab = page_array[alloc_index] << PAGE_SHIFT;
    alloc_index--;
    ctxt->pt_base = l2tab;

    if ( (mmu = init_mmu_updates(xc_handle, dom)) == NULL )
        goto error_out;

    /* Initialise the page tables. */
    if ( (vl2tab = map_pfn_writeable(pm_handle, l2tab >> PAGE_SHIFT)) == NULL )
        goto error_out;
    memset(vl2tab, 0, PAGE_SIZE);
    vl2e = &vl2tab[l2_table_offset(*virt_load_addr)];
    for ( count = 0; count < tot_pages; count++ )
    {
        if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
        {
            l1tab = page_array[alloc_index--] << PAGE_SHIFT;
            if ( vl1tab != NULL )
                unmap_pfn(pm_handle, vl1tab);
            if ( (vl1tab = map_pfn_writeable(pm_handle,
                                             l1tab >> PAGE_SHIFT)) == NULL )
                goto error_out;
            memset(vl1tab, 0, PAGE_SIZE);
            vl1e = &vl1tab[l1_table_offset(*virt_load_addr +
                                           (count<<PAGE_SHIFT))];
            *vl2e++ = l1tab | L2_PROT;
        }
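
        /*
         * Page-table pages occupy the top num_pt_pages frames (count >=
         * pt_start); they must be mapped read-only, since Xen forbids
         * writable mappings of pinned page tables -- updates go through
         * mmu_update requests instead.
         */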
        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
        if ( count >= pt_start )
            *vl1e &= ~_PAGE_RW;
        vl1e++;

        if ( add_mmu_update(xc_handle, mmu,
                            (page_array[count] << PAGE_SHIFT) |
                            MMU_MACHPHYS_UPDATE, count) )
            goto error_out;
    }
    unmap_pfn(pm_handle, vl1tab);
    unmap_pfn(pm_handle, vl2tab);

    /*
     * Pin down l2tab addr as page dir page - causes hypervisor to provide
     * correct protection for the page.
     */
    if ( add_mmu_update(xc_handle, mmu,
                        l2tab | MMU_EXTENDED_COMMAND, MMUEXT_PIN_L2_TABLE) )
        goto error_out;

    *virt_startinfo_addr =
        *virt_load_addr + ((alloc_index-1) << PAGE_SHIFT);

    start_info = map_pfn_writeable(pm_handle, page_array[alloc_index-1]);
    memset(start_info, 0, sizeof(*start_info));
    start_info->pt_base = *virt_load_addr + ((tot_pages-1) << PAGE_SHIFT);
    start_info->mod_start = symtab_addr;
    start_info->mod_len = symtab_len;
    start_info->nr_pages = tot_pages;
    start_info->shared_info = shared_info_frame << PAGE_SHIFT;
    start_info->flags = 0;
    start_info->domain_controller_evtchn = control_evtchn;
    strncpy(start_info->cmd_line, cmdline, MAX_CMDLINE);
    start_info->cmd_line[MAX_CMDLINE-1] = '\0';
    unmap_pfn(pm_handle, start_info);

    /* shared_info page starts its life empty. */
    shared_info = map_pfn_writeable(pm_handle, shared_info_frame);
    memset(shared_info, 0, PAGE_SIZE);
    /* Mask all upcalls... */
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
    unmap_pfn(pm_handle, shared_info);

    /* Send the page update requests down to the hypervisor. */
    if ( finish_mmu_updates(xc_handle, mmu) )
        goto error_out;

    free(mmu);
    (void)close_pfn_mapper(pm_handle);
    free(page_array);
    return 0;

 error_out:
    if ( mmu != NULL )
        free(mmu);
    if ( pm_handle != NULL )
        (void)close_pfn_mapper(pm_handle);
    if ( page_array != NULL )
        free(page_array);
    return -1;
}
int xc_netbsd_build(int xc_handle,
                    u32 domid,
                    const char *image_name,
                    const char *cmdline,
                    unsigned int control_evtchn)
{
    dom0_op_t launch_op, op;
    unsigned long load_addr;
    long tot_pages;
    int kernel_fd = -1;
    gzFile kernel_gfd = NULL;
    int rc, i;
    full_execution_context_t st_ctxt, *ctxt = &st_ctxt;
    unsigned long virt_startinfo_addr;

    if ( (tot_pages = get_tot_pages(xc_handle, domid)) < 0 )
    {
        PERROR("Could not find total pages for domain");
        return 1;
    }

    kernel_fd = open(image_name, O_RDONLY);
    if ( kernel_fd < 0 )
    {
        PERROR("Could not open kernel image");
        return 1;
    }
    if ( (kernel_gfd = gzdopen(kernel_fd, "rb")) == NULL )
    {
        PERROR("Could not allocate decompression state for kernel image");
        close(kernel_fd);
        return 1;
    }
    if ( mlock(&st_ctxt, sizeof(st_ctxt)) )
    {
        PERROR("Unable to mlock ctxt");
        return 1;
    }

    op.cmd = DOM0_GETDOMAININFO;
    op.u.getdomaininfo.domain = (domid_t)domid;
    op.u.getdomaininfo.ctxt = ctxt;
    if ( (do_dom0_op(xc_handle, &op) < 0) ||
         ((u32)op.u.getdomaininfo.domain != domid) )
    {
        PERROR("Could not get info on domain");
        goto error_out;
    }
    if ( !(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED) ||
         (op.u.getdomaininfo.ctxt->pt_base != 0) )
    {
        ERROR("Domain is already constructed");
        goto error_out;
    }

    if ( setup_guestos(xc_handle, domid, kernel_gfd, tot_pages,
                       &virt_startinfo_addr,
                       &load_addr, &st_ctxt, cmdline,
                       op.u.getdomaininfo.shared_info_frame,
                       control_evtchn) < 0 )
    {
        ERROR("Error constructing guest OS");
        goto error_out;
    }

    if ( kernel_fd >= 0 )
        close(kernel_fd);
    if ( kernel_gfd )
        gzclose(kernel_gfd);
    ctxt->flags = 0;

    /*
     * Initial register values:
     *     DS,ES,FS,GS = FLAT_GUESTOS_DS
     *          CS:EIP = FLAT_GUESTOS_CS:start_pc
     *          SS:ESP = FLAT_GUESTOS_DS:start_stack
     *             ESI = start_info
     *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
     *          EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
     */
    ctxt->cpu_ctxt.ds = FLAT_GUESTOS_DS;
    ctxt->cpu_ctxt.es = FLAT_GUESTOS_DS;
    ctxt->cpu_ctxt.fs = FLAT_GUESTOS_DS;
    ctxt->cpu_ctxt.gs = FLAT_GUESTOS_DS;
    ctxt->cpu_ctxt.ss = FLAT_GUESTOS_DS;
    ctxt->cpu_ctxt.cs = FLAT_GUESTOS_CS;
    ctxt->cpu_ctxt.eip = load_addr;
    ctxt->cpu_ctxt.esp = virt_startinfo_addr;
    ctxt->cpu_ctxt.esi = virt_startinfo_addr;
    ctxt->cpu_ctxt.eflags = (1<<9) | (1<<1);
    /* FPU is set up to default initial state. */
    memset(ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

    /* Virtual IDT is empty at start-of-day. */
    for ( i = 0; i < 256; i++ )
    {
        ctxt->trap_ctxt[i].vector = i;
        ctxt->trap_ctxt[i].cs     = FLAT_GUESTOS_CS;
    }
    ctxt->fast_trap_idx = 0;

    /* No LDT. */
    ctxt->ldt_ents = 0;

    /* Use the default Xen-provided GDT. */
    ctxt->gdt_ents = 0;

    /* Ring 1 stack is the initial stack. */
    ctxt->guestos_ss  = FLAT_GUESTOS_DS;
    ctxt->guestos_esp = virt_startinfo_addr;

    /* No debugging. */
    memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));

    /* No callback handlers. */
    ctxt->event_callback_cs     = FLAT_GUESTOS_CS;
    ctxt->event_callback_eip    = 0;
    ctxt->failsafe_callback_cs  = FLAT_GUESTOS_CS;
    ctxt->failsafe_callback_eip = 0;

    memset( &launch_op, 0, sizeof(launch_op) );

    launch_op.u.builddomain.domain = (domid_t)domid;
    launch_op.u.builddomain.ctxt   = ctxt;
    launch_op.cmd = DOM0_BUILDDOMAIN;
    rc = do_dom0_op(xc_handle, &launch_op);

    return rc;

 error_out:
    if ( kernel_fd >= 0 )
        close(kernel_fd);
    if ( kernel_gfd )
        gzclose(kernel_gfd);

    return -1;
}
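
/*
 * Illustrative caller sketch (hypothetical; the real invocations live in the
 * higher-level control tools, and xc_interface_open() is assumed to be the
 * usual xc.h helper for obtaining a privileged interface handle):
 *
 *     int xc = xc_interface_open();
 *     if ( xc_netbsd_build(xc, domid, "/boot/netbsd.gz",
 *                          "root=xbd0", control_evtchn) != 0 )
 *         ... domain could not be built ...
 */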
#define MYSEEK_BUFSIZE 1024

static off_t
myseek(gzFile gfd, off_t offset, int whence)
{
    unsigned char tmp[MYSEEK_BUFSIZE];
    int c;

    if ( offset < 0 )
    {
        ERROR("seek back not supported");
        return -1;
    }

    while ( offset != 0 )
    {
        c = offset;
        if ( c > MYSEEK_BUFSIZE )
            c = MYSEEK_BUFSIZE;
        if ( gzread(gfd, tmp, c) != c )
        {
            PERROR("Error seeking in image.");
            return -1;
        }
        offset -= c;
    }
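
    /*
     * gzip streams cannot seek backwards, so "seeking" means reading the
     * stream forward. whence is ignored and the new offset is not computed:
     * callers pass the delta from their own tracked position and only test
     * the result against -1.
     */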
    return 0; /* XXX */
}
/*
 * NetBSD memory layout:
 *
 * ---------------- *virt_load_addr = ehdr.e_entry (0xc0100000)
 * | kernel text  |
 * |              |
 * ----------------
 * | kernel data  |
 * |              |
 * ----------------
 * | kernel bss   |
 * |              |
 * ---------------- *symtab_addr
 * | symtab size  | = *symtab_len
 * ----------------
 * | elf header   | offsets to symbol sections mangled to be relative
 * |              | to the header's location
 * ----------------
 * | sym section  |
 * | headers      |
 * ----------------
 * | sym sections |
 * |              |
 * ---------------- *symtab_addr + *symtab_len
 * | padding      |
 * ---------------- ehdr.e_entry + (*ksize << PAGE_SHIFT)
 */
#define IS_TEXT(p) (p.p_flags & PF_X)
#define IS_DATA(p) (p.p_flags & PF_W)
#define IS_BSS(p)  (p.p_filesz < p.p_memsz)
static int
loadelfimage(gzFile kernel_gfd, void *pm_handle, unsigned long *page_array,
             unsigned long tot_pages, unsigned long *virt_load_addr,
             unsigned long *ksize, unsigned long *symtab_addr,
             unsigned long *symtab_len)
{
    Elf_Ehdr ehdr;
    Elf_Phdr *phdr;
    Elf_Shdr *shdr;
    void *vaddr;
    char page[PAGE_SIZE], *p;
    unsigned long iva, maxva, symva;
    int c, curpos, h, i, ret, s;

    ret = -1;
    phdr = NULL;
    p = NULL;
    maxva = 0;

    if ( gzread(kernel_gfd, &ehdr, sizeof(Elf_Ehdr)) != sizeof(Elf_Ehdr) )
    {
        PERROR("Error reading kernel image ELF header.");
        goto out;
    }
    curpos = sizeof(Elf_Ehdr);

    if ( !IS_ELF(ehdr) )
    {
        PERROR("Image does not have an ELF header.");
        goto out;
    }

    *virt_load_addr = ehdr.e_entry;

    if ( (*virt_load_addr & (PAGE_SIZE-1)) != 0 )
    {
        ERROR("We can only deal with page-aligned load addresses");
        goto out;
    }

    if ( (*virt_load_addr + (tot_pages << PAGE_SHIFT)) >
         HYPERVISOR_VIRT_START )
    {
        ERROR("Cannot map all domain memory without hitting Xen space");
        goto out;
    }

    phdr = malloc(ehdr.e_phnum * sizeof(Elf_Phdr));
    if ( phdr == NULL )
    {
        ERROR("Cannot allocate memory for Elf_Phdrs");
        goto out;
    }

    if ( myseek(kernel_gfd, ehdr.e_phoff - curpos, SEEK_SET) == -1 )
    {
        ERROR("Seek to program header failed");
        goto out;
    }
    curpos = ehdr.e_phoff;

    if ( gzread(kernel_gfd, phdr, ehdr.e_phnum * sizeof(Elf_Phdr)) !=
         ehdr.e_phnum * sizeof(Elf_Phdr) )
    {
        PERROR("Error reading kernel image ELF program header.");
        goto out;
    }
    curpos += ehdr.e_phnum * sizeof(Elf_Phdr);

    /* Copy run-time 'load' segments that are writeable and/or executable. */
    for ( h = 0; h < ehdr.e_phnum; h++ )
    {
        if ( (phdr[h].p_type != PT_LOAD) ||
             ((phdr[h].p_flags & (PF_W|PF_X)) == 0) )
            continue;

        if ( IS_TEXT(phdr[h]) || IS_DATA(phdr[h]) )
        {
            if ( myseek(kernel_gfd, phdr[h].p_offset - curpos,
                        SEEK_SET) == -1 )
            {
                ERROR("Seek to section failed");
                goto out;
            }
            curpos = phdr[h].p_offset;

            for ( iva = phdr[h].p_vaddr;
                  iva < phdr[h].p_vaddr + phdr[h].p_filesz;
                  iva += c )
            {
                c = PAGE_SIZE - (iva & (PAGE_SIZE - 1));
                if ( iva + c > phdr[h].p_vaddr + phdr[h].p_filesz )
                    c = phdr[h].p_vaddr + phdr[h].p_filesz - iva;
                if ( gzread(kernel_gfd, page, c) != c )
                {
                    PERROR("Error reading kernel image page.");
                    goto out;
                }
                curpos += c;
                vaddr = map_pfn_writeable(pm_handle,
                                          page_array[(iva - *virt_load_addr)
                                                     >> PAGE_SHIFT]);
                if ( vaddr == NULL )
                {
                    ERROR("Couldn't map guest memory");
                    goto out;
                }
                DPRINTF(("copy page %p to %p, count 0x%x\n", (void *)iva,
                         vaddr + (iva & (PAGE_SIZE - 1)), c));
                memcpy(vaddr + (iva & (PAGE_SIZE - 1)), page, c);
                unmap_pfn(pm_handle, vaddr);
            }

            if ( phdr[h].p_vaddr + phdr[h].p_filesz > maxva )
                maxva = phdr[h].p_vaddr + phdr[h].p_filesz;
        }

        if ( IS_BSS(phdr[h]) )
        {
            /* XXX maybe clear phdr[h].p_memsz bytes from
               phdr[h].p_vaddr + phdr[h].p_filesz ??? */
            if ( phdr[h].p_vaddr + phdr[h].p_memsz > maxva )
                maxva = phdr[h].p_vaddr + phdr[h].p_memsz;
            DPRINTF(("bss from %p to %p, maxva %p\n",
                     (void *)(phdr[h].p_vaddr + phdr[h].p_filesz),
                     (void *)(phdr[h].p_vaddr + phdr[h].p_memsz),
                     (void *)maxva));
        }
    }

    p = malloc(sizeof(int) + sizeof(Elf_Ehdr) +
               ehdr.e_shnum * sizeof(Elf_Shdr));
    if ( p == NULL )
    {
        ERROR("Cannot allocate memory for Elf_Shdrs");
        goto out;
    }

    shdr = (Elf_Shdr *)(p + sizeof(int) + sizeof(Elf_Ehdr));
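
    /*
     * The buffer p is laid out as [int total_len][Elf_Ehdr][Elf_Shdr array],
     * mirroring the guest-visible symtab image in the layout diagram above;
     * shdr points at the section header table portion.
     */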
    if ( myseek(kernel_gfd, ehdr.e_shoff - curpos, SEEK_SET) == -1 )
    {
        ERROR("Seek to section header table failed");
        goto out;
    }
    curpos = ehdr.e_shoff;

    if ( gzread(kernel_gfd, shdr, ehdr.e_shnum * sizeof(Elf_Shdr)) !=
         ehdr.e_shnum * sizeof(Elf_Shdr) )
    {
        PERROR("Error reading kernel image ELF section header table.");
        goto out;
    }
    curpos += ehdr.e_shnum * sizeof(Elf_Shdr);

    maxva = (maxva + ELFROUND - 1) & ~(ELFROUND - 1);
    symva = maxva;
    maxva += sizeof(int);
    *symtab_addr = maxva;
    *symtab_len = 0;
    maxva += sizeof(Elf_Ehdr) + ehdr.e_shnum * sizeof(Elf_Shdr);
    maxva = (maxva + ELFROUND - 1) & ~(ELFROUND - 1);
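
    /*
     * Room is reserved just past the loaded image: a length word at symva,
     * then (at *symtab_addr) the crafted ELF header and section header
     * table. maxva now points at where the section data will be copied.
     */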
    /* Copy kernel string / symbol tables into physical memory. */
    for ( h = 0; h < ehdr.e_shnum; h++ )
    {
        if ( shdr[h].sh_type == SHT_STRTAB )
        {
            /* Look for a symtab @i linked to this strtab @h. */
            for ( i = 0; i < ehdr.e_shnum; i++ )
                if ( (shdr[i].sh_type == SHT_SYMTAB) &&
                     (shdr[i].sh_link == h) )
                    break;
            /* Skip strtab @h if no symtab @i references it. */
            if ( i == ehdr.e_shnum )
            {
                shdr[h].sh_offset = 0;
                continue;
            }
        }

        if ( (shdr[h].sh_type == SHT_STRTAB) ||
             (shdr[h].sh_type == SHT_SYMTAB) )
        {
            if ( myseek(kernel_gfd, shdr[h].sh_offset - curpos,
                        SEEK_SET) == -1 )
            {
                ERROR("Seek to symbol section failed");
                goto out;
            }
            curpos = shdr[h].sh_offset;

            /* Mangled to be based on ELF header location. */
            shdr[h].sh_offset = maxva - *symtab_addr;

            DPRINTF(("copy section %d, size 0x%x\n", h, shdr[h].sh_size));
            for ( i = 0; i < shdr[h].sh_size; i += c, maxva += c )
            {
                c = PAGE_SIZE - (maxva & (PAGE_SIZE - 1));
                if ( c > (shdr[h].sh_size - i) )
                    c = shdr[h].sh_size - i;
                if ( gzread(kernel_gfd, page, c) != c )
                {
                    PERROR("Error reading kernel image page.");
                    goto out;
                }
                curpos += c;

                vaddr = map_pfn_writeable(pm_handle,
                                          page_array[(maxva - *virt_load_addr)
                                                     >> PAGE_SHIFT]);
                if ( vaddr == NULL )
                {
                    ERROR("Couldn't map guest memory");
                    goto out;
                }
                DPRINTF(("copy page %p to %p, count 0x%x\n", (void *)maxva,
                         vaddr + (maxva & (PAGE_SIZE - 1)), c));
                memcpy(vaddr + (maxva & (PAGE_SIZE - 1)), page, c);
                unmap_pfn(pm_handle, vaddr);
            }

            *symtab_len += shdr[h].sh_size;
            maxva = (maxva + ELFROUND - 1) & ~(ELFROUND - 1);
        }
        shdr[h].sh_name = 0; /* Name is NULL. */
    }
    if ( *symtab_len == 0 )
    {
        DPRINTF(("no symbol table\n"));
        *symtab_addr = 0;
        ret = 0;
        goto out;
    }

    DPRINTF(("sym header va %p from %p/%p size %x/%x\n", (void *)symva,
             shdr, p, ehdr.e_shnum * sizeof(Elf_Shdr),
             ehdr.e_shnum * sizeof(Elf_Shdr) + sizeof(Elf_Ehdr)));
    ehdr.e_phoff = 0;
    ehdr.e_shoff = sizeof(Elf_Ehdr);
    ehdr.e_phentsize = 0;
    ehdr.e_phnum = 0;
    ehdr.e_shstrndx = SHN_UNDEF;
    memcpy(p + sizeof(int), &ehdr, sizeof(Elf_Ehdr));
    *(int *)p = maxva - *symtab_addr;

    /* Copy total length, crafted ELF header and section header table. */
    s = sizeof(int) + sizeof(Elf_Ehdr) + ehdr.e_shnum * sizeof(Elf_Shdr);
    for ( i = 0; i < s; i += c, symva += c )
    {
        c = PAGE_SIZE - (symva & (PAGE_SIZE - 1));
        if ( c > s - i )
            c = s - i;
        vaddr = map_pfn_writeable(pm_handle,
                                  page_array[(symva - *virt_load_addr)
                                             >> PAGE_SHIFT]);
        if ( vaddr == NULL )
        {
            ERROR("Couldn't map guest memory");
            goto out;
        }
        DPRINTF(("copy page %p to %p, count 0x%x\n", (void *)symva,
                 vaddr + (symva & (PAGE_SIZE - 1)), c));
        memcpy(vaddr + (symva & (PAGE_SIZE - 1)), p + i, c);
        unmap_pfn(pm_handle, vaddr);
    }
    *symtab_len = maxva - *symtab_addr;

    ret = 0;

 out:
    if ( ret == 0 )
    {
        maxva = (maxva + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
        *ksize = (maxva - *virt_load_addr) >> PAGE_SHIFT;

        DPRINTF(("virt_addr %p, kpages 0x%lx, symtab_addr %p, symtab_len %p\n",
                 (void *)*virt_load_addr, *ksize, (void *)*symtab_addr,
                 (void *)*symtab_len));
    }

    if ( phdr != NULL )
        free(phdr);
    if ( p != NULL )
        free(p);
    return ret;
}