ia64/xen-unstable

tools/libxc/ia64/xc_ia64_linux_restore.c @ 19660:649226acc47e

[IA64] adjust ia64 xc_domain_restore() signature

This patch fixes the following error.
ia64/xc_ia64_linux_restore.c:546: error: conflicting types for 'xc_domain_restore'
./xenguest.h:49: error: previous declaration of 'xc_domain_restore' was here
make[4]: *** [ia64/xc_ia64_linux_restore.o] Error 1
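
With this change the definition agrees with the declaration in
./xenguest.h. A sketch of the expected prototype (it matches the
definition in the source below; the exact wording in xenguest.h may
differ):

    int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
                          unsigned int store_evtchn, unsigned long *store_mfn,
                          unsigned int console_evtchn, unsigned long *console_mfn,
                          unsigned int hvm, unsigned int pae, int superpages);

The ia64 code accepts the superpages argument but does not use it.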

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Keir Fraser <keir.fraser@citrix.com>
date Wed May 27 12:00:51 2009 +0100 (2009-05-27)

/******************************************************************************
 * xc_ia64_linux_restore.c
 *
 * Restore the state of a Linux session.
 *
 * Copyright (c) 2003, K A Fraser.
 * Rewritten for ia64 by Tristan Gingold <tristan.gingold@bull.net>
 *
 * Copyright (c) 2007 Isaku Yamahata <yamahata@valinux.co.jp>
 *   Use foreign p2m exposure.
 *   VTi domain support
 */

#include <stdlib.h>
#include <unistd.h>

#include "xg_private.h"
#include "xc_ia64_save_restore.h"
#include "xc_ia64.h"
#include "xc_efi.h"
#include "xen/hvm/params.h"

#define PFN_TO_KB(_pfn) ((_pfn) << (PAGE_SHIFT - 10))

/* number of pfns this guest has (i.e. number of entries in the P2M) */
static unsigned long p2m_size;

/* number of 'in use' pfns in the guest (i.e. #P2M entries with a valid mfn) */
static unsigned long nr_pfns;

static int
populate_page_if_necessary(int xc_handle, uint32_t dom, unsigned long gmfn,
                           struct xen_ia64_p2m_table *p2m_table)
{
    if (xc_ia64_p2m_present(p2m_table, gmfn))
        return 0;

    return xc_domain_memory_populate_physmap(xc_handle, dom, 1, 0, 0, &gmfn);
}

static int
read_page(int xc_handle, int io_fd, uint32_t dom, unsigned long pfn)
{
    void *mem;

    mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                               PROT_READ|PROT_WRITE, pfn);
    if (mem == NULL) {
        ERROR("cannot map page");
        return -1;
    }
    if (read_exact(io_fd, mem, PAGE_SIZE)) {
        ERROR("Error when reading from state file (5)");
        munmap(mem, PAGE_SIZE);
        return -1;
    }
    munmap(mem, PAGE_SIZE);
    return 0;
}

/*
 * Get the list of PFNs that are not in the pseudo-phys map.
 * Although we allocate pages on demand, the balloon driver may
 * have decreased the reservation in the meantime, so we have to
 * free those pages here.
 */
static int
xc_ia64_recv_unallocated_list(int xc_handle, int io_fd, uint32_t dom,
                              struct xen_ia64_p2m_table *p2m_table)
{
    int rc = -1;
    unsigned int i;
    unsigned int count;
    unsigned long *pfntab = NULL;
    unsigned int nr_frees;

    if (read_exact(io_fd, &count, sizeof(count))) {
        ERROR("Error when reading pfn count");
        goto out;
    }

    pfntab = malloc(sizeof(unsigned long) * count);
    if (pfntab == NULL) {
        ERROR("Out of memory");
        goto out;
    }

    if (read_exact(io_fd, pfntab, sizeof(unsigned long)*count)) {
        ERROR("Error when reading pfntab");
        goto out;
    }

    nr_frees = 0;
    for (i = 0; i < count; i++) {
        if (xc_ia64_p2m_allocated(p2m_table, pfntab[i])) {
            pfntab[nr_frees] = pfntab[i];
            nr_frees++;
        }
    }
    if (nr_frees > 0) {
        if (xc_domain_memory_decrease_reservation(xc_handle, dom, nr_frees,
                                                  0, pfntab) < 0) {
            PERROR("Could not decrease reservation");
            goto out;
        } else
            DPRINTF("Decreased reservation by %u / %u pages\n",
                    nr_frees, count);
    }

    rc = 0;

 out:
    free(pfntab);
    return rc;
}

static int
xc_ia64_recv_vcpu_context(int xc_handle, int io_fd, uint32_t dom,
                          uint32_t vcpu, vcpu_guest_context_any_t *ctxt_any)
{
    vcpu_guest_context_t *ctxt = &ctxt_any->c;
    if (read_exact(io_fd, ctxt, sizeof(*ctxt))) {
        ERROR("Error when reading ctxt");
        return -1;
    }

    fprintf(stderr, "ip=%016lx, b0=%016lx\n", ctxt->regs.ip, ctxt->regs.b[0]);

    /* Initialize and set registers. */
    ctxt->flags = VGCF_EXTRA_REGS | VGCF_SET_CR_IRR | VGCF_online |
        VGCF_SET_AR_ITC;
    if (xc_vcpu_setcontext(xc_handle, dom, vcpu, ctxt_any) != 0) {
        ERROR("Couldn't set vcpu context");
        return -1;
    }

    /* Just a check. */
    ctxt->flags = 0;
    if (xc_vcpu_getcontext(xc_handle, dom, vcpu, ctxt_any)) {
        ERROR("Could not get vcpu context");
        return -1;
    }

    return 0;
}

/* Read shared info. */
static int
xc_ia64_recv_shared_info(int xc_handle, int io_fd, uint32_t dom,
                         unsigned long shared_info_frame,
                         unsigned long *start_info_pfn)
{
    unsigned int i;

    /* The new domain's shared-info frame. */
    shared_info_t *shared_info;

    /* Read shared info. */
    shared_info = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                       PROT_READ|PROT_WRITE,
                                       shared_info_frame);
    if (shared_info == NULL) {
        ERROR("cannot map page");
        return -1;
    }

    if (read_exact(io_fd, shared_info, PAGE_SIZE)) {
        ERROR("Error when reading shared_info page");
        munmap(shared_info, PAGE_SIZE);
        return -1;
    }

    /* clear any pending events and the selector */
    memset(&(shared_info->evtchn_pending[0]), 0,
           sizeof(shared_info->evtchn_pending));
    for (i = 0; i < MAX_VIRT_CPUS; i++)
        shared_info->vcpu_info[i].evtchn_pending_sel = 0;

    if (start_info_pfn != NULL)
        *start_info_pfn = shared_info->arch.start_info_pfn;

    munmap(shared_info, PAGE_SIZE);

    return 0;
}

static int
xc_ia64_recv_vcpumap(const xc_dominfo_t *info, int io_fd, uint64_t **vcpumapp)
{
    uint64_t max_virt_cpus;
    unsigned long vcpumap_size;
    uint64_t *vcpumap = NULL;

    *vcpumapp = NULL;

    if (read_exact(io_fd, &max_virt_cpus, sizeof(max_virt_cpus))) {
        ERROR("error reading max_virt_cpus");
        return -1;
    }
    if (max_virt_cpus < info->max_vcpu_id) {
        ERROR("max_virt_cpus %lu is smaller than max_vcpu_id %u",
              (unsigned long)max_virt_cpus, info->max_vcpu_id);
        return -1;
    }
    /* (max_virt_cpus + 1) bits, rounded up to whole bytes. */
    vcpumap_size = (max_virt_cpus + 1 + sizeof(vcpumap[0]) - 1) /
                   sizeof(vcpumap[0]);
    vcpumap = malloc(vcpumap_size);
    if (vcpumap == NULL) {
        ERROR("memory alloc for vcpumap");
        return -1;
    }
    memset(vcpumap, 0, vcpumap_size);
    if (read_exact(io_fd, vcpumap, vcpumap_size)) {
        ERROR("read vcpumap");
        free(vcpumap);
        return -1;
    }

    *vcpumapp = vcpumap;
    return 0;
}

static int
xc_ia64_pv_recv_vcpu_context(int xc_handle, int io_fd, int32_t dom,
                             uint32_t vcpu)
{
    int rc = -1;

    /* A copy of the CPU context of the guest. */
    vcpu_guest_context_any_t ctxt_any;
    vcpu_guest_context_t *ctxt = &ctxt_any.c;

    if (lock_pages(&ctxt_any, sizeof(ctxt_any))) {
        /* needed for build domctl, but might as well do early */
        ERROR("Unable to lock_pages ctxt");
        return -1;
    }

    if (xc_ia64_recv_vcpu_context(xc_handle, io_fd, dom, vcpu, &ctxt_any))
        goto out;

    /* Then get privreg page. */
    if (read_page(xc_handle, io_fd, dom, ctxt->privregs_pfn) < 0) {
        ERROR("Could not read vcpu privregs");
        goto out;
    }

    rc = 0;

 out:
    unlock_pages(&ctxt_any, sizeof(ctxt_any));
    return rc;
}

static int
xc_ia64_pv_recv_shared_info(int xc_handle, int io_fd, int32_t dom,
                            unsigned long shared_info_frame,
                            struct xen_ia64_p2m_table *p2m_table,
                            unsigned int store_evtchn,
                            unsigned long *store_mfn,
                            unsigned int console_evtchn,
                            unsigned long *console_mfn)
{
    unsigned long gmfn;

    /* A temporary mapping of the guest's start_info page. */
    start_info_t *start_info;

    /* Read shared info. */
    if (xc_ia64_recv_shared_info(xc_handle, io_fd, dom,
                                 shared_info_frame, &gmfn))
        return -1;

    /* Uncanonicalise the suspend-record frame number and poke resume rec. */
    if (populate_page_if_necessary(xc_handle, dom, gmfn, p2m_table)) {
        ERROR("cannot populate page 0x%lx", gmfn);
        return -1;
    }
    start_info = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                      PROT_READ | PROT_WRITE, gmfn);
    if (start_info == NULL) {
        ERROR("cannot map start_info page");
        return -1;
    }
    start_info->nr_pages = p2m_size;
    start_info->shared_info = shared_info_frame << PAGE_SHIFT;
    start_info->flags = 0;
    *store_mfn = start_info->store_mfn;
    start_info->store_evtchn = store_evtchn;
    *console_mfn = start_info->console.domU.mfn;
    start_info->console.domU.evtchn = console_evtchn;
    munmap(start_info, PAGE_SIZE);

    return 0;
}

static int
xc_ia64_pv_recv_context_ver_one_or_two(int xc_handle, int io_fd, uint32_t dom,
                                       unsigned long shared_info_frame,
                                       struct xen_ia64_p2m_table *p2m_table,
                                       unsigned int store_evtchn,
                                       unsigned long *store_mfn,
                                       unsigned int console_evtchn,
                                       unsigned long *console_mfn)
{
    int rc;

    /* vcpu 0 context */
    rc = xc_ia64_pv_recv_vcpu_context(xc_handle, io_fd, dom, 0);
    if (rc)
        return rc;

    /* shared_info */
    rc = xc_ia64_pv_recv_shared_info(xc_handle, io_fd, dom, shared_info_frame,
                                     p2m_table, store_evtchn, store_mfn,
                                     console_evtchn, console_mfn);
    return rc;
}
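
/*
 * Format version three (below) additionally carries a vcpu map and the
 * context of every online vcpu, not just vcpu 0.
 */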
static int
xc_ia64_pv_recv_context_ver_three(int xc_handle, int io_fd, uint32_t dom,
                                  unsigned long shared_info_frame,
                                  struct xen_ia64_p2m_table *p2m_table,
                                  unsigned int store_evtchn,
                                  unsigned long *store_mfn,
                                  unsigned int console_evtchn,
                                  unsigned long *console_mfn)
{
    int rc = -1;
    xc_dominfo_t info;
    unsigned int i;

    /* vcpu map */
    uint64_t *vcpumap = NULL;

    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
        ERROR("Could not get domain info");
        return -1;
    }
    rc = xc_ia64_recv_vcpumap(&info, io_fd, &vcpumap);
    if (rc != 0)
        goto out;

    /* vcpu context */
    for (i = 0; i <= info.max_vcpu_id; i++) {
        if (!__test_bit(i, vcpumap))
            continue;

        rc = xc_ia64_pv_recv_vcpu_context(xc_handle, io_fd, dom, i);
        if (rc != 0)
            goto out;
    }

    /* shared_info */
    rc = xc_ia64_pv_recv_shared_info(xc_handle, io_fd, dom, shared_info_frame,
                                     p2m_table, store_evtchn, store_mfn,
                                     console_evtchn, console_mfn);
 out:
    free(vcpumap);
    return rc;
}

static int
xc_ia64_pv_recv_context(unsigned long format_version,
                        int xc_handle, int io_fd, uint32_t dom,
                        unsigned long shared_info_frame,
                        struct xen_ia64_p2m_table *p2m_table,
                        unsigned int store_evtchn,
                        unsigned long *store_mfn,
                        unsigned int console_evtchn,
                        unsigned long *console_mfn)
{
    int rc;
    switch (format_version) {
    case XC_IA64_SR_FORMAT_VER_ONE:
    case XC_IA64_SR_FORMAT_VER_TWO:
        rc = xc_ia64_pv_recv_context_ver_one_or_two(xc_handle, io_fd, dom,
                                                    shared_info_frame,
                                                    p2m_table, store_evtchn,
                                                    store_mfn, console_evtchn,
                                                    console_mfn);
        break;
    case XC_IA64_SR_FORMAT_VER_THREE:
        rc = xc_ia64_pv_recv_context_ver_three(xc_handle, io_fd, dom,
                                               shared_info_frame,
                                               p2m_table, store_evtchn,
                                               store_mfn, console_evtchn,
                                               console_mfn);
        break;
    default:
        ERROR("Unsupported format version");
        rc = -1;
        break;
    }
    return rc;
}

static int
xc_ia64_hvm_recv_context(int xc_handle, int io_fd, uint32_t dom,
                         unsigned long shared_info_frame,
                         struct xen_ia64_p2m_table *p2m_table,
                         unsigned int store_evtchn, unsigned long *store_mfn,
                         unsigned int console_evtchn,
                         unsigned long *console_mfn)
{
    int rc = -1;
    xc_dominfo_t info;
    unsigned int i;

    /* vcpu map */
    uint64_t *vcpumap = NULL;

    /* HVM: magic frames for ioreqs and xenstore comms */
    const int hvm_params[] = {
        HVM_PARAM_STORE_PFN,
        HVM_PARAM_IOREQ_PFN,
        HVM_PARAM_BUFIOREQ_PFN,
        HVM_PARAM_BUFPIOREQ_PFN,
    };
    const int NR_PARAMS = sizeof(hvm_params) / sizeof(hvm_params[0]);
    /* magic pfns, in the same order as hvm_params above */
    uint64_t magic_pfns[NR_PARAMS];

    /* HVM: a buffer for holding HVM context */
    uint64_t rec_size = 0;
    uint8_t *hvm_buf = NULL;

    /* Read shared info. */
    if (xc_ia64_recv_shared_info(xc_handle, io_fd, dom, shared_info_frame,
                                 NULL))
        goto out;

    /* vcpu map */
    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
        ERROR("Could not get domain info");
        goto out;
    }
    if (xc_ia64_recv_vcpumap(&info, io_fd, &vcpumap))
        goto out;

    /* vcpu context */
    for (i = 0; i <= info.max_vcpu_id; i++) {
        /* A copy of the CPU context of the guest. */
        vcpu_guest_context_any_t ctxt_any;

        if (!__test_bit(i, vcpumap))
            continue;

        if (xc_ia64_recv_vcpu_context(xc_handle, io_fd, dom, i, &ctxt_any))
            goto out;

        /* The system context of the vcpu is received as HVM context. */
    }

    /* Set HVM-specific parameters */
    if (read_exact(io_fd, magic_pfns, sizeof(magic_pfns))) {
        ERROR("error reading magic page addresses");
        goto out;
    }

    /* These comms pages need to be zeroed at the start of day */
    for (i = 0; i < NR_PARAMS; i++) {
        rc = xc_clear_domain_page(xc_handle, dom, magic_pfns[i]);
        if (rc != 0) {
            ERROR("error zeroing magic pages: %i", rc);
            goto out;
        }
        rc = xc_set_hvm_param(xc_handle, dom, hvm_params[i], magic_pfns[i]);
        if (rc != 0) {
            ERROR("error setting HVM params: %i", rc);
            goto out;
        }
    }
    rc = xc_set_hvm_param(xc_handle, dom,
                          HVM_PARAM_STORE_EVTCHN, store_evtchn);
    if (rc != 0) {
        ERROR("error setting HVM params: %i", rc);
        goto out;
    }
    rc = -1;    /* subsequent failures report -1 again */
    *store_mfn = magic_pfns[0];

    /* Read HVM context */
    if (read_exact(io_fd, &rec_size, sizeof(rec_size))) {
        ERROR("error reading hvm context size");
        goto out;
    }

    hvm_buf = malloc(rec_size);
    if (hvm_buf == NULL) {
        ERROR("memory alloc for hvm context buffer failed");
        errno = ENOMEM;
        goto out;
    }

    if (read_exact(io_fd, hvm_buf, rec_size)) {
        ERROR("error loading the HVM context");
        goto out;
    }

    rc = xc_domain_hvm_setcontext(xc_handle, dom, hvm_buf, rec_size);
    if (rc != 0) {
        ERROR("error setting the HVM context");
        goto out;
    }

    rc = 0;

 out:
    free(vcpumap);
    free(hvm_buf);
    return rc;
}

/*
 * An hvm domain requires the IO pages to be allocated when
 * XEN_DOMCTL_arch_setup is issued.
 */
static int
xc_ia64_hvm_domain_setup(int xc_handle, uint32_t dom)
{
    int rc;
    xen_pfn_t pfn_list[] = {
        IO_PAGE_START >> PAGE_SHIFT,
        BUFFER_IO_PAGE_START >> PAGE_SHIFT,
        BUFFER_PIO_PAGE_START >> PAGE_SHIFT,
    };
    unsigned long nr_pages = sizeof(pfn_list) / sizeof(pfn_list[0]);

    rc = xc_domain_memory_populate_physmap(xc_handle, dom, nr_pages,
                                           0, 0, &pfn_list[0]);
    if (rc != 0)
        PERROR("Could not allocate IO page or buffer io page.\n");
    return rc;
}
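
/*
 * Restore a domain from the image on io_fd.  The image layout, as consumed
 * below, is:
 *   - p2m_size, then the format version word;
 *   - the XEN_DOMCTL_arch_setup payload saved on the source host;
 *   - for format versions two and three, the memmap info;
 *   - (gmfn, page contents) pairs, terminated by an INVALID_MFN marker;
 *   - the list of unallocated pfns to hand back to Xen;
 *   - the PV or HVM vcpu and shared-info context records.
 */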
int
xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
                  unsigned int store_evtchn, unsigned long *store_mfn,
                  unsigned int console_evtchn, unsigned long *console_mfn,
                  unsigned int hvm, unsigned int pae, int superpages)
{
    DECLARE_DOMCTL;
    int rc = 1;
    unsigned long ver;

    /* The new domain's shared-info frame number. */
    unsigned long shared_info_frame;

    struct xen_ia64_p2m_table p2m_table;
    xc_ia64_p2m_init(&p2m_table);

    /* For info only */
    nr_pfns = 0;

    if (read_exact(io_fd, &p2m_size, sizeof(unsigned long))) {
        ERROR("read: p2m_size");
        goto out;
    }
    DPRINTF("xc_domain_restore start: p2m_size = %lx\n", p2m_size);

    if (read_exact(io_fd, &ver, sizeof(unsigned long))) {
        ERROR("Error when reading version");
        goto out;
    }
    if (ver != XC_IA64_SR_FORMAT_VER_ONE &&
        ver != XC_IA64_SR_FORMAT_VER_TWO &&
        ver != XC_IA64_SR_FORMAT_VER_THREE) {
        ERROR("format version of the save image doesn't match");
        goto out;
    }

    if (read_exact(io_fd, &domctl.u.arch_setup, sizeof(domctl.u.arch_setup))) {
        ERROR("read: domain setup");
        goto out;
    }

    if (hvm && xc_ia64_hvm_domain_setup(xc_handle, dom) != 0)
        goto out;

    /* Build firmware (will be overwritten). */
    domctl.domain = (domid_t)dom;
    domctl.u.arch_setup.flags &= ~XEN_DOMAINSETUP_query;
    domctl.u.arch_setup.bp = 0; /* indicates domain restore */

    domctl.cmd = XEN_DOMCTL_arch_setup;
    if (xc_domctl(xc_handle, &domctl))
        goto out;

    /* Get the domain's shared-info frame. */
    domctl.cmd = XEN_DOMCTL_getdomaininfo;
    domctl.domain = (domid_t)dom;
    if (xc_domctl(xc_handle, &domctl) < 0) {
        ERROR("Could not get information on new domain");
        goto out;
    }
    shared_info_frame = domctl.u.getdomaininfo.shared_info_frame;

    if (ver == XC_IA64_SR_FORMAT_VER_THREE ||
        ver == XC_IA64_SR_FORMAT_VER_TWO) {
        unsigned int memmap_info_num_pages;
        unsigned long memmap_size;
        xen_ia64_memmap_info_t *memmap_info;

        if (read_exact(io_fd, &memmap_info_num_pages,
                       sizeof(memmap_info_num_pages))) {
            ERROR("read: memmap_info_num_pages");
            goto out;
        }
        memmap_size = memmap_info_num_pages * PAGE_SIZE;
        memmap_info = malloc(memmap_size);
        if (memmap_info == NULL) {
            ERROR("Could not allocate memory for memmap_info");
            goto out;
        }
        if (read_exact(io_fd, memmap_info, memmap_size)) {
            ERROR("read: memmap_info");
            free(memmap_info);
            goto out;
        }
        if (xc_ia64_p2m_map(&p2m_table, xc_handle,
                            dom, memmap_info, IA64_DOM0VP_EFP_ALLOC_PTE)) {
            ERROR("p2m mapping");
            free(memmap_info);
            goto out;
        }
        free(memmap_info);
    } else if (ver == XC_IA64_SR_FORMAT_VER_ONE) {
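        /*
         * Version-one images carry no memmap, so synthesize a minimal one:
         * a single WB descriptor covering the guest's pages.
         */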
        xen_ia64_memmap_info_t *memmap_info;
        efi_memory_desc_t *memdesc;
        uint64_t buffer[(sizeof(*memmap_info) + sizeof(*memdesc) +
                         sizeof(uint64_t) - 1) / sizeof(uint64_t)];

        memset(buffer, 0, sizeof(buffer));
        memmap_info = (xen_ia64_memmap_info_t *)buffer;
        memdesc = (efi_memory_desc_t *)&memmap_info->memdesc[0];
        memmap_info->efi_memmap_size = sizeof(*memdesc);
        memmap_info->efi_memdesc_size = sizeof(*memdesc);
        memmap_info->efi_memdesc_version = EFI_MEMORY_DESCRIPTOR_VERSION;

        memdesc->type = EFI_CONVENTIONAL_MEMORY;
        memdesc->phys_addr = 0;
        memdesc->virt_addr = 0;
        memdesc->num_pages = nr_pfns << (PAGE_SHIFT - EFI_PAGE_SHIFT);
        memdesc->attribute = EFI_MEMORY_WB;

        if (xc_ia64_p2m_map(&p2m_table, xc_handle,
                            dom, memmap_info, IA64_DOM0VP_EFP_ALLOC_PTE)) {
            ERROR("p2m mapping");
            goto out;
        }
    } else {
        ERROR("unknown version");
        goto out;
    }

    DPRINTF("Reloading memory pages: 0%%\n");

    while (1) {
        unsigned long gmfn;
        if (read_exact(io_fd, &gmfn, sizeof(unsigned long))) {
            ERROR("Error when reading gmfn");
            goto out;
        }
        if (gmfn == INVALID_MFN)
            break;

        if (populate_page_if_necessary(xc_handle, dom, gmfn, &p2m_table) < 0) {
            ERROR("cannot populate page 0x%lx", gmfn);
            goto out;
        }
        if (read_page(xc_handle, io_fd, dom, gmfn) < 0)
            goto out;
    }

    DPRINTF("Received all pages\n");

    if (xc_ia64_recv_unallocated_list(xc_handle, io_fd, dom, &p2m_table))
        goto out;

    if (!hvm)
        rc = xc_ia64_pv_recv_context(ver,
                                     xc_handle, io_fd, dom, shared_info_frame,
                                     &p2m_table, store_evtchn, store_mfn,
                                     console_evtchn, console_mfn);
    else
        rc = xc_ia64_hvm_recv_context(xc_handle, io_fd, dom, shared_info_frame,
                                      &p2m_table, store_evtchn, store_mfn,
                                      console_evtchn, console_mfn);
    if (rc)
        goto out;

    /*
     * Safety checking of saved context:
     *  1. user_regs is fine, as Xen checks that on context switch.
     *  2. fpu_ctxt is fine, as it can't hurt Xen.
     *  3. trap_ctxt needs the code selectors checked.
     *  4. ldt base must be page-aligned, no more than 8192 ents, ...
     *  5. gdt already done, and further checking is done by Xen.
     *  6. check that kernel_ss is safe.
     *  7. pt_base is already done.
     *  8. debugregs are checked by Xen.
     *  9. callback code selectors need checking.
     */
    DPRINTF("Domain ready to be built.\n");

    rc = 0;

 out:
    xc_ia64_p2m_unmap(&p2m_table);

    if ((rc != 0) && (dom != 0))
        xc_domain_destroy(xc_handle, dom);

    DPRINTF("Restore exit with rc=%d\n", rc);

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */