ia64/xen-unstable

tools/libxc/ia64/xc_ia64_linux_restore.c @ 18570:6208fcb4082f

[IA64] xc restore: fix domain restore.

Fix domain restore of the version one format.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author   Isaku Yamahata <yamahata@valinux.co.jp>
date     Thu Oct 02 17:27:57 2008 +0900
parents 08f77df14cba
children 4422219acd93
/******************************************************************************
 * xc_ia64_linux_restore.c
 *
 * Restore the state of a Linux session.
 *
 * Copyright (c) 2003, K A Fraser.
 * Rewritten for ia64 by Tristan Gingold <tristan.gingold@bull.net>
 *
 * Copyright (c) 2007 Isaku Yamahata <yamahata@valinux.co.jp>
 *   Use foreign p2m exposure.
 *   VTi domain support
 */

#include <stdlib.h>
#include <unistd.h>

#include "xg_private.h"
#include "xc_ia64_save_restore.h"
#include "xc_ia64.h"
#include "xc_efi.h"
#include "xen/hvm/params.h"

#define PFN_TO_KB(_pfn) ((_pfn) << (PAGE_SHIFT - 10))

/* number of pfns this guest has (i.e. number of entries in the P2M) */
static unsigned long p2m_size;

/* number of 'in use' pfns in the guest (i.e. #P2M entries with a valid mfn) */
static unsigned long nr_pfns;

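/*
 * Pages are not preallocated: a page is added to the physmap only when
 * the restore stream first references its gmfn.  Entries that are
 * already present are left alone, so no page is populated twice.
 */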
static int
populate_page_if_necessary(int xc_handle, uint32_t dom, unsigned long gmfn,
                           struct xen_ia64_p2m_table *p2m_table)
{
    if (xc_ia64_p2m_present(p2m_table, gmfn))
        return 0;

    return xc_domain_memory_populate_physmap(xc_handle, dom, 1, 0, 0, &gmfn);
}

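/*
 * Map one guest page and fill it with the next PAGE_SIZE bytes of the
 * restore stream.
 */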
static int
read_page(int xc_handle, int io_fd, uint32_t dom, unsigned long pfn)
{
    void *mem;

    mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                               PROT_READ|PROT_WRITE, pfn);
    if (mem == NULL) {
        ERROR("cannot map page");
        return -1;
    }
    if (read_exact(io_fd, mem, PAGE_SIZE)) {
        ERROR("Error when reading from state file (5)");
        munmap(mem, PAGE_SIZE);
        return -1;
    }
    munmap(mem, PAGE_SIZE);
    return 0;
}

/*
 * Get the list of PFNs that are not in the pseudo-phys map.
 * Although we allocate pages on demand, the balloon driver may have
 * freed pages in the meantime, so those freed pages must be released
 * here as well.
 */
static int
xc_ia64_recv_unallocated_list(int xc_handle, int io_fd, uint32_t dom,
                              struct xen_ia64_p2m_table *p2m_table)
{
    int rc = -1;
    unsigned int i;
    unsigned int count;
    unsigned long *pfntab = NULL;
    unsigned int nr_frees;

    if (read_exact(io_fd, &count, sizeof(count))) {
        ERROR("Error when reading pfn count");
        goto out;
    }

    pfntab = malloc(sizeof(unsigned long) * count);
    if (pfntab == NULL) {
        ERROR("Out of memory");
        goto out;
    }

    if (read_exact(io_fd, pfntab, sizeof(unsigned long) * count)) {
        ERROR("Error when reading pfntab");
        goto out;
    }

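    /* Compact the list in place, keeping only the pfns that are
     * actually backed by a page. */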
    nr_frees = 0;
    for (i = 0; i < count; i++) {
        if (xc_ia64_p2m_allocated(p2m_table, pfntab[i])) {
            pfntab[nr_frees] = pfntab[i];
            nr_frees++;
        }
    }
    if (nr_frees > 0) {
        if (xc_domain_memory_decrease_reservation(xc_handle, dom, nr_frees,
                                                  0, pfntab) < 0) {
            PERROR("Could not decrease reservation");
            goto out;
        } else
            DPRINTF("Decreased reservation by %d / %d pages\n",
                    nr_frees, count);
    }

    rc = 0;

 out:
    if (pfntab != NULL)
        free(pfntab);
    return rc;
}

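/*
 * Read one vcpu context record from the stream and install it with
 * xc_vcpu_setcontext.  The context is then read back purely as a
 * sanity check.
 */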
static int
xc_ia64_recv_vcpu_context(int xc_handle, int io_fd, uint32_t dom,
                          uint32_t vcpu, vcpu_guest_context_any_t *ctxt_any)
{
    vcpu_guest_context_t *ctxt = &ctxt_any->c;
    if (read_exact(io_fd, ctxt, sizeof(*ctxt))) {
        ERROR("Error when reading ctxt");
        return -1;
    }

    fprintf(stderr, "ip=%016lx, b0=%016lx\n", ctxt->regs.ip, ctxt->regs.b[0]);

    /* Initialize and set registers. */
    ctxt->flags = VGCF_EXTRA_REGS | VGCF_SET_CR_IRR | VGCF_online;
    if (xc_vcpu_setcontext(xc_handle, dom, vcpu, ctxt_any) != 0) {
        ERROR("Couldn't set vcpu context");
        return -1;
    }

    /* Just a check. */
    ctxt->flags = 0;
    if (xc_vcpu_getcontext(xc_handle, dom, vcpu, ctxt_any)) {
        ERROR("Could not get vcpu context");
        return -1;
    }

    return 0;
}

/* Read shared info. */
static int
xc_ia64_recv_shared_info(int xc_handle, int io_fd, uint32_t dom,
                         unsigned long shared_info_frame,
                         unsigned long *start_info_pfn)
{
    unsigned int i;

    /* The new domain's shared-info frame. */
    shared_info_t *shared_info;

    /* Read shared info. */
    shared_info = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                       PROT_READ|PROT_WRITE,
                                       shared_info_frame);
    if (shared_info == NULL) {
        ERROR("cannot map page");
        return -1;
    }

    if (read_exact(io_fd, shared_info, PAGE_SIZE)) {
        ERROR("Error when reading shared_info page");
        munmap(shared_info, PAGE_SIZE);
        return -1;
    }

    /* clear any pending events and the selector */
    memset(&(shared_info->evtchn_pending[0]), 0,
           sizeof(shared_info->evtchn_pending));
    for (i = 0; i < MAX_VIRT_CPUS; i++)
        shared_info->vcpu_info[i].evtchn_pending_sel = 0;

    if (start_info_pfn != NULL)
        *start_info_pfn = shared_info->arch.start_info_pfn;

    munmap(shared_info, PAGE_SIZE);

    return 0;
}

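/*
 * Read the vcpu online bitmap (one bit per possible vcpu).  The save
 * side writes its idea of max_virt_cpus first so that both sides agree
 * on the bitmap size.
 */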
static int
xc_ia64_recv_vcpumap(const xc_dominfo_t *info, int io_fd, uint64_t **vcpumapp)
{
    uint64_t max_virt_cpus;
    unsigned long vcpumap_size;
    uint64_t *vcpumap = NULL;

    *vcpumapp = NULL;

    if (read_exact(io_fd, &max_virt_cpus, sizeof(max_virt_cpus))) {
        ERROR("error reading max_virt_cpus");
        return -1;
    }
    if (max_virt_cpus < info->max_vcpu_id) {
        ERROR("too small max_virt_cpus %lu < %u\n",
              (unsigned long)max_virt_cpus, info->max_vcpu_id);
        return -1;
    }
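    /* Size of the vcpu bitmap in bytes, rounded up; this must match
     * the size written by the save side. */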
    vcpumap_size = (max_virt_cpus + 1 + sizeof(vcpumap[0]) - 1) /
                   sizeof(vcpumap[0]);
    vcpumap = malloc(vcpumap_size);
    if (vcpumap == NULL) {
        ERROR("memory alloc for vcpumap");
        return -1;
    }
    memset(vcpumap, 0, vcpumap_size);
    if (read_exact(io_fd, vcpumap, vcpumap_size)) {
        ERROR("read vcpumap");
        free(vcpumap);
        return -1;
    }

    *vcpumapp = vcpumap;
    return 0;
}

static int
xc_ia64_pv_recv_vcpu_context(int xc_handle, int io_fd, int32_t dom,
                             uint32_t vcpu)
{
    int rc = -1;

    /* A copy of the CPU context of the guest. */
    vcpu_guest_context_any_t ctxt_any;
    vcpu_guest_context_t *ctxt = &ctxt_any.c;

    if (lock_pages(&ctxt_any, sizeof(ctxt_any))) {
        /* needed for build domctl, but might as well do early */
        ERROR("Unable to lock_pages ctxt");
        return -1;
    }

    if (xc_ia64_recv_vcpu_context(xc_handle, io_fd, dom, vcpu, &ctxt_any))
        goto out;

    /* Then get privreg page. */
    if (read_page(xc_handle, io_fd, dom, ctxt->privregs_pfn) < 0) {
        ERROR("Could not read vcpu privregs");
        goto out;
    }

    rc = 0;

 out:
    unlock_pages(&ctxt_any, sizeof(ctxt_any));
    return rc;
}

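/*
 * Restore the shared_info page and patch up the guest's start_info:
 * the store/console mfns recorded there are handed back to the caller
 * and the new event channel numbers are poked in.
 */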
static int
xc_ia64_pv_recv_shared_info(int xc_handle, int io_fd, int32_t dom,
                            unsigned long shared_info_frame,
                            struct xen_ia64_p2m_table *p2m_table,
                            unsigned int store_evtchn,
                            unsigned long *store_mfn,
                            unsigned int console_evtchn,
                            unsigned long *console_mfn)
{
    unsigned long gmfn;

    /* A temporary mapping of the guest's start_info page. */
    start_info_t *start_info;

    /* Read shared info. */
    if (xc_ia64_recv_shared_info(xc_handle, io_fd, dom,
                                 shared_info_frame, &gmfn))
        return -1;

    /* Uncanonicalise the suspend-record frame number and poke resume rec. */
    if (populate_page_if_necessary(xc_handle, dom, gmfn, p2m_table)) {
        ERROR("cannot populate page 0x%lx", gmfn);
        return -1;
    }
    start_info = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                      PROT_READ | PROT_WRITE, gmfn);
    if (start_info == NULL) {
        ERROR("cannot map start_info page");
        return -1;
    }
    start_info->nr_pages = p2m_size;
    start_info->shared_info = shared_info_frame << PAGE_SHIFT;
    start_info->flags = 0;
    *store_mfn = start_info->store_mfn;
    start_info->store_evtchn = store_evtchn;
    *console_mfn = start_info->console.domU.mfn;
    start_info->console.domU.evtchn = console_evtchn;
    munmap(start_info, PAGE_SIZE);

    return 0;
}

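/*
 * Format versions one and two carry a single vcpu (vcpu 0) context
 * followed by the shared_info page.
 */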
static int
xc_ia64_pv_recv_context_ver_one_or_two(int xc_handle, int io_fd, uint32_t dom,
                                       unsigned long shared_info_frame,
                                       struct xen_ia64_p2m_table *p2m_table,
                                       unsigned int store_evtchn,
                                       unsigned long *store_mfn,
                                       unsigned int console_evtchn,
                                       unsigned long *console_mfn)
{
    int rc;

    /* vcpu 0 context */
    rc = xc_ia64_pv_recv_vcpu_context(xc_handle, io_fd, dom, 0);
    if (rc)
        return rc;

    /* shared_info */
    rc = xc_ia64_pv_recv_shared_info(xc_handle, io_fd, dom, shared_info_frame,
                                     p2m_table, store_evtchn, store_mfn,
                                     console_evtchn, console_mfn);
    return rc;
}

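/*
 * Format version three adds a vcpu map, so contexts arrive only for
 * the vcpus that were online at save time.
 */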
static int
xc_ia64_pv_recv_context_ver_three(int xc_handle, int io_fd, uint32_t dom,
                                  unsigned long shared_info_frame,
                                  struct xen_ia64_p2m_table *p2m_table,
                                  unsigned int store_evtchn,
                                  unsigned long *store_mfn,
                                  unsigned int console_evtchn,
                                  unsigned long *console_mfn)
{
    int rc = -1;
    xc_dominfo_t info;
    unsigned int i;

    /* vcpu map */
    uint64_t *vcpumap = NULL;

    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
        ERROR("Could not get domain info");
        return -1;
    }
    rc = xc_ia64_recv_vcpumap(&info, io_fd, &vcpumap);
    if (rc != 0)
        goto out;

    /* vcpu context */
    for (i = 0; i <= info.max_vcpu_id; i++) {
        if (!__test_bit(i, vcpumap))
            continue;

        rc = xc_ia64_pv_recv_vcpu_context(xc_handle, io_fd, dom, i);
        if (rc != 0)
            goto out;
    }

    /* shared_info */
    rc = xc_ia64_pv_recv_shared_info(xc_handle, io_fd, dom, shared_info_frame,
                                     p2m_table, store_evtchn, store_mfn,
                                     console_evtchn, console_mfn);
 out:
    if (vcpumap != NULL)
        free(vcpumap);
    return rc;
}

static int
xc_ia64_pv_recv_context(unsigned long format_version,
                        int xc_handle, int io_fd, uint32_t dom,
                        unsigned long shared_info_frame,
                        struct xen_ia64_p2m_table *p2m_table,
                        unsigned int store_evtchn,
                        unsigned long *store_mfn,
                        unsigned int console_evtchn,
                        unsigned long *console_mfn)
{
    int rc;
    switch (format_version) {
    case XC_IA64_SR_FORMAT_VER_ONE:
    case XC_IA64_SR_FORMAT_VER_TWO:
        rc = xc_ia64_pv_recv_context_ver_one_or_two(xc_handle, io_fd, dom,
                                                    shared_info_frame,
                                                    p2m_table, store_evtchn,
                                                    store_mfn, console_evtchn,
                                                    console_mfn);
        break;
    case XC_IA64_SR_FORMAT_VER_THREE:
        rc = xc_ia64_pv_recv_context_ver_three(xc_handle, io_fd, dom,
                                               shared_info_frame,
                                               p2m_table, store_evtchn,
                                               store_mfn, console_evtchn,
                                               console_mfn);
        break;
    default:
        ERROR("Unsupported format version");
        rc = -1;
        break;
    }
    return rc;
}

static int
xc_ia64_hvm_recv_context(int xc_handle, int io_fd, uint32_t dom,
                         unsigned long shared_info_frame,
                         struct xen_ia64_p2m_table *p2m_table,
                         unsigned int store_evtchn, unsigned long *store_mfn,
                         unsigned int console_evtchn,
                         unsigned long *console_mfn)
{
    int rc = -1;
    xc_dominfo_t info;
    unsigned int i;

    /* cpumap */
    uint64_t *vcpumap = NULL;

    /* HVM: magic frames for ioreqs and xenstore comms */
    const int hvm_params[] = {
        HVM_PARAM_STORE_PFN,
        HVM_PARAM_IOREQ_PFN,
        HVM_PARAM_BUFIOREQ_PFN,
        HVM_PARAM_BUFPIOREQ_PFN,
    };
    const int NR_PARAMS = sizeof(hvm_params) / sizeof(hvm_params[0]);
    /* magic_pfns[] entries arrive in the same order as hvm_params[] */
    uint64_t magic_pfns[NR_PARAMS];

    /* HVM: a buffer for holding the HVM context */
    uint64_t rec_size = 0;
    uint8_t *hvm_buf = NULL;

    /* Read shared info. */
    if (xc_ia64_recv_shared_info(xc_handle, io_fd, dom, shared_info_frame,
                                 NULL))
        goto out;

    /* vcpu map */
    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
        ERROR("Could not get domain info");
        goto out;
    }
    if (xc_ia64_recv_vcpumap(&info, io_fd, &vcpumap))
        goto out;

    /* vcpu context */
    for (i = 0; i <= info.max_vcpu_id; i++) {
        /* A copy of the CPU context of the guest. */
        vcpu_guest_context_any_t ctxt_any;

        if (!__test_bit(i, vcpumap))
            continue;

        if (xc_ia64_recv_vcpu_context(xc_handle, io_fd, dom, i, &ctxt_any))
            goto out;

        /* The system context of each vcpu is received later as part of
         * the HVM context. */
    }

    /* Set HVM-specific parameters */
    if (read_exact(io_fd, magic_pfns, sizeof(magic_pfns))) {
        ERROR("error reading magic page addresses");
        goto out;
    }

    /* These comms pages need to be zeroed at the start of day */
    for (i = 0; i < NR_PARAMS; i++) {
        rc = xc_clear_domain_page(xc_handle, dom, magic_pfns[i]);
        if (rc != 0) {
            ERROR("error zeroing magic pages: %i", rc);
            goto out;
        }
        rc = xc_set_hvm_param(xc_handle, dom, hvm_params[i], magic_pfns[i]);
        if (rc != 0) {
            ERROR("error setting HVM params: %i", rc);
            goto out;
        }
    }
    rc = xc_set_hvm_param(xc_handle, dom,
                          HVM_PARAM_STORE_EVTCHN, store_evtchn);
    if (rc != 0) {
        ERROR("error setting HVM params: %i", rc);
        goto out;
    }
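    /* rc is 0 from the successful param calls above; reset it so that
     * any failure below is again reported as an error. */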
    rc = -1;
    *store_mfn = magic_pfns[0];

    /* Read HVM context */
    if (read_exact(io_fd, &rec_size, sizeof(rec_size))) {
        ERROR("error reading hvm context size");
        goto out;
    }

    hvm_buf = malloc(rec_size);
    if (hvm_buf == NULL) {
        ERROR("memory alloc for hvm context buffer failed");
        errno = ENOMEM;
        goto out;
    }

    if (read_exact(io_fd, hvm_buf, rec_size)) {
        ERROR("error loading the HVM context");
        goto out;
    }

    rc = xc_domain_hvm_setcontext(xc_handle, dom, hvm_buf, rec_size);
    if (rc != 0) {
        ERROR("error setting the HVM context");
        goto out;
    }

    rc = 0;

 out:
    if (vcpumap != NULL)
        free(vcpumap);
    if (hvm_buf != NULL)
        free(hvm_buf);
    return rc;
}

/*
 * An HVM domain requires its IO pages to be allocated before
 * XEN_DOMCTL_arch_setup is issued.
 */
static int
xc_ia64_hvm_domain_setup(int xc_handle, uint32_t dom)
{
    int rc;
    xen_pfn_t pfn_list[] = {
        IO_PAGE_START >> PAGE_SHIFT,
        BUFFER_IO_PAGE_START >> PAGE_SHIFT,
        BUFFER_PIO_PAGE_START >> PAGE_SHIFT,
    };
    unsigned long nr_pages = sizeof(pfn_list) / sizeof(pfn_list[0]);

    rc = xc_domain_memory_populate_physmap(xc_handle, dom, nr_pages,
                                           0, 0, &pfn_list[0]);
    if (rc != 0)
        PERROR("Could not allocate IO page or buffer io page.\n");
    return rc;
}

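/*
 * A sketch of the incoming save image, as reconstructed from the reads
 * below (the authoritative layout is whatever the matching save code,
 * xc_ia64_linux_save.c, writes):
 *
 *   unsigned long p2m_size;        number of P2M entries
 *   unsigned long ver;             XC_IA64_SR_FORMAT_VER_{ONE,TWO,THREE}
 *   domctl.u.arch_setup;           domain setup record
 *   [versions two and three only]
 *     unsigned int memmap_info_num_pages;
 *     memmap_info (memmap_info_num_pages * PAGE_SIZE bytes);
 *   repeated until gmfn == INVALID_MFN:
 *     unsigned long gmfn;  page contents (PAGE_SIZE bytes);
 *   unallocated pfn list (count, then pfns);
 *   PV or HVM context records (see the recv functions above).
 */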
int
xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
                  unsigned int store_evtchn, unsigned long *store_mfn,
                  unsigned int console_evtchn, unsigned long *console_mfn,
                  unsigned int hvm, unsigned int pae)
{
    DECLARE_DOMCTL;
    int rc = 1;
    unsigned long ver;

    /* The new domain's shared-info frame number. */
    unsigned long shared_info_frame;

    struct xen_ia64_p2m_table p2m_table;
    xc_ia64_p2m_init(&p2m_table);

    /* For info only */
    nr_pfns = 0;

    if (read_exact(io_fd, &p2m_size, sizeof(unsigned long))) {
        ERROR("read: p2m_size");
        goto out;
    }
    DPRINTF("xc_linux_restore start: p2m_size = %lx\n", p2m_size);

    if (read_exact(io_fd, &ver, sizeof(unsigned long))) {
        ERROR("Error when reading version");
        goto out;
    }
    if (ver != XC_IA64_SR_FORMAT_VER_ONE &&
        ver != XC_IA64_SR_FORMAT_VER_TWO &&
        ver != XC_IA64_SR_FORMAT_VER_THREE) {
        ERROR("unsupported save file version %lu", ver);
        goto out;
    }

    if (read_exact(io_fd, &domctl.u.arch_setup, sizeof(domctl.u.arch_setup))) {
        ERROR("read: domain setup");
        goto out;
    }

    if (hvm && xc_ia64_hvm_domain_setup(xc_handle, dom) != 0)
        goto out;

    /* Build firmware (will be overwritten). */
    domctl.domain = (domid_t)dom;
    domctl.u.arch_setup.flags &= ~XEN_DOMAINSETUP_query;
    domctl.u.arch_setup.bp = 0; /* indicate domain restore */

    domctl.cmd = XEN_DOMCTL_arch_setup;
    if (xc_domctl(xc_handle, &domctl))
        goto out;

    /* Get the domain's shared-info frame. */
    domctl.cmd = XEN_DOMCTL_getdomaininfo;
    domctl.domain = (domid_t)dom;
    if (xc_domctl(xc_handle, &domctl) < 0) {
        ERROR("Could not get information on new domain");
        goto out;
    }
    shared_info_frame = domctl.u.getdomaininfo.shared_info_frame;

    if (ver == XC_IA64_SR_FORMAT_VER_THREE ||
        ver == XC_IA64_SR_FORMAT_VER_TWO) {
        unsigned int memmap_info_num_pages;
        unsigned long memmap_size;
        xen_ia64_memmap_info_t *memmap_info;

        if (read_exact(io_fd, &memmap_info_num_pages,
                       sizeof(memmap_info_num_pages))) {
            ERROR("read: memmap_info_num_pages");
            goto out;
        }
        memmap_size = memmap_info_num_pages * PAGE_SIZE;
        memmap_info = malloc(memmap_size);
        if (memmap_info == NULL) {
            ERROR("Could not allocate memory for memmap_info");
            goto out;
        }
        if (read_exact(io_fd, memmap_info, memmap_size)) {
            ERROR("read: memmap_info");
            free(memmap_info);
            goto out;
        }
        if (xc_ia64_p2m_map(&p2m_table, xc_handle,
                            dom, memmap_info, IA64_DOM0VP_EFP_ALLOC_PTE)) {
            ERROR("p2m mapping");
            free(memmap_info);
            goto out;
        }
        free(memmap_info);
    } else if (ver == XC_IA64_SR_FORMAT_VER_ONE) {
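        /*
         * A version one image carries no memmap record, so fabricate a
         * minimal one-descriptor EFI memmap to feed to the p2m mapping
         * call instead.
         */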
        xen_ia64_memmap_info_t *memmap_info;
        efi_memory_desc_t *memdesc;
        uint64_t buffer[(sizeof(*memmap_info) + sizeof(*memdesc) +
                         sizeof(uint64_t) - 1) / sizeof(uint64_t)];

        memset(buffer, 0, sizeof(buffer));
        memmap_info = (xen_ia64_memmap_info_t *)buffer;
        memdesc = (efi_memory_desc_t *)&memmap_info->memdesc[0];
        memmap_info->efi_memmap_size = sizeof(*memdesc);
        memmap_info->efi_memdesc_size = sizeof(*memdesc);
        memmap_info->efi_memdesc_version = EFI_MEMORY_DESCRIPTOR_VERSION;

        memdesc->type = EFI_CONVENTIONAL_MEMORY;
        memdesc->phys_addr = 0;
        memdesc->virt_addr = 0;
        memdesc->num_pages = nr_pfns << (PAGE_SHIFT - EFI_PAGE_SHIFT);
        memdesc->attribute = EFI_MEMORY_WB;

        if (xc_ia64_p2m_map(&p2m_table, xc_handle,
                            dom, memmap_info, IA64_DOM0VP_EFP_ALLOC_PTE)) {
            ERROR("p2m mapping");
            goto out;
        }
    } else {
        ERROR("unknown version");
        goto out;
    }

    DPRINTF("Reloading memory pages: 0%%\n");

    while (1) {
        unsigned long gmfn;
        if (read_exact(io_fd, &gmfn, sizeof(unsigned long))) {
            ERROR("Error when reading gmfn");
            goto out;
        }
        if (gmfn == INVALID_MFN)
            break;

        if (populate_page_if_necessary(xc_handle, dom, gmfn, &p2m_table) < 0) {
            ERROR("cannot populate page 0x%lx", gmfn);
            goto out;
        }
        if (read_page(xc_handle, io_fd, dom, gmfn) < 0)
            goto out;
    }

    DPRINTF("Received all pages\n");

    if (xc_ia64_recv_unallocated_list(xc_handle, io_fd, dom, &p2m_table))
        goto out;

    if (!hvm)
        rc = xc_ia64_pv_recv_context(ver,
                                     xc_handle, io_fd, dom, shared_info_frame,
                                     &p2m_table, store_evtchn, store_mfn,
                                     console_evtchn, console_mfn);
    else
        rc = xc_ia64_hvm_recv_context(xc_handle, io_fd, dom,
                                      shared_info_frame,
                                      &p2m_table, store_evtchn, store_mfn,
                                      console_evtchn, console_mfn);
    if (rc)
        goto out;

    /*
     * Safety checking of saved context (this list is inherited from the
     * x86 restore path; most of the items do not apply to ia64):
     *  1. user_regs is fine, as Xen checks that on context switch.
     *  2. fpu_ctxt is fine, as it can't hurt Xen.
     *  3. trap_ctxt needs the code selectors checked.
     *  4. ldt base must be page-aligned, no more than 8192 ents, ...
     *  5. gdt already done, and further checking is done by Xen.
     *  6. check that kernel_ss is safe.
     *  7. pt_base is already done.
     *  8. debugregs are checked by Xen.
     *  9. callback code selectors need checking.
     */
    DPRINTF("Domain ready to be built.\n");

    rc = 0;

 out:
    xc_ia64_p2m_unmap(&p2m_table);

    if ((rc != 0) && (dom != 0))
        xc_domain_destroy(xc_handle, dom);

    DPRINTF("Restore exit with rc=%d\n", rc);

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */