direct-io.hg

view tools/libxc/xc_hvm_restore.c @ 14099:aa1be6f5150e

x86 hvm domain builder, restore: set shared_info.arch.max_pfn for
dump-core to know the area to dump

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Keir Fraser <keir@xensource.com>
date Sat Feb 24 14:19:05 2007 +0000 (2007-02-24)
parents e21834bc78f2
children eedbddf55e51
line source
1 /******************************************************************************
2 * xc_hvm_restore.c
3 *
4 * Restore the state of a HVM guest.
5 *
6 * Copyright (c) 2003, K A Fraser.
7 * Copyright (c) 2006 Intel Corporation
8 * rewritten for HVM guest by Zhai Edwin <edwin.zhai@intel.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms and conditions of the GNU General Public License,
12 * version 2, as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
21 * Place - Suite 330, Boston, MA 02111-1307 USA.
22 *
23 */
25 #include <stdlib.h>
26 #include <unistd.h>
28 #include "xg_private.h"
29 #include "xg_save_restore.h"
31 #include <xen/hvm/ioreq.h>
32 #include <xen/hvm/params.h>
33 #include <xen/hvm/e820.h>
/* Max mfn of the whole machine, filled in by get_platform_info(). */
static unsigned long max_mfn;

/* Virtual starting address of the hypervisor, filled in by get_platform_info(). */
static unsigned long hvirt_start;

/* Number of levels of page tables used by the current guest. */
static unsigned int pt_levels;

/* A list of PFNs that exist, used when allocating memory to the guest. */
static xen_pfn_t *pfns = NULL;
/*
 * Read exactly @count bytes from @fd into @buf, retrying transparently
 * on EINTR.
 *
 * Returns 1 if all @count bytes were read, 0 on EOF or error (i.e. a
 * short read).  Note the 1/0 convention: callers test with "!read_exact".
 */
static ssize_t
read_exact(int fd, void *buf, size_t count)
{
    unsigned char *b = buf;
    size_t got = 0;     /* size_t, not int: avoids signed/unsigned compare
                           against @count and supports counts > INT_MAX */
    ssize_t s;

    while ( got < count )
    {
        s = read(fd, &b[got], count - got);
        if ( (s == -1) && (errno == EINTR) )
            continue;   /* interrupted by a signal: just retry */
        if ( s <= 0 )
            break;      /* EOF (0) or hard error (-1): give up */
        got += s;
    }

    return (got == count) ? 1 : 0;
}
/*
 * Restore the state of an HVM guest from the image stream on @io_fd.
 *
 * NOTE(review): *store_mfn is overloaded: on entry it carries the guest
 * memory size in MB (see the assignment to memsize below); on success it
 * is overwritten with the xenstore page's pfn.  Confirm against the
 * caller before relying on this.
 *
 * Returns 0 on success.  On failure returns non-zero and destroys the
 * partially-built domain (via the "out" path).
 */
int xc_hvm_restore(int xc_handle, int io_fd,
                   uint32_t dom, unsigned long max_pfn,
                   unsigned int store_evtchn, unsigned long *store_mfn,
                   unsigned int pae, unsigned int apic)
{
    DECLARE_DOMCTL;

    /* The new domain's shared-info frame number. */
    unsigned long shared_info_frame;

    /* A copy of the CPU context of the guest. */
    vcpu_guest_context_t ctxt;

    char *region_base;

    /* Scratch page used only in page-verify mode. */
    unsigned long buf[PAGE_SIZE/sizeof(unsigned long)];

    xc_dominfo_t info;
    unsigned int rc = 1, n, i;
    uint32_t rec_len, nr_vcpus;
    uint8_t *hvm_buf = NULL;
    unsigned long long v_end, memsize;
    unsigned long shared_page_nr;
    shared_info_t *shared_info = NULL;
    xen_pfn_t arch_max_pfn;

    unsigned long pfn;
    unsigned int prev_pc, this_pc;
    int verify = 0;

    /* Types of the pfns in the current region */
    unsigned long region_pfn_type[MAX_BATCH_SIZE];

    struct xen_add_to_physmap xatp;

    /* Number of pages of memory the guest has.  *Not* the same as max_pfn. */
    unsigned long nr_pages;

    /* hvm guest mem size (Mb) -- smuggled in through *store_mfn, see above */
    memsize = (unsigned long long)*store_mfn;
    v_end = memsize << 20;
    nr_pages = (unsigned long) memsize << (20 - PAGE_SHIFT);

    DPRINTF("xc_hvm_restore:dom=%d, nr_pages=0x%lx, store_evtchn=%d, "
            "*store_mfn=%ld, pae=%u, apic=%u.\n",
            dom, nr_pages, store_evtchn, *store_mfn, pae, apic);

    if(!get_platform_info(xc_handle, dom,
                          &max_mfn, &hvirt_start, &pt_levels)) {
        ERROR("Unable to get platform info.");
        return 1;
    }

    DPRINTF("xc_hvm_restore start: nr_pages = %lx, max_pfn = %lx, "
            "max_mfn = %lx, hvirt_start=%lx, pt_levels=%d\n",
            nr_pages, max_pfn, max_mfn, hvirt_start, pt_levels);

    if (mlock(&ctxt, sizeof(ctxt))) {
        /* needed for build dom0 op, but might as well do early */
        ERROR("Unable to mlock ctxt");
        return 1;
    }

    pfns = malloc(max_pfn * sizeof(xen_pfn_t));
    if (pfns == NULL) {
        ERROR("memory alloc failed");
        errno = ENOMEM;
        goto out;
    }

    if(xc_domain_setmaxmem(xc_handle, dom, PFN_TO_KB(nr_pages)) != 0) {
        errno = ENOMEM;
        goto out;
    }

    /*
     * Build the identity pfn list, then shift every pfn above the 4GB RAM
     * boundary up by the size of the MMIO hole so guest physmap layout
     * matches the saved domain's.
     */
    for ( i = 0; i < max_pfn; i++ )
        pfns[i] = i;
    for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < max_pfn; i++ )
        pfns[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
    arch_max_pfn = pfns[max_pfn - 1];/* used later */

    /* Allocate memory for HVM guest, skipping VGA hole 0xA0000-0xC0000. */
    rc = xc_domain_memory_populate_physmap(
        xc_handle, dom, (nr_pages > 0xa0) ? 0xa0 : nr_pages,
        0, 0, &pfns[0x00]);
    if ( (rc == 0) && (nr_pages > 0xc0) )
        rc = xc_domain_memory_populate_physmap(
            xc_handle, dom, nr_pages - 0xc0, 0, 0, &pfns[0xc0]);
    if ( rc != 0 )
    {
        PERROR("Could not allocate memory for HVM guest.\n");
        goto out;
    }

    /*
     * NOTE(review): unlike the error paths above this one uses "return 1"
     * rather than "goto out", leaking pfns and leaving the half-built
     * domain alive -- looks like it should be "goto out"; confirm.
     */
    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
        ERROR("Could not get domain info");
        return 1;
    }

    domctl.cmd = XEN_DOMCTL_getdomaininfo;
    domctl.domain = (domid_t)dom;
    if (xc_domctl(xc_handle, &domctl) < 0) {
        ERROR("Could not get information on new domain");
        goto out;
    }

    prev_pc = 0;

    /*
     * Main page-restore loop.  The stream is a sequence of batches, each
     * prefixed by a signed count: -1 toggles verify mode, 0 terminates,
     * otherwise up to MAX_BATCH_SIZE pfns follow, then their page data.
     */
    n = 0;
    while (1) {

        int j;

        this_pc = (n * 100) / nr_pages;
        if ( (this_pc - prev_pc) >= 5 )
        {
            PPRINTF("\b\b\b\b%3d%%", this_pc);
            prev_pc = this_pc;
        }

        if (!read_exact(io_fd, &j, sizeof(int))) {
            ERROR("HVM restore Error when reading batch size");
            goto out;
        }

        PPRINTF("batch %d\n",j);

        if (j == -1) {
            verify = 1;
            DPRINTF("Entering page verify mode\n");
            continue;
        }

        if (j == 0)
            break;  /* our work here is done */

        if (j > MAX_BATCH_SIZE) {
            ERROR("Max batch size exceeded. Giving up.");
            goto out;
        }

        if (!read_exact(io_fd, region_pfn_type, j*sizeof(unsigned long))) {
            ERROR("Error when reading region pfn types");
            goto out;
        }

        /* NOTE(review): region_base is not checked for NULL before use. */
        region_base = xc_map_foreign_batch(
            xc_handle, dom, PROT_WRITE, region_pfn_type, j);

        for ( i = 0; i < j; i++ )
        {
            void *page;

            pfn = region_pfn_type[i];
            /* Skip entries flagged with a page-table type (not plain RAM). */
            if ( pfn & XEN_DOMCTL_PFINFO_LTAB_MASK )
                continue;

            if ( pfn > max_pfn )
            {
                ERROR("pfn out of range");
                goto out;
            }

            if ( pfn >= 0xa0 && pfn < 0xc0) {
                ERROR("hvm restore:pfn in vga hole");
                goto out;
            }

            /* In verify mode, we use a copy; otherwise we work in place */
            page = verify ? (void *)buf : (region_base + i*PAGE_SIZE);

            if (!read_exact(io_fd, page, PAGE_SIZE)) {
                ERROR("Error when reading page (%x)", i);
                goto out;
            }

            if (verify) {

                int res = memcmp(buf, (region_base + i*PAGE_SIZE), PAGE_SIZE);

                if (res) {

                    int v;

                    DPRINTF("************** pfn=%lx gotcs=%08lx "
                            "actualcs=%08lx\n", pfn,
                            csum_page(region_base + i*PAGE_SIZE),
                            csum_page(buf));

                    /* Dump the first few mismatching words for diagnosis. */
                    for (v = 0; v < 4; v++) {

                        unsigned long *p = (unsigned long *)
                            (region_base + i*PAGE_SIZE);
                        if (buf[v] != p[v])
                            DPRINTF(" %d: %08lx %08lx\n", v, buf[v], p[v]);
                    }
                }
            }

        } /* end of 'batch' for loop */

        munmap(region_base, j*PAGE_SIZE);
        n+= j; /* crude stats */

    }/*while 1*/

    /* xc_set_hvm_param(xc_handle, dom, HVM_PARAM_APIC_ENABLED, apic);*/
    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_PAE_ENABLED, pae);
    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);

    /*
     * Place the ioreq/bufioreq/xenstore special pages in the last three
     * page frames below the 4GB RAM boundary (or below v_end if smaller).
     */
    if ( v_end > HVM_BELOW_4G_RAM_END )
        shared_page_nr = (HVM_BELOW_4G_RAM_END >> PAGE_SHIFT) - 1;
    else
        shared_page_nr = (v_end >> PAGE_SHIFT) - 1;

    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, shared_page_nr-1);
    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_BUFIOREQ_PFN, shared_page_nr-2);
    xc_set_hvm_param(xc_handle, dom, HVM_PARAM_IOREQ_PFN, shared_page_nr);

    /* Calculate the store_mfn; a wrong value causes a hang in introduceDomain. */
    *store_mfn = (v_end >> PAGE_SHIFT) - 2;
    DPRINTF("hvm restore:calculate new store_mfn=0x%lx,v_end=0x%llx..\n", *store_mfn, v_end);

    /* restore hvm context including pic/pit/shpage */
    if (!read_exact(io_fd, &rec_len, sizeof(uint32_t))) {
        ERROR("error read hvm context size!\n");
        goto out;
    }

    hvm_buf = malloc(rec_len);
    if (hvm_buf == NULL) {
        ERROR("memory alloc for hvm context buffer failed");
        errno = ENOMEM;
        goto out;
    }

    if (!read_exact(io_fd, hvm_buf, rec_len)) {
        ERROR("error read hvm buffer!\n");
        goto out;
    }

    if (( rc = xc_domain_hvm_setcontext(xc_handle, dom, hvm_buf, rec_len))) {
        ERROR("error set hvm buffer!\n");
        goto out;
    }

    if (!read_exact(io_fd, &nr_vcpus, sizeof(uint32_t))) {
        ERROR("error read nr vcpu !\n");
        goto out;
    }
    DPRINTF("hvm restore:get nr_vcpus=%d.\n", nr_vcpus);

    /* Restore each vcpu's full register context from the stream. */
    for (i =0; i < nr_vcpus; i++) {
        if (!read_exact(io_fd, &rec_len, sizeof(uint32_t))) {
            ERROR("error read vcpu context size!\n");
            goto out;
        }
        if (rec_len != sizeof(ctxt)) {
            ERROR("vcpu context size dismatch!\n");
            goto out;
        }

        if (!read_exact(io_fd, &(ctxt), sizeof(ctxt))) {
            ERROR("error read vcpu context.\n");
            goto out;
        }

        if ( (rc = xc_vcpu_setcontext(xc_handle, dom, i, &ctxt)) ) {
            ERROR("Could not set vcpu context, rc=%d", rc);
            goto out;
        }
    }

    /*
     * Shared-info pfn.
     * NOTE(review): only sizeof(uint32_t) bytes are read into an
     * unsigned long -- on a 64-bit build the upper half of
     * shared_info_frame is left uninitialized; confirm the save side
     * writes a 32-bit value and consider reading into a uint32_t.
     */
    if (!read_exact(io_fd, &(shared_info_frame), sizeof(uint32_t)) ) {
        ERROR("reading the shared-info pfn failed!\n");
        goto out;
    }
    /* Map the shared-info frame where it was before */
    xatp.domid = dom;
    xatp.space = XENMAPSPACE_shared_info;
    xatp.idx = 0;
    xatp.gpfn = shared_info_frame;
    if ( (rc = xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp)) != 0 ) {
        ERROR("setting the shared-info pfn failed!\n");
        goto out;
    }
    /*
     * NOTE(review): this repeats the XENMEM_add_to_physmap call that just
     * succeeded above -- looks redundant; verify whether the second call
     * was intended to be dropped when the mapping was added.
     */
    if ( (xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp) != 0) ||
         ((shared_info = xc_map_foreign_range(
             xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
             shared_info_frame)) == NULL) )
        goto out;
    /* shared_info.arch.max_pfn is used by dump-core */
    shared_info->arch.max_pfn = arch_max_pfn;
    munmap(shared_info, PAGE_SIZE);

    rc = 0;
    goto out;

 out:
    /* On any failure, tear down the partially-restored domain. */
    if ( (rc != 0) && (dom != 0) )
        xc_domain_destroy(xc_handle, dom);
    free(pfns);
    free(hvm_buf);

    DPRINTF("Restore exit with rc=%d\n", rc);

    return rc;
}