ia64/xen-unstable

view tools/libxc/xc_offline_page.c @ 19691:f44438bc79ac

libxc: Exchange a page for PV guest

This patch supports exchanging a page for a suspended PV guest from
user space.

The basic idea of offlining a page is:
1) Mark the page offline-pending.
2) If the page is owned by an HVM domain, the user has to live migrate
the guest. In the future, with stub-domain support, we will be able to
exchange the page without migration as well.
3) If the page is owned by a PV domain, we try to exchange the
offline-pending page for a new one and free the old page.

This patch achieves item 3.

The method for exchanging the offline-pending page of a PV domain is:

1) Suspend the guest.
2) If the page is being granted out, return with the page still
offline-pending.
3) Get a copy of the page's content.
4) Scan all page-table pages for references to the offending page; for
each reference found, make the entry non-present to reduce the page's
reference count.
5) After all page tables have been updated, the user space tools try to
exchange the old page. If the old mfn has no remaining references
(i.e. count_info & count_mask == 1), the exchange allocates a new page,
updates the M2P and returns success; otherwise it returns failure.
6) If step 5 succeeds, the user space tools copy the saved content into
the new page, change the p2m table, and redirect all entries scanned in
step 4 to point to the new page. If step 5 fails, they try to undo
step 4 to revert the page tables.
7) Resume the guest.
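
For illustration, a minimal, hypothetical driver for this flow might
look as follows (assuming an open libxc handle, a domain that has
already been suspended, and the xc_mark_page_offline()/
xc_exchange_page() calls implemented below; error reporting and the
resume step are left out):

    /* Hypothetical sketch: push one machine page of a suspended PV
     * guest through the offline-pending -> exchanged path above. */
    static int offline_one_page(int xc_handle, int domid, xen_pfn_t mfn)
    {
        uint32_t status;

        /* Step 1: mark the page offline-pending. */
        if ( xc_mark_page_offline(xc_handle, mfn, mfn, &status) )
            return -1;

        /* Steps 2-6: clear references, exchange the frame, fix up
         * the page tables and the p2m. */
        if ( xc_exchange_page(xc_handle, domid, mfn) )
            return -1;    /* the page stays offline-pending */

        /* Step 7, resuming the guest, is left to the caller. */
        return 0;
    }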

Please refer to the thread at
http://www.mailinglistarchive.com/xen-devel@lists.xensource.com/msg63084.html
for more information.

Signed-off-by: Jiang, Yunhong <yunhong.jiang@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Jun 01 14:15:48 2009 +0100 (2009-06-01)
parents 84c1f7c46444
children
line source
/******************************************************************************
 * xc_offline_page.c
 *
 * Helper functions to offline/online one page
 *
 * Copyright (c) 2003, K A Fraser.
 * Copyright (c) 2009, Intel Corporation.
 */

#include <inttypes.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#include <xs.h>
#include <xc_core.h>

#include "xc_private.h"
#include "xc_dom.h"
#include "xg_private.h"
#include "xg_save_restore.h"

struct domain_mem_info
{
    int domid;
    unsigned int pt_level;
    unsigned int guest_width;
    uint32_t *pfn_type;
    xen_pfn_t *p2m_table;
    unsigned long p2m_size;
    xen_pfn_t *m2p_table;
    int max_mfn;
};

struct pte_backup_entry
{
    xen_pfn_t table_mfn;
    int offset;
};

#define DEFAULT_BACKUP_COUNT 1024
struct pte_backup
{
    struct pte_backup_entry *entries;
    int max;
    int cur;
};

/* Globals referenced by some of the save/restore macros */
int guest_width, p2m_size;

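/* Ask Xen to mark the machine frame range [start, end] online again.
 * The per-page result is written back into the status array. */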
int xc_mark_page_online(int xc, unsigned long start,
                        unsigned long end, uint32_t *status)
{
    DECLARE_SYSCTL;
    int ret = -1;

    if ( !status || (end < start) )
        return -EINVAL;

    if (lock_pages(status, sizeof(uint32_t)*(end - start + 1)))
    {
        ERROR("Could not lock memory for xc_mark_page_online\n");
        return -EINVAL;
    }

    sysctl.cmd = XEN_SYSCTL_page_offline_op;
    sysctl.u.page_offline.start = start;
    sysctl.u.page_offline.cmd = sysctl_page_online;
    sysctl.u.page_offline.end = end;
    set_xen_guest_handle(sysctl.u.page_offline.status, status);
    ret = xc_sysctl(xc, &sysctl);

    unlock_pages(status, sizeof(uint32_t)*(end - start + 1));

    return ret;
}

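/* Ask Xen to mark the machine frame range [start, end] offline-pending. */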
int xc_mark_page_offline(int xc, unsigned long start,
                         unsigned long end, uint32_t *status)
{
    DECLARE_SYSCTL;
    int ret = -1;

    if ( !status || (end < start) )
        return -EINVAL;

    if (lock_pages(status, sizeof(uint32_t)*(end - start + 1)))
    {
        ERROR("Could not lock memory for xc_mark_page_offline");
        return -EINVAL;
    }

    sysctl.cmd = XEN_SYSCTL_page_offline_op;
    sysctl.u.page_offline.start = start;
    sysctl.u.page_offline.cmd = sysctl_page_offline;
    sysctl.u.page_offline.end = end;
    set_xen_guest_handle(sysctl.u.page_offline.status, status);
    ret = xc_sysctl(xc, &sysctl);

    unlock_pages(status, sizeof(uint32_t)*(end - start + 1));

    return ret;
}

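/* Query the offline status of the machine frame range [start, end]. */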
int xc_query_page_offline_status(int xc, unsigned long start,
                                 unsigned long end, uint32_t *status)
{
    DECLARE_SYSCTL;
    int ret = -1;

    if ( !status || (end < start) )
        return -EINVAL;

    if (lock_pages(status, sizeof(uint32_t)*(end - start + 1)))
    {
        ERROR("Could not lock memory for xc_query_page_offline_status\n");
        return -EINVAL;
    }

    sysctl.cmd = XEN_SYSCTL_page_offline_op;
    sysctl.u.page_offline.start = start;
    sysctl.u.page_offline.cmd = sysctl_query_page_offline;
    sysctl.u.page_offline.end = end;
    set_xen_guest_handle(sysctl.u.page_offline.status, status);
    ret = xc_sysctl(xc, &sysctl);

    unlock_pages(status, sizeof(uint32_t)*(end - start + 1));

    return ret;
}

/*
 * There should be no updates to the grant table while the domain
 * is paused.
 */
static int xc_is_page_granted(int xc_handle, xen_pfn_t gpfn,
                              struct grant_entry *gnttab, int gnt_num)
{
    int i = 0;

    if (!gnttab)
        return 0;

    for (i = 0; i < gnt_num; i++)
        if ( ((gnttab[i].flags & GTF_type_mask) != GTF_invalid) &&
             (gnttab[i].frame == gpfn) )
            break;

    return (i != gnt_num);
}

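/* Read one p2m entry; 32-bit guests store invalid entries as 0xffffffff. */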
static xen_pfn_t pfn_to_mfn(xen_pfn_t pfn, xen_pfn_t *p2m, int gwidth)
{
    return ((xen_pfn_t) ((gwidth==8)?
                         (((uint64_t *)p2m)[(pfn)]):
                         ((((uint32_t *)p2m)[(pfn)]) == 0xffffffffU ?
                          (-1UL) :
                          (((uint32_t *)p2m)[(pfn)]))));
}

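/* Determine the guest's page-table levels and pointer width from the
 * hypervisor capabilities and the domain's address size. */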
static int get_pt_level(int xc_handle, uint32_t domid,
                        unsigned int *pt_level,
                        unsigned int *gwidth)
{
    DECLARE_DOMCTL;
    xen_capabilities_info_t xen_caps = "";

    if (xc_version(xc_handle, XENVER_capabilities, &xen_caps) != 0)
        return -1;

    memset(&domctl, 0, sizeof(domctl));
    domctl.domain = domid;
    domctl.cmd = XEN_DOMCTL_get_address_size;

    if ( do_domctl(xc_handle, &domctl) != 0 )
        return -1;

    *gwidth = domctl.u.address_size.size / 8;

    if (strstr(xen_caps, "xen-3.0-x86_64"))
        /* Depends on whether it's a compat 32-on-64 guest */
        *pt_level = ( (*gwidth == 8) ? 4 : 3 );
    else if (strstr(xen_caps, "xen-3.0-x86_32p"))
        *pt_level = 3;
    else if (strstr(xen_caps, "xen-3.0-x86_32"))
        *pt_level = 2;
    else
        return -1;

    return 0;
}

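/* Release the mappings and allocations made by init_mem_info(). */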
static int close_mem_info(int xc_handle, struct domain_mem_info *minfo)
{
    if (minfo->pfn_type)
        free(minfo->pfn_type);
    munmap(minfo->m2p_table, M2P_SIZE(minfo->max_mfn));
    munmap(minfo->p2m_table, P2M_FLL_ENTRIES * PAGE_SIZE);
    minfo->p2m_table = minfo->m2p_table = NULL;

    return 0;
}

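/* Map the domain's p2m and m2p tables and fetch the type of every
 * guest page; may only be called once per domain_mem_info. */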
static int init_mem_info(int xc_handle, int domid,
                         struct domain_mem_info *minfo,
                         xc_dominfo_t *info)
{
    uint64_aligned_t shared_info_frame;
    shared_info_any_t *live_shinfo = NULL;
    int i, rc;

    /* Only to be initialized once */
    if (minfo->pfn_type || minfo->m2p_table || minfo->p2m_table)
        return -EINVAL;

    if ( get_pt_level(xc_handle, domid, &minfo->pt_level,
                      &minfo->guest_width) )
    {
        ERROR("Unable to get PT level info.");
        return -EFAULT;
    }
    guest_width = minfo->guest_width;

    shared_info_frame = info->shared_info_frame;

    live_shinfo = xc_map_foreign_range(xc_handle, domid,
                                       PAGE_SIZE, PROT_READ, shared_info_frame);
    if ( !live_shinfo )
    {
        ERROR("Couldn't map live_shinfo");
        return -EFAULT;
    }

    if ( (rc = xc_core_arch_map_p2m_writable(xc_handle, minfo->guest_width,
              info, live_shinfo, &minfo->p2m_table, &minfo->p2m_size)) )
    {
        ERROR("Couldn't map p2m table %x\n", rc);
        goto failed;
    }
    munmap(live_shinfo, PAGE_SIZE);
    live_shinfo = NULL;

    p2m_size = minfo->p2m_size;

    minfo->max_mfn = xc_memory_op(xc_handle, XENMEM_maximum_ram_page, NULL);
    if ( !(minfo->m2p_table =
           xc_map_m2p(xc_handle, minfo->max_mfn, PROT_READ, NULL)) )
    {
        ERROR("Failed to map live M2P table");
        goto failed;
    }

    /* Get pfn type */
    minfo->pfn_type = malloc(sizeof(uint32_t) * minfo->p2m_size);
    if (!minfo->pfn_type)
    {
        ERROR("Failed to malloc pfn_type\n");
        goto failed;
    }
    memset(minfo->pfn_type, 0, sizeof(uint32_t) * minfo->p2m_size);

    for (i = 0; i < minfo->p2m_size; i++)
        minfo->pfn_type[i] = pfn_to_mfn(i, minfo->p2m_table,
                                        minfo->guest_width);

    if ( lock_pages(minfo->pfn_type, minfo->p2m_size * sizeof(uint32_t)) )
    {
        ERROR("Unable to lock pfn_type array");
        goto failed;
    }

    for (i = 0; i < minfo->p2m_size; i += 1024)
    {
        int count = ((p2m_size - i) > 1024) ? 1024 : (p2m_size - i);
        if ( (rc = xc_get_pfn_type_batch(xc_handle, domid, count,
                                         minfo->pfn_type + i)) )
        {
            ERROR("Failed to get pfn_type %x\n", rc);
            goto unlock;
        }
    }
    return 0;

unlock:
    unlock_pages(minfo->pfn_type, minfo->p2m_size * sizeof(uint32_t));
failed:
    if (minfo->pfn_type)
    {
        free(minfo->pfn_type);
        minfo->pfn_type = NULL;
    }
    if (live_shinfo)
        munmap(live_shinfo, PAGE_SIZE);
    munmap(minfo->m2p_table, M2P_SIZE(minfo->max_mfn));
    munmap(minfo->p2m_table, P2M_FLL_ENTRIES * PAGE_SIZE);
    minfo->p2m_table = minfo->m2p_table = NULL;

    return -1;
}

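/* Record the (table mfn, offset) of a PTE so it can later be restored
 * or redirected; the backing array doubles when it fills up. */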
static int backup_ptes(xen_pfn_t table_mfn, int offset,
                       struct pte_backup *backup)
{
    if (!backup)
        return -EINVAL;

    if (backup->max == backup->cur)
    {
        struct pte_backup_entry *entries =
            realloc(backup->entries,
                    backup->max * 2 * sizeof(struct pte_backup_entry));
        if (entries == NULL)
            return -1;
        backup->entries = entries;
        backup->max *= 2;
    }

    backup->entries[backup->cur].table_mfn = table_mfn;
    backup->entries[backup->cur++].offset = offset;

    return 0;
}

/*
 * return:
 * 1 when an MMU update is required
 * 0 when no changes are needed
 * <0 when an error happens
 */
typedef int (*pte_func)(uint64_t pte, uint64_t *new_pte,
                        unsigned long table_mfn, int table_offset,
                        struct pte_backup *backup,
                        unsigned long no_use);

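/* Make a PTE that maps the target mfn non-present, backing up the
 * location of the entry so it can be redirected later. */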
static int __clear_pte(uint64_t pte, uint64_t *new_pte,
                       unsigned long table_mfn, int table_offset,
                       struct pte_backup *backup,
                       unsigned long mfn)
{
    /* If no new_pte pointer, same as no changes needed */
    if (!new_pte || !backup)
        return -EINVAL;

    if ( !(pte & _PAGE_PRESENT))
        return 0;

    /* XXX Check for the PSE bit here */
    /* Hit one entry */
    if ( ((pte >> PAGE_SHIFT_X86) & MFN_MASK_X86) == mfn)
    {
        *new_pte = pte & ~_PAGE_PRESENT;
        if (!backup_ptes(table_mfn, table_offset, backup))
            return 1;
    }

    return 0;
}

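/* Re-point a previously backed-up PTE at new_mfn and mark it present. */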
static int __update_pte(uint64_t pte, uint64_t *new_pte,
                        unsigned long table_mfn, int table_offset,
                        struct pte_backup *backup,
                        unsigned long new_mfn)
{
    int index;

    if (!new_pte)
        return 0;

    for (index = 0; index < backup->cur; index++)
        if ( (backup->entries[index].table_mfn == table_mfn) &&
             (backup->entries[index].offset == table_offset) )
            break;

    if (index != backup->cur)
    {
        if (pte & _PAGE_PRESENT)
            ERROR("Page present while in backup ptes\n");
        pte &= ~MFN_MASK_X86;
        pte |= (new_mfn << PAGE_SHIFT_X86) | _PAGE_PRESENT;
        *new_pte = pte;
        return 1;
    }

    return 0;
}

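/* Walk every page-table page of the domain and apply func to each PTE,
 * batching the resulting writes through the MMU-update interface. */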
static int change_pte(int xc_handle, int domid,
                      struct domain_mem_info *minfo,
                      struct pte_backup *backup,
                      struct xc_mmu *mmu,
                      pte_func func,
                      unsigned long data)
{
    int pte_num, rc;
    uint64_t i;
    void *content = NULL;

    pte_num = PAGE_SIZE / ((minfo->pt_level == 2) ? 4 : 8);

    for (i = 0; i < minfo->p2m_size; i++)
    {
        xen_pfn_t table_mfn = pfn_to_mfn(i, minfo->p2m_table,
                                         minfo->guest_width);
        uint64_t pte, new_pte;
        int j;

        if ( table_mfn == INVALID_P2M_ENTRY )
            continue;

        if ( minfo->pfn_type[i] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK )
        {
            content = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
                                           PROT_READ, table_mfn);
            if (!content)
                goto failed;

            for (j = 0; j < pte_num; j++)
            {
                if ( minfo->pt_level == 2 )
                    pte = ((const uint32_t*)content)[j];
                else
                    pte = ((const uint64_t*)content)[j];

                rc = func(pte, &new_pte, table_mfn, j, backup, data);

                switch (rc)
                {
                case 1:
                    if ( xc_add_mmu_update(xc_handle, mmu,
                              table_mfn << PAGE_SHIFT |
                              j * ( (minfo->pt_level == 2) ?
                                    sizeof(uint32_t) : sizeof(uint64_t)) |
                              MMU_PT_UPDATE_PRESERVE_AD,
                              new_pte) )
                        goto failed;
                    break;

                case 0:
                    break;

                default:
                    goto failed;
                }
            }
        }

        if (content)
        {
            munmap(content, PAGE_SIZE);
            content = NULL;
        }
    }

    if ( xc_flush_mmu_updates(xc_handle, mmu) )
        goto failed;

    return 0;
failed:
    /* XXX Shall we take any action if we fail to swap? */
    if (content)
        munmap(content, PAGE_SIZE);

    return -1;
}

static int update_pte(int xc_handle, int domid,
                      struct domain_mem_info *minfo,
                      struct pte_backup *backup,
                      struct xc_mmu *mmu,
                      unsigned long new_mfn)
{
    return change_pte(xc_handle, domid, minfo, backup, mmu,
                      __update_pte, new_mfn);
}

static int clear_pte(int xc_handle, int domid,
                     struct domain_mem_info *minfo,
                     struct pte_backup *backup,
                     struct xc_mmu *mmu,
                     xen_pfn_t mfn)
{
    return change_pte(xc_handle, domid, minfo, backup, mmu,
                      __clear_pte, mfn);
}

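/* Trade a single page of the domain for a freshly allocated machine
 * frame via XENMEM_exchange. */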
static int exchange_page(int xc_handle, xen_pfn_t mfn,
                         xen_pfn_t *new_mfn, int domid)
{
    int rc;
    xen_pfn_t out_mfn;

    struct xen_memory_exchange exchange = {
        .in = {
            .nr_extents = 1,
            .extent_order = 0,
            .domid = domid
        },
        .out = {
            .nr_extents = 1,
            .extent_order = 0,
            .domid = domid
        }
    };
    set_xen_guest_handle(exchange.in.extent_start, &mfn);
    set_xen_guest_handle(exchange.out.extent_start, &out_mfn);

    rc = xc_memory_op(xc_handle, XENMEM_exchange, &exchange);

    if (!rc)
        *new_mfn = out_mfn;

    return rc;
}

/*
 * Check if a page can be exchanged successfully
 */

static int is_page_exchangable(int xc_handle, int domid, xen_pfn_t mfn,
                               xc_dominfo_t *info)
{
    uint32_t status;
    int rc;

    /* domain checking */
    if ( !domid || (domid > DOMID_FIRST_RESERVED) )
    {
        DPRINTF("Dom0's page can't be LM");
        return 0;
    }
    if (info->hvm)
    {
        DPRINTF("Currently we can only live change a PV guest's page\n");
        return 0;
    }

    /* Check if the page is offline pending or not */
    rc = xc_query_page_offline_status(xc_handle, mfn, mfn, &status);

    if ( rc || !(status & PG_OFFLINE_STATUS_OFFLINE_PENDING) )
    {
        ERROR("Page %lx is not offline pending %x\n",
              mfn, status);
        return 0;
    }

    return 1;
}

/* The domain should be suspended when called here */
int xc_exchange_page(int xc_handle, int domid, xen_pfn_t mfn)
{
    xc_dominfo_t info;
    struct domain_mem_info minfo;
    struct xc_mmu *mmu = NULL;
    struct pte_backup old_ptes = {NULL, 0, 0};
    struct grant_entry *gnttab = NULL;
    struct mmuext_op mops;
    int gnt_num, unpined = 0;
    void *old_p, *backup = NULL;
    int rc, result = -1;
    uint32_t status;
    xen_pfn_t new_mfn, gpfn;

    if ( xc_domain_getinfo(xc_handle, domid, 1, &info) != 1 )
    {
        ERROR("Could not get domain info");
        return -EFAULT;
    }

    if (!info.shutdown || info.shutdown_reason != SHUTDOWN_suspend)
    {
        ERROR("Can't exchange page unless domain is suspended\n");
        return -EINVAL;
    }

    if (!is_page_exchangable(xc_handle, domid, mfn, &info))
    {
        ERROR("Could not exchange page\n");
        return -EINVAL;
    }

    /* Get domain's memory information */
    memset(&minfo, 0, sizeof(minfo));
    if ( init_mem_info(xc_handle, domid, &minfo, &info) )
    {
        ERROR("Could not initialize guest memory info\n");
        return -EFAULT;
    }
    gpfn = minfo.m2p_table[mfn];

    /* Don't exchange CR3 for PAE guest in PAE host environment */
    if (minfo.guest_width > sizeof(long))
    {
        if ( (minfo.pfn_type[mfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) ==
             XEN_DOMCTL_PFINFO_L3TAB )
            goto failed;
    }

    gnttab = xc_gnttab_map_table(xc_handle, domid, &gnt_num);
    if (!gnttab)
    {
        ERROR("Failed to map grant table\n");
        goto failed;
    }

    if (xc_is_page_granted(xc_handle, mfn, gnttab, gnt_num))
    {
        ERROR("Page %lx is granted now\n", mfn);
        goto failed;
    }

    /* allocate required data structure */
    backup = malloc(PAGE_SIZE);
    if (!backup)
    {
        ERROR("Failed to allocate backup pages pointer\n");
        goto failed;
    }

    old_ptes.max = DEFAULT_BACKUP_COUNT;
    old_ptes.entries = malloc(sizeof(struct pte_backup_entry) *
                              DEFAULT_BACKUP_COUNT);

    if (!old_ptes.entries)
    {
        ERROR("Failed to allocate backup\n");
        goto failed;
    }
    old_ptes.cur = 0;

    /* Unpin the page if it is pinned */
    if (minfo.pfn_type[mfn] & XEN_DOMCTL_PFINFO_LPINTAB)
    {
        mops.cmd = MMUEXT_UNPIN_TABLE;
        mops.arg1.mfn = mfn;

        if ( xc_mmuext_op(xc_handle, &mops, 1, domid) < 0 )
        {
            ERROR("Failed to unpin page %lx", mfn);
            goto failed;
        }
        mops.arg1.mfn = mfn;
        unpined = 1;
    }

    /* backup the content */
    old_p = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
                                 PROT_READ, mfn);
    if (!old_p)
    {
        ERROR("Failed to map foreign page %lx\n", mfn);
        goto failed;
    }

    memcpy(backup, old_p, PAGE_SIZE);
    munmap(old_p, PAGE_SIZE);

    mmu = xc_alloc_mmu_updates(xc_handle, domid);
    if ( mmu == NULL )
    {
        ERROR("%s: failed at %d\n", __FUNCTION__, __LINE__);
        goto failed;
    }

    /* First make every PTE that references the page non-present,
     * to remove the references */
    rc = clear_pte(xc_handle, domid, &minfo, &old_ptes, mmu, mfn);

    if (rc)
    {
        ERROR("clear pte failed\n");
        goto failed;
    }

    rc = exchange_page(xc_handle, mfn, &new_mfn, domid);

    if (rc)
    {
        ERROR("Exchanging the page failed\n");
        /* Exchange failure means there are still references to the page */
        rc = update_pte(xc_handle, domid, &minfo, &old_ptes, mmu, mfn);
        if (rc)
            result = -2;
        goto failed;
    }

    rc = update_pte(xc_handle, domid, &minfo, &old_ptes, mmu, new_mfn);

    if (rc)
    {
        ERROR("update pte failed; guest may be broken now\n");
        /* No recovery action now for swap failure */
        result = -2;
        goto failed;
    }

    /* Check if the page is offlined already */
    rc = xc_query_page_offline_status(xc_handle, mfn, mfn,
                                      &status);

    if (rc)
    {
        ERROR("Failed to query offline status\n");
    }
    else if ( !(status & PG_OFFLINE_STATUS_OFFLINED) )
    {
        ERROR("page is still online or pending\n");
        goto failed;
    }
    else
    {
        void *new_p;
        IPRINTF("Now page %lx is offlined\n", mfn);
        /* Update the p2m table */
        minfo.p2m_table[gpfn] = new_mfn;

        new_p = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
                                     PROT_READ|PROT_WRITE, new_mfn);
        if (!new_p)
        {
            ERROR("Failed to map new page %lx\n", new_mfn);
            result = -2;
            goto failed;
        }
        memcpy(new_p, backup, PAGE_SIZE);
        munmap(new_p, PAGE_SIZE);
        mops.arg1.mfn = new_mfn;
        result = 0;
    }

failed:
    if (unpined && (minfo.pfn_type[mfn] & XEN_DOMCTL_PFINFO_LPINTAB))
    {
        switch ( minfo.pfn_type[mfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK )
        {
        case XEN_DOMCTL_PFINFO_L1TAB:
            mops.cmd = MMUEXT_PIN_L1_TABLE;
            break;

        case XEN_DOMCTL_PFINFO_L2TAB:
            mops.cmd = MMUEXT_PIN_L2_TABLE;
            break;

        case XEN_DOMCTL_PFINFO_L3TAB:
            mops.cmd = MMUEXT_PIN_L3_TABLE;
            break;

        case XEN_DOMCTL_PFINFO_L4TAB:
            mops.cmd = MMUEXT_PIN_L4_TABLE;
            break;

        default:
            ERROR("Unpinned a non-page-table page\n");
            break;
        }

        if ( xc_mmuext_op(xc_handle, &mops, 1, domid) < 0 )
        {
            ERROR("failed to pin the mfn again\n");
            result = -2;
        }
    }

    if (mmu)
        free(mmu);

    if (old_ptes.entries)
        free(old_ptes.entries);

    if (backup)
        free(backup);

    if (gnttab)
        munmap(gnttab, gnt_num * sizeof(struct grant_entry));

    close_mem_info(xc_handle, &minfo);

    return result;
}