ia64/xen-unstable

xen/arch/x86/mm/p2m.c @ 14152:1c5e6239a8d0

fix PDPE entry in P2M table under 32bit PAE hypervisor

author   root@xenhog02.amd.com
date     Sun Feb 25 23:58:33 2007 -0600 (2007-02-25)
parents  6daa91dc9247
children 3d5f39c610ad
/******************************************************************************
 * arch/x86/mm/p2m.c
 *
 * physical-to-machine mappings for automatically-translated domains.
 *
 * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices.
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <asm/domain.h>
#include <asm/page.h>
#include <asm/paging.h>
#include <asm/p2m.h>

/* Debugging and auditing of the P2M code? */
#define P2M_AUDIT     0
#define P2M_DEBUGGING 1

/* The P2M lock.  This protects all updates to the p2m table.
 * Updates are expected to be safe against concurrent reads,
 * which do *not* require the lock */
#define p2m_lock_init(_d)                          \
    do {                                           \
        spin_lock_init(&(_d)->arch.p2m.lock);      \
        (_d)->arch.p2m.locker = -1;                \
        (_d)->arch.p2m.locker_function = "nobody"; \
    } while (0)

#define p2m_lock(_d)                                                 \
    do {                                                             \
        if ( unlikely((_d)->arch.p2m.locker == current->processor) )\
        {                                                            \
            printk("Error: p2m lock held by %s\n",                   \
                   (_d)->arch.p2m.locker_function);                  \
            BUG();                                                   \
        }                                                            \
        spin_lock(&(_d)->arch.p2m.lock);                             \
        ASSERT((_d)->arch.p2m.locker == -1);                         \
        (_d)->arch.p2m.locker = current->processor;                  \
        (_d)->arch.p2m.locker_function = __func__;                   \
    } while (0)

#define p2m_unlock(_d)                                               \
    do {                                                             \
        ASSERT((_d)->arch.p2m.locker == current->processor);         \
        (_d)->arch.p2m.locker = -1;                                  \
        (_d)->arch.p2m.locker_function = "nobody";                   \
        spin_unlock(&(_d)->arch.p2m.lock);                           \
    } while (0)
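
/* Usage sketch (illustrative, drawn from the callers below, not a new API):
 * writers bracket their updates with the macros above, e.g.
 *
 *     p2m_lock(d);
 *     audit_p2m(d);
 *     ... one or more set_p2m_entry() calls ...
 *     audit_p2m(d);
 *     p2m_unlock(d);
 *
 * as guest_physmap_add_page()/guest_physmap_remove_page() do at the end of
 * this file.  Readers such as gfn_to_mfn_foreign() take no lock. */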

/* Printouts */
#define P2M_PRINTK(_f, _a...)                                \
    debugtrace_printk("p2m: %s(): " _f, __func__, ##_a)
#define P2M_ERROR(_f, _a...)                                 \
    printk("pg error: %s(): " _f, __func__, ##_a)
#if P2M_DEBUGGING
#define P2M_DEBUG(_f, _a...)                                 \
    debugtrace_printk("p2mdebug: %s(): " _f, __func__, ##_a)
#else
#define P2M_DEBUG(_f, _a...) do { (void)(_f); } while(0)
#endif

/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(_m) (frame_table + mfn_x(_m))
#undef mfn_valid
#define mfn_valid(_mfn) (mfn_x(_mfn) < max_page)
#undef page_to_mfn
#define page_to_mfn(_pg) (_mfn((_pg) - frame_table))
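
/* (Background note: mfn_t is the typesafe wrapper around a raw machine frame
 * number, so the redefinitions above just unwrap/rewrap with mfn_x()/_mfn()
 * around the usual frame_table arithmetic; behaviour is otherwise the same
 * as the asm/page.h versions.) */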

// Find the next level's P2M entry, checking for out-of-range gfn's...
// Returns NULL on error.
//
static l1_pgentry_t *
p2m_find_entry(void *table, unsigned long *gfn_remainder,
               unsigned long gfn, u32 shift, u32 max)
{
    u32 index;

    index = *gfn_remainder >> shift;
    if ( index >= max )
    {
        P2M_DEBUG("gfn=0x%lx out of range "
                  "(gfn_remainder=0x%lx shift=%d index=0x%x max=0x%x)\n",
                  gfn, *gfn_remainder, shift, index, max);
        return NULL;
    }
    *gfn_remainder &= (1 << shift) - 1;
    return (l1_pgentry_t *)table + index;
}
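
/* Worked example (illustrative): with 4-level paging, set_p2m_entry() below
 * calls this with shift = L4/L3/L2_PAGETABLE_SHIFT - PAGE_SHIFT = 27, 18, 9
 * and finally 0, so the successive indices for a given gfn are
 * gfn >> 27, (gfn >> 18) & 0x1ff, (gfn >> 9) & 0x1ff and gfn & 0x1ff --
 * 9 bits of the gfn per level, mirroring the normal pagetable layout
 * shifted down by PAGE_SHIFT. */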

// Walk one level of the P2M table, allocating a new table if required.
// Returns 0 on error.
//
static int
p2m_next_level(struct domain *d, mfn_t *table_mfn, void **table,
               unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
               u32 max, unsigned long type)
{
    l1_pgentry_t *p2m_entry;
    l1_pgentry_t new_entry;
    void *next;

    ASSERT(d->arch.p2m.alloc_page);

    if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn,
                                      shift, max)) )
        return 0;

    if ( !(l1e_get_flags(*p2m_entry) & _PAGE_PRESENT) )
    {
        struct page_info *pg = d->arch.p2m.alloc_page(d);
        if ( pg == NULL )
            return 0;
        list_add_tail(&pg->list, &d->arch.p2m.pages);
        pg->u.inuse.type_info = type | 1 | PGT_validated;
        pg->count_info = 1;

        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
                                 __PAGE_HYPERVISOR|_PAGE_USER);

        switch ( type ) {
        case PGT_l3_page_table:
            paging_write_p2m_entry(d, gfn, p2m_entry, new_entry, 4);
            break;
        case PGT_l2_page_table:
#if CONFIG_PAGING_LEVELS == 3
            /* For PAE mode the level-3 entry is a PDPE, in which only the
             * P, PWT and PCD flag bits are valid (the remaining flag bits
             * are reserved by the hardware), so write a stripped-down
             * entry here. */
            new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), _PAGE_PRESENT);
#endif
            paging_write_p2m_entry(d, gfn, p2m_entry, new_entry, 3);
            break;
        case PGT_l1_page_table:
            paging_write_p2m_entry(d, gfn, p2m_entry, new_entry, 2);
            break;
        default:
            BUG();
            break;
        }
    }
    *table_mfn = _mfn(l1e_get_pfn(*p2m_entry));
    next = map_domain_page(mfn_x(*table_mfn));
    unmap_domain_page(*table);
    *table = next;

    return 1;
}

// Returns 0 on error (out of memory)
static int
set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
{
    // XXX -- this might be able to be faster iff current->domain == d
    mfn_t table_mfn = pagetable_get_mfn(d->arch.phys_table);
    void *table = map_domain_page(mfn_x(table_mfn));
    unsigned long gfn_remainder = gfn;
    l1_pgentry_t *p2m_entry;
    l1_pgentry_t entry_content;
    int rv = 0;

#if CONFIG_PAGING_LEVELS >= 4
    if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
                         L4_PAGETABLE_SHIFT - PAGE_SHIFT,
                         L4_PAGETABLE_ENTRIES, PGT_l3_page_table) )
        goto out;
#endif
#if CONFIG_PAGING_LEVELS >= 3
    // When using PAE Xen, we only allow 33 bits of pseudo-physical
    // address in translated guests (i.e. 8 GBytes).  This restriction
    // comes from wanting to map the P2M table into the 16MB RO_MPT hole
    // in Xen's address space for translated PV guests.
    //
    if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
                         L3_PAGETABLE_SHIFT - PAGE_SHIFT,
                         (CONFIG_PAGING_LEVELS == 3
                          ? 8
                          : L3_PAGETABLE_ENTRIES),
                         PGT_l2_page_table) )
        goto out;
#endif
    if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
                         L2_PAGETABLE_SHIFT - PAGE_SHIFT,
                         L2_PAGETABLE_ENTRIES, PGT_l1_page_table) )
        goto out;

    p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
                               0, L1_PAGETABLE_ENTRIES);
    ASSERT(p2m_entry);

    /* Track the highest gfn for which we have ever had a valid mapping */
    if ( mfn_valid(mfn) && (gfn > d->arch.p2m.max_mapped_pfn) )
        d->arch.p2m.max_mapped_pfn = gfn;

    if ( mfn_valid(mfn) )
        entry_content = l1e_from_pfn(mfn_x(mfn), __PAGE_HYPERVISOR|_PAGE_USER);
    else
        entry_content = l1e_empty();

    /* level 1 entry */
    paging_write_p2m_entry(d, gfn, p2m_entry, entry_content, 1);

    /* Success */
    rv = 1;

 out:
    unmap_domain_page(table);
    return rv;
}
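
/* Worked arithmetic for the PAE restriction noted in set_p2m_entry() above
 * (illustrative): under CONFIG_PAGING_LEVELS == 3 the p2m root covers only
 * 8 level-3 slots, each mapping 512 * 512 * 4KB = 1GB, so the p2m spans
 * 8GB = 2^33 bytes of pseudo-physical space -- hence "33 bits".
 * Equivalently, 8GB of guest space needs 2^21 leaf l1 entries of 8 bytes
 * each, i.e. 16MB of leaf tables, which is what fits in the RO_MPT hole. */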

/* Init the datastructures for later use by the p2m code */
void p2m_init(struct domain *d)
{
    p2m_lock_init(d);
    INIT_LIST_HEAD(&d->arch.p2m.pages);
}

// Allocate a new p2m table for a domain.
//
// The structure of the p2m table is that of a pagetable for xen (i.e. it is
// controlled by CONFIG_PAGING_LEVELS).
//
// The alloc_page and free_page functions will be used to get memory to
// build the p2m, and to release it again at the end of day.
//
// Returns 0 for success or -errno.
//
int p2m_alloc_table(struct domain *d,
                    struct page_info * (*alloc_page)(struct domain *d),
                    void (*free_page)(struct domain *d, struct page_info *pg))
{
    mfn_t mfn;
    struct list_head *entry;
    struct page_info *page, *p2m_top;
    unsigned int page_count = 0;
    unsigned long gfn;

    p2m_lock(d);

    if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
    {
        P2M_ERROR("p2m already allocated for this domain\n");
        p2m_unlock(d);
        return -EINVAL;
    }

    P2M_PRINTK("allocating p2m table\n");

    d->arch.p2m.alloc_page = alloc_page;
    d->arch.p2m.free_page = free_page;

    p2m_top = d->arch.p2m.alloc_page(d);
    if ( p2m_top == NULL )
    {
        p2m_unlock(d);
        return -ENOMEM;
    }
    list_add_tail(&p2m_top->list, &d->arch.p2m.pages);

    p2m_top->count_info = 1;
    p2m_top->u.inuse.type_info =
#if CONFIG_PAGING_LEVELS == 4
        PGT_l4_page_table
#elif CONFIG_PAGING_LEVELS == 3
        PGT_l3_page_table
#elif CONFIG_PAGING_LEVELS == 2
        PGT_l2_page_table
#endif
        | 1 | PGT_validated;

    d->arch.phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));

    P2M_PRINTK("populating p2m table\n");

    /* Initialise physmap tables for slot zero. Other code assumes this. */
    gfn = 0;
    mfn = _mfn(INVALID_MFN);
    if ( !set_p2m_entry(d, gfn, mfn) )
        goto error;

    for ( entry = d->page_list.next;
          entry != &d->page_list;
          entry = entry->next )
    {
        page = list_entry(entry, struct page_info, list);
        mfn = page_to_mfn(page);
        gfn = get_gpfn_from_mfn(mfn_x(mfn));
        page_count++;
        if (
#ifdef __x86_64__
            (gfn != 0x5555555555555555L)
#else
            (gfn != 0x55555555L)
#endif
            && gfn != INVALID_M2P_ENTRY
            && !set_p2m_entry(d, gfn, mfn) )
            goto error;
    }

    P2M_PRINTK("p2m table initialised (%u pages)\n", page_count);
    p2m_unlock(d);
    return 0;

 error:
    P2M_PRINTK("failed to initialise p2m table, gfn=%05lx, mfn=%"
               PRI_mfn "\n", gfn, mfn_x(mfn));
    p2m_unlock(d);
    return -ENOMEM;
}
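
/* Usage sketch (illustrative and hedged): the paging-assistance code is the
 * expected caller, passing callbacks that hand pages to and from its own
 * pool, roughly
 *
 *     rv = p2m_alloc_table(d, my_alloc_p2m_page, my_free_p2m_page);
 *     if ( rv != 0 )
 *         ... bail out ...
 *
 * where my_alloc_p2m_page/my_free_p2m_page are placeholder names (not
 * defined in this file) for the shadow/HAP p2m-page allocators. */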

void p2m_teardown(struct domain *d)
/* Return all the p2m pages to Xen.
 * We know we don't have any extra mappings to these pages */
{
    struct list_head *entry, *n;
    struct page_info *pg;

    p2m_lock(d);
    d->arch.phys_table = pagetable_null();

    list_for_each_safe(entry, n, &d->arch.p2m.pages)
    {
        pg = list_entry(entry, struct page_info, list);
        list_del(entry);
        d->arch.p2m.free_page(d, pg);
    }
    p2m_unlock(d);
}

mfn_t
gfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
/* Read another domain's p2m entries */
{
    mfn_t mfn;
    paddr_t addr = ((paddr_t)gpfn) << PAGE_SHIFT;
    l2_pgentry_t *l2e;
    l1_pgentry_t *l1e;

    ASSERT(paging_mode_translate(d));
    mfn = pagetable_get_mfn(d->arch.phys_table);

    if ( gpfn > d->arch.p2m.max_mapped_pfn )
        /* This pfn is higher than the highest the p2m map currently holds */
        return _mfn(INVALID_MFN);

#if CONFIG_PAGING_LEVELS >= 4
    {
        l4_pgentry_t *l4e = map_domain_page(mfn_x(mfn));
        l4e += l4_table_offset(addr);
        if ( (l4e_get_flags(*l4e) & _PAGE_PRESENT) == 0 )
        {
            unmap_domain_page(l4e);
            return _mfn(INVALID_MFN);
        }
        mfn = _mfn(l4e_get_pfn(*l4e));
        unmap_domain_page(l4e);
    }
#endif
#if CONFIG_PAGING_LEVELS >= 3
    {
        l3_pgentry_t *l3e = map_domain_page(mfn_x(mfn));
#if CONFIG_PAGING_LEVELS == 3
        /* On PAE hosts the p2m has eight l3 entries, not four (see
         * shadow_set_p2m_entry()) so we can't use l3_table_offset.
         * Instead, just count the number of l3es from zero.  It's safe
         * to do this because we already checked that the gfn is within
         * the bounds of the p2m. */
        l3e += (addr >> L3_PAGETABLE_SHIFT);
#else
        l3e += l3_table_offset(addr);
#endif
        if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
        {
            unmap_domain_page(l3e);
            return _mfn(INVALID_MFN);
        }
        mfn = _mfn(l3e_get_pfn(*l3e));
        unmap_domain_page(l3e);
    }
#endif

    l2e = map_domain_page(mfn_x(mfn));
    l2e += l2_table_offset(addr);
    if ( (l2e_get_flags(*l2e) & _PAGE_PRESENT) == 0 )
    {
        unmap_domain_page(l2e);
        return _mfn(INVALID_MFN);
    }
    mfn = _mfn(l2e_get_pfn(*l2e));
    unmap_domain_page(l2e);

    l1e = map_domain_page(mfn_x(mfn));
    l1e += l1_table_offset(addr);
    if ( (l1e_get_flags(*l1e) & _PAGE_PRESENT) == 0 )
    {
        unmap_domain_page(l1e);
        return _mfn(INVALID_MFN);
    }
    mfn = _mfn(l1e_get_pfn(*l1e));
    unmap_domain_page(l1e);

    return mfn;
}
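
/* Worked example for the PAE special case above (illustrative): with
 * CONFIG_PAGING_LEVELS == 3, l3_table_offset() would mask the index down to
 * the four PDPT slots, but the p2m root really holds eight consecutive l3
 * entries, so the code indexes with addr >> L3_PAGETABLE_SHIFT directly.
 * Each slot covers 1GB, giving indices 0..7 for gfns below the 8GB limit,
 * which the max_mapped_pfn check above has already enforced. */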

#if P2M_AUDIT
static void audit_p2m(struct domain *d)
{
    struct list_head *entry;
    struct page_info *page;
    struct domain *od;
    unsigned long mfn, gfn, m2pfn, lp2mfn = 0;
    mfn_t p2mfn;
    unsigned long orphans_d = 0, orphans_i = 0, mpbad = 0, pmbad = 0;
    int test_linear;

    if ( !paging_mode_translate(d) )
        return;

    //P2M_PRINTK("p2m audit starts\n");

    test_linear = ( (d == current->domain)
                    && !pagetable_is_null(current->arch.monitor_table) );
    if ( test_linear )
        local_flush_tlb();

    /* Audit part one: walk the domain's page allocation list, checking
     * the m2p entries. */
    for ( entry = d->page_list.next;
          entry != &d->page_list;
          entry = entry->next )
    {
        page = list_entry(entry, struct page_info, list);
        mfn = mfn_x(page_to_mfn(page));

        // P2M_PRINTK("auditing guest page, mfn=%#lx\n", mfn);

        od = page_get_owner(page);

        if ( od != d )
        {
            P2M_PRINTK("wrong owner %#lx -> %p(%u) != %p(%u)\n",
                       mfn, od, (od?od->domain_id:-1), d, d->domain_id);
            continue;
        }

        gfn = get_gpfn_from_mfn(mfn);
        if ( gfn == INVALID_M2P_ENTRY )
        {
            orphans_i++;
            //P2M_PRINTK("orphaned guest page: mfn=%#lx has invalid gfn\n",
            //               mfn);
            continue;
        }

        if ( gfn == 0x55555555 )
        {
            orphans_d++;
            //P2M_PRINTK("orphaned guest page: mfn=%#lx has debug gfn\n",
            //               mfn);
            continue;
        }

        p2mfn = gfn_to_mfn_foreign(d, gfn);
        if ( mfn_x(p2mfn) != mfn )
        {
            mpbad++;
            P2M_PRINTK("map mismatch mfn %#lx -> gfn %#lx -> mfn %#lx"
                       " (-> gfn %#lx)\n",
                       mfn, gfn, mfn_x(p2mfn),
                       (mfn_valid(p2mfn)
                        ? get_gpfn_from_mfn(mfn_x(p2mfn))
                        : -1u));
            /* This m2p entry is stale: the domain has another frame in
             * this physical slot.  No great disaster, but for neatness,
             * blow away the m2p entry. */
            set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
        }

        if ( test_linear && (gfn <= d->arch.p2m.max_mapped_pfn) )
        {
            lp2mfn = mfn_x(gfn_to_mfn_current(gfn));
            if ( lp2mfn != mfn_x(p2mfn) )
            {
                P2M_PRINTK("linear mismatch gfn %#lx -> mfn %#lx "
                           "(!= mfn %#lx)\n", gfn, lp2mfn, mfn_x(p2mfn));
            }
        }

        // P2M_PRINTK("OK: mfn=%#lx, gfn=%#lx, p2mfn=%#lx, lp2mfn=%#lx\n",
        //                mfn, gfn, p2mfn, lp2mfn);
    }

    /* Audit part two: walk the domain's p2m table, checking the entries. */
    if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
    {
        l2_pgentry_t *l2e;
        l1_pgentry_t *l1e;
        int i1, i2;

#if CONFIG_PAGING_LEVELS == 4
        l4_pgentry_t *l4e;
        l3_pgentry_t *l3e;
        int i3, i4;
        l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
#elif CONFIG_PAGING_LEVELS == 3
        l3_pgentry_t *l3e;
        int i3;
        l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
#else /* CONFIG_PAGING_LEVELS == 2 */
        l2e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
#endif

        gfn = 0;
#if CONFIG_PAGING_LEVELS >= 3
#if CONFIG_PAGING_LEVELS >= 4
        for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
        {
            if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
            {
                gfn += 1 << (L4_PAGETABLE_SHIFT - PAGE_SHIFT);
                continue;
            }
            l3e = map_domain_page(mfn_x(_mfn(l4e_get_pfn(l4e[i4]))));
#endif /* now at levels 3 or 4... */
            for ( i3 = 0;
                  i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
                  i3++ )
            {
                if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
                {
                    gfn += 1 << (L3_PAGETABLE_SHIFT - PAGE_SHIFT);
                    continue;
                }
                l2e = map_domain_page(mfn_x(_mfn(l3e_get_pfn(l3e[i3]))));
#endif /* all levels... */
                for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
                {
                    if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
                    {
                        gfn += 1 << (L2_PAGETABLE_SHIFT - PAGE_SHIFT);
                        continue;
                    }
                    l1e = map_domain_page(mfn_x(_mfn(l2e_get_pfn(l2e[i2]))));

                    for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
                    {
                        if ( !(l1e_get_flags(l1e[i1]) & _PAGE_PRESENT) )
                            continue;
                        mfn = l1e_get_pfn(l1e[i1]);
                        ASSERT(mfn_valid(_mfn(mfn)));
                        m2pfn = get_gpfn_from_mfn(mfn);
                        if ( m2pfn != gfn )
                        {
                            pmbad++;
                            P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
                                       " -> gfn %#lx\n", gfn, mfn, m2pfn);
                            BUG();
                        }
                    }
                    unmap_domain_page(l1e);
                }
#if CONFIG_PAGING_LEVELS >= 3
                unmap_domain_page(l2e);
            }
#if CONFIG_PAGING_LEVELS >= 4
            unmap_domain_page(l3e);
        }
#endif
#endif

#if CONFIG_PAGING_LEVELS == 4
        unmap_domain_page(l4e);
#elif CONFIG_PAGING_LEVELS == 3
        unmap_domain_page(l3e);
#else /* CONFIG_PAGING_LEVELS == 2 */
        unmap_domain_page(l2e);
#endif

    }

    //P2M_PRINTK("p2m audit complete\n");
    //if ( orphans_i | orphans_d | mpbad | pmbad )
    //    P2M_PRINTK("p2m audit found %lu orphans (%lu inval %lu debug)\n",
    //                   orphans_i + orphans_d, orphans_i, orphans_d,
    if ( mpbad | pmbad )
        P2M_PRINTK("p2m audit found %lu odd p2m, %lu bad m2p entries\n",
                   pmbad, mpbad);
}
#else
#define audit_p2m(_d) do { (void)(_d); } while(0)
#endif /* P2M_AUDIT */

static void
p2m_remove_page(struct domain *d, unsigned long gfn, unsigned long mfn)
{
    if ( !paging_mode_translate(d) )
        return;
    P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn);

    ASSERT(mfn_x(gfn_to_mfn(d, gfn)) == mfn);
    //ASSERT(mfn_to_gfn(d, mfn) == gfn);

    set_p2m_entry(d, gfn, _mfn(INVALID_MFN));
    set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
}

void
guest_physmap_remove_page(struct domain *d, unsigned long gfn,
                          unsigned long mfn)
{
    p2m_lock(d);
    audit_p2m(d);
    p2m_remove_page(d, gfn, mfn);
    audit_p2m(d);
    p2m_unlock(d);
}

void
guest_physmap_add_page(struct domain *d, unsigned long gfn,
                       unsigned long mfn)
{
    unsigned long ogfn;
    mfn_t omfn;

    if ( !paging_mode_translate(d) )
        return;

    p2m_lock(d);
    audit_p2m(d);

    P2M_DEBUG("adding gfn=%#lx mfn=%#lx\n", gfn, mfn);

    omfn = gfn_to_mfn(d, gfn);
    if ( mfn_valid(omfn) )
    {
        set_p2m_entry(d, gfn, _mfn(INVALID_MFN));
        set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
    }

    ogfn = mfn_to_gfn(d, _mfn(mfn));
    if (
#ifdef __x86_64__
        (ogfn != 0x5555555555555555L)
#else
        (ogfn != 0x55555555L)
#endif
        && (ogfn != INVALID_M2P_ENTRY)
        && (ogfn != gfn) )
    {
        /* This machine frame is already mapped at another physical address */
        P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
                  mfn, ogfn, gfn);
        if ( mfn_valid(omfn = gfn_to_mfn(d, ogfn)) )
        {
            P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n",
                      ogfn, mfn_x(omfn));
            if ( mfn_x(omfn) == mfn )
                p2m_remove_page(d, ogfn, mfn);
        }
    }

    set_p2m_entry(d, gfn, _mfn(mfn));
    set_gpfn_from_mfn(mfn, gfn);

    audit_p2m(d);
    p2m_unlock(d);
}
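
/* Invariant maintained by the two functions above (stated here for clarity,
 * not new behaviour): for a translated domain, a live mapping keeps the p2m
 * and m2p views consistent, i.e. roughly
 *
 *     mfn_x(gfn_to_mfn(d, gfn)) == mfn  &&  get_gpfn_from_mfn(mfn) == gfn
 *
 * guest_physmap_add_page() establishes both directions (tearing down any
 * stale alias first), and guest_physmap_remove_page() clears both. */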

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */