ia64/xen-unstable
view xen/arch/x86/mm/p2m.c @ 15369:1feb91894e11

[HVM] HAP tidying.
Tighten up locking discipline; remove dead/useless code and unnecessary VMEXITs.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>

author      Tim Deegan <Tim.Deegan@xensource.com>
date        Fri Jun 15 16:51:08 2007 +0100 (2007-06-15)
parents     3d5f39c610ad
children    b8e8061c5a98

line source
/******************************************************************************
 * arch/x86/mm/p2m.c
 *
 * physical-to-machine mappings for automatically-translated domains.
 *
 * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices.
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <asm/domain.h>
#include <asm/page.h>
#include <asm/paging.h>
#include <asm/p2m.h>

/* Debugging and auditing of the P2M code? */
#define P2M_AUDIT 0
#define P2M_DEBUGGING 1

/*
 * The P2M lock.  This protects all updates to the p2m table.
 * Updates are expected to be safe against concurrent reads,
 * which do *not* require the lock.
 *
 * Locking discipline: always acquire this lock before the shadow or HAP one
 */

#define p2m_lock_init(_d)                            \
    do {                                             \
        spin_lock_init(&(_d)->arch.p2m.lock);        \
        (_d)->arch.p2m.locker = -1;                  \
        (_d)->arch.p2m.locker_function = "nobody";   \
    } while (0)

#define p2m_lock(_d)                                                \
    do {                                                            \
        if ( unlikely((_d)->arch.p2m.locker == current->processor) )\
        {                                                           \
            printk("Error: p2m lock held by %s\n",                  \
                   (_d)->arch.p2m.locker_function);                 \
            BUG();                                                  \
        }                                                           \
        spin_lock(&(_d)->arch.p2m.lock);                            \
        ASSERT((_d)->arch.p2m.locker == -1);                        \
        (_d)->arch.p2m.locker = current->processor;                 \
        (_d)->arch.p2m.locker_function = __func__;                  \
    } while (0)

#define p2m_unlock(_d)                                              \
    do {                                                            \
        ASSERT((_d)->arch.p2m.locker == current->processor);        \
        (_d)->arch.p2m.locker = -1;                                 \
        (_d)->arch.p2m.locker_function = "nobody";                  \
        spin_unlock(&(_d)->arch.p2m.lock);                          \
    } while (0)
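
/* Illustrative usage (editor's note, not part of the original source):
 * writers bracket p2m updates with this lock, e.g.
 *
 *     p2m_lock(d);
 *     set_p2m_entry(d, gfn, mfn, __PAGE_HYPERVISOR|_PAGE_USER);
 *     p2m_unlock(d);
 *
 * as guest_physmap_add_page()/guest_physmap_remove_page() do below, while
 * readers such as gfn_to_mfn_foreign() walk the table without taking it. */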

/* Printouts */
#define P2M_PRINTK(_f, _a...)                                \
    debugtrace_printk("p2m: %s(): " _f, __func__, ##_a)
#define P2M_ERROR(_f, _a...)                                 \
    printk("pg error: %s(): " _f, __func__, ##_a)
#if P2M_DEBUGGING
#define P2M_DEBUG(_f, _a...)                                 \
    debugtrace_printk("p2mdebug: %s(): " _f, __func__, ##_a)
#else
#define P2M_DEBUG(_f, _a...) do { (void)(_f); } while(0)
#endif

/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(_m) (frame_table + mfn_x(_m))
#undef mfn_valid
#define mfn_valid(_mfn) (mfn_x(_mfn) < max_page)
#undef page_to_mfn
#define page_to_mfn(_pg) (_mfn((_pg) - frame_table))
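
/* (Editor's note: mfn_t is the typesafe wrapper around a raw frame number,
 * so the generic asm/page.h versions, which operate on plain unsigned longs,
 * cannot be applied to it directly; hence the local redefinitions above.) */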

// Find the next level's P2M entry, checking for out-of-range gfn's...
// Returns NULL on error.
//
static l1_pgentry_t *
p2m_find_entry(void *table, unsigned long *gfn_remainder,
               unsigned long gfn, u32 shift, u32 max)
{
    u32 index;

    index = *gfn_remainder >> shift;
    if ( index >= max )
    {
        P2M_DEBUG("gfn=0x%lx out of range "
                  "(gfn_remainder=0x%lx shift=%d index=0x%x max=0x%x)\n",
                  gfn, *gfn_remainder, shift, index, max);
        return NULL;
    }
    *gfn_remainder &= (1 << shift) - 1;
    return (l1_pgentry_t *)table + index;
}
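
/* Worked example (editor's note, illustrative only): on a 4-level build the
 * callers below pass shift = 27, 18, 9 and 0 in turn, peeling off nine index
 * bits per level.  For gfn 0x12345 that gives l4 and l3 index 0, l2 index
 * 0x91, and a final l1 slot of 0x145. */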

// Walk one level of the P2M table, allocating a new table if required.
// Returns 0 on error.
//
static int
p2m_next_level(struct domain *d, mfn_t *table_mfn, void **table,
               unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
               u32 max, unsigned long type)
{
    l1_pgentry_t *p2m_entry;
    l1_pgentry_t new_entry;
    void *next;
    ASSERT(d->arch.p2m.alloc_page);

    if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn,
                                      shift, max)) )
        return 0;

    if ( !(l1e_get_flags(*p2m_entry) & _PAGE_PRESENT) )
    {
        struct page_info *pg = d->arch.p2m.alloc_page(d);
        if ( pg == NULL )
            return 0;
        list_add_tail(&pg->list, &d->arch.p2m.pages);
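        /* (Editor's note: the low bits of type_info hold the type-use count,
         * so "| 1" gives the new table page a single type reference, and
         * PGT_validated marks it as already validated for use.) */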
        pg->u.inuse.type_info = type | 1 | PGT_validated;
        pg->count_info = 1;

        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
                                 __PAGE_HYPERVISOR|_PAGE_USER);

        switch ( type ) {
        case PGT_l3_page_table:
            paging_write_p2m_entry(d, gfn, p2m_entry, new_entry, 4);
            break;
        case PGT_l2_page_table:
#if CONFIG_PAGING_LEVELS == 3
            /* for PAE mode, PDPE only has PCD/PWT/P bits available */
            new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), _PAGE_PRESENT);
#endif
            paging_write_p2m_entry(d, gfn, p2m_entry, new_entry, 3);
            break;
        case PGT_l1_page_table:
            paging_write_p2m_entry(d, gfn, p2m_entry, new_entry, 2);
            break;
        default:
            BUG();
            break;
        }
    }
    *table_mfn = _mfn(l1e_get_pfn(*p2m_entry));
    next = map_domain_page(mfn_x(*table_mfn));
    unmap_domain_page(*table);
    *table = next;

    return 1;
}

// Returns 0 on error (out of memory)
static int
set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, u32 l1e_flags)
{
    // XXX -- this might be able to be faster iff current->domain == d
    mfn_t table_mfn = pagetable_get_mfn(d->arch.phys_table);
    void *table = map_domain_page(mfn_x(table_mfn));
    unsigned long gfn_remainder = gfn;
    l1_pgentry_t *p2m_entry;
    l1_pgentry_t entry_content;
    int rv = 0;

#if CONFIG_PAGING_LEVELS >= 4
    if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
                         L4_PAGETABLE_SHIFT - PAGE_SHIFT,
                         L4_PAGETABLE_ENTRIES, PGT_l3_page_table) )
        goto out;
#endif
#if CONFIG_PAGING_LEVELS >= 3
    // When using PAE Xen, we only allow 33 bits of pseudo-physical
    // address in translated guests (i.e. 8 GBytes).  This restriction
    // comes from wanting to map the P2M table into the 16MB RO_MPT hole
    // in Xen's address space for translated PV guests.
    //
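    // (Editor's note: 8 l3 slots * 512 l2 entries * 512 l1 entries * 4kB
    //  pages = 2^33 bytes = 8 GBytes, which is where the limit of 8 below
    //  comes from.)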
    if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
                         L3_PAGETABLE_SHIFT - PAGE_SHIFT,
                         (CONFIG_PAGING_LEVELS == 3
                          ? 8
                          : L3_PAGETABLE_ENTRIES),
                         PGT_l2_page_table) )
        goto out;
#endif
    if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
                         L2_PAGETABLE_SHIFT - PAGE_SHIFT,
                         L2_PAGETABLE_ENTRIES, PGT_l1_page_table) )
        goto out;

    p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
                               0, L1_PAGETABLE_ENTRIES);
    ASSERT(p2m_entry);

    /* Track the highest gfn for which we have ever had a valid mapping */
    if ( mfn_valid(mfn) && (gfn > d->arch.p2m.max_mapped_pfn) )
        d->arch.p2m.max_mapped_pfn = gfn;

    if ( mfn_valid(mfn) )
        entry_content = l1e_from_pfn(mfn_x(mfn), l1e_flags);
    else
        entry_content = l1e_empty();

    /* level 1 entry */
    paging_write_p2m_entry(d, gfn, p2m_entry, entry_content, 1);

    /* Success */
    rv = 1;

 out:
    unmap_domain_page(table);
    return rv;
}

/* Init the datastructures for later use by the p2m code */
void p2m_init(struct domain *d)
{
    p2m_lock_init(d);
    INIT_LIST_HEAD(&d->arch.p2m.pages);
}

// Allocate a new p2m table for a domain.
//
// The structure of the p2m table is that of a pagetable for xen (i.e. it is
// controlled by CONFIG_PAGING_LEVELS).
//
// The alloc_page and free_page functions will be used to get memory to
// build the p2m, and to release it again at the end of day.
//
// Returns 0 for success or -errno.
//
int p2m_alloc_table(struct domain *d,
                    struct page_info * (*alloc_page)(struct domain *d),
                    void (*free_page)(struct domain *d, struct page_info *pg))
{
    mfn_t mfn;
    struct list_head *entry;
    struct page_info *page, *p2m_top;
    unsigned int page_count = 0;
    unsigned long gfn;

    p2m_lock(d);

    if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
    {
        P2M_ERROR("p2m already allocated for this domain\n");
        p2m_unlock(d);
        return -EINVAL;
    }

    P2M_PRINTK("allocating p2m table\n");

    d->arch.p2m.alloc_page = alloc_page;
    d->arch.p2m.free_page = free_page;

    p2m_top = d->arch.p2m.alloc_page(d);
    if ( p2m_top == NULL )
    {
        p2m_unlock(d);
        return -ENOMEM;
    }
    list_add_tail(&p2m_top->list, &d->arch.p2m.pages);

    p2m_top->count_info = 1;
    p2m_top->u.inuse.type_info =
#if CONFIG_PAGING_LEVELS == 4
        PGT_l4_page_table
#elif CONFIG_PAGING_LEVELS == 3
        PGT_l3_page_table
#elif CONFIG_PAGING_LEVELS == 2
        PGT_l2_page_table
#endif
        | 1 | PGT_validated;

    d->arch.phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));

    P2M_PRINTK("populating p2m table\n");

    /* Initialise physmap tables for slot zero. Other code assumes this. */
    gfn = 0;
    mfn = _mfn(INVALID_MFN);
    if ( !set_p2m_entry(d, gfn, mfn, __PAGE_HYPERVISOR|_PAGE_USER) )
        goto error;

    for ( entry = d->page_list.next;
          entry != &d->page_list;
          entry = entry->next )
    {
        page = list_entry(entry, struct page_info, list);
        mfn = page_to_mfn(page);
        gfn = get_gpfn_from_mfn(mfn_x(mfn));
        page_count++;
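        /* (Editor's note: 0x55..55 is the poison pattern that unused m2p
         * entries are initialised with -- the "debug gfn" that audit_p2m()
         * also checks for -- so such pages get no p2m entry here.) */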
        if (
#ifdef __x86_64__
            (gfn != 0x5555555555555555L)
#else
            (gfn != 0x55555555L)
#endif
             && gfn != INVALID_M2P_ENTRY
             && !set_p2m_entry(d, gfn, mfn, __PAGE_HYPERVISOR|_PAGE_USER) )
            goto error;
    }

    P2M_PRINTK("p2m table initialised (%u pages)\n", page_count);
    p2m_unlock(d);
    return 0;

 error:
    P2M_PRINTK("failed to initialize p2m table, gfn=%05lx, mfn=%"
               PRI_mfn "\n", gfn, mfn_x(mfn));
    p2m_unlock(d);
    return -ENOMEM;
}

void p2m_teardown(struct domain *d)
/* Return all the p2m pages to Xen.
 * We know we don't have any extra mappings to these pages */
{
    struct list_head *entry, *n;
    struct page_info *pg;

    p2m_lock(d);
    d->arch.phys_table = pagetable_null();

    list_for_each_safe(entry, n, &d->arch.p2m.pages)
    {
        pg = list_entry(entry, struct page_info, list);
        list_del(entry);
        d->arch.p2m.free_page(d, pg);
    }
    p2m_unlock(d);
}

mfn_t
gfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
/* Read another domain's p2m entries */
{
    mfn_t mfn;
    paddr_t addr = ((paddr_t)gpfn) << PAGE_SHIFT;
    l2_pgentry_t *l2e;
    l1_pgentry_t *l1e;

    ASSERT(paging_mode_translate(d));
    mfn = pagetable_get_mfn(d->arch.phys_table);

    if ( gpfn > d->arch.p2m.max_mapped_pfn )
        /* This pfn is higher than the highest the p2m map currently holds */
        return _mfn(INVALID_MFN);

#if CONFIG_PAGING_LEVELS >= 4
    {
        l4_pgentry_t *l4e = map_domain_page(mfn_x(mfn));
        l4e += l4_table_offset(addr);
        if ( (l4e_get_flags(*l4e) & _PAGE_PRESENT) == 0 )
        {
            unmap_domain_page(l4e);
            return _mfn(INVALID_MFN);
        }
        mfn = _mfn(l4e_get_pfn(*l4e));
        unmap_domain_page(l4e);
    }
#endif
#if CONFIG_PAGING_LEVELS >= 3
    {
        l3_pgentry_t *l3e = map_domain_page(mfn_x(mfn));
#if CONFIG_PAGING_LEVELS == 3
        /* On PAE hosts the p2m has eight l3 entries, not four (see
         * shadow_set_p2m_entry()) so we can't use l3_table_offset.
         * Instead, just count the number of l3es from zero.  It's safe
         * to do this because we already checked that the gfn is within
         * the bounds of the p2m. */
        l3e += (addr >> L3_PAGETABLE_SHIFT);
#else
        l3e += l3_table_offset(addr);
#endif
        if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
        {
            unmap_domain_page(l3e);
            return _mfn(INVALID_MFN);
        }
        mfn = _mfn(l3e_get_pfn(*l3e));
        unmap_domain_page(l3e);
    }
#endif

    l2e = map_domain_page(mfn_x(mfn));
    l2e += l2_table_offset(addr);
    if ( (l2e_get_flags(*l2e) & _PAGE_PRESENT) == 0 )
    {
        unmap_domain_page(l2e);
        return _mfn(INVALID_MFN);
    }
    mfn = _mfn(l2e_get_pfn(*l2e));
    unmap_domain_page(l2e);

    l1e = map_domain_page(mfn_x(mfn));
    l1e += l1_table_offset(addr);
    if ( (l1e_get_flags(*l1e) & _PAGE_PRESENT) == 0 )
    {
        unmap_domain_page(l1e);
        return _mfn(INVALID_MFN);
    }
    mfn = _mfn(l1e_get_pfn(*l1e));
    unmap_domain_page(l1e);

    return mfn;
}
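
/* (Editor's note: this is a software walk of the p2m pagetable, level by
 * level; consistent with the locking comment at the top of the file it reads
 * the table without taking the p2m lock.) */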

#if P2M_AUDIT
static void audit_p2m(struct domain *d)
{
    struct list_head *entry;
    struct page_info *page;
    struct domain *od;
    unsigned long mfn, gfn, m2pfn, lp2mfn = 0;
    mfn_t p2mfn;
    unsigned long orphans_d = 0, orphans_i = 0, mpbad = 0, pmbad = 0;
    int test_linear;

    if ( !paging_mode_translate(d) )
        return;

    //P2M_PRINTK("p2m audit starts\n");

    test_linear = ( (d == current->domain)
                    && !pagetable_is_null(current->arch.monitor_table) );
    if ( test_linear )
        local_flush_tlb();

    /* Audit part one: walk the domain's page allocation list, checking
     * the m2p entries. */
    for ( entry = d->page_list.next;
          entry != &d->page_list;
          entry = entry->next )
    {
        page = list_entry(entry, struct page_info, list);
        mfn = mfn_x(page_to_mfn(page));

        // P2M_PRINTK("auditing guest page, mfn=%#lx\n", mfn);

        od = page_get_owner(page);

        if ( od != d )
        {
            P2M_PRINTK("wrong owner %#lx -> %p(%u) != %p(%u)\n",
                       mfn, od, (od?od->domain_id:-1), d, d->domain_id);
            continue;
        }

        gfn = get_gpfn_from_mfn(mfn);
        if ( gfn == INVALID_M2P_ENTRY )
        {
            orphans_i++;
            //P2M_PRINTK("orphaned guest page: mfn=%#lx has invalid gfn\n",
            //               mfn);
            continue;
        }

        if ( gfn == 0x55555555 )
        {
            orphans_d++;
            //P2M_PRINTK("orphaned guest page: mfn=%#lx has debug gfn\n",
            //               mfn);
            continue;
        }

        p2mfn = gfn_to_mfn_foreign(d, gfn);
        if ( mfn_x(p2mfn) != mfn )
        {
            mpbad++;
            P2M_PRINTK("map mismatch mfn %#lx -> gfn %#lx -> mfn %#lx"
                       " (-> gfn %#lx)\n",
                       mfn, gfn, mfn_x(p2mfn),
                       (mfn_valid(p2mfn)
                        ? get_gpfn_from_mfn(mfn_x(p2mfn))
                        : -1u));
            /* This m2p entry is stale: the domain has another frame in
             * this physical slot.  No great disaster, but for neatness,
             * blow away the m2p entry. */
            set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
505 }
507 if ( test_linear && (gfn <= d->arch.p2m.max_mapped_pfn) )
508 {
509 lp2mfn = mfn_x(gfn_to_mfn_current(gfn));
510 if ( lp2mfn != mfn_x(p2mfn) )
511 {
512 P2M_PRINTK("linear mismatch gfn %#lx -> mfn %#lx "
513 "(!= mfn %#lx)\n", gfn, lp2mfn, mfn_x(p2mfn));
514 }
515 }
517 // P2M_PRINTK("OK: mfn=%#lx, gfn=%#lx, p2mfn=%#lx, lp2mfn=%#lx\n",
518 // mfn, gfn, p2mfn, lp2mfn);
519 }
521 /* Audit part two: walk the domain's p2m table, checking the entries. */
522 if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
523 {
524 l2_pgentry_t *l2e;
525 l1_pgentry_t *l1e;
526 int i1, i2;
528 #if CONFIG_PAGING_LEVELS == 4
529 l4_pgentry_t *l4e;
530 l3_pgentry_t *l3e;
531 int i3, i4;
532 l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
533 #elif CONFIG_PAGING_LEVELS == 3
534 l3_pgentry_t *l3e;
535 int i3;
536 l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
537 #else /* CONFIG_PAGING_LEVELS == 2 */
538 l2e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
539 #endif
541 gfn = 0;
542 #if CONFIG_PAGING_LEVELS >= 3
543 #if CONFIG_PAGING_LEVELS >= 4
544 for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
545 {
546 if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
547 {
548 gfn += 1 << (L4_PAGETABLE_SHIFT - PAGE_SHIFT);
549 continue;
550 }
551 l3e = map_domain_page(mfn_x(_mfn(l4e_get_pfn(l4e[i4]))));
552 #endif /* now at levels 3 or 4... */
553 for ( i3 = 0;
554 i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
555 i3++ )
556 {
557 if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
558 {
559 gfn += 1 << (L3_PAGETABLE_SHIFT - PAGE_SHIFT);
560 continue;
561 }
562 l2e = map_domain_page(mfn_x(_mfn(l3e_get_pfn(l3e[i3]))));
563 #endif /* all levels... */
564 for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
565 {
566 if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
567 {
568 gfn += 1 << (L2_PAGETABLE_SHIFT - PAGE_SHIFT);
569 continue;
570 }
571 l1e = map_domain_page(mfn_x(_mfn(l2e_get_pfn(l2e[i2]))));
573 for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
574 {
575 if ( !(l1e_get_flags(l1e[i1]) & _PAGE_PRESENT) )
576 continue;
577 mfn = l1e_get_pfn(l1e[i1]);
578 ASSERT(mfn_valid(_mfn(mfn)));
579 m2pfn = get_gpfn_from_mfn(mfn);
580 if ( m2pfn != gfn )
581 {
582 pmbad++;
583 P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
584 " -> gfn %#lx\n", gfn, mfn, m2pfn);
585 BUG();
586 }
587 }
588 unmap_domain_page(l1e);
589 }
590 #if CONFIG_PAGING_LEVELS >= 3
591 unmap_domain_page(l2e);
592 }
593 #if CONFIG_PAGING_LEVELS >= 4
594 unmap_domain_page(l3e);
595 }
596 #endif
597 #endif
599 #if CONFIG_PAGING_LEVELS == 4
600 unmap_domain_page(l4e);
601 #elif CONFIG_PAGING_LEVELS == 3
602 unmap_domain_page(l3e);
603 #else /* CONFIG_PAGING_LEVELS == 2 */
604 unmap_domain_page(l2e);
605 #endif
607 }
609 //P2M_PRINTK("p2m audit complete\n");
610 //if ( orphans_i | orphans_d | mpbad | pmbad )
611 // P2M_PRINTK("p2m audit found %lu orphans (%lu inval %lu debug)\n",
612 // orphans_i + orphans_d, orphans_i, orphans_d,
613 if ( mpbad | pmbad )
614 P2M_PRINTK("p2m audit found %lu odd p2m, %lu bad m2p entries\n",
615 pmbad, mpbad);
616 }
617 #else
618 #define audit_p2m(_d) do { (void)(_d); } while(0)
619 #endif /* P2M_AUDIT */

static void
p2m_remove_page(struct domain *d, unsigned long gfn, unsigned long mfn)
{
    if ( !paging_mode_translate(d) )
        return;
    P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn);

    ASSERT(mfn_x(gfn_to_mfn(d, gfn)) == mfn);
    //ASSERT(mfn_to_gfn(d, mfn) == gfn);

    set_p2m_entry(d, gfn, _mfn(INVALID_MFN), __PAGE_HYPERVISOR|_PAGE_USER);
    set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
}

void
guest_physmap_remove_page(struct domain *d, unsigned long gfn,
                          unsigned long mfn)
{
    p2m_lock(d);
    audit_p2m(d);
    p2m_remove_page(d, gfn, mfn);
    audit_p2m(d);
    p2m_unlock(d);
}

void
guest_physmap_add_page(struct domain *d, unsigned long gfn,
                       unsigned long mfn)
{
    unsigned long ogfn;
    mfn_t omfn;

    if ( !paging_mode_translate(d) )
        return;

    p2m_lock(d);
    audit_p2m(d);

    P2M_DEBUG("adding gfn=%#lx mfn=%#lx\n", gfn, mfn);

    omfn = gfn_to_mfn(d, gfn);
    if ( mfn_valid(omfn) )
    {
        set_p2m_entry(d, gfn, _mfn(INVALID_MFN), __PAGE_HYPERVISOR|_PAGE_USER);
        set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
    }

    ogfn = mfn_to_gfn(d, _mfn(mfn));
    if (
#ifdef __x86_64__
        (ogfn != 0x5555555555555555L)
#else
        (ogfn != 0x55555555L)
#endif
        && (ogfn != INVALID_M2P_ENTRY)
        && (ogfn != gfn) )
    {
        /* This machine frame is already mapped at another physical address */
        P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
                  mfn, ogfn, gfn);
        if ( mfn_valid(omfn = gfn_to_mfn(d, ogfn)) )
        {
            P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n",
                      ogfn, mfn_x(omfn));
            if ( mfn_x(omfn) == mfn )
                p2m_remove_page(d, ogfn, mfn);
        }
    }

    set_p2m_entry(d, gfn, _mfn(mfn), __PAGE_HYPERVISOR|_PAGE_USER);
    set_gpfn_from_mfn(mfn, gfn);

    audit_p2m(d);
    p2m_unlock(d);
}

/* This function walks the P2M table and modifies the l1e flags of all pages.
 * Note that the physical base address in each l1e is left intact.  This can
 * be used for special purposes such as marking physical memory NOT WRITABLE
 * in order to track dirty pages during live migration.
 */
void p2m_set_flags_global(struct domain *d, u32 l1e_flags)
{
    unsigned long mfn, gfn;
    l1_pgentry_t l1e_content;
    l1_pgentry_t *l1e;
    l2_pgentry_t *l2e;
    int i1, i2;
#if CONFIG_PAGING_LEVELS >= 3
    l3_pgentry_t *l3e;
    int i3;
#if CONFIG_PAGING_LEVELS == 4
    l4_pgentry_t *l4e;
    int i4;
#endif /* CONFIG_PAGING_LEVELS == 4 */
#endif /* CONFIG_PAGING_LEVELS >= 3 */

    if ( !paging_mode_translate(d) )
        return;

    if ( pagetable_get_pfn(d->arch.phys_table) == 0 )
        return;

    p2m_lock(d);

#if CONFIG_PAGING_LEVELS == 4
    l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
#elif CONFIG_PAGING_LEVELS == 3
    l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
#else /* CONFIG_PAGING_LEVELS == 2 */
    l2e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
#endif

#if CONFIG_PAGING_LEVELS >= 3
#if CONFIG_PAGING_LEVELS >= 4
    for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
    {
        if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
        {
            continue;
        }
        l3e = map_domain_page(mfn_x(_mfn(l4e_get_pfn(l4e[i4]))));
#endif /* now at levels 3 or 4... */
        for ( i3 = 0;
              i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
              i3++ )
        {
            if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
            {
                continue;
            }
            l2e = map_domain_page(mfn_x(_mfn(l3e_get_pfn(l3e[i3]))));
#endif /* all levels... */
            for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
            {
                if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
                {
                    continue;
                }
                l1e = map_domain_page(mfn_x(_mfn(l2e_get_pfn(l2e[i2]))));

                for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
                {
                    if ( !(l1e_get_flags(l1e[i1]) & _PAGE_PRESENT) )
                        continue;
                    mfn = l1e_get_pfn(l1e[i1]);
                    gfn = get_gpfn_from_mfn(mfn);
                    /* create a new l1e entry using l1e_flags */
                    l1e_content = l1e_from_pfn(mfn, l1e_flags);
                    paging_write_p2m_entry(d, gfn, &l1e[i1], l1e_content, 1);
                }
                unmap_domain_page(l1e);
            }
#if CONFIG_PAGING_LEVELS >= 3
            unmap_domain_page(l2e);
        }
#if CONFIG_PAGING_LEVELS >= 4
        unmap_domain_page(l3e);
    }
#endif
#endif

#if CONFIG_PAGING_LEVELS == 4
    unmap_domain_page(l4e);
#elif CONFIG_PAGING_LEVELS == 3
    unmap_domain_page(l3e);
#else /* CONFIG_PAGING_LEVELS == 2 */
    unmap_domain_page(l2e);
#endif

    p2m_unlock(d);
}
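
/* Illustrative use (editor's note, an assumption rather than something taken
 * from this file): a log-dirty implementation could call
 *     p2m_set_flags_global(d, _PAGE_PRESENT|_PAGE_USER);
 * to drop _PAGE_RW from every p2m entry when dirty tracking is enabled, so
 * that guest writes fault and can be recorded, and later call
 *     p2m_set_flags_global(d, __PAGE_HYPERVISOR|_PAGE_USER);
 * to restore the default flags used elsewhere in this file. */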

/* This function walks the P2M table and modifies the l1e flags of a
 * specific gpa.
 */
int p2m_set_flags(struct domain *d, paddr_t gpa, u32 l1e_flags)
{
    unsigned long gfn;
    mfn_t mfn;

    p2m_lock(d);

    gfn = gpa >> PAGE_SHIFT;
    mfn = gfn_to_mfn(d, gfn);
    if ( mfn_valid(mfn) )
        set_p2m_entry(d, gfn, mfn, l1e_flags);

    p2m_unlock(d);

    return 1;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */