* table at offset in previous level MFN (prev_l_mfn). pt_pfn is a guest
* PFN.
*/
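+/* Protection flags for page table entries: pt_prot[n - 1] is Ln_PROT. */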
+static pgentry_t pt_prot[PAGETABLE_LEVELS] = {
+ L1_PROT,
+ L2_PROT,
+ L3_PROT,
+#if defined(__x86_64__)
+ L4_PROT,
+#endif
+};
+
static void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn,
unsigned long offset, unsigned long level)
{
- pgentry_t *tab = pt_base;
+ pgentry_t *tab;
unsigned long pt_page = (unsigned long)pfn_to_virt(*pt_pfn);
- pgentry_t prot_e, prot_t;
+#ifdef CONFIG_PARAVIRT
mmu_update_t mmu_updates[1];
int rc;
+#endif
- prot_e = prot_t = 0;
DEBUG("Allocating new L%d pt frame for pfn=%lx, "
"prev_l_mfn=%lx, offset=%lx",
level, *pt_pfn, prev_l_mfn, offset);
/* We need to clear the page, otherwise we might fail to map it
as a page table page */
memset((void*) pt_page, 0, PAGE_SIZE);
-
- switch ( level )
- {
- case L1_FRAME:
- prot_e = L1_PROT;
- prot_t = L2_PROT;
- break;
- case L2_FRAME:
- prot_e = L2_PROT;
- prot_t = L3_PROT;
- break;
-#if defined(__x86_64__)
- case L3_FRAME:
- prot_e = L3_PROT;
- prot_t = L4_PROT;
- break;
-#endif
- default:
- printk("new_pt_frame() called with invalid level number %lu\n", level);
- do_exit();
- break;
- }
+ /* pt_prot[level] is indexed below, so level must stay below PAGETABLE_LEVELS. */
+ ASSERT(level >= 1 && level < PAGETABLE_LEVELS);
+
+#ifdef CONFIG_PARAVIRT
/* Make PFN a page table page */
+ tab = pt_base;
#if defined(__x86_64__)
tab = pte_to_virt(tab[l4_table_offset(pt_page)]);
#endif
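+ /* Xen requires page table pages to be mapped read-only in the guest, hence
+ _PAGE_RW is masked out below. */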
mmu_updates[0].ptr = (tab[l2_table_offset(pt_page)] & PAGE_MASK) +
sizeof(pgentry_t) * l1_table_offset(pt_page);
mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT |
- (prot_e & ~_PAGE_RW);
+ (pt_prot[level - 1] & ~_PAGE_RW);
if ( (rc = HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF)) < 0 )
{
printk("ERROR: mmu_update failed with rc=%d\n", rc);
do_exit();
}

/* Hook the new page table page into the hierarchy */
mmu_updates[0].ptr =
((pgentry_t)prev_l_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
- mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | prot_t;
+ mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT |
+ pt_prot[level];
if ( (rc = HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF)) < 0 )
{
printk("ERROR: mmu_update failed with rc=%d\n", rc);
do_exit();
}
+#else
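+ /* Without PV MMU hypercalls PFN == MFN: hook the new table in with a
+ plain write. */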
+ tab = mfn_to_virt(prev_l_mfn);
+ tab[offset] = (*pt_pfn << PAGE_SHIFT) | pt_prot[level];
+#endif
*pt_pfn += 1;
}
{
unsigned long start_address, end_address;
unsigned long pfn_to_map, pt_pfn = *start_pfn;
- static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
pgentry_t *tab = pt_base, page;
unsigned long pt_mfn = pfn_to_mfn(virt_to_pfn(pt_base));
unsigned long offset;
+#ifdef CONFIG_PARAVIRT
+ static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
int count = 0;
int rc;
+#endif
/* Be conservative: even if we know there will be more pages already
mapped, start the loop at the very beginning. */
((unsigned long)pfn_to_virt(*max_pfn) -
(unsigned long)&_text)>>20);
}
+#else
+ /* Round up to the next 2MB boundary (L1_PAGETABLE_ENTRIES 4k pages), as we
+ are using 2MB pages on HVMlite. */
+ pfn_to_map = (pfn_to_map + L1_PAGETABLE_ENTRIES - 1) &
+ ~(L1_PAGETABLE_ENTRIES - 1);
#endif
start_address = (unsigned long)pfn_to_virt(pfn_to_map);
pt_mfn = pte_to_mfn(page);
tab = to_virt(mfn_to_pfn(pt_mfn) << PAGE_SHIFT);
offset = l2_table_offset(start_address);
+#ifdef CONFIG_PARAVIRT
/* Need new L1 pt frame */
if ( !(tab[offset] & _PAGE_PRESENT) )
new_pt_frame(&pt_pfn, pt_mfn, offset, L1_FRAME);
count = 0;
}
start_address += PAGE_SIZE;
+#else
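+ /* Map a 2MB superpage directly via an L2 entry with _PAGE_PSE set. */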
+ if ( !(tab[offset] & _PAGE_PRESENT) )
+ tab[offset] = (pgentry_t)pfn_to_map << PAGE_SHIFT |
+ L2_PROT | _PAGE_PSE;
+ start_address += 1UL << L2_PAGETABLE_SHIFT;
+#endif
}
*start_pfn = pt_pfn;
unsigned long start_address =
((unsigned long) text + PAGE_SIZE - 1) & PAGE_MASK;
unsigned long end_address = (unsigned long) etext;
- static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
pgentry_t *tab = pt_base, page;
unsigned long mfn = pfn_to_mfn(virt_to_pfn(pt_base));
unsigned long offset;
+ unsigned long page_size = PAGE_SIZE;
+#ifdef CONFIG_PARAVIRT
+ static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
int count = 0;
int rc;
+#endif
printk("setting %p-%p readonly\n", text, etext);
- while ( start_address + PAGE_SIZE <= end_address )
+ while ( start_address + page_size <= end_address )
{
tab = pt_base;
mfn = pfn_to_mfn(virt_to_pfn(pt_base));
mfn = pte_to_mfn(page);
tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
offset = l2_table_offset(start_address);
- page = tab[offset];
- mfn = pte_to_mfn(page);
- tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+ if ( !(tab[offset] & _PAGE_PSE) )
+ {
+ page = tab[offset];
+ mfn = pte_to_mfn(page);
+ tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
- offset = l1_table_offset(start_address);
+ offset = l1_table_offset(start_address);
+ }
if ( start_address != (unsigned long)&shared_info )
{
+#ifdef CONFIG_PARAVIRT
mmu_updates[count].ptr =
((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
mmu_updates[count].val = tab[offset] & ~_PAGE_RW;
count++;
+#else
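+ /* Clear _PAGE_RW in place; the CR3 reload at the end of the function
+ flushes the TLB. */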
+ tab[offset] &= ~_PAGE_RW;
+#endif
}
else
printk("skipped %lx\n", start_address);
- start_address += PAGE_SIZE;
+ start_address += page_size;
+#ifdef CONFIG_PARAVIRT
if ( count == L1_PAGETABLE_ENTRIES ||
- start_address + PAGE_SIZE > end_address )
+ start_address + page_size > end_address )
{
rc = HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF);
if ( rc < 0 )
}
count = 0;
}
+#else
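+ /* The first 2MB is mapped with 4k pages, everything above with 2MB
+ superpages, so widen the step once past 2MB. */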
+ if ( start_address == (1UL << L2_PAGETABLE_SHIFT) )
+ page_size = 1UL << L2_PAGETABLE_SHIFT;
+#endif
}
+#ifdef CONFIG_PARAVIRT
{
mmuext_op_t op = {
.cmd = MMUEXT_TLB_FLUSH_ALL,
int count;
HYPERVISOR_mmuext_op(&op, 1, &count, DOMID_SELF);
}
+#else
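+ /* Reloading CR3 flushes all non-global TLB entries, activating the
+ read-only mappings. */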
+ write_cr3((unsigned long)pt_base);
+#endif
}
/*
offset = l2_table_offset(va);
if ( !(tab[offset] & _PAGE_PRESENT) )
return NULL;
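+ /* A _PAGE_PSE entry maps a 2MB superpage with no L1 table below it:
+ return the L2 entry itself. */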
+ if ( tab[offset] & _PAGE_PSE )
+ return &tab[offset];
mfn = pte_to_mfn(tab[offset]);
tab = mfn_to_virt(mfn);
offset = l1_table_offset(va);
new_pt_frame(&pt_pfn, pt_mfn, offset, L1_FRAME);
}
ASSERT(tab[offset] & _PAGE_PRESENT);
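+ /* As in get_pgt(): a 2MB superpage has no L1 table, return the L2 entry. */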
+ if ( tab[offset] & _PAGE_PSE )
+ return &tab[offset];
+
pt_mfn = pte_to_mfn(tab[offset]);
tab = mfn_to_virt(pt_mfn);
{
pgentry_t *pgt = NULL;
unsigned long done = 0;
- unsigned long i;
- int rc;
if ( !mfns )
{
memset(err, 0x00, n * sizeof(int));
while ( done < n )
{
+#ifdef CONFIG_PARAVIRT
+ unsigned long i;
+ int rc;
unsigned long todo;
if ( err )
}
}
done += todo;
+#else
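+ /* Fetch the L1 table on the first iteration and whenever va crosses a
+ 2MB boundary. */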
+ if ( !pgt || !(va & L1_MASK) )
+ pgt = need_pgt(va & ~L1_MASK);
+ if ( !pgt )
+ return -ENOMEM;
+
+ ASSERT(!(*pgt & _PAGE_PSE));
+ pgt[l1_table_offset(va)] = (pgentry_t)
+ (((mfns[done * stride] + done * incr) << PAGE_SHIFT) | prot);
+ done++;
+#endif
}
return 0;
#define UNMAP_BATCH ((STACK_SIZE / 2) / sizeof(multicall_entry_t))
int unmap_frames(unsigned long va, unsigned long num_frames)
{
+#ifdef CONFIG_PARAVIRT
int n = UNMAP_BATCH;
multicall_entry_t call[n];
int ret;
int i;
+#else
+ pgentry_t *pgt;
+#endif
ASSERT(!((unsigned long)va & ~PAGE_MASK));
DEBUG("va=%p, num=0x%lx\n", va, num_frames);
while ( num_frames ) {
+#ifdef CONFIG_PARAVIRT
if ( n > num_frames )
n = num_frames;
}
}
num_frames -= n;
+#else
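+ /* Clear the PTE directly and flush the now stale TLB entry. */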
+ pgt = get_pgt(va);
+ if ( pgt )
+ {
+ ASSERT(!(*pgt & _PAGE_PSE));
+ *pgt = 0;
+ invlpg(va);
+ }
+ va += PAGE_SIZE;
+ num_frames--;
+#endif
}
return 0;
}
*/
static void clear_bootstrap(void)
{
+#ifdef CONFIG_PARAVIRT
pte_t nullpte = { };
int rc;
+#else
+ pgentry_t *pgt;
+#endif
/* Use first page as the CoW zero page */
memset(&_text, 0, PAGE_SIZE);
mfn_zero = virt_to_mfn((unsigned long) &_text);
+#ifdef CONFIG_PARAVIRT
if ( (rc = HYPERVISOR_update_va_mapping(0, nullpte, UVMF_INVLPG)) )
printk("Unable to unmap NULL page. rc=%d\n", rc);
+#else
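+ /* Unmap the zero page by clearing its PTE and flush the stale TLB entry. */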
+ pgt = get_pgt((unsigned long)&_text);
+ *pgt = 0;
+ invlpg((unsigned long)&_text);
+#endif
}
#ifdef CONFIG_PARAVIRT