/* Map a 4k page in a fixmap entry */
void set_fixmap(unsigned map, unsigned long mfn, unsigned attributes)
{
- lpae_t pte = mfn_to_xen_entry(mfn);
+ lpae_t pte = mfn_to_xen_entry(mfn, attributes);
pte.pt.table = 1; /* 4k mappings always have this bit set */
- pte.pt.ai = attributes;
pte.pt.xn = 1;
write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
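Callers now choose the attribute to match what the fixmap slot maps. An illustrative use (FIXMAP_MISC is a real slot in asm-arm/config.h, but the UART example and uart_mfn are made up here):

    set_fixmap(FIXMAP_MISC, uart_mfn, DEV_SHARED);   /* Device memory */
    /* ... access the page through FIXMAP_ADDR(FIXMAP_MISC) ... */
    clear_fixmap(FIXMAP_MISC);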
else if ( map[slot].pt.avail == 0 )
{
/* Commandeer this 2MB slot */
- pte = mfn_to_xen_entry(slot_mfn);
+ pte = mfn_to_xen_entry(slot_mfn, WRITEALLOC);
pte.pt.avail = 1;
write_pte(map + slot, pte);
break;
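The avail bits are ignored by the hardware walker, so map_domain_page() can use them as a per-slot reference count. A condensed sketch of the slot lifecycle, simplified from the surrounding code rather than quoted from it:

    if ( map[slot].pt.avail < 0xf &&
         map[slot].pt.base == slot_mfn &&
         map[slot].pt.valid )
        map[slot].pt.avail++;        /* reuse: take another reference */
    else if ( map[slot].pt.avail == 0 )
    {
        /* Unused: claim the slot and install the first mapping. */
        pte = mfn_to_xen_entry(slot_mfn, WRITEALLOC);
        pte.pt.avail = 1;
        write_pte(map + slot, pte);
    }
    /* unmap_domain_page() does map[slot].pt.avail--; the slot becomes
     * reclaimable again once the count returns to 0. */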
{
paddr_t ma = va + phys_offset;
unsigned long mfn = ma >> PAGE_SHIFT;
- return mfn_to_xen_entry(mfn);
+ return mfn_to_xen_entry(mfn, WRITEALLOC);
}
void __init remove_early_mappings(void)
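pte_of_xenaddr() undoes the link-to-load relocation recorded in phys_offset. A worked example with illustrative numbers (the load address is made up; XEN_VIRT_START really is 0x00200000 in this era's config.h):

    vaddr_t va          = 0x00201000;              /* an address inside Xen */
    paddr_t xen_paddr   = 0x80200000;              /* where Xen was copied  */
    paddr_t phys_offset = xen_paddr - 0x00200000;  /* load - link = 0x80000000 */
    paddr_t ma          = va + phys_offset;        /* 0x80201000 */
    unsigned long mfn   = ma >> PAGE_SHIFT;        /* 0x80201 with 4K pages */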
lpae_t pte, *p;
int i;
+ /* Map the destination in the boot misc area. */
+ dest_va = BOOT_RELOC_VIRT_START;
+ pte = mfn_to_xen_entry(xen_paddr >> PAGE_SHIFT, WRITEALLOC);
+ write_pte(xen_second + second_table_offset(dest_va), pte);
+ flush_xen_data_tlb_range_va(dest_va, SECOND_SIZE);
+
/* Calculate virt-to-phys offset for the new location */
phys_offset = xen_paddr - (unsigned long) _start;
/* Initialise xen second level entries ... */
/* ... Xen's text etc */
- pte = mfn_to_xen_entry(xen_paddr>>PAGE_SHIFT);
+ pte = mfn_to_xen_entry(xen_paddr >> PAGE_SHIFT, WRITEALLOC);
pte.pt.xn = 0; /* Contains our text mapping! */
xen_second[second_table_offset(XEN_VIRT_START)] = pte;
/* Map the destination in the boot misc area. */
dest_va = BOOT_RELOC_VIRT_START;
- pte = mfn_to_xen_entry(xen_paddr >> PAGE_SHIFT);
+ pte = mfn_to_xen_entry(xen_paddr >> PAGE_SHIFT, WRITEALLOC);
write_pte(boot_second + second_table_offset(dest_va), pte);
flush_xen_data_tlb_range_va(dest_va, SECOND_SIZE);
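Both "Map the destination" blocks above install a single second-level entry, and one second-level entry maps 2MB, which is why the TLB flush covers SECOND_SIZE. The index arithmetic, re-derived in a standalone sketch (the MY_* names are mine, not the Xen macros; values assume the 4K LPAE granule Xen uses):

    #define MY_SECOND_SHIFT 21                /* one 2nd-level entry maps 2MB */
    #define MY_LPAE_ENTRIES 512               /* entries per 4K table page */

    static unsigned my_second_table_offset(unsigned long va)
    {
        /* Bits [29:21] of the VA select one of the 512 second-level slots. */
        return (va >> MY_SECOND_SHIFT) & (MY_LPAE_ENTRIES - 1);
    }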
#ifdef CONFIG_ARM_64
unsigned long va = XEN_VIRT_START + (i << PAGE_SHIFT);
if ( !is_kernel(va) )
break;
- pte = mfn_to_xen_entry(mfn);
+ pte = mfn_to_xen_entry(mfn, WRITEALLOC);
pte.pt.table = 1; /* 4k mappings always have this bit set */
if ( is_kernel_text(va) || is_kernel_inittext(va) )
{
* domheap mapping pages. */
for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ )
{
- pte = mfn_to_xen_entry(virt_to_mfn(domheap+i*LPAE_ENTRIES));
+ pte = mfn_to_xen_entry(virt_to_mfn(domheap + i * LPAE_ENTRIES), WRITEALLOC);
pte.pt.table = 1;
write_pte(&first[first_table_offset(DOMHEAP_VIRT_START + i * FIRST_SIZE)], pte);
}
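The loop hands one page of second-level entries to each first-level slot; it works because their spans match exactly. A standalone C11 check of that arithmetic (constants re-derived here, not taken from Xen's headers):

    #include <assert.h>

    #define ENTRIES_PER_TABLE 512              /* 4K page / 8-byte entries   */
    #define SECOND_ENTRY_SPAN (2UL << 20)      /* 2MB per second-level entry */
    #define FIRST_ENTRY_SPAN  (1UL << 30)      /* 1GB per first-level entry  */

    static_assert(ENTRIES_PER_TABLE * SECOND_ENTRY_SPAN == FIRST_ENTRY_SPAN,
                  "one page of second-level entries spans one first-level slot");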
count = nr_mfns / LPAE_ENTRIES;
p = second + second_linear_offset(virt_offset);
- pte = mfn_to_xen_entry(base_mfn);
+ pte = mfn_to_xen_entry(base_mfn, WRITEALLOC);
pte.pt.contig = 1; /* These maps are in 16-entry contiguous chunks. */
for ( i = 0; i < count; i++ )
{
else
{
unsigned long first_mfn = alloc_boot_pages(1, 1);
- pte = mfn_to_xen_entry(first_mfn);
+ pte = mfn_to_xen_entry(first_mfn, WRITEALLOC);
pte.pt.table = 1;
write_pte(p, pte);
first = mfn_to_virt(first_mfn);
}
- pte = mfn_to_xen_entry(base_mfn);
+ pte = mfn_to_xen_entry(base_mfn, WRITEALLOC);
/* TODO: Set pte.pt.contig when appropriate. */
write_pte(&first[first_table_offset(vaddr)], pte);
second = mfn_to_virt(second_base);
for ( i = 0; i < nr_second; i++ )
{
- pte = mfn_to_xen_entry(second_base + i);
+ pte = mfn_to_xen_entry(second_base + i, WRITEALLOC);
pte.pt.table = 1;
write_pte(&xen_first[first_table_offset(FRAMETABLE_VIRT_START)+i], pte);
}
if ( p == NULL )
return -ENOMEM;
clear_page(p);
- pte = mfn_to_xen_entry(virt_to_mfn(p));
+ pte = mfn_to_xen_entry(virt_to_mfn(p), WRITEALLOC);
pte.pt.table = 1;
write_pte(entry, pte);
return 0;
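The clear_page() before installing the table matters: an all-zeroes LPAE descriptor has the valid bit clear, so every slot of the new table faults until explicitly populated. A two-line illustration using the lpae_t union from asm-arm/page.h:

    lpae_t empty = { .bits = 0 };
    ASSERT(!empty.pt.valid);    /* zeroed descriptor == invalid mapping */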
addr, mfn);
return -EINVAL;
}
- pte = mfn_to_xen_entry(mfn);
+ pte = mfn_to_xen_entry(mfn, ai);
pte.pt.table = 1;
- pte.pt.ai = ai;
write_pte(&third[third_table_offset(addr)], pte);
break;
case REMOVE:
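With the attribute now folded into mfn_to_xen_entry(), the ai argument of create_xen_entries() flows straight into the descriptor. A hypothetical caller sketch (create_xen_entries() is a static helper in xen/arch/arm/mm.c reached through wrappers such as ioremap_attr(); treating mfn and ai as unused for REMOVE is an assumption here):

    /* Map nr pages of device MMIO at va as shareable Device memory ... */
    rc = create_xen_entries(INSERT, va, mfn, nr, DEV_SHARED);
    if ( rc )
        return rc;
    /* ... and tear the mapping down again (mfn/ai unused for REMOVE): */
    create_xen_entries(REMOVE, va, 0, nr, 0);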
/* Standard entry type that we'll use to build Xen's own pagetables.
* We put the same permissions at every level, because they're ignored
* by the walker in non-leaf entries. */
-static inline lpae_t mfn_to_xen_entry(unsigned long mfn)
+static inline lpae_t mfn_to_xen_entry(unsigned long mfn, unsigned attr)
{
paddr_t pa = ((paddr_t) mfn) << PAGE_SHIFT;
lpae_t e = (lpae_t) {
.pt = {
.xn = 1, /* No need to execute outside .text */
.ng = 1, /* Makes TLB flushes easier */
.af = 1, /* No need for access tracking */
- .sh = LPAE_SH_OUTER, /* Xen mappings are globally coherent */
.ns = 1, /* Hyp mode is in the non-secure world */
.user = 1, /* See below */
- .ai = WRITEALLOC,
+ .ai = attr,
.table = 0, /* Set to 1 for links and 4k maps */
.valid = 1, /* Mappings are present */
}};
* pagetables in User mode it's OK. If this changes, remember
* to update the hard-coded values in head.S too */
+ switch ( attr )
+ {
+ case BUFFERABLE:
+ /*
+ * ARM ARM: Overlaying the shareability attribute (DDI
+ * 0406C.b B3-1376 to 1377)
+ *
+ * A memory region with a resultant memory type attribute of Normal,
+ * and a resultant cacheability attribute of Inner Non-cacheable,
+ * Outer Non-cacheable, must have a resultant shareability attribute
+ * of Outer Shareable, otherwise shareability is UNPREDICTABLE.
+ *
+ * On ARMv8 shareability is ignored and explicitly treated as Outer
+ * Shareable for Normal Inner Non-cacheable, Outer Non-cacheable.
+ */
+ e.pt.sh = LPAE_SH_OUTER;
+ break;
+ case UNCACHED:
+ case DEV_SHARED:
+ /*
+ * Shareability is ignored for non-Normal memory; Outer is as
+ * good as anything.
+ *
+ * On ARMv8 shareability is ignored and explicitly treated as Outer
+ * Shareable for any device memory type.
+ */
+ e.pt.sh = LPAE_SH_OUTER;
+ break;
+ default:
+ e.pt.sh = LPAE_SH_INNER; /* Xen mappings are SMP coherent */
+ break;
+ }
+
ASSERT(!(pa & ~PAGE_MASK));
ASSERT(!(pa & ~PADDR_MASK));
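For reference, attr indexes into the MAIR0/MAIR1 attribute registers and sh uses the architectural SH[1:0] encodings. Summarised from the same era's asm-arm/page.h (re-check exact values against your tree before relying on them):

    /* AttrIndx (MAIR0/MAIR1):                       SH[1:0]:              */
    /*   UNCACHED      0  Strongly-Ordered            0  Non-shareable     */
    /*   BUFFERABLE    1  Normal, Non-cacheable       1  UNPREDICTABLE     */
    /*   WRITETHROUGH  2  Normal, Write-through       2  LPAE_SH_OUTER     */
    /*   WRITEBACK     3  Normal, Write-back          3  LPAE_SH_INNER     */
    /*   DEV_SHARED    4  Device                                           */
    /*   WRITEALLOC    7  Normal, Write-back, Write-allocate               */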