unsigned long max_page;
+extern char __init_begin[], __init_end[];
+
/* Map a 4k page in a fixmap entry */
void set_fixmap(unsigned map, unsigned long mfn, unsigned attributes)
{
/* Undo the temporary map */
pte.bits = 0;
write_pte(xen_second + second_table_offset(dest_va), pte);
- /*
- * Have removed a mapping previously used for .text. Flush everything
- * for safety.
- */
- asm volatile (
- "dsb;" /* Ensure visibility of PTE write */
- STORE_CP32(0, TLBIALLH) /* Flush hypervisor TLB */
- STORE_CP32(0, BPIALL) /* Flush branch predictor */
- "dsb;" /* Ensure completion of TLB+BP flush */
- "isb;"
- : : "r" (i /*dummy*/) : "memory");
+ flush_xen_text_tlb();
/* Link in the fixmap pagetable */
pte = mfn_to_xen_entry((((unsigned long) xen_fixmap) + phys_offset)
                       >> PAGE_SHIFT);
pte.pt.table = 1;
write_pte(xen_second + second_linear_offset(XEN_VIRT_START), pte);
/* Have changed a mapping used for .text. Flush everything for safety. */
- asm volatile (
- "dsb;" /* Ensure visibility of PTE write */
- STORE_CP32(0, TLBIALLH) /* Flush hypervisor TLB */
- STORE_CP32(0, BPIALL) /* Flush branch predictor */
- "dsb;" /* Ensure completion of TLB+BP flush */
- "isb;"
- : : "r" (i /*dummy*/) : "memory");
+ flush_xen_text_tlb();
/* From now on, no mapping may be both writable and executable. */
WRITE_CP32(READ_CP32(HSCTLR) | SCTLR_WXN, HSCTLR);
frametable_virt_end = FRAMETABLE_VIRT_START + (nr_pages * sizeof(struct page_info));
}
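/*
 * Note: the write-xor-execute enforcement above relies on the WXN bit in
 * HSCTLR. A minimal sketch of the constant assumed here (on ARMv7,
 * (H)SCTLR.WXN is bit 19; once set, any mapping writable at this level is
 * treated as execute-never):
 */
#define SCTLR_WXN (1u << 19) /* sketch only; bit 19 of (H)SCTLR on ARMv7 */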
+enum mg { mg_clear, mg_ro, mg_rw, mg_rx };
+static void set_pte_flags_on_range(const char *p, unsigned long l, enum mg mg)
+{
+ lpae_t pte;
+ int i;
+
+ ASSERT(is_kernel(p) && is_kernel(p + l));
+
+ /* Can only guard at page granularity */
+ ASSERT(!((unsigned long) p & ~PAGE_MASK));
+ ASSERT(!(l & ~PAGE_MASK));
+
+ for ( i = (p - _start) / PAGE_SIZE;
+ i < (p + l - _start) / PAGE_SIZE;
+ i++ )
+ {
+ pte = xen_xenmap[i];
+ switch ( mg )
+ {
+ case mg_clear:
+ pte.pt.valid = 0;
+ break;
+ case mg_ro:
+ pte.pt.valid = 1;
+ pte.pt.pxn = 1;
+ pte.pt.xn = 1;
+ pte.pt.ro = 1;
+ break;
+ case mg_rw:
+ pte.pt.valid = 1;
+ pte.pt.pxn = 1;
+ pte.pt.xn = 1;
+ pte.pt.ro = 0;
+ break;
+ case mg_rx:
+ pte.pt.valid = 1;
+ pte.pt.pxn = 0;
+ pte.pt.xn = 0;
+ pte.pt.ro = 1;
+ break;
+ }
+ write_pte(xen_xenmap + i, pte);
+ }
+ flush_xen_text_tlb();
+}
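/*
 * Illustrative use of the interface above (a sketch, not part of this
 * patch): with hypothetical page-aligned linker symbols __ro_start and
 * __ro_end, a read-only data range could be guarded like this. The real
 * caller in this patch is free_init_memory() below.
 */
set_pte_flags_on_range(__ro_start, __ro_end - __ro_start, mg_ro);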
+
+/* Release all __init and __initdata ranges to be reused */
+void free_init_memory(void)
+{
+ paddr_t pa = virt_to_maddr(__init_begin);
+ unsigned long len = __init_end - __init_begin;
+ /* Make the range writable, poison it, then unmap it entirely. */
+ set_pte_flags_on_range(__init_begin, len, mg_rw);
+ memset(__init_begin, 0xcc, len);
+ set_pte_flags_on_range(__init_begin, len, mg_clear);
+ /* Hand the freed pages back to the heap allocator. */
+ init_domheap_pages(pa, pa + len);
+ printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10);
+}
+
void arch_dump_shared_mem_info(void)
{
}
#include <asm/setup.h>
#include "gic.h"
-extern const char __init_begin[], __init_end[], __bss_start[];
-
/* Spinlock for serializing CPU bringup */
unsigned long __initdata boot_gate = 1;
/* Number of non-boot CPUs ready to enter C */
static __attribute_used__ void init_done(void)
{
- /* TODO: free (or page-protect) the init areas.
- memset(__init_begin, 0xcc, __init_end - __init_begin);
- free_xen_data(__init_begin, __init_end);
- */
- printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10);
-
+ free_init_memory();
startup_cpu_idle_loop();
}
: : "r" (pte.bits), "r" (p) : "memory");
}
+/*
+ * Flush all hypervisor mappings from the TLB and branch predictor.
+ * This is needed after changing Xen code mappings.
+ */
+static inline void flush_xen_text_tlb(void)
+{
+ register unsigned long r0 asm ("r0");
+ asm volatile (
+ "dsb;" /* Ensure visibility of PTE writes */
+ STORE_CP32(0, TLBIALLH) /* Flush hypervisor TLB */
+ STORE_CP32(0, BPIALL) /* Flush branch predictor */
+ "dsb;" /* Ensure completion of TLB+BP flush */
+ "isb;"
+ : : "r" (r0) /*dummy*/ : "memory");
+}
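/*
 * For reference (my reading of the standard ARMv7 CP15 encodings, assuming
 * STORE_CP32 emits an mcr with r0 as the source register):
 *   TLBIALLH: mcr p15, 4, r0, c8, c7, 0  -- invalidate all hyp TLB entries
 *   BPIALL:   mcr p15, 0, r0, c7, c5, 6  -- invalidate all branch predictors
 * The value written is ignored, which is why r0 can be passed uninitialized
 * as a dummy operand.
 */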
+
/*
* Flush all hypervisor mappings from the data TLB. This is not
* sufficient when changing code mappings or for self modifying code.