static void update_xen_slot_in_full_gdt(const struct vcpu *v, unsigned int cpu)
{
- l1e_write(pv_gdt_ptes(v) + FIRST_RESERVED_GDT_PAGE,
- !is_pv_32bit_vcpu(v) ? per_cpu(gdt_l1e, cpu)
- : per_cpu(compat_gdt_l1e, cpu));
+ ASSERT(v != current);
+
+ populate_perdomain_mapping(v,
+ GDT_VIRT_START(v) + (FIRST_RESERVED_GDT_PAGE << PAGE_SHIFT),
+ !is_pv_32bit_vcpu(v) ? &per_cpu(gdt_mfn, cpu)
+ : &per_cpu(compat_gdt_mfn, cpu),
+ 1);
}
static void load_full_gdt(const struct vcpu *v, unsigned int cpu)
#ifndef __ASSEMBLY__
+#include <xen/mm-frame.h>
+
#define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3)
/* Fix up the RPL of a guest segment selector. */
extern seg_desc_t boot_gdt[];
DECLARE_PER_CPU(seg_desc_t *, gdt);
-DECLARE_PER_CPU(l1_pgentry_t, gdt_l1e);
+DECLARE_PER_CPU(mfn_t, gdt_mfn);
extern seg_desc_t boot_compat_gdt[];
DECLARE_PER_CPU(seg_desc_t *, compat_gdt);
-DECLARE_PER_CPU(l1_pgentry_t, compat_gdt_l1e);
+DECLARE_PER_CPU(mfn_t, compat_gdt_mfn);
DECLARE_PER_CPU(bool, full_gdt_loaded);
static inline void lgdt(const struct desc_ptr *gdtr)
int create_perdomain_mapping(struct domain *d, unsigned long va,
unsigned int nr, l1_pgentry_t **pl1tab,
struct page_info **ppg);
+void populate_perdomain_mapping(const struct vcpu *v, unsigned long va,
+ mfn_t *mfn, unsigned long nr);
void destroy_perdomain_mapping(struct domain *d, unsigned long va,
unsigned int nr);
void free_perdomain_mappings(struct domain *d);
return cr3 & X86_CR3_ADDR_MASK;
}
+static inline mfn_t cr3_mfn(unsigned long cr3)
+{
+ return maddr_to_mfn(cr3_pa(cr3));
+}
+
static inline unsigned int cr3_pcid(unsigned long cr3)
{
return IS_ENABLED(CONFIG_PV) ? cr3 & X86_CR3_PCID_MASK : 0;
return rc;
}
+void populate_perdomain_mapping(const struct vcpu *v, unsigned long va,
+ mfn_t *mfn, unsigned long nr)
+{
+ l1_pgentry_t *l1tab = NULL, *pl1e;
+ const l3_pgentry_t *l3tab;
+ const l2_pgentry_t *l2tab;
+ struct domain *d = v->domain;
+
+ ASSERT(va >= PERDOMAIN_VIRT_START &&
+ va < PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS));
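+ /* The range must not cross an L3 boundary, so a single L2 table covers it. */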
+ ASSERT(!nr || !l3_table_offset(va ^ (va + nr * PAGE_SIZE - 1)));
+
+ /* Ensure loaded page-tables are from current (if current != curr_vcpu). */
+ sync_local_execstate();
+
+ /* Use likely() to optimize for the fast path of updating the current vCPU. */
+ if ( likely(v == current) )
+ {
+ unsigned int i;
+
+ /* Fast path: get L1 entries using the recursive linear mappings. */
+ pl1e = &__linear_l1_table[l1_linear_offset(va)];
+
+ for ( i = 0; i < nr; i++, pl1e++ )
+ {
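+ /* Entries that require their page to be freed are unexpected here; release the page anyway to avoid leaking it. */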
+ if ( unlikely(perdomain_l1e_needs_freeing(*pl1e)) )
+ {
+ ASSERT_UNREACHABLE();
+ free_domheap_page(l1e_get_page(*pl1e));
+ }
+ l1e_write(pl1e, l1e_from_mfn(mfn[i], __PAGE_HYPERVISOR_RW));
+ }
+
+ return;
+ }
+
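+ /* Slow path: the vCPU's page tables are not loaded, so walk its per-domain page tables using domain-page mappings. */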
+ ASSERT(d->arch.perdomain_l3_pg);
+ l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
+
+ if ( unlikely(!(l3e_get_flags(l3tab[l3_table_offset(va)]) &
+ _PAGE_PRESENT)) )
+ {
+ unmap_domain_page(l3tab);
+ gprintk(XENLOG_ERR, "unable to map at VA %lx: L3e not present\n", va);
+ ASSERT_UNREACHABLE();
+ domain_crash(d);
+
+ return;
+ }
+
+ l2tab = map_l2t_from_l3e(l3tab[l3_table_offset(va)]);
+
+ for ( ; nr--; va += PAGE_SIZE, mfn++ )
+ {
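+ /* Map (or re-map) the L1 table on the first iteration and when crossing into a new L2 slot. */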
+ if ( !l1tab || !l1_table_offset(va) )
+ {
+ const l2_pgentry_t *pl2e = l2tab + l2_table_offset(va);
+
+ if ( unlikely(!(l2e_get_flags(*pl2e) & _PAGE_PRESENT)) )
+ {
+ gprintk(XENLOG_ERR, "unable to map at VA %lx: L2e not present\n",
+ va);
+ ASSERT_UNREACHABLE();
+ domain_crash(d);
+
+ break;
+ }
+
+ unmap_domain_page(l1tab);
+ l1tab = map_l1t_from_l2e(*pl2e);
+ }
+
+ pl1e = &l1tab[l1_table_offset(va)];
+
+ if ( unlikely(perdomain_l1e_needs_freeing(*pl1e)) )
+ {
+ ASSERT_UNREACHABLE();
+ free_domheap_page(l1e_get_page(*pl1e));
+ }
+
+ l1e_write(pl1e, l1e_from_mfn(*mfn, __PAGE_HYPERVISOR_RW));
+ }
+
+ unmap_domain_page(l1tab);
+ unmap_domain_page(l2tab);
+ unmap_domain_page(l3tab);
+}
+
void destroy_perdomain_mapping(struct domain *d, unsigned long va,
unsigned int nr)
{
if ( gdt == NULL )
goto out;
per_cpu(gdt, cpu) = gdt;
- per_cpu(gdt_l1e, cpu) =
- l1e_from_pfn(virt_to_mfn(gdt), __PAGE_HYPERVISOR_RW);
+ per_cpu(gdt_mfn, cpu) = _mfn(virt_to_mfn(gdt));
memcpy(gdt, boot_gdt, NR_RESERVED_GDT_PAGES * PAGE_SIZE);
BUILD_BUG_ON(NR_CPUS > 0x10000);
gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
per_cpu(compat_gdt, cpu) = gdt = alloc_xenheap_pages(0, memflags);
if ( gdt == NULL )
goto out;
- per_cpu(compat_gdt_l1e, cpu) =
- l1e_from_pfn(virt_to_mfn(gdt), __PAGE_HYPERVISOR_RW);
+ per_cpu(compat_gdt_mfn, cpu) = _mfn(virt_to_mfn(gdt));
memcpy(gdt, boot_compat_gdt, NR_RESERVED_GDT_PAGES * PAGE_SIZE);
gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
#endif
static DEFINE_PER_CPU(unsigned long, last_extable_addr);
DEFINE_PER_CPU_READ_MOSTLY(seg_desc_t *, gdt);
-DEFINE_PER_CPU_READ_MOSTLY(l1_pgentry_t, gdt_l1e);
+DEFINE_PER_CPU_READ_MOSTLY(mfn_t, gdt_mfn);
#ifdef CONFIG_PV32
DEFINE_PER_CPU_READ_MOSTLY(seg_desc_t *, compat_gdt);
-DEFINE_PER_CPU_READ_MOSTLY(l1_pgentry_t, compat_gdt_l1e);
+DEFINE_PER_CPU_READ_MOSTLY(mfn_t, compat_gdt_mfn);
#endif
/* Master table, used by CPU0. */
}
-/* Cache {,compat_}gdt_l1e now that physically relocation is done. */
+/* Cache {,compat_}gdt_mfn now that physical relocation is done. */
- this_cpu(gdt_l1e) =
- l1e_from_pfn(virt_to_mfn(boot_gdt), __PAGE_HYPERVISOR_RW);
+ this_cpu(gdt_mfn) = _mfn(virt_to_mfn(boot_gdt));
if ( IS_ENABLED(CONFIG_PV32) )
- this_cpu(compat_gdt_l1e) =
- l1e_from_pfn(virt_to_mfn(boot_compat_gdt), __PAGE_HYPERVISOR_RW);
+ this_cpu(compat_gdt_mfn) = _mfn(virt_to_mfn(boot_compat_gdt));
percpu_traps_init();