source "common/Kconfig"
source "drivers/Kconfig"
+
+menu "Deprecated Functionality"
+
+config PV_LDT_PAGING
+ bool "PV LDT Paging-out support"
+ depends on PV
+ ---help---
+ For a very long time, the PV ABI has included the ability to page
+ out the LDT by transitioning its mapping to not-present. This
+ functionality is believed to exist only for the PV Windows XP port,
+ which never came to anything.
+
+ The implementation contains a vCPU scalability limitation in a
+ position which is prohibitively complicated to resolve. As the
+ feature is believed to be unused in practice, removing it is the
+ easiest remediation.
+
+ If you discover a use case which is broken by this option being off,
+ please contact xen-devel@lists.xenproject.org urgently. Barring
+ something unexpected, the code and this option will be deleted 2
+ releases after Xen 4.12.
+
+endmenu
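
For context, the ABI behaviour described in the help text above is reached via the
guest-side MMUEXT_SET_LDT operation. The sketch below is illustrative only, assuming
the Xen public headers (public/xen.h, included here via a Linux-style path) and a
Linux-style HYPERVISOR_mmuext_op() wrapper:

#include <xen/interface/xen.h>  /* struct mmuext_op, MMUEXT_SET_LDT, DOMID_SELF */

/* Point Xen at a page-aligned LDT containing nr_ents descriptors. */
static int example_set_ldt(const void *ldt, unsigned int nr_ents)
{
    struct mmuext_op op = {
        .cmd              = MMUEXT_SET_LDT,
        .arg1.linear_addr = (unsigned long)ldt,
        .arg2.nr_ents     = nr_ents,
    };

    /*
     * With PV_LDT_PAGING, a guest could later make these linear addresses
     * not-present and rely on Xen re-mapping the LDT lazily on the
     * resulting fault; without it, the mapping must remain present while
     * the LDT is in use.
     */
    return HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF);
}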
{
for_each_vcpu ( d, v )
{
- /*
- * Relinquish GDT mappings. No need for explicit unmapping of
- * the LDT as it automatically gets squashed with the guest
- * mappings.
- */
+ /* Relinquish GDT/LDT mappings. */
+ pv_destroy_ldt(v);
pv_destroy_gdt(v);
}
}
unsigned long pfn = l1e_get_pfn(l1e);
struct page_info *page;
struct domain *pg_owner;
- struct vcpu *v;
if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || is_iomem_page(_mfn(pfn)) )
return;
}
else
{
+#ifdef CONFIG_PV_LDT_PAGING
/* We expect this is rare so we blow the entire shadow LDT. */
if ( unlikely(((page->u.inuse.type_info & PGT_type_mask) ==
PGT_seg_desc_page)) &&
unlikely(((page->u.inuse.type_info & PGT_count_mask) != 0)) &&
(l1e_owner == pg_owner) )
{
+ struct vcpu *v;
cpumask_t *mask = this_cpu(scratch_cpumask);
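+ /*
+  * Accumulate the pCPUs which may still hold stale shadow LDT
+  * mappings, so that a single TLB flush below covers them all.
+  */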
cpumask_clear(mask);
if ( !cpumask_empty(mask) )
flush_tlb_mask(mask);
}
+#endif /* CONFIG_PV_LDT_PAGING */
put_page(page);
}
}
ASSERT(!in_irq());
+#ifdef CONFIG_PV_LDT_PAGING
spin_lock(&v->arch.pv.shadow_ldt_lock);
if ( v->arch.pv.shadow_ldt_mapcnt == 0 )
goto out;
+#else
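+ /* All callers operate on the current vCPU, or one which is not running. */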
+ ASSERT(v == current || !vcpu_cpu_dirty(v));
+#endif
pl1e = pv_ldt_ptes(v);
put_page_and_type(page);
}
+#ifdef CONFIG_PV_LDT_PAGING
ASSERT(v->arch.pv.shadow_ldt_mapcnt == mappings_dropped);
v->arch.pv.shadow_ldt_mapcnt = 0;
out:
spin_unlock(&v->arch.pv.shadow_ldt_lock);
+#endif
return mappings_dropped;
}
l1_pgentry_t zero_l1e = l1e_from_mfn(zero_mfn, __PAGE_HYPERVISOR_RO);
unsigned int i;
+ ASSERT(v == current || !vcpu_cpu_dirty(v));
+
v->arch.pv.gdt_ents = 0;
for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ )
{
l1_pgentry_t *pl1e;
unsigned int i, nr_frames = DIV_ROUND_UP(entries, 512);
+ ASSERT(v == current || !vcpu_cpu_dirty(v));
+
if ( entries > FIRST_RESERVED_GDT_ENTRY )
return -EINVAL;
ASSERT(!is_idle_domain(d));
+#ifdef CONFIG_PV_LDT_PAGING
spin_lock_init(&v->arch.pv.shadow_ldt_lock);
+#endif
rc = pv_create_gdt_ldt_l1tab(v);
if ( rc )
pl1e = &pv_ldt_ptes(curr)[offset >> PAGE_SHIFT];
l1e_add_flags(gl1e, _PAGE_RW);
+#ifdef CONFIG_PV_LDT_PAGING
spin_lock(&curr->arch.pv.shadow_ldt_lock);
+#endif
+
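+ /* Install the guest's LDT frame into the shadow LDT slot. */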
l1e_write(pl1e, gl1e);
+
+#ifdef CONFIG_PV_LDT_PAGING
curr->arch.pv.shadow_ldt_mapcnt++;
spin_unlock(&curr->arch.pv.shadow_ldt_lock);
+#endif
return true;
}
unsigned int iopl; /* Current IOPL for this VCPU, shifted left by
* 12 to match the eflags register. */
+#ifdef CONFIG_PV_LDT_PAGING
/* Current LDT details. */
unsigned long shadow_ldt_mapcnt;
spinlock_t shadow_ldt_lock;
+#endif
/* data breakpoint extension MSRs */
uint32_t dr_mask[4];