xenbits.xensource.com Git - people/dariof/xen.git/commitdiff
x86/pv: Delete CONFIG_PV_LDT_PAGING
authorAndrew Cooper <andrew.cooper3@citrix.com>
Fri, 17 Apr 2020 11:31:13 +0000 (12:31 +0100)
committerAndrew Cooper <andrew.cooper3@citrix.com>
Fri, 17 Apr 2020 17:45:19 +0000 (18:45 +0100)
... in accordance with the timeline laid out in the Kconfig message.  There
has been no comment since it was disabled by default.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wl@xen.org>
xen/arch/x86/Kconfig
xen/arch/x86/mm.c
xen/arch/x86/pv/descriptor-tables.c
xen/arch/x86/pv/domain.c
xen/arch/x86/pv/mm.c
xen/include/asm-x86/domain.h

index 8149362bdef3a153e80ae62c1bdef974801fe031..a69be983d6f3aa02dbec60754a2c71b2b441850e 100644 (file)
@@ -225,26 +225,3 @@ endmenu
 source "common/Kconfig"
 
 source "drivers/Kconfig"
-
-menu "Deprecated Functionality"
-
-config PV_LDT_PAGING
-       bool "PV LDT Paging-out support"
-       depends on PV
-       ---help---
-         For a very long time, the PV ABI has included the ability to page
-         out the LDT by transitioning its mapping to not-present.  This
-         functionality is believed to only exist for the PV Windows XP port
-         which never came to anything.
-
-         The implementation contains a vCPU scalability limitation in a
-         position which is prohibitively complicated to resolve.  As the
-         feature is believed to be unused in practice, removing the feature
-         is the easiest remediation.
-
-         If you discover a usecase which is broken by this option being off,
-         please contact xen-devel@lists.xenproject.org urgently.  Baring
-         something unexpected, the code and this option will be deleted 2
-         releases after Xen 4.12.
-
-endmenu
index fb53d62abc029ecb07ac303a521b80ca5adfc50f..ee56e053e14d8a3821e2882ee1aede2bbcd31f7e 100644 (file)
@@ -1251,40 +1251,9 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner)
      */
     if ( (l1e_get_flags(l1e) & _PAGE_RW) &&
          ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) )
-    {
         put_page_and_type(page);
-    }
     else
-    {
-#ifdef CONFIG_PV_LDT_PAGING
-        /* We expect this is rare so we blow the entire shadow LDT. */
-        if ( unlikely(((page->u.inuse.type_info & PGT_type_mask) ==
-                       PGT_seg_desc_page)) &&
-             unlikely(((page->u.inuse.type_info & PGT_count_mask) != 0)) &&
-             (l1e_owner == pg_owner) )
-        {
-            struct vcpu *v;
-            cpumask_t *mask = this_cpu(scratch_cpumask);
-
-            cpumask_clear(mask);
-
-            for_each_vcpu ( pg_owner, v )
-            {
-                unsigned int cpu;
-
-                if ( !pv_destroy_ldt(v) )
-                    continue;
-                cpu = read_atomic(&v->dirty_cpu);
-                if ( is_vcpu_dirty_cpu(cpu) )
-                    __cpumask_set_cpu(cpu, mask);
-            }
-
-            if ( !cpumask_empty(mask) )
-                flush_tlb_mask(mask);
-        }
-#endif /* CONFIG_PV_LDT_PAGING */
         put_page(page);
-    }
 }
 
 #ifdef CONFIG_PV
index 940804b18a04f6cd5d08b795113bdbe109a0f9e3..090f901b5b9e369ffb49c17739ba450f354a1467 100644 (file)
@@ -37,14 +37,7 @@ bool pv_destroy_ldt(struct vcpu *v)
 
     ASSERT(!in_irq());
 
-#ifdef CONFIG_PV_LDT_PAGING
-    spin_lock(&v->arch.pv.shadow_ldt_lock);
-
-    if ( v->arch.pv.shadow_ldt_mapcnt == 0 )
-        goto out;
-#else
     ASSERT(v == current || !vcpu_cpu_dirty(v));
-#endif
 
     pl1e = pv_ldt_ptes(v);
 
@@ -62,14 +55,6 @@ bool pv_destroy_ldt(struct vcpu *v)
         put_page_and_type(page);
     }
 
-#ifdef CONFIG_PV_LDT_PAGING
-    ASSERT(v->arch.pv.shadow_ldt_mapcnt == mappings_dropped);
-    v->arch.pv.shadow_ldt_mapcnt = 0;
-
- out:
-    spin_unlock(&v->arch.pv.shadow_ldt_lock);
-#endif
-
     return mappings_dropped;
 }
 
index 70fae43965b831712cc580c0dbf4d6cdb1e128bf..43da5c179f7a2e33095dee99fc9becddfee15187 100644 (file)
@@ -243,10 +243,6 @@ int pv_vcpu_initialise(struct vcpu *v)
 
     ASSERT(!is_idle_domain(d));
 
-#ifdef CONFIG_PV_LDT_PAGING
-    spin_lock_init(&v->arch.pv.shadow_ldt_lock);
-#endif
-
     rc = pv_create_gdt_ldt_l1tab(v);
     if ( rc )
         return rc;
index 2b0dadc8dade64d246696494063b5477787a5d29..5d4cd009410b800f1ca96e6e076010bd179dcaf7 100644 (file)
@@ -123,17 +123,8 @@ bool pv_map_ldt_shadow_page(unsigned int offset)
     pl1e = &pv_ldt_ptes(curr)[offset >> PAGE_SHIFT];
     l1e_add_flags(gl1e, _PAGE_RW);
 
-#ifdef CONFIG_PV_LDT_PAGING
-    spin_lock(&curr->arch.pv.shadow_ldt_lock);
-#endif
-
     l1e_write(pl1e, gl1e);
 
-#ifdef CONFIG_PV_LDT_PAGING
-    curr->arch.pv.shadow_ldt_mapcnt++;
-    spin_unlock(&curr->arch.pv.shadow_ldt_lock);
-#endif
-
     return true;
 }
 
index 4192c636b162da36e40c9a0ef0d2ad3423dc81b4..554b8dddcc92521436668cce81b936863f8d0c1e 100644 (file)
@@ -520,12 +520,6 @@ struct pv_vcpu
     unsigned int iopl;        /* Current IOPL for this VCPU, shifted left by
                                * 12 to match the eflags register. */
 
-#ifdef CONFIG_PV_LDT_PAGING
-    /* Current LDT details. */
-    unsigned long shadow_ldt_mapcnt;
-    spinlock_t shadow_ldt_lock;
-#endif
-
     /*
      * %dr7 bits the guest has set, but aren't loaded into hardware, and are
      * completely emulated.