printk(" paging assistance: ");
if ( paging_mode_shadow(d) )
printk("shadow ");
+    if ( paging_mode_sh_forced(d) )
+        printk("forced ");
if ( paging_mode_hap(d) )
printk("hap ");
if ( paging_mode_refcounts(d) )
ASSERT(paging_locked_by_me(d));
ASSERT(d != current->domain);
+    /*
+     * If PG_SH_forced has previously been activated because of writing an
+     * L1TF-vulnerable PTE, it must remain active for the remaining lifetime
+     * of the domain, even if the logdirty mode needs to be controlled for
+     * migration purposes.
+     */
+    if ( paging_mode_sh_forced(d) )
+        new_mode |= PG_SH_forced | PG_SH_enable;
+
d->arch.paging.mode = new_mode;
for_each_vcpu(d, v)
sh_update_paging_modes(v);
#endif /* Shadow audit */
+#ifdef CONFIG_PV
+
+void pv_l1tf_tasklet(unsigned long data)
+{
+    struct domain *d = (void *)data;
+
+    domain_pause(d);
+    paging_lock(d);
+
+    if ( !paging_mode_sh_forced(d) && !d->is_dying )
+    {
+        int ret = shadow_one_bit_enable(d, PG_SH_forced);
+
+        if ( ret )
+        {
+            printk(XENLOG_G_ERR "d%d Failed to enable PG_SH_forced: %d\n",
+                   d->domain_id, ret);
+            domain_crash(d);
+        }
+    }
+
+    paging_unlock(d);
+    domain_unpause(d);
+}
+
+#endif /* CONFIG_PV */
+
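/*
 * Illustration only, not part of the patch: the switch into shadow mode is
 * deliberately deferred to this tasklet rather than performed by the vCPU
 * which wrote the offending PTE, because sh_new_mode() asserts
 * d != current->domain and the target domain has to be paused across the
 * mode change.  Under that assumption, a pagetable-write path which spots a
 * vulnerable PTE is expected to do no more than
 *
 *     tasklet_schedule(&d->arch.paging.shadow.pv_l1tf_tasklet);
 *
 * and leave the heavy lifting to the tasklet above (a fuller sketch of such
 * a check is given alongside the pv_l1tf_* helpers in asm/shadow.h below).
 */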
/*
* Local variables:
* mode: C
#include <asm/invpcid.h>
#include <asm/spec_ctrl.h>
#include <asm/pv/domain.h>
+#include <asm/shadow.h>
static __read_mostly enum {
PCID_OFF,
void pv_domain_destroy(struct domain *d)
{
+    pv_l1tf_domain_destroy(d);
+
destroy_perdomain_mapping(d, GDT_LDT_VIRT_START,
GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
};
int rc = -ENOMEM;
+    pv_l1tf_domain_init(d);
+
d->arch.pv_domain.gdt_ldt_l1tab =
alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
if ( !d->arch.pv_domain.gdt_ldt_l1tab )
spin_lock_irqsave(&tasklet_lock, flags);
+    /* Cope with uninitialised tasklets. */
+    if ( list_head_is_null(&t->list) )
+        goto unlock;
+
if ( !list_empty(&t->list) )
{
BUG_ON(t->is_dead || t->is_running || (t->scheduled_on < 0));
spin_lock_irqsave(&tasklet_lock, flags);
}
+ unlock:
spin_unlock_irqrestore(&tasklet_lock, flags);
}
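/*
 * Illustration only, not part of the patch: the guard matters because the
 * pv_l1tf tasklet is embedded in a zero-initialised structure, so a domain
 * torn down before tasklet_init() has ever run reaches tasklet_kill() with a
 * { NULL, NULL } list head rather than one pointing at itself.  A minimal
 * sketch of the two states the function now has to cope with (dummy_fn and
 * tasklet_kill_example are hypothetical names used only here):
 */
static void dummy_fn(unsigned long data)
{
    /* Never scheduled, never runs. */
}

static void tasklet_kill_example(void)
{
    static struct tasklet t;       /* zeroed, as an xzalloc()ed field would be */

    tasklet_kill(&t);              /* safe: list_head_is_null() goes straight to unlock */

    tasklet_init(&t, dummy_fn, 0); /* list head now points at itself */
    tasklet_kill(&t);              /* normal teardown path via list_empty() */
}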
/* Has this domain ever used HVMOP_pagetable_dying? */
bool_t pagetable_dying_op;
+
+#ifdef CONFIG_PV
+    /* PV L1 Terminal Fault mitigation. */
+    struct tasklet pv_l1tf_tasklet;
+#endif /* CONFIG_PV */
#endif
};
bool xpti;
/* Use PCID feature? */
bool pcid;
+    /* Mitigate L1TF with shadow/crashing? */
+    bool check_l1tf;
/* map_domain_page() mapping cache. */
struct mapcache_domain mapcache;
#define PG_SH_shift 20
#define PG_HAP_shift 21
+#define PG_SHF_shift 22
/* We're in one of the shadow modes */
#ifdef CONFIG_SHADOW_PAGING
#define PG_SH_enable (1U << PG_SH_shift)
+#define PG_SH_forced (1U << PG_SHF_shift)
#else
#define PG_SH_enable 0
+#define PG_SH_forced 0
#endif
#define PG_HAP_enable (1U << PG_HAP_shift)
#define paging_mode_enabled(_d) (!!(_d)->arch.paging.mode)
#define paging_mode_shadow(_d) (!!((_d)->arch.paging.mode & PG_SH_enable))
+#define paging_mode_sh_forced(_d) (!!((_d)->arch.paging.mode & PG_SH_forced))
#define paging_mode_hap(_d) (!!((_d)->arch.paging.mode & PG_HAP_enable))
#define paging_mode_refcounts(_d) (!!((_d)->arch.paging.mode & PG_refcounts))
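/*
 * Illustration only, not part of the patch: PG_SH_forced is only ever set
 * together with PG_SH_enable (see the sh_new_mode() hunk and
 * pv_l1tf_tasklet() above), so once a PV domain has been forced, both
 * predicates hold at the same time.  The helper name below is hypothetical.
 */
static inline void paging_mode_forced_example(const struct domain *d)
{
    if ( paging_mode_sh_forced(d) )
        /* A forced domain is always also a shadow domain. */
        ASSERT(paging_mode_shadow(d));
}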
#include <asm/flushtlb.h>
#include <asm/paging.h>
#include <asm/p2m.h>
+#include <asm/spec_ctrl.h>
/*****************************************************************************
* Macros to tell which shadow paging mode a domain is in*/
#endif /* CONFIG_SHADOW_PAGING */
+/*
+ * Mitigations for L1TF / CVE-2018-3620 for PV guests.
+ *
+ * We cannot alter an architecturally-legitimate PTE which a PV guest has
+ * chosen to write, as traditional paged-out metadata is L1TF-vulnerable.
+ * What we can do is force a PV guest which writes a vulnerable PTE into
+ * shadow mode, so Xen controls the pagetables which are reachable by the CPU
+ * pagewalk.
+ */
+
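/*
 * Illustrative sketch, not part of the patch: this excerpt omits the actual
 * PTE inspection, so the helper below only shows the expected shape of a
 * check a PV pagetable-update path might perform.  Its name and the
 * simplified "not present but address bits populated" test are assumptions;
 * only check_l1tf, paging_mode_sh_forced() and the tasklet come from the
 * patch itself.
 */
static inline bool pv_l1tf_check_pte_sketch(struct domain *d, intpte_t pte)
{
    ASSERT(is_pv_domain(d));

    if ( d->arch.pv_domain.check_l1tf && !paging_mode_sh_forced(d) &&
         !(pte & _PAGE_PRESENT) && (pte & (PADDR_MASK & PAGE_MASK)) )
    {
        /* Defer the switch into shadow mode; see pv_l1tf_tasklet(). */
        tasklet_schedule(&d->arch.paging.shadow.pv_l1tf_tasklet);

        return true;
    }

    return false;
}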
+void pv_l1tf_tasklet(unsigned long data);
+
+static inline void pv_l1tf_domain_init(struct domain *d)
+{
+    d->arch.pv_domain.check_l1tf =
+        opt_pv_l1tf & (is_hardware_domain(d)
+                       ? OPT_PV_L1TF_DOM0 : OPT_PV_L1TF_DOMU);
+
+#if defined(CONFIG_SHADOW_PAGING) && defined(CONFIG_PV)
+    tasklet_init(&d->arch.paging.shadow.pv_l1tf_tasklet,
+                 pv_l1tf_tasklet, (unsigned long)d);
+#endif
+}
+
+static inline void pv_l1tf_domain_destroy(struct domain *d)
+{
+#if defined(CONFIG_SHADOW_PAGING) && defined(CONFIG_PV)
+    tasklet_kill(&d->arch.paging.shadow.pv_l1tf_tasklet);
+#endif
+}
+
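/*
 * Note: opt_pv_l1tf and the OPT_PV_L1TF_* flags used by
 * pv_l1tf_domain_init() are not part of this excerpt; they are assumed to be
 * provided by asm/spec_ctrl.h (included above).  A minimal sketch of what
 * such declarations might look like, purely for illustration:
 *
 *     #define OPT_PV_L1TF_DOM0  0x01
 *     #define OPT_PV_L1TF_DOMU  0x02
 *     extern uint8_t opt_pv_l1tf;
 *
 * i.e. a bitmap selecting whether the hardware domain and/or ordinary PV
 * guests have the mitigation applied.
 */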
/* Remove all shadows of the guest mfn. */
static inline void shadow_remove_all_shadows(struct domain *d, mfn_t gmfn)
{
list->prev = list;
}
+static inline bool list_head_is_null(const struct list_head *list)
+{
+    return !list->next && !list->prev;
+}
+
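/*
 * Illustration only, not part of the patch: unlike list_empty(), which is
 * true for a head initialised to point at itself, list_head_is_null()
 * identifies a head that has never been initialised at all (e.g. a zeroed
 * structure member).  The example function name is hypothetical.
 */
static inline void list_head_is_null_example(void)
{
    struct list_head zeroed = { NULL, NULL };
    LIST_HEAD(inited);

    ASSERT(list_head_is_null(&zeroed) && !list_empty(&zeroed));
    ASSERT(!list_head_is_null(&inited) && list_empty(&inited));
}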
/*
* Insert a new entry between two known consecutive entries.
*