#include <xen/config.h>
#include <xen/sched.h>
+#include <xen/smp.h>
#include <xen/softirq.h>
#include <asm/flushtlb.h>
#include <asm/page.h>
local_irq_restore(irqfl);
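+ /* The guest L4 is re-copied into the root page table on the next entry to guest context. */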
+ if ( flags & FLUSH_ROOT_PGTBL )
+ get_cpu_info()->root_pgt_changed = 1;
+
return flags;
}
void write_ptbase(struct vcpu *v)
{
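+ /* Switching CR3: the root page table needs re-copying from the new L4. */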
+ get_cpu_info()->root_pgt_changed = 1;
write_cr3(v->arch.cr3);
}
case PGT_l4_page_table:
rc = mod_l4_entry(va, l4e_from_intpte(req.val), mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
- /*
- * No need to sync if all uses of the page can be accounted
- * to the page lock we hold, its pinned status, and uses on
- * this (v)CPU.
- */
- if ( !rc && !cpu_has_no_xpti &&
- ((page->u.inuse.type_info & PGT_count_mask) >
- (1 + !!(page->u.inuse.type_info & PGT_pinned) +
- (pagetable_get_pfn(curr->arch.guest_table) == mfn) +
- (pagetable_get_pfn(curr->arch.guest_table_user) ==
- mfn))) )
- sync_guest = 1;
+ if ( !rc && !cpu_has_no_xpti )
+ {
+ bool_t local_in_use = 0;
+
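+ /*
+ * If the modified L4 is the current vCPU's kernel page table, the
+ * local per-CPU root page table copy needs refreshing as well.
+ */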
+ if ( pagetable_get_pfn(curr->arch.guest_table) == mfn )
+ {
+ local_in_use = 1;
+ get_cpu_info()->root_pgt_changed = 1;
+ }
+
+ /*
+ * No need to sync if all uses of the page can be
+ * accounted to the page lock we hold, its pinned
+ * status, and uses on this (v)CPU.
+ */
+ if ( (page->u.inuse.type_info & PGT_count_mask) >
+ (1 + !!(page->u.inuse.type_info & PGT_pinned) +
+ (pagetable_get_pfn(curr->arch.guest_table_user) ==
+ mfn) + local_in_use) )
+ sync_guest = 1;
+ }
break;
case PGT_writable_page:
perfc_incr(writable_mmu_updates);
* Force other vCPU-s of the affected guest to pick up L4 entry
* changes (if any).
*/
- flush_mask(pt_owner->domain_dirty_cpumask, FLUSH_TLB_GLOBAL);
+ flush_mask(pt_owner->domain_dirty_cpumask,
+ FLUSH_TLB_GLOBAL | FLUSH_ROOT_PGTBL);
}
perfc_add(num_page_updates, i);
/* Write the new entry */
shadow_write_entries(sl4e, &new_sl4e, 1, sl4mfn);
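+ /* Have all CPUs running the domain refresh their root page table copies. */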
+ flush_root_pgtbl_domain(d);
+
flags |= SHADOW_SET_CHANGED;
if ( shadow_l4e_get_flags(old_sl4e) & _PAGE_PRESENT )
}
sh_put_ref(d, osl3mfn, paddr);
}
+
return flags;
}
ack_APIC_irq();
perfc_incr(ipis);
if ( __sync_local_execstate() )
- flags &= ~(FLUSH_TLB | FLUSH_TLB_GLOBAL);
+ flags &= ~(FLUSH_TLB | FLUSH_TLB_GLOBAL | FLUSH_ROOT_PGTBL);
flush_area_local(flush_va, flags);
cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
}
OFFSET(CPUINFO_shadow_spec_ctrl, struct cpu_info, shadow_spec_ctrl);
OFFSET(CPUINFO_xen_spec_ctrl, struct cpu_info, xen_spec_ctrl);
OFFSET(CPUINFO_spec_ctrl_flags, struct cpu_info, spec_ctrl_flags);
+ OFFSET(CPUINFO_root_pgt_changed, struct cpu_info, root_pgt_changed);
DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
BLANK();
mov VCPU_cr3(%rbx), %r9
GET_STACK_END(dx)
mov STACK_CPUINFO_FIELD(pv_cr3)(%rdx), %rdi
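+ /* A zero pv_cr3 means no per-CPU root page table is in use: keep CR3 as is. */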
+ test %rdi, %rdi
+ jz .Lrag_keep_cr3
+ mov %rdi, %rax
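+ /* Only re-do the copy below if the guest L4 changed since the last one. */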
+ cmpb $0, STACK_CPUINFO_FIELD(root_pgt_changed)(%rdx)
+ je .Lrag_copy_done
+ movb $0, STACK_CPUINFO_FIELD(root_pgt_changed)(%rdx)
movabs $PADDR_MASK & PAGE_MASK, %rsi
movabs $DIRECTMAP_VIRT_START, %rcx
- mov %rdi, %rax
and %rsi, %rdi
- jz .Lrag_keep_cr3
and %r9, %rsi
add %rcx, %rdi
add %rcx, %rsi
sub $(ROOT_PAGETABLE_FIRST_XEN_SLOT - \
ROOT_PAGETABLE_LAST_XEN_SLOT - 1) * 8, %rdi
rep movsq
+.Lrag_copy_done:
mov STACK_CPUINFO_FIELD(cr4)(%rdx), %rdi
mov %r9, STACK_CPUINFO_FIELD(xen_cr3)(%rdx)
mov %rdi, %rsi
v->arch.flags ^= TF_kernel_mode;
update_cr3(v);
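+ /* Switching between kernel and user page tables changes the active L4. */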
+ get_cpu_info()->root_pgt_changed = 1;
+
/* Don't flush user global mappings from the TLB. Don't tick TLB clock. */
asm volatile ( "mov %0, %%cr3" : : "r" (v->arch.cr3) : "memory" );
uint8_t xen_spec_ctrl;
uint8_t spec_ctrl_flags;
+ /*
+ * The following field controls copying of the L4 page table of 64-bit
+ * PV guests to the per-cpu root page table on entering the guest context.
+ * If set, the L4 page table will be copied to the root page table and
+ * the field will be reset.
+ */
+ bool_t root_pgt_changed;
+
unsigned long __pad;
/* get_stack_bottom() must be 16-byte aligned */
};
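For illustration only, not part of the patch: root_pgt_changed implements a simple set/consume protocol. The sketch below is a rough C rendering of what the assembly added to restore_all_guest does with the flag; the helper name sync_root_pgt_on_guest_entry is made up, and exactly which slots the real copy includes is simplified here.

#include <xen/config.h>
#include <xen/string.h>      /* memcpy() */
#include <asm/current.h>     /* struct cpu_info, get_cpu_info() */
#include <asm/page.h>        /* root_pgentry_t, ROOT_PAGETABLE_* */

/* Hypothetical C rendering of the copy done on the exit-to-guest path. */
static void sync_root_pgt_on_guest_entry(root_pgentry_t *root_pgt,
                                         const root_pgentry_t *guest_l4)
{
    struct cpu_info *info = get_cpu_info();

    if ( !info->root_pgt_changed )
        return;                    /* L4 unchanged since the last copy */

    info->root_pgt_changed = 0;

    /* Guest-controlled slots below Xen's private slots ... */
    memcpy(root_pgt, guest_l4,
           ROOT_PAGETABLE_FIRST_XEN_SLOT * sizeof(*root_pgt));
    /* ... and above them. */
    memcpy(root_pgt + ROOT_PAGETABLE_LAST_XEN_SLOT + 1,
           guest_l4 + ROOT_PAGETABLE_LAST_XEN_SLOT + 1,
           (ROOT_PAGETABLE_ENTRIES - ROOT_PAGETABLE_LAST_XEN_SLOT - 1) *
           sizeof(*root_pgt));
}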
#define FLUSH_CACHE 0x400
/* VA for the flush has a valid mapping */
#define FLUSH_VA_VALID 0x800
+ /* Flush the per-cpu root page table */
+#define FLUSH_ROOT_PGTBL 0x2000
/* Flush local TLBs/caches. */
unsigned int flush_area_local(const void *va, unsigned int flags);
#define flush_tlb_one_all(v) \
flush_tlb_one_mask(&cpu_online_map, v)
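+/*
+ * Make every CPU the domain is running on refresh its root page table
+ * (only meaningful for 64-bit PV guests with XPTI in use).
+ */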
+#define flush_root_pgtbl_domain(d) \
+{ \
+ if ( !cpu_has_no_xpti && is_pv_domain(d) && !is_pv_32bit_domain(d) ) \
+ flush_mask((d)->domain_dirty_cpumask, FLUSH_ROOT_PGTBL); \
+}
+
static inline void flush_page_to_ram(unsigned long mfn) {}
static inline int invalidate_dcache_va_range(const void *p,
unsigned long size)