l2_pgentry_t l2e, *l2t;
l1_pgentry_t l1e, *l1t;
unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
- unsigned long mfn = cr3 >> PAGE_SHIFT;
+ unsigned long mfn = paddr_to_pfn(cr3_pa(cr3));
DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id,
cr3, pgd3val);
if ( (v = idle_vcpu[smp_processor_id()]) == current )
sync_local_execstate();
/* We must now be running on the idle page table. */
- ASSERT(read_cr3() == __pa(idle_pg_table));
+ ASSERT(cr3_pa(read_cr3()) == __pa(idle_pg_table));
}
return v;
#define X86_CR0_CD 0x40000000 /* Cache Disable (RW) */
#define X86_CR0_PG 0x80000000 /* Paging (RW) */
+/*
+ * Intel CPU flags in CR3
+ */
+#define X86_CR3_NOFLUSH (_AC(1, ULL) << 63)
+#define X86_CR3_ADDR_MASK (PAGE_MASK & PADDR_MASK)
+#define X86_CR3_PCID_MASK _AC(0x0fff, ULL) /* Mask for PCID */
+
/*
* Intel CPU features in CR4
*/
asm volatile ( "mov %0, %%cr3" : : "r" (val) : "memory" );
}
+/*
+ * Extract the page-aligned physical address of the top-level page table
+ * from a raw CR3 value, masking off the control bits that may share the
+ * register: the PCID in bits 0-11 and the NOFLUSH hint in bit 63.
+ */
+static inline unsigned long cr3_pa(unsigned long cr3)
+{
+    return cr3 & X86_CR3_ADDR_MASK;
+}
+
+/*
+ * Extract the PCID (Process Context IDentifier) from a raw CR3 value,
+ * i.e. the low 12 bits selected by X86_CR3_PCID_MASK.
+ */
+static inline unsigned long cr3_pcid(unsigned long cr3)
+{
+    return cr3 & X86_CR3_PCID_MASK;
+}
+
static inline unsigned long read_cr4(void)
{
return get_cpu_info()->cr4;