#include <asm/apic.h>
#include <asm/io.h>
#include <asm/mpspec.h>
+#include <asm/processor.h>
#include <mach_apic.h>
#include <mach_mpparse.h>
pmprintk(XENLOG_DEBUG, "Back to C.");
+ /* Restore CR4 and EFER from cached values. */
+ write_cr4(read_cr4());
+ write_efer(read_efer());
+
device_power_up();
pmprintk(XENLOG_INFO, "Finishing wakeup from ACPI S%d state.", state);
#endif
DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
-DEFINE_PER_CPU(__u64, efer);
+DEFINE_PER_CPU(u64, efer);
+DEFINE_PER_CPU(unsigned long, cr4);
static void unmap_vcpu_info(struct vcpu *v);
v->arch.schedule_tail = continue_idle_domain;
v->arch.cr3 = __pa(idle_pg_table);
}
+
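+ /* Default the vcpu's CR4 to Xen's value; arch_set_info_guest() may refine it. */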
+ v->arch.guest_context.ctrlreg[4] = mmu_cr4_features;
}
v->arch.perdomain_ptes =
free_xenheap_page(d->shared_info);
}
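+/*
+ * CR4 bits a PV guest may control: TSD, plus DE when the CPU supports it.
+ * All other bits are forced to Xen's current CR4 value. PGE and PSE are
+ * excluded from the warning check because Xen sets them unconditionally.
+ */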
+unsigned long pv_guest_cr4_fixup(unsigned long guest_cr4)
+{
+ unsigned long hv_cr4 = read_cr4(), hv_cr4_mask = ~X86_CR4_TSD;
+ if ( cpu_has_de )
+ hv_cr4_mask &= ~X86_CR4_DE;
+
+ if ( (guest_cr4 & hv_cr4_mask) !=
+ (hv_cr4 & hv_cr4_mask & ~(X86_CR4_PGE|X86_CR4_PSE)) )
+ gdprintk(XENLOG_WARNING,
+ "Attempt to change CR4 flags %08lx -> %08lx\n",
+ hv_cr4 & ~(X86_CR4_PGE|X86_CR4_PSE), guest_cr4);
+
+ return (hv_cr4 & hv_cr4_mask) | (guest_cr4 & ~hv_cr4_mask);
+}
+
/* This is called by arch_final_setup_guest and do_boot_vcpu */
int arch_set_info_guest(
struct vcpu *v, vcpu_guest_context_u c)
{
struct domain *d = v->domain;
unsigned long cr3_pfn = INVALID_MFN;
- unsigned long flags;
+ unsigned long flags, cr4;
int i, rc = 0, compat;
/* The context is a compat-mode one if the target domain is compat-mode;
/* Ensure real hardware interrupts are enabled. */
v->arch.guest_context.user_regs.eflags |= EF_IE;
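+ /* A zero CR4 means "no value supplied": fall back to Xen's default CR4. */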
+ cr4 = v->arch.guest_context.ctrlreg[4];
+ v->arch.guest_context.ctrlreg[4] =
+ (cr4 == 0) ? mmu_cr4_features : pv_guest_cr4_fixup(cr4);
+
if ( v->is_initialised )
goto out;
{
set_int80_direct_trap(v);
switch_kernel_stack(v);
+
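+ /* Install the guest's (fixed-up) CR4 if it differs from the cached value. */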
+ if ( unlikely(read_cr4() != v->arch.guest_context.ctrlreg[4]) )
+ write_cr4(v->arch.guest_context.ctrlreg[4]);
}
#define loaddebug(_v,_reg) \
hvm_flush_guest_tlbs();
#ifdef USER_MAPPINGS_ARE_GLOBAL
- __pge_off();
- asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
- __pge_on();
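+ /* The CR3 reload flushes non-global entries; toggling CR4.PGE flushes global ones too. */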
+ {
+ unsigned long cr4 = read_cr4();
+ write_cr4(cr4 & ~X86_CR4_PGE);
+ asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
+ write_cr4(cr4);
+ }
#else
asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
#endif
hvm_flush_guest_tlbs();
#ifndef USER_MAPPINGS_ARE_GLOBAL
- if ( !(flags & FLUSH_TLB_GLOBAL) ||
- !(mmu_cr4_features & X86_CR4_PGE) )
+ if ( !(flags & FLUSH_TLB_GLOBAL) || !(read_cr4() & X86_CR4_PGE) )
{
asm volatile ( "mov %0, %%cr3"
: : "r" (read_cr3()) : "memory" );
else
#endif
{
- __pge_off();
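+ /* Toggling CR4.PGE off and on flushes the entire TLB, including global entries. */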
+ unsigned long cr4 = read_cr4();
+ write_cr4(cr4 & ~X86_CR4_PGE);
barrier();
- __pge_on();
+ write_cr4(cr4);
}
post_flush(t);
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <xen/config.h>
+#include <xen/errno.h>
#include <asm/iommu.h>
#include <asm/amd-iommu.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <xen/config.h>
+#include <xen/errno.h>
#include <asm/amd-iommu.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
#include <asm-x86/fixmap.h>
ASSERT((intack.source == hvm_intsrc_pic) ||
(intack.source == hvm_intsrc_lapic));
- if ( irq_masked(guest_cpu_user_regs()->eflags) )
+ if ( !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
return hvm_intblk_rflags_ie;
if ( (intack.source == hvm_intsrc_lapic) &&
/* Host control registers. */
__vmwrite(HOST_CR0, read_cr0() | X86_CR0_TS);
- __vmwrite(HOST_CR4, read_cr4());
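+ /* HOST_CR4 is always the canonical mmu_cr4_features (see vmx_ctxt_switch_to). */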
+ __vmwrite(HOST_CR4, mmu_cr4_features);
/* Host CS:RIP. */
__vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
static void vmx_ctxt_switch_to(struct vcpu *v)
{
+ /* HOST_CR4 in VMCS is always mmu_cr4_features. Sync CR4 now. */
+ if ( unlikely(read_cr4() != mmu_cr4_features) )
+ write_cr4(mmu_cr4_features);
+
vmx_restore_guest_msrs(v);
vmx_restore_dr(v);
}
ASSERT((intack.source == hvm_intsrc_pic) ||
(intack.source == hvm_intsrc_lapic));
- if ( irq_masked(guest_cpu_user_regs()->eflags) )
+ if ( !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
return hvm_intblk_rflags_ie;
if ( intack.source == hvm_intsrc_lapic )
set_current((struct vcpu *)0xfffff000); /* debug sanity */
idle_vcpu[0] = current;
set_processor_id(0); /* needed early, for smp_processor_id() */
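+ /* Seed the per-CPU EFER and CR4 caches from the hardware values. */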
+ rdmsrl(MSR_EFER, this_cpu(efer));
+ asm volatile ( "mov %%cr4,%0" : "=r" (this_cpu(cr4)) );
smp_prepare_boot_cpu();
ASSERT(!cpus_empty(cpumask));
}
+void apic_wait_icr_idle(void)
+{
+ while ( apic_read(APIC_ICR) & APIC_ICR_BUSY )
+ cpu_relax();
+}
+
void send_IPI_mask_flat(cpumask_t cpumask, int vector)
{
unsigned long mask = cpus_addr(cpumask)[0];
set_processor_id(cpu);
set_current(idle_vcpu[cpu]);
this_cpu(curr_vcpu) = idle_vcpu[cpu];
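+ /* Seed this CPU's EFER and CR4 caches from the hardware values. */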
+ rdmsrl(MSR_EFER, this_cpu(efer));
+ asm volatile ( "mov %%cr4,%0" : "=r" (this_cpu(cr4)) );
percpu_traps_init();
break;
case 4: /* Write CR4 */
- if ( *reg != (read_cr4() & ~(X86_CR4_PGE|X86_CR4_PSE)) )
- gdprintk(XENLOG_WARNING,
- "Attempt to change CR4 flags %08lx -> %08lx\n",
- read_cr4() & ~(X86_CR4_PGE|X86_CR4_PSE), *reg);
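+ /* Apply the PV CR4 policy, then update the cached and hardware CR4. */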
+ v->arch.guest_context.ctrlreg[4] = pv_guest_cr4_fixup(*reg);
+ write_cr4(v->arch.guest_context.ctrlreg[4]);
break;
default:
}
break;
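+ /* RDTSC faults at CPL > 0 when CR4.TSD is set, so emulate it for the guest. */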
+ case 0x31: /* RDTSC */
+ rdtsc(regs->eax, regs->edx);
+ break;
+
case 0x32: /* RDMSR */
switch ( regs->ecx )
{
#include <xen/init.h>
#include <xen/types.h>
+#include <xen/list.h>
#include <xen/spinlock.h>
-#include <xen/mm.h>
#include <asm/hvm/svm/amd-iommu-defs.h>
#define iommu_found() (!list_empty(&amd_iommu_head))
#define __ASM_APIC_H
#include <xen/config.h>
-#include <asm/fixmap.h>
#include <asm/apicdef.h>
-#include <asm/processor.h>
#include <asm/system.h>
#define Dprintk(x...)
return *((volatile u32 *)(APIC_BASE+reg));
}
-static __inline__ void apic_wait_icr_idle(void)
-{
- while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY )
- cpu_relax();
-}
+void apic_wait_icr_idle(void);
int get_physical_broadcast(void);
/* Continue the current hypercall via func(data) on specified cpu. */
int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data);
+unsigned long pv_guest_cr4_fixup(unsigned long guest_cr4);
+
#endif /* __ASM_DOMAIN_H__ */
/*
return register_io_handler(d, addr, size, action, HVM_BUFFERED_IO);
}
-#if defined(__i386__) || defined(__x86_64__)
-static inline int irq_masked(unsigned long eflags)
-{
- return ((eflags & X86_EFLAGS_IF) == 0);
-}
-#endif
-
extern void send_pio_req(unsigned long port, unsigned long count, int size,
paddr_t value, int dir, int df, int value_is_ptr);
void send_timeoffset_req(unsigned long timeoff);
#include <xen/types.h>
#include <xen/spinlock.h>
+#include <asm/irq.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vioapic.h>
#define __ASM_IO_APIC_H
#include <xen/config.h>
-#include <asm/fixmap.h>
#include <asm/types.h>
#include <asm/mpspec.h>
+#include <asm/apicdef.h>
+#include <asm/fixmap.h>
/*
* Intel IO-APIC support for SMP and UP systems.
#define _IOMMU_H_
#include <xen/init.h>
-#include <xen/bitmap.h>
-#include <xen/irq.h>
+#include <xen/list.h>
#include <xen/spinlock.h>
-#include <xen/mm.h>
-#include <xen/xmalloc.h>
#include <asm/hvm/vmx/intel-iommu.h>
#include <public/hvm/ioreq.h>
#include <public/domctl.h>
: "c" (counter))
-DECLARE_PER_CPU(__u64, efer);
+DECLARE_PER_CPU(u64, efer);
-static inline __u64 read_efer(void)
+static inline u64 read_efer(void)
{
- if (!this_cpu(efer))
- rdmsrl(MSR_EFER, this_cpu(efer));
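+ /* The per-CPU EFER cache is seeded during CPU bring-up. */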
return this_cpu(efer);
}
-static inline void write_efer(__u64 val)
+static inline void write_efer(u64 val)
{
this_cpu(efer) = val;
wrmsrl(MSR_EFER, val);
void setup_idle_pagetable(void);
#endif /* !defined(__ASSEMBLY__) */
-#define __pge_off() write_cr4(mmu_cr4_features & ~X86_CR4_PGE)
-#define __pge_on() write_cr4(mmu_cr4_features)
-
#define _PAGE_PRESENT 0x001U
#define _PAGE_RW 0x002U
#define _PAGE_USER 0x004U
#include <xen/config.h>
#include <xen/cache.h>
#include <xen/types.h>
+#include <xen/smp.h>
+#include <xen/percpu.h>
#include <public/xen.h>
#include <asm/types.h>
#include <asm/cpufeature.h>
return cr2;
}
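+/* Cached value of CR4 for this CPU; read_cr4()/write_cr4() keep it in sync. */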
+DECLARE_PER_CPU(unsigned long, cr4);
+
static inline unsigned long read_cr4(void)
{
- unsigned long cr4;
- asm volatile ( "mov %%cr4,%0\n\t" : "=r" (cr4) );
- return cr4;
-}
-
+ return this_cpu(cr4);
+}
+
static inline void write_cr4(unsigned long val)
{
- asm volatile ( "mov %0,%%cr4" : : "r" ((unsigned long)val) );
+ this_cpu(cr4) = val;
+ asm volatile ( "mov %0,%%cr4" : : "r" (val) );
}
/* Clear and set 'TS' bit respectively */
static always_inline void set_in_cr4 (unsigned long mask)
{
mmu_cr4_features |= mask;
- write_cr4(mmu_cr4_features);
+ write_cr4(read_cr4() | mask);
}
static always_inline void clear_in_cr4 (unsigned long mask)
{
- mmu_cr4_features &= ~mask;
- write_cr4(mmu_cr4_features);
+ mmu_cr4_features &= ~mask;
+ write_cr4(read_cr4() & ~mask);
}
/*
#ifdef CONFIG_X86_LOCAL_APIC
#ifndef __ASSEMBLY__
-#include <asm/fixmap.h>
#include <asm/bitops.h>
#include <asm/mpspec.h>
#ifdef CONFIG_X86_IO_APIC
#ifndef __X86_32_ELF_H__
#define __X86_32_ELF_H__
-#include <asm/processor.h>
-
typedef struct {
unsigned long ebx;
unsigned long ecx;
asm volatile("movw %%fs, %%ax;" :"=a"(core_regs->fs));
asm volatile("movw %%gs, %%ax;" :"=a"(core_regs->gs));
/* orig_eax not filled in for now */
- core_regs->eip = (unsigned long)current_text_addr();
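+ /* Approximate EIP with this function's own address. */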
+ core_regs->eip = (unsigned long)elf_core_save_regs;
asm volatile("movw %%cs, %%ax;" :"=a"(core_regs->cs));
asm volatile("pushfl; popl %0" :"=m"(core_regs->eflags));
asm volatile("movl %%esp,%0" : "=m"(core_regs->esp));
#ifndef __X86_64_ELF_H__
#define __X86_64_ELF_H__
-#include <asm/processor.h>
-
typedef struct {
unsigned long r15;
unsigned long r14;
asm volatile("movq %%rsi,%0" : "=m"(core_regs->rsi));
asm volatile("movq %%rdi,%0" : "=m"(core_regs->rdi));
/* orig_rax not filled in for now */
- core_regs->rip = (unsigned long)current_text_addr();
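+ /* Approximate RIP with this function's own address. */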
+ core_regs->rip = (unsigned long)elf_core_save_regs;
asm volatile("movl %%cs, %%eax;" :"=a"(core_regs->cs));
asm volatile("pushfq; popq %0" :"=m"(core_regs->eflags));
asm volatile("movq %%rsp,%0" : "=m"(core_regs->rsp));