reset_stack_and_jump(idle_loop);
}
+/*
+ * Populate the page at @ptr with hypercall transfer stubs suitable for
+ * @d's guest type (HVM, 64bit PV, or 32bit PV).  @ptr must be mapped in
+ * Xen's address space; accesses are not validated.
+ */
+void init_hypercall_page(struct domain *d, void *ptr)
+{
+    /* Poison with 0xcc (int3) so any slot not written below traps. */
+ memset(ptr, 0xcc, PAGE_SIZE);
+
+ if ( is_hvm_domain(d) )
+ hvm_init_hypercall_page(d, ptr);
+ else if ( is_pv_64bit_domain(d) )
+ pv_ring3_init_hypercall_page(ptr);
+ else if ( is_pv_32bit_domain(d) )
+ pv_ring1_init_hypercall_page(ptr);
+ else
+ ASSERT_UNREACHABLE();
+}
+
void dump_pageframe_info(struct domain *d)
{
struct page_info *page;
}
hypercall_page = __map_domain_page(page);
- hypercall_page_initialise(d, hypercall_page);
+ init_hypercall_page(d, hypercall_page);
unmap_domain_page(hypercall_page);
put_page_and_type(page);
}
}
-/* Initialise a hypercall transfer page for a VMX domain using
- paravirtualised drivers. */
-void hvm_hypercall_page_initialise(struct domain *d,
- void *hypercall_page)
+/* Fill the page at @ptr with @d's HVM hypercall stubs (VMCALL/VMMCALL). */
+void hvm_init_hypercall_page(struct domain *d, void *ptr)
{
hvm_latch_shinfo_size(d);
-
+ alternative_vcall(hvm_funcs.init_hypercall_page, ptr);
}
void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
return len;
}
-static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
+/* Write "mov $<i>, %eax; vmmcall; ret" stubs into the page at @p. */
+static void svm_init_hypercall_page(void *p)
 {
-    char *p;
-    int i;
+    unsigned int i;
 
-    for ( i = 0; i < (PAGE_SIZE / 32); i++ )
+    for ( i = 0; i < (PAGE_SIZE / 32); i++, p += 32 )
     {
-        if ( i == __HYPERVISOR_iret )
+        if ( unlikely(i == __HYPERVISOR_iret) )
+        {
+            /* HYPERVISOR_iret isn't supported */
+            *(u16 *)p = 0x0b0f; /* ud2 */
+
             continue;
+        }
 
-        p = (char *)(hypercall_page + (i * 32));
         *(u8  *)(p + 0) = 0xb8; /* mov imm32, %eax */
         *(u32 *)(p + 1) = i;
         *(u8  *)(p + 5) = 0x0f; /* vmmcall */
         *(u8  *)(p + 6) = 0x01;
         *(u8  *)(p + 7) = 0xd9;
         *(u8  *)(p + 8) = 0xc3; /* ret */
     }
-
-    /* Don't support HYPERVISOR_iret at the moment */
-    *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
 }
static inline void svm_tsc_ratio_save(struct vcpu *v)
vmx_vmcs_exit(v);
}
-static void vmx_init_hypercall_page(struct domain *d, void *hypercall_page)
+/* Write "mov $<i>, %eax; vmcall; ret" stubs into the page at @p. */
+static void vmx_init_hypercall_page(void *p)
 {
-    char *p;
-    int i;
+    unsigned int i;
 
-    for ( i = 0; i < (PAGE_SIZE / 32); i++ )
+    for ( i = 0; i < (PAGE_SIZE / 32); i++, p += 32 )
     {
-        if ( i == __HYPERVISOR_iret )
+        if ( unlikely(i == __HYPERVISOR_iret) )
+        {
+            /* HYPERVISOR_iret isn't supported */
+            *(u16 *)p = 0x0b0f; /* ud2 */
+
             continue;
+        }
 
-        p = (char *)(hypercall_page + (i * 32));
         *(u8  *)(p + 0) = 0xb8; /* mov imm32, %eax */
         *(u32 *)(p + 1) = i;
         *(u8  *)(p + 5) = 0x0f; /* vmcall */
         *(u8  *)(p + 6) = 0x01;
         *(u8  *)(p + 7) = 0xc1;
         *(u8  *)(p + 8) = 0xc3; /* ret */
     }
-
-    /* Don't support HYPERVISOR_iret at the moment */
-    *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
 }
static unsigned int vmx_get_interrupt_shadow(struct vcpu *v)
rc = -EINVAL;
goto out;
}
- hypercall_page_initialise(
- d, (void *)(unsigned long)parms.virt_hypercall);
+ init_hypercall_page(d, _p(parms.virt_hypercall));
}
/* Free temporary buffers. */
? mc_continue : mc_preempt;
}
-void hypercall_page_initialise_ring3_kernel(void *hypercall_page)
+/*
+ * Write 64bit PV hypercall stubs into the page at @p.  Each 32-byte slot
+ * preserves %rcx/%r11 (clobbered by SYSCALL) around the hypercall.
+ */
+void pv_ring3_init_hypercall_page(void *p)
 {
-    void *p = hypercall_page;
     unsigned int i;
 
-    /* Fill in all the transfer points with template machine code. */
     for ( i = 0; i < (PAGE_SIZE / 32); i++, p += 32 )
     {
-        if ( i == __HYPERVISOR_iret )
+        if ( unlikely(i == __HYPERVISOR_iret) )
+        {
+            /*
+             * HYPERVISOR_iret is special because it doesn't return and
+             * expects a special stack frame. Guests jump at this transfer
+             * point instead of calling it.
+             */
+            *(u8  *)(p+ 0) = 0x51;    /* push %rcx */
+            *(u16 *)(p+ 1) = 0x5341;  /* push %r11 */
+            *(u8  *)(p+ 3) = 0x50;    /* push %rax */
+            *(u8  *)(p+ 4) = 0xb8;    /* mov $__HYPERVISOR_iret, %eax */
+            *(u32 *)(p+ 5) = __HYPERVISOR_iret;
+            *(u16 *)(p+ 9) = 0x050f;  /* syscall */
+
             continue;
+        }
 
         *(u8  *)(p+ 0) = 0x51;    /* push %rcx */
         *(u16 *)(p+ 1) = 0x5341;  /* push %r11 */
         *(u8  *)(p+ 3) = 0xb8;    /* mov $<i>, %eax */
         *(u32 *)(p+ 4) = i;
         *(u16 *)(p+ 8) = 0x050f;  /* syscall */
         *(u16 *)(p+10) = 0x5b41;  /* pop %r11 */
         *(u8  *)(p+12) = 0x59;    /* pop %rcx */
         *(u8  *)(p+13) = 0xc3;    /* ret */
     }
-
-    /*
-     * HYPERVISOR_iret is special because it doesn't return and expects a
-     * special stack frame. Guests jump at this transfer point instead of
-     * calling it.
-     */
-    p = hypercall_page + (__HYPERVISOR_iret * 32);
-    *(u8  *)(p+ 0) = 0x51;    /* push %rcx */
-    *(u16 *)(p+ 1) = 0x5341;  /* push %r11 */
-    *(u8  *)(p+ 3) = 0x50;    /* push %rax */
-    *(u8  *)(p+ 4) = 0xb8;    /* mov $__HYPERVISOR_iret,%eax */
-    *(u32 *)(p+ 5) = __HYPERVISOR_iret;
-    *(u16 *)(p+ 9) = 0x050f;  /* syscall */
 }
-void hypercall_page_initialise_ring1_kernel(void *hypercall_page)
+void pv_ring1_init_hypercall_page(void *p)
{
- void *p = hypercall_page;
unsigned int i;
- /* Fill in all the transfer points with template machine code. */
-
+    /*
+     * Each 32-byte slot gets: mov $<i>, %eax; int $HYPERCALL_VECTOR; ret.
+     * __HYPERVISOR_iret is the exception: guests jump at it, not call it.
+     */
for ( i = 0; i < (PAGE_SIZE / 32); i++, p += 32 )
{
- if ( i == __HYPERVISOR_iret )
+ if ( unlikely(i == __HYPERVISOR_iret) )
+ {
+ /*
+ * HYPERVISOR_iret is special because it doesn't return and
+ * expects a special stack frame. Guests jump at this transfer
+ * point instead of calling it.
+ */
+ *(u8 *)(p+ 0) = 0x50; /* push %eax */
+ *(u8 *)(p+ 1) = 0xb8; /* mov $__HYPERVISOR_iret, %eax */
+ *(u32 *)(p+ 2) = __HYPERVISOR_iret;
+ *(u16 *)(p+ 6) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int $xx */
+
continue;
+ }
*(u8 *)(p+ 0) = 0xb8; /* mov $<i>,%eax */
*(u32 *)(p+ 1) = i;
*(u16 *)(p+ 5) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int $xx */
*(u8 *)(p+ 7) = 0xc3; /* ret */
}
-
- /*
- * HYPERVISOR_iret is special because it doesn't return and expects a
- * special stack frame. Guests jump at this transfer point instead of
- * calling it.
- */
- p = hypercall_page + (__HYPERVISOR_iret * 32);
- *(u8 *)(p+ 0) = 0x50; /* push %eax */
- *(u8 *)(p+ 1) = 0xb8; /* mov $__HYPERVISOR_iret,%eax */
- *(u32 *)(p+ 2) = __HYPERVISOR_iret;
- *(u16 *)(p+ 6) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int $xx */
}
/*
}
hypercall_page = __map_domain_page(page);
- hypercall_page_initialise(d, hypercall_page);
+ init_hypercall_page(d, hypercall_page);
unmap_domain_page(hypercall_page);
put_page_and_type(page);
wrmsrl(MSR_SYSCALL_MASK, XEN_SYSCALL_MASK);
}
-void hypercall_page_initialise(struct domain *d, void *hypercall_page)
-{
- memset(hypercall_page, 0xCC, PAGE_SIZE);
- if ( is_hvm_domain(d) )
- hvm_hypercall_page_initialise(d, hypercall_page);
- else if ( is_pv_64bit_domain(d) )
- hypercall_page_initialise_ring3_kernel(hypercall_page);
- else if ( is_pv_32bit_domain(d) )
- hypercall_page_initialise_ring1_kernel(hypercall_page);
- else
- ASSERT_UNREACHABLE();
-}
-
/*
* Local variables:
* mode: C
* Initialise a hypercall-transfer page. The given pointer must be mapped
* in Xen virtual address space (accesses are not validated or checked).
*/
-void hypercall_page_initialise(struct domain *d, void *);
+void init_hypercall_page(struct domain *d, void *);
/************************************************/
/* shadow paging extension */
void (*inject_event)(const struct x86_event *event);
- void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
+ void (*init_hypercall_page)(void *ptr);
bool (*event_pending)(const struct vcpu *v);
bool (*get_pending_event)(struct vcpu *v, struct x86_event *info);
enum hvm_intblk
hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack);
-void hvm_hypercall_page_initialise(struct domain *d, void *hypercall_page);
+void hvm_init_hypercall_page(struct domain *d, void *ptr);
void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
struct segment_register *reg);
void pv_hypercall(struct cpu_user_regs *regs);
#endif
-void hypercall_page_initialise_ring3_kernel(void *hypercall_page);
-void hypercall_page_initialise_ring1_kernel(void *hypercall_page);
+void pv_ring1_init_hypercall_page(void *ptr);
+void pv_ring3_init_hypercall_page(void *ptr);
/*
* Both do_mmuext_op() and do_mmu_update():