#define thr_sp md.thrmd_sp
#define thr_ip md.thrmd_ip
-extern void __arch_switch_threads(unsigned long *prevctx, unsigned long *nextctx);
+extern void _minios_entry_arch_switch_threads(unsigned long *prevctx, unsigned long *nextctx);
-#define arch_switch_threads(prev,next) __arch_switch_threads(&(prev)->thr_sp, &(next)->thr_sp)
+#define arch_switch_threads(prev,next) _minios_entry_arch_switch_threads(&(prev)->thr_sp, &(next)->thr_sp)
#endif /* __ARCH_SCHED_H__ */
#define __STR(x) #x
#define STR(x) __STR(x)
-extern char hypercall_page[PAGE_SIZE];
+extern char _minios_hypercall_page[PAGE_SIZE];
#define _hypercall0(type, name) \
({ \
long __res; \
asm volatile ( \
- "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ "call _minios_hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
: "=a" (__res) \
: \
: "memory" ); \
({ \
long __res, __ign1; \
asm volatile ( \
- "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ "call _minios_hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
: "=a" (__res), "=D" (__ign1) \
: "1" ((long)(a1)) \
: "memory" ); \
({ \
long __res, __ign1, __ign2; \
asm volatile ( \
- "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ "call _minios_hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
: "=a" (__res), "=D" (__ign1), "=S" (__ign2) \
: "1" ((long)(a1)), "2" ((long)(a2)) \
: "memory" ); \
({ \
long __res, __ign1, __ign2, __ign3; \
asm volatile ( \
- "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ "call _minios_hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
: "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
"=d" (__ign3) \
: "1" ((long)(a1)), "2" ((long)(a2)), \
long __res, __ign1, __ign2, __ign3; \
asm volatile ( \
"movq %7,%%r10; " \
- "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ "call _minios_hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
: "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
"=d" (__ign3) \
: "1" ((long)(a1)), "2" ((long)(a2)), \
long __res, __ign1, __ign2, __ign3; \
asm volatile ( \
"movq %7,%%r10; movq %8,%%r8; " \
- "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+ "call _minios_hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
: "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
"=d" (__ign3) \
: "1" ((long)(a1)), "2" ((long)(a2)), \
/*
* Mark portion of the address space read only.
*/
-extern struct shared_info shared_info;
+extern struct shared_info _minios_shared_info;
static void set_readonly(void *text, void *etext)
{
unsigned long start_address =
offset = l1_table_offset(start_address);
- if ( start_address != (unsigned long)&shared_info )
+ if ( start_address != (unsigned long)&_minios_shared_info )
{
mmu_updates[count].ptr =
((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
/* Gets run when a new thread is scheduled the first time ever,
defined in x86_[32/64].S */
-extern void thread_starter(void);
+extern void _minios_entry_thread_starter(void);
/* Pushes the specified value onto the stack of the specified thread */
static void stack_push(struct thread *thread, unsigned long value)
stack_push(thread, (unsigned long) function);
stack_push(thread, (unsigned long) data);
- thread->thr_ip = (unsigned long) thread_starter;
+ thread->thr_ip = (unsigned long) _minios_entry_thread_starter;
return thread;
}
*/
char stack[2*STACK_SIZE];
-extern char shared_info[PAGE_SIZE];
+extern char _minios_shared_info[PAGE_SIZE];
/* Assembler interface fns in entry.S. */
-void hypervisor_callback(void);
-void failsafe_callback(void);
+extern void _minios_entry_hypervisor_callback(void);
+extern void _minios_entry_failsafe_callback(void);
#if defined(__x86_64__)
#define __pte(x) ((pte_t) { (x) } )
int rc;
if ( (rc = HYPERVISOR_update_va_mapping(
- (unsigned long)shared_info, __pte(pa | 7), UVMF_INVLPG)) )
+ (unsigned long)_minios_shared_info, __pte(pa | 7), UVMF_INVLPG)) )
{
printk("Failed to map shared_info!! rc=%d\n", rc);
do_exit();
}
- return (shared_info_t *)shared_info;
+ return (shared_info_t *)_minios_shared_info;
}
void
-__KERNEL_CS, (unsigned long)failsafe_callback);
+__KERNEL_CS, (unsigned long)_minios_entry_failsafe_callback);
#else
HYPERVISOR_set_callbacks(
- (unsigned long)hypervisor_callback,
- (unsigned long)failsafe_callback, 0);
+ (unsigned long)_minios_entry_hypervisor_callback,
+ (unsigned long)_minios_entry_failsafe_callback, 0);
#endif
}
* These are assembler stubs in entry.S.
* They are the actual entry points for virtual exceptions.
*/
-void divide_error(void);
-void debug(void);
-void int3(void);
-void overflow(void);
-void bounds(void);
-void invalid_op(void);
-void device_not_available(void);
-void coprocessor_segment_overrun(void);
-void invalid_TSS(void);
-void segment_not_present(void);
-void stack_segment(void);
-void general_protection(void);
-void page_fault(void);
-void coprocessor_error(void);
-void simd_coprocessor_error(void);
-void alignment_check(void);
-void spurious_interrupt_bug(void);
-void machine_check(void);
+void _minios_entry_divide_error(void);
+void _minios_entry_debug(void);
+void _minios_entry_int3(void);
+void _minios_entry_overflow(void);
+void _minios_entry_bounds(void);
+void _minios_entry_invalid_op(void);
+void _minios_entry_device_not_available(void);
+void _minios_entry_coprocessor_segment_overrun(void);
+void _minios_entry_invalid_TSS(void);
+void _minios_entry_segment_not_present(void);
+void _minios_entry_stack_segment(void);
+void _minios_entry_general_protection(void);
+void _minios_entry_page_fault(void);
+void _minios_entry_coprocessor_error(void);
+void _minios_entry_simd_coprocessor_error(void);
+void _minios_entry_alignment_check(void);
+void _minios_entry_spurious_interrupt_bug(void);
+void _minios_entry_machine_check(void);
void dump_regs(struct pt_regs *regs)
* can trap to that vector using a software-interrupt instruction (INT).
*/
static trap_info_t trap_table[] = {
- { 0, 0, __KERNEL_CS, (unsigned long)divide_error },
- { 1, 0, __KERNEL_CS, (unsigned long)debug },
- { 3, 3, __KERNEL_CS, (unsigned long)int3 },
- { 4, 3, __KERNEL_CS, (unsigned long)overflow },
- { 5, 3, __KERNEL_CS, (unsigned long)bounds },
- { 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
- { 7, 0, __KERNEL_CS, (unsigned long)device_not_available },
- { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
- { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
- { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
- { 12, 0, __KERNEL_CS, (unsigned long)stack_segment },
- { 13, 0, __KERNEL_CS, (unsigned long)general_protection },
- { 14, 0, __KERNEL_CS, (unsigned long)page_fault },
- { 15, 0, __KERNEL_CS, (unsigned long)spurious_interrupt_bug },
- { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error },
- { 17, 0, __KERNEL_CS, (unsigned long)alignment_check },
- { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
+ { 0, 0, __KERNEL_CS, (unsigned long)_minios_entry_divide_error },
+ { 1, 0, __KERNEL_CS, (unsigned long)_minios_entry_debug },
+ { 3, 3, __KERNEL_CS, (unsigned long)_minios_entry_int3 },
+ { 4, 3, __KERNEL_CS, (unsigned long)_minios_entry_overflow },
+ { 5, 3, __KERNEL_CS, (unsigned long)_minios_entry_bounds },
+ { 6, 0, __KERNEL_CS, (unsigned long)_minios_entry_invalid_op },
+ { 7, 0, __KERNEL_CS, (unsigned long)_minios_entry_device_not_available },
+ { 9, 0, __KERNEL_CS, (unsigned long)_minios_entry_coprocessor_segment_overrun },
+ { 10, 0, __KERNEL_CS, (unsigned long)_minios_entry_invalid_TSS },
+ { 11, 0, __KERNEL_CS, (unsigned long)_minios_entry_segment_not_present },
+ { 12, 0, __KERNEL_CS, (unsigned long)_minios_entry_stack_segment },
+ { 13, 0, __KERNEL_CS, (unsigned long)_minios_entry_general_protection },
+ { 14, 0, __KERNEL_CS, (unsigned long)_minios_entry_page_fault },
+ { 15, 0, __KERNEL_CS, (unsigned long)_minios_entry_spurious_interrupt_bug },
+ { 16, 0, __KERNEL_CS, (unsigned long)_minios_entry_coprocessor_error },
+ { 17, 0, __KERNEL_CS, (unsigned long)_minios_entry_alignment_check },
+ { 19, 0, __KERNEL_CS, (unsigned long)_minios_entry_simd_coprocessor_error },
{ 0, 0, 0, 0 }
};
.byte 0
.text
-#define ENTRY(X) .globl X ; X :
-.globl _start, shared_info, hypercall_page
+#define ENTRY(name) \
+ .globl _minios_entry_##name; \
+ _minios_entry_##name:
+
+.globl _start, _minios_shared_info, _minios_hypercall_page
_start:
/* Unpleasant -- the PTE that maps this page is actually overwritten */
/* to map the real shared-info page! :-) */
.org 0x1000
-shared_info:
+_minios_shared_info:
.org 0x2000
-hypercall_page:
+_minios_hypercall_page:
.org 0x3000
2: /* Slow iret via hypervisor. */
andl $~NMI_MASK, 16(%rsp)
pushq $\flag
- jmp hypercall_page + (__HYPERVISOR_iret * 32)
+ jmp _minios_hypercall_page + (__HYPERVISOR_iret * 32)
.endm
/*
- * Exception entry point. This expects an error code/orig_rax on the stack
- * and the exception handler in %rax.
+ * Common code to all exception entry points. This expects an error
+ * code/orig_rax on the stack and the exception handler in %rax.
*/
-ENTRY(error_entry)
+error_common:
/* rdi slot contains rax, oldrax contains error code */
cld
subq $14*8,%rsp
call *%rax
jmp error_exit
-.macro zeroentry sym
- movq (%rsp),%rcx
- movq 8(%rsp),%r11
- addq $0x10,%rsp /* skip rcx and r11 */
- pushq $0 /* push error code/oldrax */
- pushq %rax /* push real oldrax to the rdi slot */
- leaq \sym(%rip),%rax
- jmp error_entry
-.endm
-
-.macro errorentry sym
+.macro errorentry sym has_error_code:req
movq (%rsp),%rcx
movq 8(%rsp),%r11
- addq $0x10,%rsp /* rsp points to the error code */
- pushq %rax
+ addq $0x10,%rsp /* skip rcx and r11 */
+ .if \has_error_code == 0
+ pushq $0 /* push error code/oldrax */
+ .endif
+ pushq %rax /* push real oldrax to the rdi slot */
leaq \sym(%rip),%rax
- jmp error_entry
-.endm
+ jmp error_common
+.endm
#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
#define XEN_PUT_VCPU_INFO(reg)
ENTRY(hypervisor_callback)
- zeroentry hypervisor_callback2
+ errorentry hypervisor_callback2 0
-ENTRY(hypervisor_callback2)
+hypervisor_callback2:
movq %rdi, %rsp
11: movq %gs:8,%rax
incl %gs:0
ENTRY(coprocessor_error)
- zeroentry do_coprocessor_error
+ errorentry do_coprocessor_error 0
ENTRY(simd_coprocessor_error)
- zeroentry do_simd_coprocessor_error
+ errorentry do_simd_coprocessor_error 0
ENTRY(device_not_available)
- zeroentry do_device_not_available
+ errorentry do_device_not_available 0
ENTRY(debug)
- zeroentry do_debug
+ errorentry do_debug 0
ENTRY(int3)
- zeroentry do_int3
+ errorentry do_int3 0
ENTRY(overflow)
- zeroentry do_overflow
+ errorentry do_overflow 0
ENTRY(bounds)
- zeroentry do_bounds
+ errorentry do_bounds 0
ENTRY(invalid_op)
- zeroentry do_invalid_op
+ errorentry do_invalid_op 0
ENTRY(coprocessor_segment_overrun)
- zeroentry do_coprocessor_segment_overrun
+ errorentry do_coprocessor_segment_overrun 0
ENTRY(invalid_TSS)
- errorentry do_invalid_TSS
+ errorentry do_invalid_TSS 1
ENTRY(segment_not_present)
- errorentry do_segment_not_present
+ errorentry do_segment_not_present 1
/* runs on exception stack */
ENTRY(stack_segment)
- errorentry do_stack_segment
+ errorentry do_stack_segment 1
ENTRY(general_protection)
- errorentry do_general_protection
+ errorentry do_general_protection 1
ENTRY(alignment_check)
- errorentry do_alignment_check
+ errorentry do_alignment_check 1
ENTRY(divide_error)
- zeroentry do_divide_error
+ errorentry do_divide_error 0
ENTRY(spurious_interrupt_bug)
- zeroentry do_spurious_interrupt_bug
+ errorentry do_spurious_interrupt_bug 0
ENTRY(page_fault)
- errorentry do_page_fault
+ errorentry do_page_fault 1
call exit_thread
-ENTRY(__arch_switch_threads)
+ENTRY(arch_switch_threads)
pushq %rbp
pushq %rbx
pushq %r12