ia64/xen-unstable
changeset 9911:f6507937cb7c
Fix x86/64 version of Mini-OS. It encompasses the following:
a) 64-bit switch_to scheduler macro (by Aravindh Puthiyaparambil)
b) implements 64-bit hypervisor_callback
c) fixes thread creation issues (thread_starter used to perform
initialisation)
Signed-off-by: Grzegorz Milos <gm281@cam.ac.uk>
author:   kaf24@firebug.cl.cam.ac.uk
date:     Tue May 02 09:12:39 2006 +0100 (2006-05-02)
parents:  dc3c59367403
children: d7e6e5f29226
files:    extras/mini-os/console/console.c extras/mini-os/events.c extras/mini-os/hypervisor.c extras/mini-os/include/mm.h extras/mini-os/include/os.h extras/mini-os/include/sched.h extras/mini-os/kernel.c extras/mini-os/minios-x86_64.lds extras/mini-os/sched.c extras/mini-os/x86_32.S extras/mini-os/x86_64.S
line diff
```diff
--- a/extras/mini-os/console/console.c	Mon May 01 17:44:51 2006 +0100
+++ b/extras/mini-os/console/console.c	Tue May 02 09:12:39 2006 +0100
@@ -128,7 +128,7 @@ void printk(const char *fmt, ...)
 {
     va_list args;
     va_start(args, fmt);
-    print(0, fmt, args);
+    print(1, fmt, args);
     va_end(args);
 }
 
```
```diff
--- a/extras/mini-os/events.c	Mon May 01 17:44:51 2006 +0100
+++ b/extras/mini-os/events.c	Tue May 02 09:12:39 2006 +0100
@@ -106,6 +106,17 @@ void unbind_virq( u32 port )
 	unbind_evtchn(port);
 }
 
+#if defined(__x86_64__)
+/* Allocate 4 pages for the irqstack */
+#define STACK_PAGES 4
+char irqstack[1024 * 4 * STACK_PAGES];
+
+static struct pda
+{
+    int irqcount;       /* offset 0 (used in x86_64.S) */
+    char *irqstackptr;  /*        8 */
+} cpu0_pda;
+#endif
 
 /*
  * Initially all events are without a handler and disabled
@@ -113,7 +124,12 @@ void unbind_virq( u32 port )
 void init_events(void)
 {
     int i;
-
+#if defined(__x86_64__)
+    asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
+    wrmsrl(0xc0000101, &cpu0_pda); /* 0xc0000101 is MSR_GS_BASE */
+    cpu0_pda.irqcount = -1;
+    cpu0_pda.irqstackptr = irqstack + 1024 * 4 * STACK_PAGES;
+#endif
     /* inintialise event handler */
     for ( i = 0; i < NR_EVS; i++ )
     {
```
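The pda layout above is load-bearing: once `wrmsrl(MSR_GS_BASE, &cpu0_pda)` has run, the new `hypervisor_callback2` code in x86_64.S (further down) reads `irqcount` as `%gs:0` and `irqstackptr` as `%gs:8`. A compile-time check in this spirit would catch the C layout drifting from the hard-coded assembly offsets; the `ASSERT_OFFSET` macro here is illustrative, not part of Mini-OS:

```c
#include <stddef.h>

struct pda {
    int   irqcount;     /* accessed as %gs:0 from x86_64.S */
    char *irqstackptr;  /* accessed as %gs:8 from x86_64.S */
};

/* Illustrative compile-time assertion: the array size goes negative,
   and the build fails, if the offset is not the expected one. */
#define ASSERT_OFFSET(type, member, off) \
    typedef char assert_##member[(offsetof(type, member) == (off)) ? 1 : -1]

ASSERT_OFFSET(struct pda, irqcount, 0);
ASSERT_OFFSET(struct pda, irqstackptr, 8);
```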
```diff
--- a/extras/mini-os/hypervisor.c	Mon May 01 17:44:51 2006 +0100
+++ b/extras/mini-os/hypervisor.c	Tue May 02 09:12:39 2006 +0100
@@ -41,8 +41,8 @@ void do_hypervisor_callback(struct pt_re
     shared_info_t *s = HYPERVISOR_shared_info;
     vcpu_info_t   *vcpu_info = &s->vcpu_info[cpu];
 
+
     vcpu_info->evtchn_upcall_pending = 0;
-
     /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
     l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
     while ( l1 != 0 )
```
```diff
--- a/extras/mini-os/include/mm.h	Mon May 01 17:44:51 2006 +0100
+++ b/extras/mini-os/include/mm.h	Tue May 02 09:12:39 2006 +0100
@@ -148,7 +148,7 @@ static __inline__ unsigned long machine_
 }
 
 #if defined(__x86_64__)
-#define VIRT_START 0xFFFFFFFF00000000UL
+#define VIRT_START 0xFFFFFFFF80000000UL
 #elif defined(__i386__)
 #define VIRT_START 0xC0000000UL
 #endif
```
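The new VIRT_START has to agree with the link address in minios-x86_64.lds, which this changeset updates the same way below, because mm.h derives its virtual/physical translation from it. Roughly, in the style of the existing mm.h macros (a sketch, not new code added by this changeset):

```c
/* Kernel virtual addresses are physical addresses offset by
   VIRT_START, so translation is a single add or subtract. */
#define to_phys(x)  ((unsigned long)(x) - VIRT_START)
#define to_virt(x)  ((void *)((unsigned long)(x) + VIRT_START))
```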
```diff
--- a/extras/mini-os/include/os.h	Mon May 01 17:44:51 2006 +0100
+++ b/extras/mini-os/include/os.h	Tue May 02 09:12:39 2006 +0100
@@ -434,6 +434,13 @@ static __inline__ unsigned long __ffs(un
     (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
 } while(0)
 
+#define wrmsr(msr,val1,val2) \
+      __asm__ __volatile__("wrmsr" \
+                           : /* no outputs */ \
+                           : "c" (msr), "a" (val1), "d" (val2))
+
+#define wrmsrl(msr,val) wrmsr(msr,(u32)((u64)(val)),((u64)(val))>>32)
+
 
 #else /* ifdef __x86_64__ */
 #error "Unsupported architecture"
```
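`wrmsr` mirrors the WRMSR instruction's operands: the MSR index goes in ECX and the 64-bit payload travels as the EDX:EAX register pair, so `wrmsrl` is just the 64-to-2x32 split. A userspace-runnable sketch of that split (the address value is invented for illustration):

```c
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    /* Stand-in for e.g. &cpu0_pda as passed to wrmsrl() in events.c. */
    uint64_t val  = 0xFFFFFFFF80001234ULL;
    uint32_t low  = (uint32_t)val;          /* ends up in EAX */
    uint32_t high = (uint32_t)(val >> 32);  /* ends up in EDX */
    printf("WRMSR payload: EDX:EAX = %08" PRIx32 ":%08" PRIx32 "\n",
           high, low);
    return 0;
}
```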
```diff
--- a/extras/mini-os/include/sched.h	Mon May 01 17:44:51 2006 +0100
+++ b/extras/mini-os/include/sched.h	Tue May 02 09:12:39 2006 +0100
@@ -7,8 +7,8 @@ struct thread
 {
     char *name;
     char *stack;
-    unsigned long eps;
-    unsigned long eip;
+    unsigned long sp;  /* Stack pointer */
+    unsigned long ip;  /* Instruction pointer */
     struct list_head thread_list;
     u32 flags;
 };
@@ -25,7 +25,9 @@ static inline struct thread* get_current
     struct thread **current;
 #ifdef __i386__
     __asm__("andl %%esp,%0; ":"=r" (current) : "r" (~8191UL));
-#endif
+#else
+    __asm__("andq %%rsp,%0; ":"=r" (current) : "r" (~8191UL));
+#endif
     return *current;
 }
```
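The new x86-64 branch of get_current() relies on the same invariants as the i386 one: thread stacks are two pages (8 KiB) and suitably aligned, and create_thread() stores the `struct thread` pointer in the bottom word of the stack. Masking any in-stack address with `~8191` therefore lands on that pointer. A plain-C rendering of the inline assembly (illustrative, not Mini-OS code):

```c
#include <stdint.h>

struct thread;

/* Equivalent of the andl/andq trick: round the stack pointer down to
   the 8 KiB stack base, where create_thread() stored the owner. */
static inline struct thread *current_from_sp(uintptr_t sp)
{
    struct thread **base = (struct thread **)(sp & ~(uintptr_t)8191);
    return *base;
}
```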
```diff
--- a/extras/mini-os/kernel.c	Mon May 01 17:44:51 2006 +0100
+++ b/extras/mini-os/kernel.c	Tue May 02 09:12:39 2006 +0100
@@ -35,6 +35,8 @@
 #include <lib.h>
 #include <sched.h>
 #include <xenbus.h>
+#include <xen/features.h>
+#include <xen/version.h>
 
 /*
  * Shared page for communicating with the hypervisor.
@@ -85,6 +87,26 @@ static void init_xs(void *ign)
     test_xenbus();
 }
 
+
+u8 xen_features[XENFEAT_NR_SUBMAPS * 32];
+
+void setup_xen_features(void)
+{
+    xen_feature_info_t fi;
+    int i, j;
+
+    for (i = 0; i < XENFEAT_NR_SUBMAPS; i++)
+    {
+        fi.submap_idx = i;
+        if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
+            break;
+
+        for (j=0; j<32; j++)
+            xen_features[i*32+j] = !!(fi.submap & 1<<j);
+    }
+}
+
+
 /*
  * INITIAL C ENTRY POINT.
  */
@@ -127,7 +149,9 @@ void start_kernel(start_info_t *si)
     printk("  flags: 0x%x\n", (unsigned int)si->flags);
     printk("  cmd_line: %s\n",
            si->cmd_line ? (const char *)si->cmd_line : "NULL");
+    printk("  stack: %p-%p\n", stack, stack + 8192);
 
+    setup_xen_features();
 
     /* Init memory management. */
     init_mm();
@@ -146,7 +170,7 @@ void start_kernel(start_info_t *si)
 
     /* Init XenBus from a separate thread */
     create_thread("init_xs", init_xs, NULL);
-    
+
     /* Everything initialised, start idle thread */
     run_idle_thread();
 }
```
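setup_xen_features() expands each 32-bit submap into one byte per feature flag, so a lookup is a plain array index. A Linux-style accessor would look like this (illustrative; the Mini-OS code added here indexes `xen_features` directly):

```c
/* xen_features[] holds one 0/1 byte per feature bit, filled in by
   setup_xen_features() above. */
extern unsigned char xen_features[];

static inline int xen_feature(int flag)
{
    return xen_features[flag];
}
```

The HYPERVISOR_IRET macro added to x86_64.S below tests the `XENFEAT_supervisor_mode_kernel` byte of this same array from assembly.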
```diff
--- a/extras/mini-os/minios-x86_64.lds	Mon May 01 17:44:51 2006 +0100
+++ b/extras/mini-os/minios-x86_64.lds	Tue May 02 09:12:39 2006 +0100
@@ -3,7 +3,7 @@ OUTPUT_ARCH(i386:x86-64)
 ENTRY(_start)
 SECTIONS
 {
-  . = 0xFFFFFFFF00000000;
+  . = 0xFFFFFFFF80000000;
   _text = .;			/* Text and read-only data */
   .text : {
 	*(.text)
```
```diff
--- a/extras/mini-os/sched.c	Mon May 01 17:44:51 2006 +0100
+++ b/extras/mini-os/sched.c	Tue May 02 09:12:39 2006 +0100
@@ -69,17 +69,27 @@ void idle_thread_fn(void *unused);
 
 void dump_stack(struct thread *thread)
 {
-    unsigned long *bottom = (unsigned long *)thread->stack + 2048;
-    unsigned long *pointer = (unsigned long *)thread->eps;
+    unsigned long *bottom = (unsigned long *)(thread->stack + 2*4*1024);
+    unsigned long *pointer = (unsigned long *)thread->sp;
     int count;
+    if(thread == current)
+    {
+#ifdef __i386__
+        asm("movl %%esp,%0"
+            : "=r"(pointer));
+#else
+        asm("movq %%rsp,%0"
+            : "=r"(pointer));
+#endif
+    }
     printk("The stack for \"%s\"\n", thread->name);
-    for(count = 0; count < 15 && pointer < bottom; count ++)
+    for(count = 0; count < 25 && pointer < bottom; count ++)
     {
         printk("[0x%lx] 0x%lx\n", pointer, *pointer);
         pointer++;
     }
 
-    if(pointer < bottom) printk("Not the whole stack printed\n");
+    if(pointer < bottom) printk(" ... continues.\n");
 }
 
 #ifdef __i386__
@@ -95,13 +105,29 @@ void dump_stack(struct thread *thread)
             "1:\t"                                    \
             "popl %%ebp\n\t"                          \
             "popfl"                                   \
-            :"=m" (prev->eps),"=m" (prev->eip),       \
+            :"=m" (prev->sp),"=m" (prev->ip),         \
             "=S" (esi),"=D" (edi)                     \
-            :"m" (next->eps),"m" (next->eip),         \
+            :"m" (next->sp),"m" (next->ip),           \
             "2" (prev), "d" (next));                  \
 } while (0)
 #elif __x86_64__
-/* FIXME */
+#define switch_threads(prev, next) do {               \
+    unsigned long rsi,rdi;                            \
+    __asm__ __volatile__("pushfq\n\t"                 \
+            "pushq %%rbp\n\t"                         \
+            "movq %%rsp,%0\n\t"    /* save RSP */     \
+            "movq %4,%%rsp\n\t"    /* restore RSP */  \
+            "movq $1f,%1\n\t"      /* save RIP */     \
+            "pushq %5\n\t"         /* restore RIP */  \
+            "ret\n\t"                                 \
+            "1:\t"                                    \
+            "popq %%rbp\n\t"                          \
+            "popfq"                                   \
+            :"=m" (prev->sp),"=m" (prev->ip),         \
+            "=S" (rsi),"=D" (rdi)                     \
+            :"m" (next->sp),"m" (next->ip),           \
+            "2" (prev), "d" (next));                  \
+} while (0)
 #endif
 
 void inline print_runqueue(void)
@@ -151,17 +177,19 @@ void schedule(void)
     local_irq_restore(flags);
     /* Interrupting the switch is equivalent to having the next thread
        inturrupted at the return instruction. And therefore at safe point. */
-/* The thread switching only works for i386 at the moment */
-#ifdef __i386__
     if(prev != next) switch_threads(prev, next);
-#endif
 }
 
 
+/* Gets run when a new thread is scheduled the first time ever,
+   defined in x86_[32/64].S */
+extern void thread_starter(void);
 
-void exit_thread(struct thread *thread)
+
+void exit_thread(void)
 {
     unsigned long flags;
+    struct thread *thread = current;
     printk("Thread \"%s\" exited.\n", thread->name);
     local_irq_save(flags);
     /* Remove from the thread list */
@@ -174,6 +202,12 @@ void exit_thread(struct thread *thread)
     schedule();
 }
 
+/* Pushes the specified value onto the stack of the specified thread */
+static void stack_push(struct thread *thread, unsigned long value)
+{
+    thread->sp -= sizeof(unsigned long);
+    *((unsigned long *)thread->sp) = value;
+}
 
 struct thread* create_thread(char *name, void (*function)(void *), void *data)
 {
@@ -187,23 +221,17 @@ struct thread* create_thread(char *name,
     printk("Thread \"%s\": pointer: 0x%lx, stack: 0x%lx\n", name, thread,
             thread->stack);
 
-    thread->eps = (unsigned long)thread->stack + 4096 * 2 - 4;
+    thread->sp = (unsigned long)thread->stack + 4096 * 2;
     /* Save pointer to the thread on the stack, used by current macro */
     *((unsigned long *)thread->stack) = (unsigned long)thread;
-    *((unsigned long *)thread->eps) = (unsigned long)thread;
-    thread->eps -= 4;
-    *((unsigned long *)thread->eps) = (unsigned long)data;
 
-    /* No return address */
-    thread->eps -= 4;
-    *((unsigned long *)thread->eps) = (unsigned long)exit_thread;
-
-    thread->eip = (unsigned long)function;
+    stack_push(thread, (unsigned long) function);
+    stack_push(thread, (unsigned long) data);
+    thread->ip = (unsigned long) thread_starter;
 
     /* Not runable, not exited */
     thread->flags = 0;
     set_runnable(thread);
-
     local_irq_save(flags);
     if(idle_thread != NULL) {
         list_add_tail(&thread->thread_list, &idle_thread->thread_list);
@@ -213,7 +241,6 @@ struct thread* create_thread(char *name,
         BUG();
     }
     local_irq_restore(flags);
-
     return thread;
 }
 
@@ -240,11 +267,19 @@ void idle_thread_fn(void *unused)
 void run_idle_thread(void)
 {
     /* Switch stacks and run the thread */
+#if defined(__i386__)
     __asm__ __volatile__("mov %0,%%esp\n\t"
                          "push %1\n\t"
                          "ret"
-                         :"=m" (idle_thread->eps)
-                         :"m" (idle_thread->eip));
+                         :"=m" (idle_thread->sp)
+                         :"m" (idle_thread->ip));
+#elif defined(__x86_64__)
+    __asm__ __volatile__("mov %0,%%rsp\n\t"
+                         "push %1\n\t"
+                         "ret"
+                         :"=m" (idle_thread->sp)
+                         :"m" (idle_thread->ip));
+#endif
 }
 
```
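The new create_thread()/thread_starter handshake works like this: the two stack_push() calls seed the fresh stack with the function pointer and then its argument, and thread->ip is pointed at thread_starter. The first switch_threads() into the thread "returns" into thread_starter (added to x86_32.S and x86_64.S just below), which pops the argument and the function, into %eax/%ebx on i386 and %rdi/%rbx on x86-64, matching each ABI's argument-passing convention, and then effectively runs:

```c
extern void exit_thread(void);

/* C-level equivalent of the thread_starter assembly stubs
   (illustrative; the real entry points live in x86_32.S/x86_64.S). */
static void thread_starter_equiv(void (*function)(void *), void *data)
{
    function(data);  /* run the thread body */
    exit_thread();   /* reap the thread; never returns */
}
```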
```diff
--- a/extras/mini-os/x86_32.S	Mon May 01 17:44:51 2006 +0100
+++ b/extras/mini-os/x86_32.S	Tue May 02 09:12:39 2006 +0100
@@ -286,3 +286,11 @@ ENTRY(spurious_interrupt_bug)
 	pushl $0
 	pushl $do_spurious_interrupt_bug
 	jmp do_exception
+
+ENTRY(thread_starter)
+	popl %eax
+	popl %ebx
+	pushl %eax
+	call *%ebx
+	call exit_thread
+
```
```diff
--- a/extras/mini-os/x86_64.S	Mon May 01 17:44:51 2006 +0100
+++ b/extras/mini-os/x86_64.S	Tue May 02 09:12:39 2006 +0100
@@ -1,4 +1,5 @@
 #include <os.h>
+#include <xen/features.h>
 
 .section __xen_guest
 .ascii	"GUEST_OS=Mini-OS"
@@ -65,10 +66,253 @@ shared_info:
 hypercall_page:
         .org 0x3000
 
+
+/* Offsets into shared_info_t. */
+#define evtchn_upcall_pending		/* 0 */
+#define evtchn_upcall_mask		1
+
+NMI_MASK = 0x80000000
+
+#define RDI 112
+#define ORIG_RAX 120       /* + error_code */
+#define EFLAGS 144
+
+#define REST_SKIP 6*8
+.macro SAVE_REST
+	subq $REST_SKIP,%rsp
+#	CFI_ADJUST_CFA_OFFSET	REST_SKIP
+	movq %rbx,5*8(%rsp)
+#	CFI_REL_OFFSET	rbx,5*8
+	movq %rbp,4*8(%rsp)
+#	CFI_REL_OFFSET	rbp,4*8
+	movq %r12,3*8(%rsp)
+#	CFI_REL_OFFSET	r12,3*8
+	movq %r13,2*8(%rsp)
+#	CFI_REL_OFFSET	r13,2*8
+	movq %r14,1*8(%rsp)
+#	CFI_REL_OFFSET	r14,1*8
+	movq %r15,(%rsp)
+#	CFI_REL_OFFSET	r15,0*8
+.endm
+
+
+.macro RESTORE_REST
+	movq (%rsp),%r15
+#	CFI_RESTORE r15
+	movq 1*8(%rsp),%r14
+#	CFI_RESTORE r14
+	movq 2*8(%rsp),%r13
+#	CFI_RESTORE r13
+	movq 3*8(%rsp),%r12
+#	CFI_RESTORE r12
+	movq 4*8(%rsp),%rbp
+#	CFI_RESTORE rbp
+	movq 5*8(%rsp),%rbx
+#	CFI_RESTORE rbx
+	addq $REST_SKIP,%rsp
+#	CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
+.endm
+
+
+#define ARG_SKIP 9*8
+.macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0
+	.if \skipr11
+	.else
+	movq (%rsp),%r11
+#	CFI_RESTORE r11
+	.endif
+	.if \skipr8910
+	.else
+	movq 1*8(%rsp),%r10
+#	CFI_RESTORE r10
+	movq 2*8(%rsp),%r9
+#	CFI_RESTORE r9
+	movq 3*8(%rsp),%r8
+#	CFI_RESTORE r8
+	.endif
+	.if \skiprax
+	.else
+	movq 4*8(%rsp),%rax
+#	CFI_RESTORE rax
+	.endif
+	.if \skiprcx
+	.else
+	movq 5*8(%rsp),%rcx
+#	CFI_RESTORE rcx
+	.endif
+	.if \skiprdx
+	.else
+	movq 6*8(%rsp),%rdx
+#	CFI_RESTORE rdx
+	.endif
+	movq 7*8(%rsp),%rsi
+#	CFI_RESTORE rsi
+	movq 8*8(%rsp),%rdi
+#	CFI_RESTORE rdi
+	.if ARG_SKIP+\addskip > 0
+	addq $ARG_SKIP+\addskip,%rsp
+#	CFI_ADJUST_CFA_OFFSET	-(ARG_SKIP+\addskip)
+	.endif
+.endm
+
+
+.macro HYPERVISOR_IRET flag
+#	testb $3,1*8(%rsp)	/* Don't need to do that in Mini-os, as */
+#	jnz   2f		/* there is no userspace? */
+	testl $NMI_MASK,2*8(%rsp)
+	jnz   2f
+
+	testb $1,(xen_features+XENFEAT_supervisor_mode_kernel)
+	jnz   1f
+
+	/* Direct iret to kernel space. Correct CS and SS. */
+	orb   $3,1*8(%rsp)
+	orb   $3,4*8(%rsp)
+1:	iretq
+
+2:	/* Slow iret via hypervisor. */
+	andl  $~NMI_MASK, 16(%rsp)
+	pushq $\flag
+	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
+.endm
+
+/*
+ * Exception entry point. This expects an error code/orig_rax on the stack
+ * and the exception handler in %rax.
+ */
+ENTRY(error_entry)
+#	_frame RDI
+	/* rdi slot contains rax, oldrax contains error code */
+	cld
+	subq  $14*8,%rsp
+#	CFI_ADJUST_CFA_OFFSET	(14*8)
+	movq %rsi,13*8(%rsp)
+#	CFI_REL_OFFSET	rsi,RSI
+	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
+	movq %rdx,12*8(%rsp)
+#	CFI_REL_OFFSET	rdx,RDX
+	movq %rcx,11*8(%rsp)
+#	CFI_REL_OFFSET	rcx,RCX
+	movq %rsi,10*8(%rsp)	/* store rax */
+#	CFI_REL_OFFSET	rax,RAX
+	movq %r8, 9*8(%rsp)
+#	CFI_REL_OFFSET	r8,R8
+	movq %r9, 8*8(%rsp)
+#	CFI_REL_OFFSET	r9,R9
+	movq %r10,7*8(%rsp)
+#	CFI_REL_OFFSET	r10,R10
+	movq %r11,6*8(%rsp)
+#	CFI_REL_OFFSET	r11,R11
+	movq %rbx,5*8(%rsp)
+#	CFI_REL_OFFSET	rbx,RBX
+	movq %rbp,4*8(%rsp)
+#	CFI_REL_OFFSET	rbp,RBP
+	movq %r12,3*8(%rsp)
+#	CFI_REL_OFFSET	r12,R12
+	movq %r13,2*8(%rsp)
+#	CFI_REL_OFFSET	r13,R13
+	movq %r14,1*8(%rsp)
+#	CFI_REL_OFFSET	r14,R14
+	movq %r15,(%rsp)
+#	CFI_REL_OFFSET	r15,R15
+#if 0
+	cmpl $__KERNEL_CS,CS(%rsp)
+	je  error_kernelspace
+#endif
+error_call_handler:
+	movq %rdi, RDI(%rsp)
+	movq %rsp,%rdi
+	movq ORIG_RAX(%rsp),%rsi	# get error code
+	movq $-1,ORIG_RAX(%rsp)
+	call *%rax
+
+.macro zeroentry sym
+#	INTR_FRAME
+	movq (%rsp),%rcx
+	movq 8(%rsp),%r11
+	addq $0x10,%rsp		/* skip rcx and r11 */
+	pushq $0		/* push error code/oldrax */
+#	CFI_ADJUST_CFA_OFFSET 8
+	pushq %rax		/* push real oldrax to the rdi slot */
+#	CFI_ADJUST_CFA_OFFSET 8
+	leaq  \sym(%rip),%rax
+	jmp error_entry
+#	CFI_ENDPROC
+.endm
+
+
+
+#define XEN_GET_VCPU_INFO(reg)	movq HYPERVISOR_shared_info,reg
+#define XEN_PUT_VCPU_INFO(reg)
+#define XEN_PUT_VCPU_INFO_fixup
+#define XEN_LOCKED_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
+#define XEN_LOCKED_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
+#define XEN_TEST_PENDING(reg)	testb $0xFF,evtchn_upcall_pending(reg)
+
+#define XEN_BLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)		; \
+				XEN_LOCKED_BLOCK_EVENTS(reg)	; \
+				XEN_PUT_VCPU_INFO(reg)
+
+#define XEN_UNBLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)		; \
+				XEN_LOCKED_UNBLOCK_EVENTS(reg)	; \
+				XEN_PUT_VCPU_INFO(reg)
+
+
+
 ENTRY(hypervisor_callback)
-        popq  %rcx
-        popq  %r11
-        iretq
+	zeroentry hypervisor_callback2
+
+ENTRY(hypervisor_callback2)
+	movq %rdi, %rsp
+11:	movq %gs:8,%rax
+	incl %gs:0
+	cmovzq %rax,%rsp
+	pushq %rdi
+	call do_hypervisor_callback
+	popq %rsp
+	decl %gs:0
+	jmp error_exit
+
+#	ALIGN
+restore_all_enable_events:
+	XEN_UNBLOCK_EVENTS(%rsi)	# %rsi is already set up...
+
+scrit:	/**** START OF CRITICAL REGION ****/
+	XEN_TEST_PENDING(%rsi)
+	jnz  14f	# process more events if necessary...
+	XEN_PUT_VCPU_INFO(%rsi)
+	RESTORE_ARGS 0,8,0
+	HYPERVISOR_IRET 0
+
+14:	XEN_LOCKED_BLOCK_EVENTS(%rsi)
+	XEN_PUT_VCPU_INFO(%rsi)
+	SAVE_REST
+	movq %rsp,%rdi			# set the argument again
+	jmp  11b
+ecrit:	/**** END OF CRITICAL REGION ****/
+
+
+retint_kernel:
+retint_restore_args:
+	movl EFLAGS-REST_SKIP(%rsp), %eax
+	shr $9, %eax			# EAX[0] == IRET_EFLAGS.IF
+	XEN_GET_VCPU_INFO(%rsi)
+	andb evtchn_upcall_mask(%rsi),%al
+	andb $1,%al			# EAX[0] == IRET_EFLAGS.IF & event_mask
+	jnz restore_all_enable_events	# != 0 => enable event delivery
+	XEN_PUT_VCPU_INFO(%rsi)
+
+	RESTORE_ARGS 0,8,0
+	HYPERVISOR_IRET 0
+
+
+error_exit:
+	RESTORE_REST
+/*	cli */
+	XEN_BLOCK_EVENTS(%rsi)
+	jmp retint_kernel
+
+
 
 ENTRY(failsafe_callback)
         popq  %rcx
@@ -228,3 +472,12 @@ ENTRY(exception_table)
         .quad do_alignment_check
         .quad do_machine_check
         .quad do_simd_coprocessor_error
+
+
+ENTRY(thread_starter)
+	popq %rdi
+	popq %rbx
+	call *%rbx
+	call exit_thread
+
+
```
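The restore_all_enable_events path implements the classic Xen race-avoidance dance: unmask event delivery, then re-test the pending flag inside the scrit/ecrit window before the final iret, and loop back into the handler if an event slipped in meanwhile. A C rendering of that control flow (illustrative; the real logic is the assembly above, and the struct here is a stand-in for the vcpu_info fields it touches):

```c
struct vcpu_info_sketch {
    unsigned char evtchn_upcall_pending;
    unsigned char evtchn_upcall_mask;
};
struct pt_regs;
extern void do_hypervisor_callback(struct pt_regs *regs);

/* Unmask delivery, re-check pending, and re-run the handler until
   nothing is pending at the moment of return. */
static void return_enabling_events(struct vcpu_info_sketch *v,
                                   struct pt_regs *regs)
{
    for (;;) {
        v->evtchn_upcall_mask = 0;      /* XEN_UNBLOCK_EVENTS        */
        if (!v->evtchn_upcall_pending)  /* XEN_TEST_PENDING (scrit)  */
            break;                      /* nothing slipped in        */
        v->evtchn_upcall_mask = 1;      /* XEN_LOCKED_BLOCK_EVENTS   */
        do_hypervisor_callback(regs);   /* jmp 11b                   */
    }
    /* ...followed by RESTORE_ARGS + HYPERVISOR_IRET in the real code. */
}
```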