xenbits.xensource.com Git - people/liuw/libxenctrl-split/mini-os.git/commitdiff
bitkeeper revision 1.1177.1.1 (420c983eSjbjDYGF-fYWrXw_L7qkLA)
author kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Fri, 11 Feb 2005 11:34:22 +0000 (11:34 +0000)
committer kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Fri, 11 Feb 2005 11:34:22 +0000 (11:34 +0000)
Mini-OS cleanups. Bug fixes in x86_64 assembly code.
Signed-off-by: keir.fraser@cl.cam.ac.uk
14 files changed:
Makefile
entry.S [deleted file]
h/hypervisor.h
h/mm.h
h/os.h
head.S [deleted file]
kernel.c
lib/math.c
minios-x86_32.lds [new file with mode: 0644]
minios-x86_64.lds [new file with mode: 0644]
minios.lds [deleted file]
traps.c
x86_32.S [new file with mode: 0644]
x86_64.S [new file with mode: 0644]

index 606454b6bb5ed2655e63ebd34543713cc14e9d5e..56bc7f9e5f062983939a010b20cba75fd30b8097 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,31 +2,44 @@
 CC := gcc
 LD := ld
 
-# Linker should relocate monitor to this address
-MONITOR_BASE := 0xE0100000
+TARGET_ARCH := $(shell uname -m | sed -e s/i.86/x86_32/)
 
 # NB. '-Wcast-qual' is nasty, so I omitted it.
-CFLAGS := -fno-builtin -O3 -Wall -Ih/ -Wredundant-decls
-CFLAGS += -Wstrict-prototypes -Wnested-externs -Wpointer-arith -Winline -ansi
+CFLAGS := -fno-builtin -O3 -Wall -Ih/ -Wredundant-decls -Wno-format
+CFLAGS += -Wstrict-prototypes -Wnested-externs -Wpointer-arith -Winline
+
+ifeq ($(TARGET_ARCH),x86_32)
+CFLAGS += -m32 -march=i686
+LDFLAGS := -m elf_i386
+endif
+
+ifeq ($(TARGET_ARCH),x86_64)
+CFLAGS += -m64 -mno-red-zone -fpic -fno-reorder-blocks
+CFLAGS += -fno-asynchronous-unwind-tables
+LDFLAGS := -m elf_x86_64
+endif
 
 TARGET := mini-os
 
-LOBJS := lib/malloc.o lib/math.o lib/printf.o lib/string.o 
-OBJS  := entry.o kernel.o traps.o hypervisor.o mm.o events.o time.o ${LOBJS}
+OBJS := $(TARGET_ARCH).o
+OBJS += $(patsubst %.c,%.o,$(wildcard *.c))
+OBJS += $(patsubst %.c,%.o,$(wildcard lib/*.c))
 
-HINTF := h/xen-public/xen.h
-HDRS  :=  h/os.h h/types.h h/hypervisor.h h/mm.h h/events.h h/time.h h/lib.h
-HDRS  += $(HINTF)
+OBJS := $(subst events.o,,$(OBJS))
+OBJS := $(subst hypervisor.o,,$(OBJS))
+OBJS := $(subst time.o,,$(OBJS))
+
+HDRS := $(wildcard h/*.h)
+HDRS += $(wildcard h/xen-public/*.h)
 
 default: $(TARGET)
 
 xen-public:
-       [ -e h/xen-public] || ln -sf ../../../xen/include/public h/xen-public
+       [ -e h/xen-public ] || ln -sf ../../../xen/include/public h/xen-public
 
-$(TARGET): xen-public head.o $(OBJS)
-       $(LD) -N -T minios.lds head.o $(OBJS) -o $@.elf
-       objcopy -R .note -R .comment $@.elf $@
-       gzip -f -9 -c $@ >$@.gz
+$(TARGET): xen-public $(OBJS)
+       $(LD) -N -T minios-$(TARGET_ARCH).lds $(OBJS) -o $@.elf
+       gzip -f -9 -c $@.elf >$@.gz
 
 clean:
        find . -type f -name '*.o' | xargs rm -f
@@ -38,4 +51,3 @@ clean:
 
 %.o: %.S $(HDRS) Makefile
        $(CC) $(CFLAGS) -D__ASSEMBLY__ -c $< -o $@
-
diff --git a/entry.S b/entry.S
deleted file mode 100644 (file)
index 8565caf..0000000
--- a/entry.S
+++ /dev/null
@@ -1,265 +0,0 @@
-        
-#include <os.h>
-
-ES             = 0x20
-ORIG_EAX       = 0x24
-EIP            = 0x28
-CS             = 0x2C
-
-#define ENTRY(X) .globl X ; X :
-
-#define SAVE_ALL \
-       cld; \
-       pushl %es; \
-       pushl %ds; \
-       pushl %eax; \
-       pushl %ebp; \
-       pushl %edi; \
-       pushl %esi; \
-       pushl %edx; \
-       pushl %ecx; \
-       pushl %ebx; \
-       movl $(__KERNEL_DS),%edx; \
-       movl %edx,%ds; \
-       movl %edx,%es;
-
-#define RESTORE_ALL    \
-       popl %ebx;      \
-       popl %ecx;      \
-       popl %edx;      \
-       popl %esi;      \
-       popl %edi;      \
-       popl %ebp;      \
-       popl %eax;      \
-       popl %ds;       \
-       popl %es;       \
-       addl $4,%esp;   \
-       iret;           \
-
-ENTRY(divide_error)
-       pushl $0                # no error code
-       pushl $do_divide_error
-do_exception:
-       pushl %ds
-       pushl %eax
-       xorl %eax,%eax
-       pushl %ebp
-       pushl %edi
-       pushl %esi
-       pushl %edx
-       decl %eax                       # eax = -1
-       pushl %ecx
-       pushl %ebx
-       cld
-       movl %es,%ecx
-       movl ORIG_EAX(%esp), %esi       # get the error code
-       movl ES(%esp), %edi             # get the function address
-       movl %eax, ORIG_EAX(%esp)
-       movl %ecx, ES(%esp)
-       movl %esp,%edx
-       pushl %esi                      # push the error code
-       pushl %edx                      # push the pt_regs pointer
-       movl $(__KERNEL_DS),%edx
-       movl %edx,%ds
-       movl %edx,%es
-       call *%edi
-       addl $8,%esp
-
-        
-ret_from_exception:
-        movb CS(%esp),%cl
-       test $2,%cl          # slow return to ring 2 or 3
-       jne  safesti
-        RESTORE_ALL
-
-# A note on the "critical region" in our callback handler.
-# We want to avoid stacking callback handlers due to events occurring
-# during handling of the last event. To do this, we keep events disabled
-# until we've done all processing. HOWEVER, we must enable events before
-# popping the stack frame (can't be done atomically) and so it would still
-# be possible to get enough handler activations to overflow the stack.
-# Although unlikely, bugs of that kind are hard to track down, so we'd
-# like to avoid the possibility.
-# So, on entry to the handler we detect whether we interrupted an
-# existing activation in its critical region -- if so, we pop the current
-# activation and restart the handler using the previous one.
-ENTRY(hypervisor_callback)
-        pushl %eax
-        SAVE_ALL
-        movl EIP(%esp),%eax
-        cmpl $scrit,%eax
-        jb   11f
-        cmpl $ecrit,%eax
-        jb   critical_region_fixup
-11:     push %esp
-        call do_hypervisor_callback
-        add  $4,%esp
-        movl HYPERVISOR_shared_info,%esi
-        xorl %eax,%eax
-        movb CS(%esp),%cl
-       test $2,%cl          # slow return to ring 2 or 3
-        jne  safesti
-safesti:btsl $31,4(%esi)     # reenable event callbacks
-scrit:  /**** START OF CRITICAL REGION ****/
-        cmpl %eax,(%esi)
-        jne  14f              # process more events if necessary...
-        RESTORE_ALL
-14:     btrl %eax,4(%esi)
-        jmp  11b
-ecrit:  /**** END OF CRITICAL REGION ****/
-# [How we do the fixup]. We want to merge the current stack frame with the
-# just-interrupted frame. How we do this depends on where in the critical
-# region the interrupted handler was executing, and so how many saved
-# registers are in each frame. We do this quickly using the lookup table
-# 'critical_fixup_table'. For each byte offset in the critical region, it
-# provides the number of bytes which have already been popped from the
-# interrupted stack frame. 
-critical_region_fixup:
-        addl $critical_fixup_table-scrit,%eax
-        movzbl (%eax),%eax    # %eax contains num bytes popped
-        mov  %esp,%esi
-        add  %eax,%esi        # %esi points at end of src region
-        mov  %esp,%edi
-        add  $0x34,%edi       # %edi points at end of dst region
-        mov  %eax,%ecx
-        shr  $2,%ecx          # convert bytes to words
-        je   16f              # skip loop if nothing to copy
-15:     subl $4,%esi          # pre-decrementing copy loop
-        subl $4,%edi
-        movl (%esi),%eax
-        movl %eax,(%edi)
-        loop 15b
-16:     movl %edi,%esp        # final %edi is top of merged stack
-        jmp  11b
-         
-critical_fixup_table:        
-        .byte 0x00,0x00                       # cmpl %eax,(%esi)
-        .byte 0x00,0x00                       # jne  14f
-        .byte 0x00                            # pop  %ebx
-        .byte 0x04                            # pop  %ecx
-        .byte 0x08                            # pop  %edx
-        .byte 0x0c                            # pop  %esi
-        .byte 0x10                            # pop  %edi
-        .byte 0x14                            # pop  %ebp
-        .byte 0x18                            # pop  %eax
-        .byte 0x1c                            # pop  %ds
-        .byte 0x20                            # pop  %es
-        .byte 0x24,0x24,0x24                  # add  $4,%esp
-        .byte 0x28                            # iret
-        .byte 0x00,0x00,0x00,0x00,0x00        # btrl $31,4(%esi)
-        .byte 0x00,0x00                       # jmp  11b
-       
-# Hypervisor uses this for application faults while it executes.
-ENTRY(failsafe_callback)
-      pop  %ds
-      pop  %es
-      pop  %fs
-      pop  %gs
-      iret
-                
-ENTRY(coprocessor_error)
-       pushl $0
-       pushl $do_coprocessor_error
-       jmp do_exception
-
-ENTRY(simd_coprocessor_error)
-       pushl $0
-       pushl $do_simd_coprocessor_error
-       jmp do_exception
-
-ENTRY(device_not_available)
-        iret
-
-ENTRY(debug)
-       pushl $0
-       pushl $do_debug
-       jmp do_exception
-
-ENTRY(int3)
-       pushl $0
-       pushl $do_int3
-       jmp do_exception
-
-ENTRY(overflow)
-       pushl $0
-       pushl $do_overflow
-       jmp do_exception
-
-ENTRY(bounds)
-       pushl $0
-       pushl $do_bounds
-       jmp do_exception
-
-ENTRY(invalid_op)
-       pushl $0
-       pushl $do_invalid_op
-       jmp do_exception
-
-ENTRY(coprocessor_segment_overrun)
-       pushl $0
-       pushl $do_coprocessor_segment_overrun
-       jmp do_exception
-
-ENTRY(double_fault)
-       pushl $do_double_fault
-       jmp do_exception
-
-ENTRY(invalid_TSS)
-       pushl $do_invalid_TSS
-       jmp do_exception
-
-ENTRY(segment_not_present)
-       pushl $do_segment_not_present
-       jmp do_exception
-
-ENTRY(stack_segment)
-       pushl $do_stack_segment
-       jmp do_exception
-
-ENTRY(general_protection)
-       pushl $do_general_protection
-       jmp do_exception
-
-ENTRY(alignment_check)
-       pushl $do_alignment_check
-       jmp do_exception
-
-# This handler is special, because it gets an extra value on its stack,
-# which is the linear faulting address.
-ENTRY(page_fault)
-       pushl %ds
-       pushl %eax
-       xorl %eax,%eax
-       pushl %ebp
-       pushl %edi
-       pushl %esi
-       pushl %edx
-       decl %eax                       # eax = -1
-       pushl %ecx
-       pushl %ebx
-       cld
-       movl %es,%ecx
-       movl ORIG_EAX(%esp), %esi       # get the error code
-       movl ES(%esp), %edi             # get the faulting address
-       movl %eax, ORIG_EAX(%esp)
-       movl %ecx, ES(%esp)
-       movl %esp,%edx
-        pushl %edi                      # push the faulting address
-       pushl %esi                      # push the error code
-       pushl %edx                      # push the pt_regs pointer
-       movl $(__KERNEL_DS),%edx
-       movl %edx,%ds
-       movl %edx,%es
-       call do_page_fault
-       addl $12,%esp
-       jmp ret_from_exception
-
-ENTRY(machine_check)
-       pushl $0
-       pushl $do_machine_check
-       jmp do_exception
-
-ENTRY(spurious_interrupt_bug)
-       pushl $0
-       pushl $do_spurious_interrupt_bug
-       jmp do_exception
diff --git a/h/hypervisor.h b/h/hypervisor.h
index 3a127e1290f9fa50359940a9cac988be67b5e419..fee36d5b9c1b74093448bbefcb339ede329af4c7 100644 (file)
--- a/h/hypervisor.h
+++ b/h/hypervisor.h
@@ -36,13 +36,25 @@ void ack_hypervisor_event(unsigned int ev);
  * Assembler stubs for hyper-calls.
  */
 
+#ifdef __i386__
+#define _a1 "b"
+#define _a2 "c"
+#define _a3 "d"
+#define _a4 "S"
+#else
+#define _a1 "D"
+#define _a2 "S"
+#define _a3 "d"
+#define _a4 "b"
+#endif
+
 static __inline__ int HYPERVISOR_set_trap_table(trap_info_t *table)
 {
     int ret;
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_set_trap_table),
-        "b" (table) : "memory" );
+        _a1 (table) : "memory" );
 
     return ret;
 }
@@ -55,7 +67,7 @@ static __inline__ int HYPERVISOR_mmu_update(mmu_update_t *req,
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_mmu_update), 
-        "b" (req), "c" (count), "d" (success_count)  : "memory" );
+        _a1 (req), _a2 (count), _a3 (success_count)  : "memory" );
 
     return ret;
 }
@@ -66,7 +78,7 @@ static __inline__ int HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_set_gdt), 
-        "b" (frame_list), "c" (entries) : "memory" );
+        _a1 (frame_list), _a2 (entries) : "memory" );
 
 
     return ret;
@@ -78,11 +90,12 @@ static __inline__ int HYPERVISOR_stack_switch(unsigned long ss, unsigned long es
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_stack_switch),
-        "b" (ss), "c" (esp) : "memory" );
+        _a1 (ss), _a2 (esp) : "memory" );
 
     return ret;
 }
 
+#ifdef __i386__
 static __inline__ int HYPERVISOR_set_callbacks(
     unsigned long event_selector, unsigned long event_address,
     unsigned long failsafe_selector, unsigned long failsafe_address)
@@ -91,11 +104,27 @@ static __inline__ int HYPERVISOR_set_callbacks(
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_set_callbacks),
-        "b" (event_selector), "c" (event_address), 
-        "d" (failsafe_selector), "S" (failsafe_address) : "memory" );
+        _a1 (event_selector), _a2 (event_address), 
+        _a3 (failsafe_selector), _a4 (failsafe_address) : "memory" );
+
+    return ret;
+}
+#else
+static __inline__ int HYPERVISOR_set_callbacks(
+    unsigned long event_address,
+    unsigned long failsafe_address,
+    unsigned long syscall_address)
+{
+    int ret;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret) : "0" (__HYPERVISOR_set_callbacks),
+        _a1 (event_address), _a2 (failsafe_address), 
+        _a3 (syscall_address) : "memory" );
 
     return ret;
 }
+#endif
 
 static __inline__ int HYPERVISOR_fpu_taskswitch(void)
 {
@@ -113,7 +142,7 @@ static __inline__ int HYPERVISOR_yield(void)
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
-        "b" (SCHEDOP_yield) : "memory" );
+        _a1 (SCHEDOP_yield) : "memory" );
 
     return ret;
 }
@@ -124,7 +153,7 @@ static __inline__ int HYPERVISOR_block(void)
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
-        "b" (SCHEDOP_block) : "memory" );
+        _a1 (SCHEDOP_block) : "memory" );
 
     return ret;
 }
@@ -135,7 +164,7 @@ static __inline__ int HYPERVISOR_shutdown(void)
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
-        "b" (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
+        _a1 (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
         : "memory" );
 
     return ret;
@@ -147,7 +176,7 @@ static __inline__ int HYPERVISOR_reboot(void)
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
-        "b" (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
+        _a1 (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
         : "memory" );
 
     return ret;
@@ -160,7 +189,7 @@ static __inline__ int HYPERVISOR_suspend(unsigned long srec)
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
-        "b" (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)), 
+        _a1 (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)), 
         "S" (srec) : "memory" );
 
     return ret;
@@ -172,7 +201,7 @@ static __inline__ long HYPERVISOR_set_timer_op(void *timer_arg)
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_set_timer_op),
-        "b" (timer_arg) : "memory" );
+        _a1 (timer_arg) : "memory" );
 
     return ret;
 }
@@ -183,7 +212,7 @@ static __inline__ int HYPERVISOR_dom0_op(void *dom0_op)
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_dom0_op),
-        "b" (dom0_op) : "memory" );
+        _a1 (dom0_op) : "memory" );
 
     return ret;
 }
@@ -194,7 +223,7 @@ static __inline__ int HYPERVISOR_set_debugreg(int reg, unsigned long value)
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_set_debugreg),
-        "b" (reg), "c" (value) : "memory" );
+        _a1 (reg), _a2 (value) : "memory" );
 
     return ret;
 }
@@ -205,7 +234,7 @@ static __inline__ unsigned long HYPERVISOR_get_debugreg(int reg)
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_get_debugreg),
-        "b" (reg) : "memory" );
+        _a1 (reg) : "memory" );
 
     return ret;
 }
@@ -217,7 +246,7 @@ static __inline__ int HYPERVISOR_update_descriptor(
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_update_descriptor), 
-        "b" (pa), "c" (word1), "d" (word2) : "memory" );
+        _a1 (pa), _a2 (word1), _a3 (word2) : "memory" );
 
     return ret;
 }
@@ -228,7 +257,7 @@ static __inline__ int HYPERVISOR_set_fast_trap(int idx)
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_set_fast_trap), 
-        "b" (idx) : "memory" );
+        _a1 (idx) : "memory" );
 
     return ret;
 }
@@ -239,7 +268,7 @@ static __inline__ int HYPERVISOR_dom_mem_op(void *dom_mem_op)
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_dom_mem_op),
-        "b" (dom_mem_op) : "memory" );
+        _a1 (dom_mem_op) : "memory" );
 
     return ret;
 }
@@ -250,7 +279,7 @@ static __inline__ int HYPERVISOR_multicall(void *call_list, int nr_calls)
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_multicall),
-        "b" (call_list), "c" (nr_calls) : "memory" );
+        _a1 (call_list), _a2 (nr_calls) : "memory" );
 
     return ret;
 }
@@ -262,7 +291,7 @@ static __inline__ int HYPERVISOR_update_va_mapping(
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_update_va_mapping), 
-        "b" (page_nr), "c" (new_val), "d" (flags) : "memory" );
+        _a1 (page_nr), _a2 (new_val), _a3 (flags) : "memory" );
 
     return ret;
 }
@@ -273,7 +302,7 @@ static __inline__ int HYPERVISOR_xen_version(int cmd)
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_xen_version), 
-        "b" (cmd) : "memory" );
+        _a1 (cmd) : "memory" );
 
     return ret;
 }
@@ -284,7 +313,7 @@ static __inline__ int HYPERVISOR_console_io(int cmd, int count, char *str)
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_console_io),
-        "b" (cmd), "c" (count), "d" (str) : "memory" );
+        _a1 (cmd), _a2 (count), _a3 (str) : "memory" );
 
     return ret;
 }
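
The _a1.._a4 macros introduced above encode the per-architecture hypercall argument registers: on i386 the constraint letters "b", "c", "d", "S" place arguments in %ebx, %ecx, %edx and %esi, while on x86_64 "D", "S", "d", "b" place them in %rdi, %rsi, %rdx and %rbx. The following is a minimal sketch of how a two-argument stub expands under these macros; the TRAP_INSTR value and the hypercall number are illustrative placeholders (the real definitions come from the xen-public headers), and the stub is only meaningful when executed inside a Xen guest.

    /* Sketch only: per-arch argument-register constraints for a
     * two-argument hypercall stub.  Placeholder values are marked. */
    #ifdef __i386__
    #define _a1 "b"                     /* arg1 -> %ebx */
    #define _a2 "c"                     /* arg2 -> %ecx */
    #else
    #define _a1 "D"                     /* arg1 -> %rdi */
    #define _a2 "S"                     /* arg2 -> %rsi */
    #endif

    #define TRAP_INSTR "int $0x82"      /* placeholder trap instruction */
    #define __HYPERVISOR_example_op 0   /* hypothetical hypercall number */

    static inline int hypercall2(unsigned long arg1, unsigned long arg2)
    {
        int ret;
        __asm__ __volatile__ (
            TRAP_INSTR
            : "=a" (ret) : "0" (__HYPERVISOR_example_op),
              _a1 (arg1), _a2 (arg2) : "memory" );
        return ret;
    }
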
diff --git a/h/mm.h b/h/mm.h
index e12b56495dc231ca1dd7f03a85e27c83430639a1..5c6224db286032ae6d3ccf32946ba6860103ade2 100644 (file)
--- a/h/mm.h
+++ b/h/mm.h
@@ -64,7 +64,11 @@ static __inline__ unsigned long machine_to_phys(unsigned long machine)
     return phys;
 }
 
+#ifdef __x86_64__
+#define VIRT_START              0xFFFFFFFF00000000UL
+#else
 #define VIRT_START              0xC0000000UL
+#endif
 
 #define to_phys(x)                 ((unsigned long)(x)-VIRT_START)
 #define to_virt(x)                 ((void *)((unsigned long)(x)+VIRT_START))
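
The per-architecture VIRT_START above places the whole of Mini-OS at a fixed virtual offset, so the to_phys()/to_virt() conversions are plain pointer arithmetic. A small self-contained sketch of that arithmetic (the sample physical address is arbitrary, and nothing is dereferenced):

    #include <stdio.h>

    #ifdef __x86_64__
    #define VIRT_START 0xFFFFFFFF00000000UL
    #else
    #define VIRT_START 0xC0000000UL
    #endif

    #define to_phys(x) ((unsigned long)(x) - VIRT_START)
    #define to_virt(x) ((void *)((unsigned long)(x) + VIRT_START))

    int main(void)
    {
        unsigned long pa = 0x100000UL;   /* arbitrary physical address */
        void *va = to_virt(pa);          /* its virtual alias */
        printf("pa=%#lx -> va=%p -> pa=%#lx\n", pa, va, to_phys(va));
        return 0;
    }
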
diff --git a/h/os.h b/h/os.h
index cf15d9212629a89b8e2ba8c9c86b3160122ebbd3..6a0447ca39feac140a7af3126c0b4b1a6196b34f 100644 (file)
--- a/h/os.h
+++ b/h/os.h
 
 #define NULL 0
 
-/* Somewhere in the middle of the GCC 2.96 development cycle, we implemented
-   a mechanism by which the user can annotate likely branch directions and
-   expect the blocks to be reordered appropriately.  Define __builtin_expect
-   to nothing for earlier compilers.  */
-
 #if __GNUC__ == 2 && __GNUC_MINOR__ < 96
 #define __builtin_expect(x, expected_value) (x)
 #endif
+#define unlikely(x)  __builtin_expect((x),0)
 
-/*
- * These are the segment descriptors provided for us by the hypervisor.
- * For now, these are hardwired -- guest OSes cannot update the GDT
- * or LDT.
- * 
- * It shouldn't be hard to support descriptor-table frobbing -- let me 
- * know if the BSD or XP ports require flexibility here.
- */
-
+#define smp_processor_id() 0
+#define preempt_disable() ((void)0)
+#define preempt_enable() ((void)0)
 
-/*
- * these are also defined in xen-public/xen.h but can't be pulled in as
- * they are used in start of day assembly. Need to clean up the .h files
- * a bit more...
- */
+#define force_evtchn_callback() ((void)HYPERVISOR_xen_version(0))
 
-#ifndef FLAT_RING1_CS
-#define FLAT_RING1_CS          0x0819
-#define FLAT_RING1_DS          0x0821
-#define FLAT_RING3_CS          0x082b
-#define FLAT_RING3_DS          0x0833
+#ifndef __ASSEMBLY__
+#include <types.h>
 #endif
+#include <xen-public/xen.h>
 
-#define __KERNEL_CS        FLAT_RING1_CS
-#define __KERNEL_DS        FLAT_RING1_DS
+#define __KERNEL_CS  FLAT_KERNEL_CS
+#define __KERNEL_DS  FLAT_KERNEL_DS
+#define __KERNEL_SS  FLAT_KERNEL_SS
 
 /* Everything below this point is not included by assembler (.S) files. */
 #ifndef __ASSEMBLY__
 
-#include <types.h>
-#include <xen-public/xen.h>
+#define pt_regs xen_regs
 
-
-/* this struct defines the way the registers are stored on the 
-   stack during an exception or interrupt. */
-struct pt_regs {
-       long ebx;
-       long ecx;
-       long edx;
-       long esi;
-       long edi;
-       long ebp;
-       long eax;
-       int  xds;
-       int  xes;
-       long orig_eax;
-       long eip;
-       int  xcs;
-       long eflags;
-       long esp;
-       int  xss;
-};
-
-/* some function prototypes */
 void trap_init(void);
 void dump_regs(struct pt_regs *regs);
 
-
-/*
- * STI/CLI equivalents. These basically set and clear the virtual
- * event_enable flag in teh shared_info structure. Note that when
- * the enable bit is set, there may be pending events to be handled.
- * We may therefore call into do_hypervisor_callback() directly.
+/* 
+ * The use of 'barrier' in the following reflects their use as local-lock
+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
+ * critical operations are executed. All critical operations must complete
+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
+ * includes these barriers, for example.
  */
-#define unlikely(x)  __builtin_expect((x),0)
-#define __save_flags(x)                                                       \
-do {                                                                          \
-    (x) = test_bit(EVENTS_MASTER_ENABLE_BIT,                                  \
-                   &HYPERVISOR_shared_info->events_mask);                     \
-    barrier();                                                                \
+
+#define __cli()                                                                \
+do {                                                                   \
+       vcpu_info_t *_vcpu;                                             \
+       preempt_disable();                                              \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+       _vcpu->evtchn_upcall_mask = 1;                                  \
+       preempt_enable_no_resched();                                    \
+       barrier();                                                      \
+} while (0)
+
+#define __sti()                                                                \
+do {                                                                   \
+       vcpu_info_t *_vcpu;                                             \
+       barrier();                                                      \
+       preempt_disable();                                              \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+       _vcpu->evtchn_upcall_mask = 0;                                  \
+       barrier(); /* unmask then check (avoid races) */                \
+       if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
+               force_evtchn_callback();                                \
+       preempt_enable();                                               \
 } while (0)
 
-#define __restore_flags(x)                                                    \
-do {                                                                          \
-    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
-    if (x) set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask);          \
-    barrier();                                                                \
-    if ( unlikely(_shared->events) && (x) ) do_hypervisor_callback(NULL);     \
+#define __save_flags(x)                                                        \
+do {                                                                   \
+       vcpu_info_t *_vcpu;                                             \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+       (x) = _vcpu->evtchn_upcall_mask;                                \
 } while (0)
 
-#define __cli()                                                               \
-do {                                                                          \
-    clear_bit(EVENTS_MASTER_ENABLE_BIT, &HYPERVISOR_shared_info->events_mask);\
-    barrier();                                                                \
+#define __restore_flags(x)                                             \
+do {                                                                   \
+       vcpu_info_t *_vcpu;                                             \
+       barrier();                                                      \
+       preempt_disable();                                              \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+       if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
+               barrier(); /* unmask then check (avoid races) */        \
+               if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
+                       force_evtchn_callback();                        \
+               preempt_enable();                                       \
+       } else                                                          \
+               preempt_enable_no_resched();                            \
 } while (0)
 
-#define __sti()                                                               \
-do {                                                                          \
-    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
-    set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask);                 \
-    barrier();                                                                \
-    if ( unlikely(_shared->events) ) do_hypervisor_callback(NULL);            \
+#define safe_halt()            ((void)0)
+
+#define __save_and_cli(x)                                              \
+do {                                                                   \
+       vcpu_info_t *_vcpu;                                             \
+       preempt_disable();                                              \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
+       (x) = _vcpu->evtchn_upcall_mask;                                \
+       _vcpu->evtchn_upcall_mask = 1;                                  \
+       preempt_enable_no_resched();                                    \
+       barrier();                                                      \
 } while (0)
-#define cli() __cli()
-#define sti() __sti()
-#define save_flags(x) __save_flags(x)
-#define restore_flags(x) __restore_flags(x)
-#define save_and_cli(x) __save_and_cli(x)
-#define save_and_sti(x) __save_and_sti(x)
 
+#define local_irq_save(x)      __save_and_cli(x)
+#define local_irq_restore(x)   __restore_flags(x)
+#define local_save_flags(x)    __save_flags(x)
+#define local_irq_disable()    __cli()
+#define local_irq_enable()     __sti()
 
+#define irqs_disabled()                        \
+    HYPERVISOR_shared_info->vcpu_data[smp_processor_id()].evtchn_upcall_mask
 
 /* This is a barrier for the compiler only, NOT the processor! */
 #define barrier() __asm__ __volatile__("": : :"memory")
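
The rewritten __cli()/__sti() replace the old global event mask with the per-VCPU evtchn_upcall_mask, and the "unmask then check" comment is the crux: __sti() must clear the mask first and only then test evtchn_upcall_pending, otherwise an event arriving between the two steps could be lost until the next unrelated upcall. The following self-contained model illustrates only that ordering; the struct and the force_evtchn_callback() stand-in are simplified assumptions, not the real shared-info layout.

    #include <stdio.h>

    struct fake_vcpu_info {
        unsigned char evtchn_upcall_pending;
        unsigned char evtchn_upcall_mask;
    };

    static struct fake_vcpu_info vcpu = { 0, 1 };   /* start with events masked */

    static void force_evtchn_callback(void)
    {
        /* Mini-OS issues a harmless hypercall here so the hypervisor
         * re-delivers the pending upcall; this model just logs it. */
        printf("delivering event that arrived while masked\n");
        vcpu.evtchn_upcall_pending = 0;
    }

    static void sti_model(void)
    {
        vcpu.evtchn_upcall_mask = 0;        /* unmask first ...           */
        /* barrier() sits here in the real macro */
        if (vcpu.evtchn_upcall_pending)     /* ... then check, so nothing */
            force_evtchn_callback();        /* that slipped in is dropped */
    }

    int main(void)
    {
        vcpu.evtchn_upcall_pending = 1;     /* event arrived while masked */
        sti_model();
        return 0;
    }
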
diff --git a/head.S b/head.S
deleted file mode 100644 (file)
index 52eae8f..0000000
--- a/head.S
+++ /dev/null
@@ -1,18 +0,0 @@
-#include <os.h>
-
-.globl _start, shared_info
-                        
-_start:
-        cld
-        lss stack_start,%esp
-        push %esi 
-        call start_kernel
-
-stack_start:
-       .long stack+8192, __KERNEL_DS
-
-        /* Unpleasant -- the PTE that maps this page is actually overwritten */
-        /* to map the real shared-info page! :-)                             */
-        .org 0x1000
-shared_info:
-        .org 0x2000
diff --git a/kernel.c b/kernel.c
index a9f423c1920b4fd3e16118dd6d8d8ee3aeea7aa6..a794145e68686e13075c1143e6760445010a5677 100644 (file)
--- a/kernel.c
+++ b/kernel.c
@@ -64,8 +64,8 @@ extern char shared_info[PAGE_SIZE];
 
 static shared_info_t *map_shared_info(unsigned long pa)
 {
-    if ( HYPERVISOR_update_va_mapping((unsigned long)shared_info >> PAGE_SHIFT,
-                                      pa | 3, UVMF_INVLPG) )
+    if ( HYPERVISOR_update_va_mapping(
+        (unsigned long)shared_info, pa | 3, UVMF_INVLPG) )
     {
         printk("Failed to map shared_info!!\n");
         *(int*)0=0;
@@ -79,6 +79,9 @@ static shared_info_t *map_shared_info(unsigned long pa)
  */
 void start_kernel(start_info_t *si)
 {
+    static char hello[] = "Bootstrapping...\n";
+    (void)HYPERVISOR_console_io(CONSOLEIO_write, strlen(hello), hello);
+
     /* Copy the start_info struct to a globally-accessible area. */
     memcpy(&start_info, si, sizeof(*si));
 
@@ -86,9 +89,15 @@ void start_kernel(start_info_t *si)
     HYPERVISOR_shared_info = map_shared_info(start_info.shared_info);
 
     /* Set up event and failsafe callback addresses. */
+#ifdef __i386__
     HYPERVISOR_set_callbacks(
         __KERNEL_CS, (unsigned long)hypervisor_callback,
         __KERNEL_CS, (unsigned long)failsafe_callback);
+#else
+    HYPERVISOR_set_callbacks(
+        (unsigned long)hypervisor_callback,
+        (unsigned long)failsafe_callback, 0);
+#endif
 
     trap_init();
 
@@ -117,7 +126,7 @@ void start_kernel(start_info_t *si)
     init_mm();
 
     /* set up events */
-    init_events();
+//    init_events();
 
     /*
      * These need to be replaced with event-channel/control-interface
@@ -135,7 +144,7 @@ void start_kernel(start_info_t *si)
 #endif
 
     /* init time and timers */
-    init_time();
+//    init_time();
 
     /* do nothing */
     for ( ; ; ) HYPERVISOR_yield();
diff --git a/lib/math.c b/lib/math.c
index be08740965ebb89a91c0ec6e563bcac2c68f8b1c..8e97be6d1819c71fcdfea1ee6797910278fb6730 100644 (file)
--- a/lib/math.c
+++ b/lib/math.c
@@ -96,14 +96,14 @@ union uu {
  * (sizeof(long)*CHAR_BIT/2).
  */
 #define HHALF(x)        ((x) >> HALF_BITS)
-#define LHALF(x)        ((x) & ((1 << HALF_BITS) - 1))
+#define LHALF(x)        ((x) & ((1UL << HALF_BITS) - 1))
 #define LHUP(x)         ((x) << HALF_BITS)
 
 /*
  * Multiprecision divide.  This algorithm is from Knuth vol. 2 (2nd ed),
  * section 4.3.1, pp. 257--259.
  */
-#define        B       (1 << HALF_BITS)        /* digit base */
+#define        B       (1UL << HALF_BITS)      /* digit base */
 
 /* Combine two `digits' to make a single two-digit number. */
 #define        COMBINE(a, b) (((u_long)(a) << HALF_BITS) | (b))
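
The 1 -> 1UL changes matter once longs are 64 bits wide: HALF_BITS is then 32, and (1 << 32) on a plain int is undefined, whereas 1UL keeps the digit base B and the LHALF() mask in unsigned long arithmetic. A self-contained demo of the half-digit macros (u_long written out as unsigned long here):

    #include <stdio.h>
    #include <limits.h>

    #define HALF_BITS     (sizeof(unsigned long) * CHAR_BIT / 2)
    #define HHALF(x)      ((x) >> HALF_BITS)
    #define LHALF(x)      ((x) & ((1UL << HALF_BITS) - 1))
    #define COMBINE(a, b) (((unsigned long)(a) << HALF_BITS) | (b))

    int main(void)
    {
        unsigned long x = ~0UL / 3;   /* 0x5555... pattern on any word size */
        printf("x          = %#lx\n", x);
        printf("high half  = %#lx, low half = %#lx\n", HHALF(x), LHALF(x));
        printf("recombined = %#lx\n", COMBINE(HHALF(x), LHALF(x)));
        return 0;
    }
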
diff --git a/minios-x86_32.lds b/minios-x86_32.lds
new file mode 100644 (file)
index 0000000..a53504e
--- /dev/null
+++ b/minios-x86_32.lds
@@ -0,0 +1,54 @@
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+SECTIONS
+{
+  . = 0xC0000000;
+  _text = .;                   /* Text and read-only data */
+  .text : {
+       *(.text)
+       *(.gnu.warning)
+       } = 0x9090
+
+  _etext = .;                  /* End of text section */
+
+  .rodata : { *(.rodata) *(.rodata.*) }
+
+  .data : {                    /* Data */
+       *(.data)
+       CONSTRUCTORS
+       }
+
+  _edata = .;                  /* End of data section */
+
+  . = ALIGN(8192);             /* init_task */
+  .data.init_task : { *(.data.init_task) }
+
+  . = ALIGN(4096);
+  .data.page_aligned : { *(.data.idt) }
+
+  . = ALIGN(32);
+  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+  __bss_start = .;             /* BSS */
+  .bss : {
+       *(.bss)
+       }
+  _end = . ;
+
+  /* Sections to be discarded */
+  /DISCARD/ : {
+       *(.text.exit)
+       *(.data.exit)
+       *(.exitcall.exit)
+       }
+
+  /* Stabs debugging sections.  */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment 0 : { *(.comment) }
+}
diff --git a/minios-x86_64.lds b/minios-x86_64.lds
new file mode 100644 (file)
index 0000000..71b6113
--- /dev/null
+++ b/minios-x86_64.lds
@@ -0,0 +1,54 @@
+OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
+OUTPUT_ARCH(i386:x86-64)
+ENTRY(_start)
+SECTIONS
+{
+  . = 0xFFFFFFFF00000000;
+  _text = .;                   /* Text and read-only data */
+  .text : {
+       *(.text)
+       *(.gnu.warning)
+       } = 0x9090
+
+  _etext = .;                  /* End of text section */
+
+  .rodata : { *(.rodata) *(.rodata.*) }
+
+  .data : {                    /* Data */
+       *(.data)
+       CONSTRUCTORS
+       }
+
+  _edata = .;                  /* End of data section */
+
+  . = ALIGN(8192);             /* init_task */
+  .data.init_task : { *(.data.init_task) }
+
+  . = ALIGN(4096);
+  .data.page_aligned : { *(.data.idt) }
+
+  . = ALIGN(32);
+  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+  __bss_start = .;             /* BSS */
+  .bss : {
+       *(.bss)
+       }
+  _end = . ;
+
+  /* Sections to be discarded */
+  /DISCARD/ : {
+       *(.text.exit)
+       *(.data.exit)
+       *(.exitcall.exit)
+       }
+
+  /* Stabs debugging sections.  */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment 0 : { *(.comment) }
+}
diff --git a/minios.lds b/minios.lds
deleted file mode 100644 (file)
index a53504e..0000000
--- a/minios.lds
+++ /dev/null
@@ -1,54 +0,0 @@
-OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
-OUTPUT_ARCH(i386)
-ENTRY(_start)
-SECTIONS
-{
-  . = 0xC0000000;
-  _text = .;                   /* Text and read-only data */
-  .text : {
-       *(.text)
-       *(.gnu.warning)
-       } = 0x9090
-
-  _etext = .;                  /* End of text section */
-
-  .rodata : { *(.rodata) *(.rodata.*) }
-
-  .data : {                    /* Data */
-       *(.data)
-       CONSTRUCTORS
-       }
-
-  _edata = .;                  /* End of data section */
-
-  . = ALIGN(8192);             /* init_task */
-  .data.init_task : { *(.data.init_task) }
-
-  . = ALIGN(4096);
-  .data.page_aligned : { *(.data.idt) }
-
-  . = ALIGN(32);
-  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
-
-  __bss_start = .;             /* BSS */
-  .bss : {
-       *(.bss)
-       }
-  _end = . ;
-
-  /* Sections to be discarded */
-  /DISCARD/ : {
-       *(.text.exit)
-       *(.data.exit)
-       *(.exitcall.exit)
-       }
-
-  /* Stabs debugging sections.  */
-  .stab 0 : { *(.stab) }
-  .stabstr 0 : { *(.stabstr) }
-  .stab.excl 0 : { *(.stab.excl) }
-  .stab.exclstr 0 : { *(.stab.exclstr) }
-  .stab.index 0 : { *(.stab.index) }
-  .stab.indexstr 0 : { *(.stab.indexstr) }
-  .comment 0 : { *(.comment) }
-}
diff --git a/traps.c b/traps.c
index c0ef3350250ff6a04b6c335ee5b20054a4826440..858c54d3f23b75ae06300727895caf5b45fbed3a 100644 (file)
--- a/traps.c
+++ b/traps.c
@@ -33,38 +33,38 @@ extern void do_exit(void);
 
 void dump_regs(struct pt_regs *regs)
 {
-       int in_kernel = 1;
-       unsigned long esp;
-       unsigned short ss;
-
-       esp = (unsigned long) (&regs->esp);
-       ss = __KERNEL_DS;
-       if (regs->xcs & 2) {
-               in_kernel = 0;
-               esp = regs->esp;
-               ss = regs->xss & 0xffff;
-       }
-       printf("EIP:    %04x:[<%08lx>]\n",
-              0xffff & regs->xcs, regs->eip);
-       printf("EFLAGS: %08lx\n",regs->eflags);
-       printf("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
-               regs->eax, regs->ebx, regs->ecx, regs->edx);
-       printf("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
-               regs->esi, regs->edi, regs->ebp, esp);
-       printf("ds: %04x   es: %04x   ss: %04x\n",
-               regs->xds & 0xffff, regs->xes & 0xffff, ss);
-       printf("\n");
+    int in_kernel = 1;
+    unsigned long esp;
+    unsigned short ss;
+
+    esp = (unsigned long) (&regs->esp);
+    ss = __KERNEL_DS;
+    if (regs->cs & 2) {
+        in_kernel = 0;
+        esp = regs->esp;
+        ss = regs->ss & 0xffff;
+    }
+    printf("EIP:    %04x:[<%p>]\n",
+           0xffff & regs->cs , regs->eip);
+    printf("EFLAGS: %p\n",regs->eflags);
+    printf("eax: %p   ebx: %p   ecx: %p   edx: %p\n",
+           regs->eax, regs->ebx, regs->ecx, regs->edx);
+    printf("esi: %p   edi: %p   ebp: %p   esp: %p\n",
+           regs->esi, regs->edi, regs->ebp, esp);
+    printf("ds: %04x   es: %04x   ss: %04x\n",
+           regs->ds & 0xffff, regs->es & 0xffff, ss);
+    printf("\n");
 }      
 
 
-static __inline__ void dump_code(unsigned eip)
+static __inline__ void dump_code(unsigned long eip)
 {
-  unsigned *ptr = (unsigned *)eip;
-  int x;
-
-  printk("Bytes at eip:\n");
-  for (x = -4; x < 5; x++)
-      printf("%x", ptr[x]);
+    unsigned *ptr = (unsigned *)eip;
+    int x;
+    
+    printk("Bytes at eip:\n");
+    for (x = -4; x < 5; x++)
+        printf("%p", ptr[x]);
 }
 
 
@@ -81,14 +81,14 @@ static __inline__ void dump_code(unsigned eip)
  */
 
 static void __inline__ do_trap(int trapnr, char *str,
-                          struct pt_regs * regs, long error_code)
+                               struct pt_regs * regs, long error_code)
 {
-  printk("FATAL:  Unhandled Trap (see mini-os:traps.c)");
-  printf("%d %s", trapnr, str);
-  dump_regs(regs);
-  dump_code(regs->eip);
+    printk("FATAL:  Unhandled Trap (see mini-os:traps.c)");
+    printf("%d %s", trapnr, str);
+    dump_regs(regs);
+    dump_code(regs->eip);
 
-  do_exit();
+    do_exit();
 }
 
 #define DO_ERROR(trapnr, str, name) \
@@ -104,38 +104,36 @@ void do_##name(struct pt_regs * regs, long error_code) \
 }
 
 DO_ERROR_INFO( 0, "divide error", divide_error, FPE_INTDIV, regs->eip)
-DO_ERROR( 3, "int3", int3)
-DO_ERROR( 4, "overflow", overflow)
-DO_ERROR( 5, "bounds", bounds)
-DO_ERROR_INFO( 6, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
-DO_ERROR( 7, "device not available", device_not_available)
-DO_ERROR( 8, "double fault", double_fault)
-DO_ERROR( 9, "coprocessor segment overrun", coprocessor_segment_overrun)
-DO_ERROR(10, "invalid TSS", invalid_TSS)
-DO_ERROR(11, "segment not present", segment_not_present)
-DO_ERROR(12, "stack segment", stack_segment)
-DO_ERROR_INFO(17, "alignment check", alignment_check, BUS_ADRALN, 0)
-DO_ERROR(18, "machine check", machine_check)
-
-void do_page_fault(struct pt_regs *regs, long error_code,
-                   unsigned long address)
+    DO_ERROR( 3, "int3", int3)
+    DO_ERROR( 4, "overflow", overflow)
+    DO_ERROR( 5, "bounds", bounds)
+    DO_ERROR_INFO( 6, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
+    DO_ERROR( 7, "device not available", device_not_available)
+    DO_ERROR( 8, "double fault", double_fault)
+    DO_ERROR( 9, "coprocessor segment overrun", coprocessor_segment_overrun)
+    DO_ERROR(10, "invalid TSS", invalid_TSS)
+    DO_ERROR(11, "segment not present", segment_not_present)
+    DO_ERROR(12, "stack segment", stack_segment)
+    DO_ERROR_INFO(17, "alignment check", alignment_check, BUS_ADRALN, 0)
+    DO_ERROR(18, "machine check", machine_check)
+
+    void do_page_fault(struct pt_regs *regs, long error_code,
+                       unsigned long address)
 {
     printk("Page fault\n");
-    printk("Address: 0x%lx", address);
-    printk("Error Code: 0x%lx", error_code);
-    printk("eip: \t 0x%lx", regs->eip);
+    printk("Address: 0x%p", address);
+    printk("Error Code: 0x%p", error_code);
+    printk("eip: \t 0x%p", regs->eip);
     do_exit();
 }
 
 void do_general_protection(struct pt_regs * regs, long error_code)
 {
-
-  HYPERVISOR_shared_info->events_mask = 0;
-  printk("GPF\n");
-  printk("Error Code: 0x%lx", error_code);
-  dump_regs(regs);
-  dump_code(regs->eip);
-  do_exit();
+    printk("GPF\n");
+    printk("Error Code: 0x%p", error_code);
+    dump_regs(regs);
+    dump_code(regs->eip);
+    do_exit();
 }
 
 
@@ -180,26 +178,29 @@ void do_spurious_interrupt_bug(struct pt_regs * regs,
  * The 'privilege ring' field specifies the least-privileged ring that
  * can trap to that vector using a software-interrupt instruction (INT).
  */
+#ifdef __x86_64__
+#define _P 0,
+#endif
 static trap_info_t trap_table[] = {
-    {  0, 0, __KERNEL_CS, (unsigned long)divide_error                },
-    {  1, 0, __KERNEL_CS, (unsigned long)debug                       },
-    {  3, 3, __KERNEL_CS, (unsigned long)int3                        },
-    {  4, 3, __KERNEL_CS, (unsigned long)overflow                    },
-    {  5, 3, __KERNEL_CS, (unsigned long)bounds                      },
-    {  6, 0, __KERNEL_CS, (unsigned long)invalid_op                  },
-    {  7, 0, __KERNEL_CS, (unsigned long)device_not_available        },
-    {  8, 0, __KERNEL_CS, (unsigned long)double_fault                },
-    {  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
-    { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS                 },
-    { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present         },
-    { 12, 0, __KERNEL_CS, (unsigned long)stack_segment               },
-    { 13, 0, __KERNEL_CS, (unsigned long)general_protection          },
-    { 14, 0, __KERNEL_CS, (unsigned long)page_fault                  },
-    { 15, 0, __KERNEL_CS, (unsigned long)spurious_interrupt_bug      },
-    { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error           },
-    { 17, 0, __KERNEL_CS, (unsigned long)alignment_check             },
-    { 18, 0, __KERNEL_CS, (unsigned long)machine_check               },
-    { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error      },
+    {  0, 0, __KERNEL_CS, _P (unsigned long)divide_error                },
+    {  1, 0, __KERNEL_CS, _P (unsigned long)debug                       },
+    {  3, 3, __KERNEL_CS, _P (unsigned long)int3                        },
+    {  4, 3, __KERNEL_CS, _P (unsigned long)overflow                    },
+    {  5, 3, __KERNEL_CS, _P (unsigned long)bounds                      },
+    {  6, 0, __KERNEL_CS, _P (unsigned long)invalid_op                  },
+    {  7, 0, __KERNEL_CS, _P (unsigned long)device_not_available        },
+    {  8, 0, __KERNEL_CS, _P (unsigned long)double_fault                },
+    {  9, 0, __KERNEL_CS, _P (unsigned long)coprocessor_segment_overrun },
+    { 10, 0, __KERNEL_CS, _P (unsigned long)invalid_TSS                 },
+    { 11, 0, __KERNEL_CS, _P (unsigned long)segment_not_present         },
+    { 12, 0, __KERNEL_CS, _P (unsigned long)stack_segment               },
+    { 13, 0, __KERNEL_CS, _P (unsigned long)general_protection          },
+    { 14, 0, __KERNEL_CS, _P (unsigned long)page_fault                  },
+    { 15, 0, __KERNEL_CS, _P (unsigned long)spurious_interrupt_bug      },
+    { 16, 0, __KERNEL_CS, _P (unsigned long)coprocessor_error           },
+    { 17, 0, __KERNEL_CS, _P (unsigned long)alignment_check             },
+    { 18, 0, __KERNEL_CS, _P (unsigned long)machine_check               },
+    { 19, 0, __KERNEL_CS, _P (unsigned long)simd_coprocessor_error      },
     {  0, 0,           0, 0                           }
 };
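
The _P macro papers over a layout difference in trap_info_t: on x86_64 each entry of the table needs one extra zero field ahead of the handler address, while on i386 _P expands to nothing and the initializers are unchanged. A self-contained sketch of the same trick, using a hypothetical stand-in struct (field names and sample values are invented for illustration):

    #include <stdio.h>

    struct fake_trap_info {
        unsigned char  vector;
        unsigned char  dpl;       /* least-privileged ring allowed to INT here */
        unsigned short cs;
    #ifdef __x86_64__
        unsigned short pad;       /* the field _P supplies a 0 for */
    #endif
        unsigned long  address;
    };

    #ifdef __x86_64__
    #define _P 0,
    #else
    #define _P
    #endif

    static struct fake_trap_info table[] = {
        { 13, 0, 0x10, _P 0xdeadbeefUL },   /* made-up GP-fault entry */
        {  0, 0,    0, _P 0            },   /* terminator */
    };

    int main(void)
    {
        printf("vector %d handled at %#lx\n", table[0].vector, table[0].address);
        return 0;
    }
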
     
diff --git a/x86_32.S b/x86_32.S
new file mode 100644 (file)
index 0000000..d193b12
--- /dev/null
+++ b/x86_32.S
@@ -0,0 +1,285 @@
+#include <os.h>
+
+.section __xen_guest
+        .asciz  "XEN_VER=2.0,LOADER=generic,PT_MODE_WRITABLE"
+.text
+
+.globl _start, shared_info
+                        
+_start:
+        cld
+        lss stack_start,%esp
+        push %esi 
+        call start_kernel
+
+stack_start:
+       .long stack+8192, __KERNEL_SS
+
+        /* Unpleasant -- the PTE that maps this page is actually overwritten */
+        /* to map the real shared-info page! :-)                             */
+        .org 0x1000
+shared_info:
+        .org 0x2000
+        
+ES             = 0x20
+ORIG_EAX       = 0x24
+EIP            = 0x28
+CS             = 0x2C
+
+#define ENTRY(X) .globl X ; X :
+
+#define SAVE_ALL \
+       cld; \
+       pushl %es; \
+       pushl %ds; \
+       pushl %eax; \
+       pushl %ebp; \
+       pushl %edi; \
+       pushl %esi; \
+       pushl %edx; \
+       pushl %ecx; \
+       pushl %ebx; \
+       movl $(__KERNEL_DS),%edx; \
+       movl %edx,%ds; \
+       movl %edx,%es;
+
+#define RESTORE_ALL    \
+       popl %ebx;      \
+       popl %ecx;      \
+       popl %edx;      \
+       popl %esi;      \
+       popl %edi;      \
+       popl %ebp;      \
+       popl %eax;      \
+       popl %ds;       \
+       popl %es;       \
+       addl $4,%esp;   \
+       iret;           \
+
+ENTRY(divide_error)
+       pushl $0                # no error code
+       pushl $do_divide_error
+do_exception:
+       pushl %ds
+       pushl %eax
+       xorl %eax,%eax
+       pushl %ebp
+       pushl %edi
+       pushl %esi
+       pushl %edx
+       decl %eax                       # eax = -1
+       pushl %ecx
+       pushl %ebx
+       cld
+       movl %es,%ecx
+       movl ORIG_EAX(%esp), %esi       # get the error code
+       movl ES(%esp), %edi             # get the function address
+       movl %eax, ORIG_EAX(%esp)
+       movl %ecx, ES(%esp)
+       movl %esp,%edx
+       pushl %esi                      # push the error code
+       pushl %edx                      # push the pt_regs pointer
+       movl $(__KERNEL_DS),%edx
+       movl %edx,%ds
+       movl %edx,%es
+       call *%edi
+       addl $8,%esp
+
+        
+ret_from_exception:
+        movb CS(%esp),%cl
+       test $2,%cl          # slow return to ring 2 or 3
+       jne  safesti
+        RESTORE_ALL
+
+# A note on the "critical region" in our callback handler.
+# We want to avoid stacking callback handlers due to events occurring
+# during handling of the last event. To do this, we keep events disabled
+# until we've done all processing. HOWEVER, we must enable events before
+# popping the stack frame (can't be done atomically) and so it would still
+# be possible to get enough handler activations to overflow the stack.
+# Although unlikely, bugs of that kind are hard to track down, so we'd
+# like to avoid the possibility.
+# So, on entry to the handler we detect whether we interrupted an
+# existing activation in its critical region -- if so, we pop the current
+# activation and restart the handler using the previous one.
+ENTRY(hypervisor_callback)
+        pushl %eax
+        SAVE_ALL
+        movl EIP(%esp),%eax
+        cmpl $scrit,%eax
+        jb   11f
+        cmpl $ecrit,%eax
+        jb   critical_region_fixup
+11:     push %esp
+#        call do_hypervisor_callback
+        add  $4,%esp
+        movl HYPERVISOR_shared_info,%esi
+        xorl %eax,%eax
+        movb CS(%esp),%cl
+       test $2,%cl          # slow return to ring 2 or 3
+        jne  safesti
+safesti:btsl $31,4(%esi)     # reenable event callbacks
+scrit:  /**** START OF CRITICAL REGION ****/
+        cmpl %eax,(%esi)
+        jne  14f              # process more events if necessary...
+        RESTORE_ALL
+14:     btrl %eax,4(%esi)
+        jmp  11b
+ecrit:  /**** END OF CRITICAL REGION ****/
+# [How we do the fixup]. We want to merge the current stack frame with the
+# just-interrupted frame. How we do this depends on where in the critical
+# region the interrupted handler was executing, and so how many saved
+# registers are in each frame. We do this quickly using the lookup table
+# 'critical_fixup_table'. For each byte offset in the critical region, it
+# provides the number of bytes which have already been popped from the
+# interrupted stack frame. 
+critical_region_fixup:
+        addl $critical_fixup_table-scrit,%eax
+        movzbl (%eax),%eax    # %eax contains num bytes popped
+        mov  %esp,%esi
+        add  %eax,%esi        # %esi points at end of src region
+        mov  %esp,%edi
+        add  $0x34,%edi       # %edi points at end of dst region
+        mov  %eax,%ecx
+        shr  $2,%ecx          # convert bytes to words
+        je   16f              # skip loop if nothing to copy
+15:     subl $4,%esi          # pre-decrementing copy loop
+        subl $4,%edi
+        movl (%esi),%eax
+        movl %eax,(%edi)
+        loop 15b
+16:     movl %edi,%esp        # final %edi is top of merged stack
+        jmp  11b
+         
+critical_fixup_table:        
+        .byte 0x00,0x00                       # cmpl %eax,(%esi)
+        .byte 0x00,0x00                       # jne  14f
+        .byte 0x00                            # pop  %ebx
+        .byte 0x04                            # pop  %ecx
+        .byte 0x08                            # pop  %edx
+        .byte 0x0c                            # pop  %esi
+        .byte 0x10                            # pop  %edi
+        .byte 0x14                            # pop  %ebp
+        .byte 0x18                            # pop  %eax
+        .byte 0x1c                            # pop  %ds
+        .byte 0x20                            # pop  %es
+        .byte 0x24,0x24,0x24                  # add  $4,%esp
+        .byte 0x28                            # iret
+        .byte 0x00,0x00,0x00,0x00,0x00        # btrl $31,4(%esi)
+        .byte 0x00,0x00                       # jmp  11b
+       
+# Hypervisor uses this for application faults while it executes.
+ENTRY(failsafe_callback)
+      pop  %ds
+      pop  %es
+      pop  %fs
+      pop  %gs
+      iret
+                
+ENTRY(coprocessor_error)
+       pushl $0
+       pushl $do_coprocessor_error
+       jmp do_exception
+
+ENTRY(simd_coprocessor_error)
+       pushl $0
+       pushl $do_simd_coprocessor_error
+       jmp do_exception
+
+ENTRY(device_not_available)
+        iret
+
+ENTRY(debug)
+       pushl $0
+       pushl $do_debug
+       jmp do_exception
+
+ENTRY(int3)
+       pushl $0
+       pushl $do_int3
+       jmp do_exception
+
+ENTRY(overflow)
+       pushl $0
+       pushl $do_overflow
+       jmp do_exception
+
+ENTRY(bounds)
+       pushl $0
+       pushl $do_bounds
+       jmp do_exception
+
+ENTRY(invalid_op)
+       pushl $0
+       pushl $do_invalid_op
+       jmp do_exception
+
+ENTRY(coprocessor_segment_overrun)
+       pushl $0
+       pushl $do_coprocessor_segment_overrun
+       jmp do_exception
+
+ENTRY(double_fault)
+       pushl $do_double_fault
+       jmp do_exception
+
+ENTRY(invalid_TSS)
+       pushl $do_invalid_TSS
+       jmp do_exception
+
+ENTRY(segment_not_present)
+       pushl $do_segment_not_present
+       jmp do_exception
+
+ENTRY(stack_segment)
+       pushl $do_stack_segment
+       jmp do_exception
+
+ENTRY(general_protection)
+       pushl $do_general_protection
+       jmp do_exception
+
+ENTRY(alignment_check)
+       pushl $do_alignment_check
+       jmp do_exception
+
+# This handler is special, because it gets an extra value on its stack,
+# which is the linear faulting address.
+ENTRY(page_fault)
+       pushl %ds
+       pushl %eax
+       xorl %eax,%eax
+       pushl %ebp
+       pushl %edi
+       pushl %esi
+       pushl %edx
+       decl %eax                       # eax = -1
+       pushl %ecx
+       pushl %ebx
+       cld
+       movl %es,%ecx
+       movl ORIG_EAX(%esp), %esi       # get the error code
+       movl ES(%esp), %edi             # get the faulting address
+       movl %eax, ORIG_EAX(%esp)
+       movl %ecx, ES(%esp)
+       movl %esp,%edx
+        pushl %edi                      # push the faulting address
+       pushl %esi                      # push the error code
+       pushl %edx                      # push the pt_regs pointer
+       movl $(__KERNEL_DS),%edx
+       movl %edx,%ds
+       movl %edx,%es
+       call do_page_fault
+       addl $12,%esp
+       jmp ret_from_exception
+
+ENTRY(machine_check)
+       pushl $0
+       pushl $do_machine_check
+       jmp do_exception
+
+ENTRY(spurious_interrupt_bug)
+       pushl $0
+       pushl $do_spurious_interrupt_bug
+       jmp do_exception
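
The critical-region comments in x86_32.S above describe the frame merge in prose; in C terms the operation is roughly the following (a conceptual sketch, not drop-in code — FRAME_BYTES and the helper name are inventions for illustration). critical_fixup_table supplies n, the number of bytes the interrupted RESTORE_ALL had already popped from its frame; the bottom n bytes of the frame just re-saved by SAVE_ALL hold the same register values (they were live in CPU registers when the new event arrived), so they are copied back over the popped part of the interrupted frame, which then becomes the single merged frame to restart from.

    #include <stdint.h>
    #include <string.h>

    #define FRAME_BYTES 0x34u   /* size of a full SAVE_ALL frame, as in the asm */

    /* new_frame: stack pointer after SAVE_ALL in the nested activation.
     * n:         bytes already popped from the interrupted frame.
     * Returns the merged frame, i.e. the value %esp is switched to.   */
    static uint8_t *merge_frames(uint8_t *new_frame, unsigned int n)
    {
        uint8_t *old_frame = new_frame + FRAME_BYTES - n;

        memmove(old_frame, new_frame, n);   /* refill the already-popped slots */
        return old_frame;
    }
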
diff --git a/x86_64.S b/x86_64.S
new file mode 100644 (file)
index 0000000..4a4708d
--- /dev/null
+++ b/x86_64.S
@@ -0,0 +1,78 @@
+#include <os.h>
+
+.section __xen_guest
+        .asciz  "XEN_VER=2.0,LOADER=generic,PT_MODE_WRITABLE"
+.text
+
+#define ENTRY(X) .globl X ; X :
+.globl _start, shared_info
+                        
+_start:
+        cld
+        movq stack_start(%rip),%rsp
+        movq %rsi,%rdi
+        call start_kernel
+
+stack_start:
+        .quad stack+8192
+
+        /* Unpleasant -- the PTE that maps this page is actually overwritten */
+        /* to map the real shared-info page! :-)                             */
+        .org 0x1000
+shared_info:
+        .org 0x2000
+
+ENTRY(hypervisor_callback)
+
+ENTRY(failsafe_callback)
+      iret
+                
+ENTRY(divide_error)
+       pushq $0
+
+ENTRY(coprocessor_error)
+       pushq $0
+
+ENTRY(simd_coprocessor_error)
+       pushq $0
+
+ENTRY(device_not_available)
+        iret
+
+ENTRY(debug)
+       pushq $0
+
+ENTRY(int3)
+       pushq $0
+
+ENTRY(overflow)
+       pushq $0
+
+ENTRY(bounds)
+       pushq $0
+
+ENTRY(invalid_op)
+       pushq $0
+
+ENTRY(coprocessor_segment_overrun)
+       pushq $0
+
+ENTRY(double_fault)
+
+ENTRY(invalid_TSS)
+
+ENTRY(segment_not_present)
+
+ENTRY(stack_segment)
+
+ENTRY(general_protection)
+
+ENTRY(alignment_check)
+
+ENTRY(page_fault)
+
+ENTRY(machine_check)
+       pushq $0
+
+ENTRY(spurious_interrupt_bug)
+       pushq $0