ia64/xen-unstable

changeset 11073:ec4979587156

[MINIOS] A first step in re-architecting mini-os for a port to ia64.
Create architecture-specific directories below mini-os for sources
and below mini-os/include for headers.

Signed-off-by: Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
author kfraser@localhost.localdomain
date Thu Aug 10 10:43:20 2006 +0100 (2006-08-10)
parents 485baffca4fd
children 713b0878da2f
files extras/mini-os/Makefile extras/mini-os/arch/x86/traps.c extras/mini-os/include/hypercall-x86_32.h extras/mini-os/include/hypercall-x86_64.h extras/mini-os/include/hypervisor.h extras/mini-os/include/os.h extras/mini-os/include/spinlock.h extras/mini-os/include/traps.h extras/mini-os/include/types.h extras/mini-os/include/x86/os.h extras/mini-os/include/x86/spinlock.h extras/mini-os/include/x86/traps.h extras/mini-os/include/x86/x86_32/hypercall-x86_32.h extras/mini-os/include/x86/x86_64/hypercall-x86_64.h extras/mini-os/traps.c
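For x86_32, for example, the new Makefile variables introduced in the diff below resolve as follows (an illustrative expansion, not part of the changeset itself):

    TARGET_ARCH_DIR = x86
    EXTRA_INC       = x86/x86_32
    EXTRA_SRC       = arch/x86/x86_32

so the preprocessor search path becomes -Iinclude -Iinclude/x86 -Iinclude/x86/x86_32, and objects are additionally collected from arch/x86 and arch/x86/x86_32.
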
line diff
     1.1 --- a/extras/mini-os/Makefile	Thu Aug 10 10:39:58 2006 +0100
     1.2 +++ b/extras/mini-os/Makefile	Thu Aug 10 10:43:20 2006 +0100
     1.3 @@ -11,26 +11,54 @@ override TARGET_ARCH     := $(XEN_TARGET
     1.4  CFLAGS := -fno-builtin -Wall -Werror -Wredundant-decls -Wno-format
     1.5  CFLAGS += -Wstrict-prototypes -Wnested-externs -Wpointer-arith -Winline
     1.6  
     1.7 -override CPPFLAGS := -Iinclude $(CPPFLAGS)
     1.8  ASFLAGS = -D__ASSEMBLY__
     1.9  
    1.10  LDLIBS =  -L. -lminios
    1.11  LDFLAGS := -N -T minios-$(TARGET_ARCH).lds
    1.12  
     1.13 +# Optional extra source directories, filled in per architecture below.
     1.14 +EXTRA_SRC =
     1.15 +# Optional extra header directories, filled in per architecture below.
     1.16 +EXTRA_INC =
    1.17 +
     1.18 +# Standard name for architecture-specific subdirectories.
     1.19 +TARGET_ARCH_DIR = $(TARGET_ARCH)
     1.20 +# This is used for architecture-specific links.
    1.21 +ARCH_LINKS =
    1.22 +
    1.23  ifeq ($(TARGET_ARCH),x86_32)
    1.24  CFLAGS += -m32 -march=i686
    1.25  LDFLAGS += -m elf_i386
    1.26 +TARGET_ARCH_DIR = x86
    1.27 +EXTRA_INC += $(TARGET_ARCH_DIR)/$(TARGET_ARCH)
    1.28 +EXTRA_SRC += arch/$(EXTRA_INC)
    1.29  endif
    1.30  
    1.31  ifeq ($(TARGET_ARCH)$(pae),x86_32y)
    1.32  CFLAGS  += -DCONFIG_X86_PAE=1
    1.33  ASFLAGS += -DCONFIG_X86_PAE=1
    1.34 +TARGET_ARCH_DIR = x86
    1.35 +EXTRA_INC += $(TARGET_ARCH_DIR)/$(TARGET_ARCH)
    1.36 +EXTRA_SRC += arch/$(EXTRA_INC)
    1.37  endif
    1.38  
    1.39  ifeq ($(TARGET_ARCH),x86_64)
    1.40  CFLAGS += -m64 -mno-red-zone -fpic -fno-reorder-blocks
    1.41  CFLAGS += -fno-asynchronous-unwind-tables
    1.42  LDFLAGS += -m elf_x86_64
    1.43 +TARGET_ARCH_DIR = x86
    1.44 +EXTRA_INC += $(TARGET_ARCH_DIR)/$(TARGET_ARCH)
    1.45 +EXTRA_SRC += arch/$(EXTRA_INC)
    1.46 +endif
    1.47 +
    1.48 +ifeq ($(TARGET_ARCH),ia64)
    1.49 +CFLAGS += -mfixed-range=f12-f15,f32-f127
    1.50 +ASFLAGS += -x assembler-with-cpp -ansi -Wall
    1.51 +ASFLAGS += -mfixed-range=f12-f15,f32-f127
    1.52 +ARCH_LINKS = IA64_LINKS		# Special link on ia64 needed
    1.53 +define arch_links
    1.54 +[ -e include/ia64/asm-xsi-offsets.h ] || ln -sf ../../../../xen/include/asm-ia64/asm-xsi-offsets.h include/ia64/asm-xsi-offsets.h
    1.55 +endef
    1.56  endif
    1.57  
    1.58  ifeq ($(debug),y)
    1.59 @@ -39,6 +67,10 @@ else
    1.60  CFLAGS += -O3
    1.61  endif
    1.62  
    1.63 +# Add the special header directories to the include paths.
    1.64 +extra_incl := $(foreach dir,$(EXTRA_INC),-Iinclude/$(dir))
    1.65 +override CPPFLAGS := -Iinclude $(CPPFLAGS) -Iinclude/$(TARGET_ARCH_DIR)	$(extra_incl)
    1.66 +
    1.67  TARGET := mini-os
    1.68  
    1.69  HEAD := $(TARGET_ARCH).o
    1.70 @@ -46,15 +78,32 @@ OBJS := $(patsubst %.c,%.o,$(wildcard *.
    1.71  OBJS += $(patsubst %.c,%.o,$(wildcard lib/*.c))
    1.72  OBJS += $(patsubst %.c,%.o,$(wildcard xenbus/*.c))
    1.73  OBJS += $(patsubst %.c,%.o,$(wildcard console/*.c))
    1.74 +OBJS += $(patsubst %.S,%.o,$(wildcard arch/$(TARGET_ARCH_DIR)/*.S))
    1.75 +OBJS += $(patsubst %.c,%.o,$(wildcard arch/$(TARGET_ARCH_DIR)/*.c))
     1.76 +# Collect objects from the extra source directories.
    1.77 +extra_objs := $(foreach dir,$(EXTRA_SRC),$(patsubst %.c,%.o,$(wildcard $(dir)/*.c)))
    1.78 +OBJS += $(extra_objs)
    1.79 +extra_objs := $(foreach dir,$(EXTRA_SRC),$(patsubst %.S,%.o,$(wildcard $(dir)/*.S)))
    1.80 +OBJS += $(extra_objs)
    1.81  
    1.82  HDRS := $(wildcard include/*.h)
    1.83  HDRS += $(wildcard include/xen/*.h)
    1.84 +HDRS += $(wildcard include/$(TARGET_ARCH_DIR)/*.h)
     1.85 +# Collect headers from the extra header directories.
    1.86 +extra_heads := $(foreach dir,$(EXTRA_INC),$(wildcard $(dir)/*.h))
    1.87 +HDRS += $(extra_heads)
    1.88  
    1.89  .PHONY: default
    1.90  default: $(TARGET)
    1.91  
    1.92 +# Create special architecture specific links.
    1.93 +ifneq ($(ARCH_LINKS),)
    1.94 +$(ARCH_LINKS):
    1.95 +	$(arch_links)
    1.96 +endif
    1.97 +
    1.98  .PHONY: links
    1.99 -links:
   1.100 +links:	$(ARCH_LINKS)
   1.101  	[ -e include/xen ] || ln -sf ../../../xen/include/public include/xen
   1.102  
   1.103  libminios.a: links $(OBJS) $(HEAD)
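
The per-architecture object collection above leans on GNU make's wildcard/patsubst/foreach combination; a minimal standalone sketch of the same pattern (hypothetical file layout, for illustration only):

    # Hypothetical layout: arch/x86/*.c plus arch/x86/x86_32/*.S
    TARGET_ARCH_DIR := x86
    EXTRA_SRC       := arch/x86/x86_32
    # Map every C source under the arch directory to an object file.
    OBJS := $(patsubst %.c,%.o,$(wildcard arch/$(TARGET_ARCH_DIR)/*.c))
    # Do the same for assembler sources in each extra source directory.
    OBJS += $(foreach dir,$(EXTRA_SRC),$(patsubst %.S,%.o,$(wildcard $(dir)/*.S)))
    # e.g. arch/x86/traps.c -> arch/x86/traps.o
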
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/extras/mini-os/arch/x86/traps.c	Thu Aug 10 10:43:20 2006 +0100
     2.3 @@ -0,0 +1,229 @@
     2.4 +
     2.5 +#include <os.h>
     2.6 +#include <traps.h>
     2.7 +#include <hypervisor.h>
     2.8 +#include <mm.h>
     2.9 +#include <lib.h>
    2.10 +#include <sched.h>
    2.11 +
    2.12 +/*
    2.13 + * These are assembler stubs in entry.S.
    2.14 + * They are the actual entry points for virtual exceptions.
    2.15 + */
    2.16 +void divide_error(void);
    2.17 +void debug(void);
    2.18 +void int3(void);
    2.19 +void overflow(void);
    2.20 +void bounds(void);
    2.21 +void invalid_op(void);
    2.22 +void device_not_available(void);
    2.23 +void coprocessor_segment_overrun(void);
    2.24 +void invalid_TSS(void);
    2.25 +void segment_not_present(void);
    2.26 +void stack_segment(void);
    2.27 +void general_protection(void);
    2.28 +void page_fault(void);
    2.29 +void coprocessor_error(void);
    2.30 +void simd_coprocessor_error(void);
    2.31 +void alignment_check(void);
    2.32 +void spurious_interrupt_bug(void);
    2.33 +void machine_check(void);
    2.34 +
    2.35 +
    2.36 +void dump_regs(struct pt_regs *regs)
    2.37 +{
    2.38 +    printk("Thread: %s\n", current->name);
    2.39 +#ifdef __i386__    
    2.40 +    printk("EIP: %x, EFLAGS %x.\n", regs->eip, regs->eflags);
    2.41 +    printk("EBX: %08x ECX: %08x EDX: %08x\n",
    2.42 +	   regs->ebx, regs->ecx, regs->edx);
    2.43 +    printk("ESI: %08x EDI: %08x EBP: %08x EAX: %08x\n",
    2.44 +	   regs->esi, regs->edi, regs->ebp, regs->eax);
    2.45 +    printk("DS: %04x ES: %04x orig_eax: %08x, eip: %08x\n",
    2.46 +	   regs->xds, regs->xes, regs->orig_eax, regs->eip);
    2.47 +    printk("CS: %04x EFLAGS: %08x esp: %08x ss: %04x\n",
    2.48 +	   regs->xcs, regs->eflags, regs->esp, regs->xss);
    2.49 +#else
    2.50 +    printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
    2.51 +    printk("\nRSP: %04lx:%016lx  EFLAGS: %08lx\n", 
    2.52 +           regs->ss, regs->rsp, regs->eflags);
    2.53 +    printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
    2.54 +           regs->rax, regs->rbx, regs->rcx);
    2.55 +    printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
    2.56 +           regs->rdx, regs->rsi, regs->rdi); 
    2.57 +    printk("RBP: %016lx R08: %016lx R09: %016lx\n",
    2.58 +           regs->rbp, regs->r8, regs->r9); 
    2.59 +    printk("R10: %016lx R11: %016lx R12: %016lx\n",
    2.60 +           regs->r10, regs->r11, regs->r12); 
    2.61 +    printk("R13: %016lx R14: %016lx R15: %016lx\n",
    2.62 +           regs->r13, regs->r14, regs->r15); 
    2.63 +#endif
    2.64 +}
    2.65 +
    2.66 +static void do_trap(int trapnr, char *str, struct pt_regs * regs, unsigned long error_code)
    2.67 +{
    2.68 +    printk("FATAL:  Unhandled Trap %d (%s), error code=0x%lx\n", trapnr, str, error_code);
    2.69 +    printk("Regs address %p\n", regs);
    2.70 +    dump_regs(regs);
    2.71 +    do_exit();
    2.72 +}
    2.73 +
    2.74 +#define DO_ERROR(trapnr, str, name) \
    2.75 +void do_##name(struct pt_regs * regs, unsigned long error_code) \
    2.76 +{ \
    2.77 +	do_trap(trapnr, str, regs, error_code); \
    2.78 +}
    2.79 +
    2.80 +#define DO_ERROR_INFO(trapnr, str, name, sicode, siaddr) \
    2.81 +void do_##name(struct pt_regs * regs, unsigned long error_code) \
    2.82 +{ \
    2.83 +	do_trap(trapnr, str, regs, error_code); \
    2.84 +}
    2.85 +
    2.86 +DO_ERROR_INFO( 0, "divide error", divide_error, FPE_INTDIV, regs->eip)
    2.87 +DO_ERROR( 3, "int3", int3)
    2.88 +DO_ERROR( 4, "overflow", overflow)
    2.89 +DO_ERROR( 5, "bounds", bounds)
    2.90 +DO_ERROR_INFO( 6, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
    2.91 +DO_ERROR( 7, "device not available", device_not_available)
    2.92 +DO_ERROR( 9, "coprocessor segment overrun", coprocessor_segment_overrun)
    2.93 +DO_ERROR(10, "invalid TSS", invalid_TSS)
    2.94 +DO_ERROR(11, "segment not present", segment_not_present)
    2.95 +DO_ERROR(12, "stack segment", stack_segment)
    2.96 +DO_ERROR_INFO(17, "alignment check", alignment_check, BUS_ADRALN, 0)
    2.97 +DO_ERROR(18, "machine check", machine_check)
    2.98 +
    2.99 +void page_walk(unsigned long virt_address)
   2.100 +{
   2.101 +        pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
   2.102 +        unsigned long addr = virt_address;
   2.103 +        printk("Pagetable walk from virt %lx, base %lx:\n", virt_address, start_info.pt_base);
   2.104 +    
   2.105 +#if defined(__x86_64__)
   2.106 +        page = tab[l4_table_offset(addr)];
   2.107 +        tab = pte_to_virt(page);
   2.108 +        printk(" L4 = %"PRIpte" (%p)  [offset = %lx]\n", page, tab, l4_table_offset(addr));
   2.109 +#endif
   2.110 +#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
   2.111 +        page = tab[l3_table_offset(addr)];
   2.112 +        tab = pte_to_virt(page);
   2.113 +        printk("  L3 = %"PRIpte" (%p)  [offset = %lx]\n", page, tab, l3_table_offset(addr));
   2.114 +#endif
   2.115 +        page = tab[l2_table_offset(addr)];
   2.116 +        tab = pte_to_virt(page);
   2.117 +        printk("   L2 = %"PRIpte" (%p)  [offset = %lx]\n", page, tab, l2_table_offset(addr));
   2.118 +        
   2.119 +        page = tab[l1_table_offset(addr)];
   2.120 +        printk("    L1 = %"PRIpte" (%p)  [offset = %lx]\n", page, tab, l1_table_offset(addr));
   2.121 +
   2.122 +}
   2.123 +
   2.124 +#define read_cr2() \
   2.125 +        (HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2)
   2.126 +
   2.127 +static int handling_pg_fault = 0;
   2.128 +
   2.129 +void do_page_fault(struct pt_regs *regs, unsigned long error_code)
   2.130 +{
   2.131 +    unsigned long addr = read_cr2();
    2.132 +    /* If we are already handling a page fault and get another one, that
    2.133 +       means we faulted during the pagetable walk. Continuing here would
    2.134 +       cause a recursive fault. */
   2.135 +    if(handling_pg_fault) 
   2.136 +    {
   2.137 +        printk("Page fault in pagetable walk (access to invalid memory?).\n"); 
   2.138 +        do_exit();
   2.139 +    }
   2.140 +    handling_pg_fault = 1;
   2.141 +
   2.142 +#if defined(__x86_64__)
   2.143 +    printk("Page fault at linear address %p, rip %p, code %lx\n",
   2.144 +           addr, regs->rip, error_code);
   2.145 +#else
   2.146 +    printk("Page fault at linear address %p, eip %p, code %lx\n",
   2.147 +           addr, regs->eip, error_code);
   2.148 +#endif
   2.149 +
   2.150 +    dump_regs(regs);
   2.151 +    page_walk(addr);
   2.152 +    do_exit();
   2.153 +    /* We should never get here ... but still */
   2.154 +    handling_pg_fault = 0;
   2.155 +}
   2.156 +
   2.157 +void do_general_protection(struct pt_regs *regs, long error_code)
   2.158 +{
   2.159 +#ifdef __i386__
   2.160 +    printk("GPF eip: %p, error_code=%lx\n", regs->eip, error_code);
   2.161 +#else    
   2.162 +    printk("GPF rip: %p, error_code=%lx\n", regs->rip, error_code);
   2.163 +#endif
   2.164 +    dump_regs(regs);
   2.165 +    do_exit();
   2.166 +}
   2.167 +
   2.168 +
   2.169 +void do_debug(struct pt_regs * regs)
   2.170 +{
   2.171 +    printk("Debug exception\n");
   2.172 +#define TF_MASK 0x100
   2.173 +    regs->eflags &= ~TF_MASK;
   2.174 +    dump_regs(regs);
   2.175 +    do_exit();
   2.176 +}
   2.177 +
   2.178 +void do_coprocessor_error(struct pt_regs * regs)
   2.179 +{
   2.180 +    printk("Copro error\n");
   2.181 +    dump_regs(regs);
   2.182 +    do_exit();
   2.183 +}
   2.184 +
   2.185 +void simd_math_error(void *eip)
   2.186 +{
   2.187 +    printk("SIMD error\n");
   2.188 +}
   2.189 +
   2.190 +void do_simd_coprocessor_error(struct pt_regs * regs)
   2.191 +{
   2.192 +    printk("SIMD copro error\n");
   2.193 +}
   2.194 +
   2.195 +void do_spurious_interrupt_bug(struct pt_regs * regs)
   2.196 +{
   2.197 +}
   2.198 +
   2.199 +/*
    2.200 + * Submit a virtual IDT to the hypervisor. This consists of tuples
   2.201 + * (interrupt vector, privilege ring, CS:EIP of handler).
   2.202 + * The 'privilege ring' field specifies the least-privileged ring that
   2.203 + * can trap to that vector using a software-interrupt instruction (INT).
   2.204 + */
   2.205 +static trap_info_t trap_table[] = {
   2.206 +    {  0, 0, __KERNEL_CS, (unsigned long)divide_error                },
   2.207 +    {  1, 0, __KERNEL_CS, (unsigned long)debug                       },
   2.208 +    {  3, 3, __KERNEL_CS, (unsigned long)int3                        },
   2.209 +    {  4, 3, __KERNEL_CS, (unsigned long)overflow                    },
   2.210 +    {  5, 3, __KERNEL_CS, (unsigned long)bounds                      },
   2.211 +    {  6, 0, __KERNEL_CS, (unsigned long)invalid_op                  },
   2.212 +    {  7, 0, __KERNEL_CS, (unsigned long)device_not_available        },
   2.213 +    {  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
   2.214 +    { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS                 },
   2.215 +    { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present         },
   2.216 +    { 12, 0, __KERNEL_CS, (unsigned long)stack_segment               },
   2.217 +    { 13, 0, __KERNEL_CS, (unsigned long)general_protection          },
   2.218 +    { 14, 0, __KERNEL_CS, (unsigned long)page_fault                  },
   2.219 +    { 15, 0, __KERNEL_CS, (unsigned long)spurious_interrupt_bug      },
   2.220 +    { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error           },
   2.221 +    { 17, 0, __KERNEL_CS, (unsigned long)alignment_check             },
   2.222 +    { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error      },
   2.223 +    {  0, 0,           0, 0                           }
   2.224 +};
   2.225 +    
   2.226 +
   2.227 +
   2.228 +void trap_init(void)
   2.229 +{
   2.230 +    HYPERVISOR_set_trap_table(trap_table);    
   2.231 +}
   2.232 +
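
For reference, the DO_ERROR macro above stamps out one C-level handler per trap; DO_ERROR( 3, "int3", int3), for instance, expands to the following (expansion shown for illustration):

    void do_int3(struct pt_regs * regs, unsigned long error_code)
    {
        do_trap(3, "int3", regs, error_code);
    }

Each do_NAME handler pairs with the assembler stub NAME declared at the top of the file and registered in trap_table; note that the *_INFO variant currently ignores its extra sicode/siaddr arguments.
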
     3.1 --- a/extras/mini-os/include/hypercall-x86_32.h	Thu Aug 10 10:39:58 2006 +0100
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,326 +0,0 @@
     3.4 -/******************************************************************************
     3.5 - * hypercall-x86_32.h
     3.6 - * 
     3.7 - * Copied from XenLinux.
     3.8 - * 
     3.9 - * Copyright (c) 2002-2004, K A Fraser
    3.10 - * 
    3.11 - * This file may be distributed separately from the Linux kernel, or
    3.12 - * incorporated into other software packages, subject to the following license:
    3.13 - * 
    3.14 - * Permission is hereby granted, free of charge, to any person obtaining a copy
    3.15 - * of this source file (the "Software"), to deal in the Software without
    3.16 - * restriction, including without limitation the rights to use, copy, modify,
    3.17 - * merge, publish, distribute, sublicense, and/or sell copies of the Software,
    3.18 - * and to permit persons to whom the Software is furnished to do so, subject to
    3.19 - * the following conditions:
    3.20 - * 
    3.21 - * The above copyright notice and this permission notice shall be included in
    3.22 - * all copies or substantial portions of the Software.
    3.23 - * 
    3.24 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    3.25 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    3.26 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    3.27 - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    3.28 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    3.29 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
    3.30 - * IN THE SOFTWARE.
    3.31 - */
    3.32 -
    3.33 -#ifndef __HYPERCALL_X86_32_H__
    3.34 -#define __HYPERCALL_X86_32_H__
    3.35 -
    3.36 -#include <xen/xen.h>
    3.37 -#include <xen/sched.h>
    3.38 -#include <xen/nmi.h>
    3.39 -#include <mm.h>
    3.40 -
    3.41 -#define __STR(x) #x
    3.42 -#define STR(x) __STR(x)
    3.43 -
    3.44 -extern char hypercall_page[PAGE_SIZE];
    3.45 -
    3.46 -#define _hypercall0(type, name)			\
    3.47 -({						\
    3.48 -	long __res;				\
    3.49 -	asm volatile (				\
    3.50 -		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
    3.51 -		: "=a" (__res)			\
    3.52 -		:				\
    3.53 -		: "memory" );			\
    3.54 -	(type)__res;				\
    3.55 -})
    3.56 -
    3.57 -#define _hypercall1(type, name, a1)				\
    3.58 -({								\
    3.59 -	long __res, __ign1;					\
    3.60 -	asm volatile (						\
    3.61 -		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
    3.62 -		: "=a" (__res), "=b" (__ign1)			\
    3.63 -		: "1" ((long)(a1))				\
    3.64 -		: "memory" );					\
    3.65 -	(type)__res;						\
    3.66 -})
    3.67 -
    3.68 -#define _hypercall2(type, name, a1, a2)				\
    3.69 -({								\
    3.70 -	long __res, __ign1, __ign2;				\
    3.71 -	asm volatile (						\
    3.72 -		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
    3.73 -		: "=a" (__res), "=b" (__ign1), "=c" (__ign2)	\
    3.74 -		: "1" ((long)(a1)), "2" ((long)(a2))		\
    3.75 -		: "memory" );					\
    3.76 -	(type)__res;						\
    3.77 -})
    3.78 -
    3.79 -#define _hypercall3(type, name, a1, a2, a3)			\
    3.80 -({								\
    3.81 -	long __res, __ign1, __ign2, __ign3;			\
    3.82 -	asm volatile (						\
    3.83 -		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
    3.84 -		: "=a" (__res), "=b" (__ign1), "=c" (__ign2), 	\
    3.85 -		"=d" (__ign3)					\
    3.86 -		: "1" ((long)(a1)), "2" ((long)(a2)),		\
    3.87 -		"3" ((long)(a3))				\
    3.88 -		: "memory" );					\
    3.89 -	(type)__res;						\
    3.90 -})
    3.91 -
    3.92 -#define _hypercall4(type, name, a1, a2, a3, a4)			\
    3.93 -({								\
    3.94 -	long __res, __ign1, __ign2, __ign3, __ign4;		\
    3.95 -	asm volatile (						\
    3.96 -		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
    3.97 -		: "=a" (__res), "=b" (__ign1), "=c" (__ign2),	\
    3.98 -		"=d" (__ign3), "=S" (__ign4)			\
    3.99 -		: "1" ((long)(a1)), "2" ((long)(a2)),		\
   3.100 -		"3" ((long)(a3)), "4" ((long)(a4))		\
   3.101 -		: "memory" );					\
   3.102 -	(type)__res;						\
   3.103 -})
   3.104 -
   3.105 -#define _hypercall5(type, name, a1, a2, a3, a4, a5)		\
   3.106 -({								\
   3.107 -	long __res, __ign1, __ign2, __ign3, __ign4, __ign5;	\
   3.108 -	asm volatile (						\
   3.109 -		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
   3.110 -		: "=a" (__res), "=b" (__ign1), "=c" (__ign2),	\
   3.111 -		"=d" (__ign3), "=S" (__ign4), "=D" (__ign5)	\
   3.112 -		: "1" ((long)(a1)), "2" ((long)(a2)),		\
   3.113 -		"3" ((long)(a3)), "4" ((long)(a4)),		\
   3.114 -		"5" ((long)(a5))				\
   3.115 -		: "memory" );					\
   3.116 -	(type)__res;						\
   3.117 -})
   3.118 -
   3.119 -static inline int
   3.120 -HYPERVISOR_set_trap_table(
   3.121 -	trap_info_t *table)
   3.122 -{
   3.123 -	return _hypercall1(int, set_trap_table, table);
   3.124 -}
   3.125 -
   3.126 -static inline int
   3.127 -HYPERVISOR_mmu_update(
   3.128 -	mmu_update_t *req, int count, int *success_count, domid_t domid)
   3.129 -{
   3.130 -	return _hypercall4(int, mmu_update, req, count, success_count, domid);
   3.131 -}
   3.132 -
   3.133 -static inline int
   3.134 -HYPERVISOR_mmuext_op(
   3.135 -	struct mmuext_op *op, int count, int *success_count, domid_t domid)
   3.136 -{
   3.137 -	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
   3.138 -}
   3.139 -
   3.140 -static inline int
   3.141 -HYPERVISOR_set_gdt(
   3.142 -	unsigned long *frame_list, int entries)
   3.143 -{
   3.144 -	return _hypercall2(int, set_gdt, frame_list, entries);
   3.145 -}
   3.146 -
   3.147 -static inline int
   3.148 -HYPERVISOR_stack_switch(
   3.149 -	unsigned long ss, unsigned long esp)
   3.150 -{
   3.151 -	return _hypercall2(int, stack_switch, ss, esp);
   3.152 -}
   3.153 -
   3.154 -static inline int
   3.155 -HYPERVISOR_set_callbacks(
   3.156 -	unsigned long event_selector, unsigned long event_address,
   3.157 -	unsigned long failsafe_selector, unsigned long failsafe_address)
   3.158 -{
   3.159 -	return _hypercall4(int, set_callbacks,
   3.160 -			   event_selector, event_address,
   3.161 -			   failsafe_selector, failsafe_address);
   3.162 -}
   3.163 -
   3.164 -static inline int
   3.165 -HYPERVISOR_fpu_taskswitch(
   3.166 -	int set)
   3.167 -{
   3.168 -	return _hypercall1(int, fpu_taskswitch, set);
   3.169 -}
   3.170 -
   3.171 -static inline int
   3.172 -HYPERVISOR_sched_op(
   3.173 -	int cmd, unsigned long arg)
   3.174 -{
   3.175 -	return _hypercall2(int, sched_op, cmd, arg);
   3.176 -}
   3.177 -
   3.178 -static inline long
   3.179 -HYPERVISOR_set_timer_op(
   3.180 -	u64 timeout)
   3.181 -{
   3.182 -	unsigned long timeout_hi = (unsigned long)(timeout>>32);
   3.183 -	unsigned long timeout_lo = (unsigned long)timeout;
   3.184 -	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
   3.185 -}
   3.186 -
   3.187 -static inline int
   3.188 -HYPERVISOR_dom0_op(
   3.189 -	dom0_op_t *dom0_op)
   3.190 -{
   3.191 -	dom0_op->interface_version = DOM0_INTERFACE_VERSION;
   3.192 -	return _hypercall1(int, dom0_op, dom0_op);
   3.193 -}
   3.194 -
   3.195 -static inline int
   3.196 -HYPERVISOR_set_debugreg(
   3.197 -	int reg, unsigned long value)
   3.198 -{
   3.199 -	return _hypercall2(int, set_debugreg, reg, value);
   3.200 -}
   3.201 -
   3.202 -static inline unsigned long
   3.203 -HYPERVISOR_get_debugreg(
   3.204 -	int reg)
   3.205 -{
   3.206 -	return _hypercall1(unsigned long, get_debugreg, reg);
   3.207 -}
   3.208 -
   3.209 -static inline int
   3.210 -HYPERVISOR_update_descriptor(
   3.211 -	u64 ma, u64 desc)
   3.212 -{
   3.213 -	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
   3.214 -}
   3.215 -
   3.216 -static inline int
   3.217 -HYPERVISOR_memory_op(
   3.218 -	unsigned int cmd, void *arg)
   3.219 -{
   3.220 -	return _hypercall2(int, memory_op, cmd, arg);
   3.221 -}
   3.222 -
   3.223 -static inline int
   3.224 -HYPERVISOR_multicall(
   3.225 -	void *call_list, int nr_calls)
   3.226 -{
   3.227 -	return _hypercall2(int, multicall, call_list, nr_calls);
   3.228 -}
   3.229 -
   3.230 -static inline int
   3.231 -HYPERVISOR_update_va_mapping(
   3.232 -	unsigned long va, pte_t new_val, unsigned long flags)
   3.233 -{
   3.234 -	unsigned long pte_hi = 0;
   3.235 -#ifdef CONFIG_X86_PAE
   3.236 -	pte_hi = new_val.pte_high;
   3.237 -#endif
   3.238 -	return _hypercall4(int, update_va_mapping, va,
   3.239 -			   new_val.pte_low, pte_hi, flags);
   3.240 -}
   3.241 -
   3.242 -static inline int
   3.243 -HYPERVISOR_event_channel_op(
   3.244 -	void *op)
   3.245 -{
   3.246 -	return _hypercall1(int, event_channel_op, op);
   3.247 -}
   3.248 -
   3.249 -static inline int
   3.250 -HYPERVISOR_xen_version(
   3.251 -	int cmd, void *arg)
   3.252 -{
   3.253 -	return _hypercall2(int, xen_version, cmd, arg);
   3.254 -}
   3.255 -
   3.256 -static inline int
   3.257 -HYPERVISOR_console_io(
   3.258 -	int cmd, int count, char *str)
   3.259 -{
   3.260 -	return _hypercall3(int, console_io, cmd, count, str);
   3.261 -}
   3.262 -
   3.263 -static inline int
   3.264 -HYPERVISOR_physdev_op(
   3.265 -	void *physdev_op)
   3.266 -{
   3.267 -	return _hypercall1(int, physdev_op, physdev_op);
   3.268 -}
   3.269 -
   3.270 -static inline int
   3.271 -HYPERVISOR_grant_table_op(
   3.272 -	unsigned int cmd, void *uop, unsigned int count)
   3.273 -{
   3.274 -	return _hypercall3(int, grant_table_op, cmd, uop, count);
   3.275 -}
   3.276 -
   3.277 -static inline int
   3.278 -HYPERVISOR_update_va_mapping_otherdomain(
   3.279 -	unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
   3.280 -{
   3.281 -	unsigned long pte_hi = 0;
   3.282 -#ifdef CONFIG_X86_PAE
   3.283 -	pte_hi = new_val.pte_high;
   3.284 -#endif
   3.285 -	return _hypercall5(int, update_va_mapping_otherdomain, va,
   3.286 -			   new_val.pte_low, pte_hi, flags, domid);
   3.287 -}
   3.288 -
   3.289 -static inline int
   3.290 -HYPERVISOR_vm_assist(
   3.291 -	unsigned int cmd, unsigned int type)
   3.292 -{
   3.293 -	return _hypercall2(int, vm_assist, cmd, type);
   3.294 -}
   3.295 -
   3.296 -static inline int
   3.297 -HYPERVISOR_vcpu_op(
   3.298 -	int cmd, int vcpuid, void *extra_args)
   3.299 -{
   3.300 -	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
   3.301 -}
   3.302 -
   3.303 -static inline int
   3.304 -HYPERVISOR_suspend(
   3.305 -	unsigned long srec)
   3.306 -{
   3.307 -	return _hypercall3(int, sched_op, SCHEDOP_shutdown,
   3.308 -			   SHUTDOWN_suspend, srec);
   3.309 -}
   3.310 -
   3.311 -static inline int
   3.312 -HYPERVISOR_nmi_op(
   3.313 -	unsigned long op,
   3.314 -	unsigned long arg)
   3.315 -{
   3.316 -	return _hypercall2(int, nmi_op, op, arg);
   3.317 -}
   3.318 -
   3.319 -#endif /* __HYPERCALL_X86_32_H__ */
   3.320 -
   3.321 -/*
   3.322 - * Local variables:
   3.323 - *  c-file-style: "linux"
   3.324 - *  indent-tabs-mode: t
   3.325 - *  c-indent-level: 8
   3.326 - *  c-basic-offset: 8
   3.327 - *  tab-width: 8
   3.328 - * End:
   3.329 - */
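
The _hypercallN macros in the header above (relocated by this changeset under include/x86/x86_32/) emit a call into the hypercall transfer page, with arguments passed in EBX, ECX, EDX, ESI and EDI. As a rough illustration, HYPERVISOR_set_trap_table boils down to:

    static inline int
    HYPERVISOR_set_trap_table(trap_info_t *table)
    {
        long __res, __ign1;
        asm volatile (
            /* the real macro stringifies the vector offset via STR() */
            "call hypercall_page + (__HYPERVISOR_set_trap_table * 32)"
            : "=a" (__res), "=b" (__ign1)   /* result in EAX, arg1 clobbered in EBX */
            : "1" ((long)(table))
            : "memory" );
        return (int)__res;
    }
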
     4.1 --- a/extras/mini-os/include/hypercall-x86_64.h	Thu Aug 10 10:39:58 2006 +0100
     4.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.3 @@ -1,326 +0,0 @@
     4.4 -/******************************************************************************
     4.5 - * hypercall-x86_64.h
     4.6 - * 
     4.7 - * Copied from XenLinux.
     4.8 - * 
     4.9 - * Copyright (c) 2002-2004, K A Fraser
    4.10 - * 
    4.11 - * 64-bit updates:
    4.12 - *   Benjamin Liu <benjamin.liu@intel.com>
    4.13 - *   Jun Nakajima <jun.nakajima@intel.com>
    4.14 - * 
    4.15 - * This file may be distributed separately from the Linux kernel, or
    4.16 - * incorporated into other software packages, subject to the following license:
    4.17 - * 
    4.18 - * Permission is hereby granted, free of charge, to any person obtaining a copy
    4.19 - * of this source file (the "Software"), to deal in the Software without
    4.20 - * restriction, including without limitation the rights to use, copy, modify,
    4.21 - * merge, publish, distribute, sublicense, and/or sell copies of the Software,
    4.22 - * and to permit persons to whom the Software is furnished to do so, subject to
    4.23 - * the following conditions:
    4.24 - * 
    4.25 - * The above copyright notice and this permission notice shall be included in
    4.26 - * all copies or substantial portions of the Software.
    4.27 - * 
    4.28 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    4.29 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    4.30 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    4.31 - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    4.32 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    4.33 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
    4.34 - * IN THE SOFTWARE.
    4.35 - */
    4.36 -
    4.37 -#ifndef __HYPERCALL_X86_64_H__
    4.38 -#define __HYPERCALL_X86_64_H__
    4.39 -
    4.40 -#include <xen/xen.h>
    4.41 -#include <xen/sched.h>
    4.42 -#include <mm.h>
    4.43 -
    4.44 -#define __STR(x) #x
    4.45 -#define STR(x) __STR(x)
    4.46 -
    4.47 -extern char hypercall_page[PAGE_SIZE];
    4.48 -
    4.49 -#define _hypercall0(type, name)			\
    4.50 -({						\
    4.51 -	long __res;				\
    4.52 -	asm volatile (				\
    4.53 -		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
    4.54 -		: "=a" (__res)			\
    4.55 -		:				\
    4.56 -		: "memory" );			\
    4.57 -	(type)__res;				\
    4.58 -})
    4.59 -
    4.60 -#define _hypercall1(type, name, a1)				\
    4.61 -({								\
    4.62 -	long __res, __ign1;					\
    4.63 -	asm volatile (						\
    4.64 -		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
    4.65 -		: "=a" (__res), "=D" (__ign1)			\
    4.66 -		: "1" ((long)(a1))				\
    4.67 -		: "memory" );					\
    4.68 -	(type)__res;						\
    4.69 -})
    4.70 -
    4.71 -#define _hypercall2(type, name, a1, a2)				\
    4.72 -({								\
    4.73 -	long __res, __ign1, __ign2;				\
    4.74 -	asm volatile (						\
    4.75 -		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
    4.76 -		: "=a" (__res), "=D" (__ign1), "=S" (__ign2)	\
    4.77 -		: "1" ((long)(a1)), "2" ((long)(a2))		\
    4.78 -		: "memory" );					\
    4.79 -	(type)__res;						\
    4.80 -})
    4.81 -
    4.82 -#define _hypercall3(type, name, a1, a2, a3)			\
    4.83 -({								\
    4.84 -	long __res, __ign1, __ign2, __ign3;			\
    4.85 -	asm volatile (						\
    4.86 -		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
    4.87 -		: "=a" (__res), "=D" (__ign1), "=S" (__ign2), 	\
    4.88 -		"=d" (__ign3)					\
    4.89 -		: "1" ((long)(a1)), "2" ((long)(a2)),		\
    4.90 -		"3" ((long)(a3))				\
    4.91 -		: "memory" );					\
    4.92 -	(type)__res;						\
    4.93 -})
    4.94 -
    4.95 -#define _hypercall4(type, name, a1, a2, a3, a4)			\
    4.96 -({								\
    4.97 -	long __res, __ign1, __ign2, __ign3;			\
    4.98 -	asm volatile (						\
    4.99 -		"movq %7,%%r10; "				\
   4.100 -		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
   4.101 -		: "=a" (__res), "=D" (__ign1), "=S" (__ign2),	\
   4.102 -		"=d" (__ign3)					\
   4.103 -		: "1" ((long)(a1)), "2" ((long)(a2)),		\
   4.104 -		"3" ((long)(a3)), "g" ((long)(a4))		\
   4.105 -		: "memory", "r10" );				\
   4.106 -	(type)__res;						\
   4.107 -})
   4.108 -
   4.109 -#define _hypercall5(type, name, a1, a2, a3, a4, a5)		\
   4.110 -({								\
   4.111 -	long __res, __ign1, __ign2, __ign3;			\
   4.112 -	asm volatile (						\
   4.113 -		"movq %7,%%r10; movq %8,%%r8; "			\
   4.114 -		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
   4.115 -		: "=a" (__res), "=D" (__ign1), "=S" (__ign2),	\
   4.116 -		"=d" (__ign3)					\
   4.117 -		: "1" ((long)(a1)), "2" ((long)(a2)),		\
   4.118 -		"3" ((long)(a3)), "g" ((long)(a4)),		\
   4.119 -		"g" ((long)(a5))				\
   4.120 -		: "memory", "r10", "r8" );			\
   4.121 -	(type)__res;						\
   4.122 -})
   4.123 -
   4.124 -static inline int
   4.125 -HYPERVISOR_set_trap_table(
   4.126 -	trap_info_t *table)
   4.127 -{
   4.128 -	return _hypercall1(int, set_trap_table, table);
   4.129 -}
   4.130 -
   4.131 -static inline int
   4.132 -HYPERVISOR_mmu_update(
   4.133 -	mmu_update_t *req, int count, int *success_count, domid_t domid)
   4.134 -{
   4.135 -	return _hypercall4(int, mmu_update, req, count, success_count, domid);
   4.136 -}
   4.137 -
   4.138 -static inline int
   4.139 -HYPERVISOR_mmuext_op(
   4.140 -	struct mmuext_op *op, int count, int *success_count, domid_t domid)
   4.141 -{
   4.142 -	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
   4.143 -}
   4.144 -
   4.145 -static inline int
   4.146 -HYPERVISOR_set_gdt(
   4.147 -	unsigned long *frame_list, int entries)
   4.148 -{
   4.149 -	return _hypercall2(int, set_gdt, frame_list, entries);
   4.150 -}
   4.151 -
   4.152 -static inline int
   4.153 -HYPERVISOR_stack_switch(
   4.154 -	unsigned long ss, unsigned long esp)
   4.155 -{
   4.156 -	return _hypercall2(int, stack_switch, ss, esp);
   4.157 -}
   4.158 -
   4.159 -static inline int
   4.160 -HYPERVISOR_set_callbacks(
   4.161 -	unsigned long event_address, unsigned long failsafe_address, 
   4.162 -	unsigned long syscall_address)
   4.163 -{
   4.164 -	return _hypercall3(int, set_callbacks,
   4.165 -			   event_address, failsafe_address, syscall_address);
   4.166 -}
   4.167 -
   4.168 -static inline int
   4.169 -HYPERVISOR_fpu_taskswitch(
   4.170 -	int set)
   4.171 -{
   4.172 -	return _hypercall1(int, fpu_taskswitch, set);
   4.173 -}
   4.174 -
   4.175 -static inline int
   4.176 -HYPERVISOR_sched_op(
   4.177 -	int cmd, unsigned long arg)
   4.178 -{
   4.179 -	return _hypercall2(int, sched_op, cmd, arg);
   4.180 -}
   4.181 -
   4.182 -static inline long
   4.183 -HYPERVISOR_set_timer_op(
   4.184 -	u64 timeout)
   4.185 -{
   4.186 -	return _hypercall1(long, set_timer_op, timeout);
   4.187 -}
   4.188 -
   4.189 -static inline int
   4.190 -HYPERVISOR_dom0_op(
   4.191 -	dom0_op_t *dom0_op)
   4.192 -{
   4.193 -	dom0_op->interface_version = DOM0_INTERFACE_VERSION;
   4.194 -	return _hypercall1(int, dom0_op, dom0_op);
   4.195 -}
   4.196 -
   4.197 -static inline int
   4.198 -HYPERVISOR_set_debugreg(
   4.199 -	int reg, unsigned long value)
   4.200 -{
   4.201 -	return _hypercall2(int, set_debugreg, reg, value);
   4.202 -}
   4.203 -
   4.204 -static inline unsigned long
   4.205 -HYPERVISOR_get_debugreg(
   4.206 -	int reg)
   4.207 -{
   4.208 -	return _hypercall1(unsigned long, get_debugreg, reg);
   4.209 -}
   4.210 -
   4.211 -static inline int
   4.212 -HYPERVISOR_update_descriptor(
   4.213 -	unsigned long ma, unsigned long word)
   4.214 -{
   4.215 -	return _hypercall2(int, update_descriptor, ma, word);
   4.216 -}
   4.217 -
   4.218 -static inline int
   4.219 -HYPERVISOR_memory_op(
   4.220 -	unsigned int cmd, void *arg)
   4.221 -{
   4.222 -	return _hypercall2(int, memory_op, cmd, arg);
   4.223 -}
   4.224 -
   4.225 -static inline int
   4.226 -HYPERVISOR_multicall(
   4.227 -	void *call_list, int nr_calls)
   4.228 -{
   4.229 -	return _hypercall2(int, multicall, call_list, nr_calls);
   4.230 -}
   4.231 -
   4.232 -static inline int
   4.233 -HYPERVISOR_update_va_mapping(
   4.234 -	unsigned long va, pte_t new_val, unsigned long flags)
   4.235 -{
   4.236 -	return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
   4.237 -}
   4.238 -
   4.239 -static inline int
   4.240 -HYPERVISOR_event_channel_op(
   4.241 -	void *op)
   4.242 -{
   4.243 -	return _hypercall1(int, event_channel_op, op);
   4.244 -}
   4.245 -
   4.246 -static inline int
   4.247 -HYPERVISOR_xen_version(
   4.248 -	int cmd, void *arg)
   4.249 -{
   4.250 -	return _hypercall2(int, xen_version, cmd, arg);
   4.251 -}
   4.252 -
   4.253 -static inline int
   4.254 -HYPERVISOR_console_io(
   4.255 -	int cmd, int count, char *str)
   4.256 -{
   4.257 -	return _hypercall3(int, console_io, cmd, count, str);
   4.258 -}
   4.259 -
   4.260 -static inline int
   4.261 -HYPERVISOR_physdev_op(
   4.262 -	void *physdev_op)
   4.263 -{
   4.264 -	return _hypercall1(int, physdev_op, physdev_op);
   4.265 -}
   4.266 -
   4.267 -static inline int
   4.268 -HYPERVISOR_grant_table_op(
   4.269 -	unsigned int cmd, void *uop, unsigned int count)
   4.270 -{
   4.271 -	return _hypercall3(int, grant_table_op, cmd, uop, count);
   4.272 -}
   4.273 -
   4.274 -static inline int
   4.275 -HYPERVISOR_update_va_mapping_otherdomain(
   4.276 -	unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
   4.277 -{
   4.278 -	return _hypercall4(int, update_va_mapping_otherdomain, va,
   4.279 -			   new_val.pte, flags, domid);
   4.280 -}
   4.281 -
   4.282 -static inline int
   4.283 -HYPERVISOR_vm_assist(
   4.284 -	unsigned int cmd, unsigned int type)
   4.285 -{
   4.286 -	return _hypercall2(int, vm_assist, cmd, type);
   4.287 -}
   4.288 -
   4.289 -static inline int
   4.290 -HYPERVISOR_vcpu_op(
   4.291 -	int cmd, int vcpuid, void *extra_args)
   4.292 -{
   4.293 -	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
   4.294 -}
   4.295 -
   4.296 -static inline int
   4.297 -HYPERVISOR_set_segment_base(
   4.298 -	int reg, unsigned long value)
   4.299 -{
   4.300 -	return _hypercall2(int, set_segment_base, reg, value);
   4.301 -}
   4.302 -
   4.303 -static inline int
   4.304 -HYPERVISOR_suspend(
   4.305 -	unsigned long srec)
   4.306 -{
   4.307 -	return _hypercall3(int, sched_op, SCHEDOP_shutdown,
   4.308 -			   SHUTDOWN_suspend, srec);
   4.309 -}
   4.310 -
   4.311 -static inline int
   4.312 -HYPERVISOR_nmi_op(
   4.313 -	unsigned long op,
   4.314 -	unsigned long arg)
   4.315 -{
   4.316 -	return _hypercall2(int, nmi_op, op, arg);
   4.317 -}
   4.318 -
   4.319 -#endif /* __HYPERCALL_X86_64_H__ */
   4.320 -
   4.321 -/*
   4.322 - * Local variables:
   4.323 - *  c-file-style: "linux"
   4.324 - *  indent-tabs-mode: t
   4.325 - *  c-indent-level: 8
   4.326 - *  c-basic-offset: 8
   4.327 - *  tab-width: 8
   4.328 - * End:
   4.329 - */
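
The x86_64 twin above differs mainly in calling convention: arguments travel in RDI, RSI, RDX, R10 and R8, with the fourth and fifth staged by hand because GCC has no constraint letters for r10/r8, as in _hypercall5:

    "movq %7,%%r10; movq %8,%%r8; "
    "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"

Note also that set_timer_op passes its 64-bit timeout in a single register here, where the x86_32 version splits it into a lo/hi pair.
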
     5.1 --- a/extras/mini-os/include/hypervisor.h	Thu Aug 10 10:39:58 2006 +0100
     5.2 +++ b/extras/mini-os/include/hypervisor.h	Thu Aug 10 10:43:20 2006 +0100
     5.3 @@ -7,6 +7,7 @@
     5.4   * Copyright (c) 2002, K A Fraser
     5.5   * Copyright (c) 2005, Grzegorz Milos
     5.6   * Updates: Aravindh Puthiyaparambil <aravindh.puthiyaparambil@unisys.com>
     5.7 + * Updates: Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com> for ia64
     5.8   */
     5.9  
    5.10  #ifndef _HYPERVISOR_H_
    5.11 @@ -19,6 +20,8 @@
    5.12  #include <hypercall-x86_32.h>
    5.13  #elif defined(__x86_64__)
    5.14  #include <hypercall-x86_64.h>
    5.15 +#elif defined(__ia64__)
    5.16 +#include <hypercall-ia64.h>
    5.17  #else
    5.18  #error "Unsupported architecture"
    5.19  #endif
     6.1 --- a/extras/mini-os/include/os.h	Thu Aug 10 10:39:58 2006 +0100
     6.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.3 @@ -1,561 +0,0 @@
     6.4 -/******************************************************************************
     6.5 - * os.h
     6.6 - * 
     6.7 - * random collection of macros and definition
     6.8 - */
     6.9 -
    6.10 -#ifndef _OS_H_
    6.11 -#define _OS_H_
    6.12 -
    6.13 -#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
    6.14 -#define __builtin_expect(x, expected_value) (x)
    6.15 -#endif
    6.16 -#define unlikely(x)  __builtin_expect((x),0)
    6.17 -
    6.18 -#define smp_processor_id() 0
    6.19 -
    6.20 -
    6.21 -#ifndef __ASSEMBLY__
    6.22 -#include <types.h>
    6.23 -#include <hypervisor.h>
    6.24 -
    6.25 -extern void do_exit(void);
    6.26 -#define BUG do_exit
    6.27 -
    6.28 -#endif
    6.29 -#include <xen/xen.h>
    6.30 -
    6.31 -
    6.32 -#define force_evtchn_callback() ((void)HYPERVISOR_xen_version(0, 0))
    6.33 -
    6.34 -#define __KERNEL_CS  FLAT_KERNEL_CS
    6.35 -#define __KERNEL_DS  FLAT_KERNEL_DS
    6.36 -#define __KERNEL_SS  FLAT_KERNEL_SS
    6.37 -
    6.38 -#define TRAP_divide_error      0
    6.39 -#define TRAP_debug             1
    6.40 -#define TRAP_nmi               2
    6.41 -#define TRAP_int3              3
    6.42 -#define TRAP_overflow          4
    6.43 -#define TRAP_bounds            5
    6.44 -#define TRAP_invalid_op        6
    6.45 -#define TRAP_no_device         7
    6.46 -#define TRAP_double_fault      8
    6.47 -#define TRAP_copro_seg         9
    6.48 -#define TRAP_invalid_tss      10
    6.49 -#define TRAP_no_segment       11
    6.50 -#define TRAP_stack_error      12
    6.51 -#define TRAP_gp_fault         13
    6.52 -#define TRAP_page_fault       14
    6.53 -#define TRAP_spurious_int     15
    6.54 -#define TRAP_copro_error      16
    6.55 -#define TRAP_alignment_check  17
    6.56 -#define TRAP_machine_check    18
    6.57 -#define TRAP_simd_error       19
    6.58 -#define TRAP_deferred_nmi     31
    6.59 -
    6.60 -/* Everything below this point is not included by assembler (.S) files. */
    6.61 -#ifndef __ASSEMBLY__
    6.62 -
    6.63 -extern shared_info_t *HYPERVISOR_shared_info;
    6.64 -
    6.65 -void trap_init(void);
    6.66 -
    6.67 -
    6.68 -
    6.69 -/* 
    6.70 - * The use of 'barrier' in the following reflects their use as local-lock
    6.71 - * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
    6.72 - * critical operations are executed. All critical operations must complete
    6.73 - * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
    6.74 - * includes these barriers, for example.
    6.75 - */
    6.76 -
    6.77 -#define __cli()								\
    6.78 -do {									\
    6.79 -	vcpu_info_t *_vcpu;						\
    6.80 -	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
    6.81 -	_vcpu->evtchn_upcall_mask = 1;					\
    6.82 -	barrier();							\
    6.83 -} while (0)
    6.84 -
    6.85 -#define __sti()								\
    6.86 -do {									\
    6.87 -	vcpu_info_t *_vcpu;						\
    6.88 -	barrier();							\
    6.89 -	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
    6.90 -	_vcpu->evtchn_upcall_mask = 0;					\
    6.91 -	barrier(); /* unmask then check (avoid races) */		\
    6.92 -	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
    6.93 -		force_evtchn_callback();				\
    6.94 -} while (0)
    6.95 -
    6.96 -#define __save_flags(x)							\
    6.97 -do {									\
    6.98 -	vcpu_info_t *_vcpu;						\
    6.99 -	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
   6.100 -	(x) = _vcpu->evtchn_upcall_mask;				\
   6.101 -} while (0)
   6.102 -
   6.103 -#define __restore_flags(x)						\
   6.104 -do {									\
   6.105 -	vcpu_info_t *_vcpu;						\
   6.106 -	barrier();							\
   6.107 -	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
   6.108 -	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
   6.109 -		barrier(); /* unmask then check (avoid races) */	\
   6.110 -		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
   6.111 -			force_evtchn_callback();			\
   6.112 -	}\
   6.113 -} while (0)
   6.114 -
   6.115 -#define safe_halt()		((void)0)
   6.116 -
   6.117 -#define __save_and_cli(x)						\
   6.118 -do {									\
   6.119 -	vcpu_info_t *_vcpu;						\
   6.120 -	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
   6.121 -	(x) = _vcpu->evtchn_upcall_mask;				\
   6.122 -	_vcpu->evtchn_upcall_mask = 1;					\
   6.123 -	barrier();							\
   6.124 -} while (0)
   6.125 -
   6.126 -#define local_irq_save(x)	__save_and_cli(x)
   6.127 -#define local_irq_restore(x)	__restore_flags(x)
   6.128 -#define local_save_flags(x)	__save_flags(x)
   6.129 -#define local_irq_disable()	__cli()
   6.130 -#define local_irq_enable()	__sti()
   6.131 -
   6.132 -#define irqs_disabled()			\
   6.133 -    HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].evtchn_upcall_mask
   6.134 -
   6.135 -/* This is a barrier for the compiler only, NOT the processor! */
   6.136 -#define barrier() __asm__ __volatile__("": : :"memory")
   6.137 -
   6.138 -#if defined(__i386__)
   6.139 -#define mb()    __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
   6.140 -#define rmb()   __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
   6.141 -#define wmb()	__asm__ __volatile__ ("": : :"memory")
   6.142 -#elif defined(__x86_64__)
   6.143 -#define mb()    __asm__ __volatile__ ("mfence":::"memory")
   6.144 -#define rmb()   __asm__ __volatile__ ("lfence":::"memory")
   6.145 -#define wmb()	__asm__ __volatile__ ("sfence" ::: "memory") /* From CONFIG_UNORDERED_IO (linux) */
   6.146 -#endif
   6.147 -
   6.148 -
   6.149 -#define LOCK_PREFIX ""
   6.150 -#define LOCK ""
   6.151 -#define ADDR (*(volatile long *) addr)
   6.152 -/*
   6.153 - * Make sure gcc doesn't try to be clever and move things around
   6.154 - * on us. We need to use _exactly_ the address the user gave us,
   6.155 - * not some alias that contains the same information.
   6.156 - */
   6.157 -typedef struct { volatile int counter; } atomic_t;
   6.158 -
   6.159 -
   6.160 -/************************** i386 *******************************/
   6.161 -#if defined (__i386__)
   6.162 -
   6.163 -#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
   6.164 -struct __xchg_dummy { unsigned long a[100]; };
   6.165 -#define __xg(x) ((struct __xchg_dummy *)(x))
   6.166 -static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
   6.167 -{
   6.168 -	switch (size) {
   6.169 -		case 1:
   6.170 -			__asm__ __volatile__("xchgb %b0,%1"
   6.171 -				:"=q" (x)
   6.172 -				:"m" (*__xg(ptr)), "0" (x)
   6.173 -				:"memory");
   6.174 -			break;
   6.175 -		case 2:
   6.176 -			__asm__ __volatile__("xchgw %w0,%1"
   6.177 -				:"=r" (x)
   6.178 -				:"m" (*__xg(ptr)), "0" (x)
   6.179 -				:"memory");
   6.180 -			break;
   6.181 -		case 4:
   6.182 -			__asm__ __volatile__("xchgl %0,%1"
   6.183 -				:"=r" (x)
   6.184 -				:"m" (*__xg(ptr)), "0" (x)
   6.185 -				:"memory");
   6.186 -			break;
   6.187 -	}
   6.188 -	return x;
   6.189 -}
   6.190 -
   6.191 -/**
   6.192 - * test_and_clear_bit - Clear a bit and return its old value
   6.193 - * @nr: Bit to clear
   6.194 - * @addr: Address to count from
   6.195 - *
   6.196 - * This operation is atomic and cannot be reordered.
    6.197 - * It can be reordered on architectures other than x86.
   6.198 - * It also implies a memory barrier.
   6.199 - */
   6.200 -static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
   6.201 -{
   6.202 -	int oldbit;
   6.203 -
   6.204 -	__asm__ __volatile__( LOCK
   6.205 -		"btrl %2,%1\n\tsbbl %0,%0"
   6.206 -		:"=r" (oldbit),"=m" (ADDR)
   6.207 -		:"Ir" (nr) : "memory");
   6.208 -	return oldbit;
   6.209 -}
   6.210 -
   6.211 -static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
   6.212 -{
   6.213 -	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
   6.214 -}
   6.215 -
   6.216 -static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
   6.217 -{
   6.218 -	int oldbit;
   6.219 -
   6.220 -	__asm__ __volatile__(
   6.221 -		"btl %2,%1\n\tsbbl %0,%0"
   6.222 -		:"=r" (oldbit)
   6.223 -		:"m" (ADDR),"Ir" (nr));
   6.224 -	return oldbit;
   6.225 -}
   6.226 -
   6.227 -#define test_bit(nr,addr) \
   6.228 -(__builtin_constant_p(nr) ? \
   6.229 - constant_test_bit((nr),(addr)) : \
   6.230 - variable_test_bit((nr),(addr)))
   6.231 -
   6.232 -/**
   6.233 - * set_bit - Atomically set a bit in memory
   6.234 - * @nr: the bit to set
   6.235 - * @addr: the address to start counting from
   6.236 - *
   6.237 - * This function is atomic and may not be reordered.  See __set_bit()
   6.238 - * if you do not require the atomic guarantees.
   6.239 - *
   6.240 - * Note: there are no guarantees that this function will not be reordered
    6.241 - * on non-x86 architectures, so if you are writing portable code,
   6.242 - * make sure not to rely on its reordering guarantees.
   6.243 - *
   6.244 - * Note that @nr may be almost arbitrarily large; this function is not
   6.245 - * restricted to acting on a single-word quantity.
   6.246 - */
   6.247 -static inline void set_bit(int nr, volatile unsigned long * addr)
   6.248 -{
   6.249 -	__asm__ __volatile__( LOCK
   6.250 -		"btsl %1,%0"
   6.251 -		:"=m" (ADDR)
   6.252 -		:"Ir" (nr));
   6.253 -}
   6.254 -
   6.255 -/**
   6.256 - * clear_bit - Clears a bit in memory
   6.257 - * @nr: Bit to clear
   6.258 - * @addr: Address to start counting from
   6.259 - *
   6.260 - * clear_bit() is atomic and may not be reordered.  However, it does
   6.261 - * not contain a memory barrier, so if it is used for locking purposes,
   6.262 - * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
   6.263 - * in order to ensure changes are visible on other processors.
   6.264 - */
   6.265 -static inline void clear_bit(int nr, volatile unsigned long * addr)
   6.266 -{
   6.267 -	__asm__ __volatile__( LOCK
   6.268 -		"btrl %1,%0"
   6.269 -		:"=m" (ADDR)
   6.270 -		:"Ir" (nr));
   6.271 -}
   6.272 -
   6.273 -/**
   6.274 - * __ffs - find first bit in word.
   6.275 - * @word: The word to search
   6.276 - *
   6.277 - * Undefined if no bit exists, so code should check against 0 first.
   6.278 - */
   6.279 -static inline unsigned long __ffs(unsigned long word)
   6.280 -{
   6.281 -	__asm__("bsfl %1,%0"
   6.282 -		:"=r" (word)
   6.283 -		:"rm" (word));
   6.284 -	return word;
   6.285 -}
   6.286 -
   6.287 -
   6.288 -/*
   6.289 - * These have to be done with inline assembly: that way the bit-setting
   6.290 - * is guaranteed to be atomic. All bit operations return 0 if the bit
   6.291 - * was cleared before the operation and != 0 if it was not.
   6.292 - *
   6.293 - * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
   6.294 - */
   6.295 -#define ADDR (*(volatile long *) addr)
   6.296 -
   6.297 -#define rdtscll(val) \
   6.298 -     __asm__ __volatile__("rdtsc" : "=A" (val))
   6.299 -
   6.300 -
   6.301 -
   6.302 -#elif defined(__x86_64__)/* ifdef __i386__ */
    6.303 -/************************** x86_64 *******************************/
   6.304 -
   6.305 -#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
   6.306 -#define __xg(x) ((volatile long *)(x))
   6.307 -static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
   6.308 -{
   6.309 -	switch (size) {
   6.310 -		case 1:
   6.311 -			__asm__ __volatile__("xchgb %b0,%1"
   6.312 -				:"=q" (x)
   6.313 -				:"m" (*__xg(ptr)), "0" (x)
   6.314 -				:"memory");
   6.315 -			break;
   6.316 -		case 2:
   6.317 -			__asm__ __volatile__("xchgw %w0,%1"
   6.318 -				:"=r" (x)
   6.319 -				:"m" (*__xg(ptr)), "0" (x)
   6.320 -				:"memory");
   6.321 -			break;
   6.322 -		case 4:
   6.323 -			__asm__ __volatile__("xchgl %k0,%1"
   6.324 -				:"=r" (x)
   6.325 -				:"m" (*__xg(ptr)), "0" (x)
   6.326 -				:"memory");
   6.327 -			break;
   6.328 -		case 8:
   6.329 -			__asm__ __volatile__("xchgq %0,%1"
   6.330 -				:"=r" (x)
   6.331 -				:"m" (*__xg(ptr)), "0" (x)
   6.332 -				:"memory");
   6.333 -			break;
   6.334 -	}
   6.335 -	return x;
   6.336 -}
   6.337 -
   6.338 -/**
   6.339 - * test_and_clear_bit - Clear a bit and return its old value
   6.340 - * @nr: Bit to clear
   6.341 - * @addr: Address to count from
   6.342 - *
   6.343 - * This operation is atomic and cannot be reordered.  
   6.344 - * It also implies a memory barrier.
   6.345 - */
   6.346 -static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
   6.347 -{
   6.348 -	int oldbit;
   6.349 -
   6.350 -	__asm__ __volatile__( LOCK_PREFIX
   6.351 -		"btrl %2,%1\n\tsbbl %0,%0"
   6.352 -		:"=r" (oldbit),"=m" (ADDR)
   6.353 -		:"dIr" (nr) : "memory");
   6.354 -	return oldbit;
   6.355 -}
   6.356 -
   6.357 -static __inline__ int constant_test_bit(int nr, const volatile void * addr)
   6.358 -{
   6.359 -	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
   6.360 -}
   6.361 -
   6.362 -static __inline__ int variable_test_bit(int nr, volatile const void * addr)
   6.363 -{
   6.364 -	int oldbit;
   6.365 -
   6.366 -	__asm__ __volatile__(
   6.367 -		"btl %2,%1\n\tsbbl %0,%0"
   6.368 -		:"=r" (oldbit)
   6.369 -		:"m" (ADDR),"dIr" (nr));
   6.370 -	return oldbit;
   6.371 -}
   6.372 -
   6.373 -#define test_bit(nr,addr) \
   6.374 -(__builtin_constant_p(nr) ? \
   6.375 - constant_test_bit((nr),(addr)) : \
   6.376 - variable_test_bit((nr),(addr)))
   6.377 -
   6.378 -
   6.379 -/**
   6.380 - * set_bit - Atomically set a bit in memory
   6.381 - * @nr: the bit to set
   6.382 - * @addr: the address to start counting from
   6.383 - *
   6.384 - * This function is atomic and may not be reordered.  See __set_bit()
   6.385 - * if you do not require the atomic guarantees.
   6.386 - * Note that @nr may be almost arbitrarily large; this function is not
   6.387 - * restricted to acting on a single-word quantity.
   6.388 - */
   6.389 -static __inline__ void set_bit(int nr, volatile void * addr)
   6.390 -{
   6.391 -	__asm__ __volatile__( LOCK_PREFIX
   6.392 -		"btsl %1,%0"
   6.393 -		:"=m" (ADDR)
   6.394 -		:"dIr" (nr) : "memory");
   6.395 -}
   6.396 -
   6.397 -/**
   6.398 - * clear_bit - Clears a bit in memory
   6.399 - * @nr: Bit to clear
   6.400 - * @addr: Address to start counting from
   6.401 - *
   6.402 - * clear_bit() is atomic and may not be reordered.  However, it does
   6.403 - * not contain a memory barrier, so if it is used for locking purposes,
   6.404 - * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
   6.405 - * in order to ensure changes are visible on other processors.
   6.406 - */
   6.407 -static __inline__ void clear_bit(int nr, volatile void * addr)
   6.408 -{
   6.409 -	__asm__ __volatile__( LOCK_PREFIX
   6.410 -		"btrl %1,%0"
   6.411 -		:"=m" (ADDR)
   6.412 -		:"dIr" (nr));
   6.413 -}
   6.414 -
   6.415 -/**
   6.416 - * __ffs - find first bit in word.
   6.417 - * @word: The word to search
   6.418 - *
   6.419 - * Undefined if no bit exists, so code should check against 0 first.
   6.420 - */
   6.421 -static __inline__ unsigned long __ffs(unsigned long word)
   6.422 -{
   6.423 -	__asm__("bsfq %1,%0"
   6.424 -		:"=r" (word)
   6.425 -		:"rm" (word));
   6.426 -	return word;
   6.427 -}
   6.428 -
   6.429 -#define ADDR (*(volatile long *) addr)
   6.430 -
   6.431 -#define rdtscll(val) do { \
   6.432 -     unsigned int __a,__d; \
   6.433 -     asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
   6.434 -     (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
   6.435 -} while(0)
   6.436 -
   6.437 -#define wrmsr(msr,val1,val2) \
   6.438 -      __asm__ __volatile__("wrmsr" \
   6.439 -                           : /* no outputs */ \
   6.440 -                           : "c" (msr), "a" (val1), "d" (val2))
   6.441 -
   6.442 -#define wrmsrl(msr,val) wrmsr(msr,(u32)((u64)(val)),((u64)(val))>>32)
   6.443 -
   6.444 -
   6.445 -#else /* ifdef __x86_64__ */
   6.446 -#error "Unsupported architecture"
   6.447 -#endif
   6.448 -
   6.449 -
   6.450 -/********************* common i386 and x86_64  ****************************/
   6.451 -struct __synch_xchg_dummy { unsigned long a[100]; };
   6.452 -#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
   6.453 -
   6.454 -#define synch_cmpxchg(ptr, old, new) \
   6.455 -((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
   6.456 -                                     (unsigned long)(old), \
   6.457 -                                     (unsigned long)(new), \
   6.458 -                                     sizeof(*(ptr))))
   6.459 -
   6.460 -static inline unsigned long __synch_cmpxchg(volatile void *ptr,
   6.461 -        unsigned long old,
   6.462 -        unsigned long new, int size)
   6.463 -{
   6.464 -    unsigned long prev;
   6.465 -    switch (size) {
   6.466 -        case 1:
   6.467 -            __asm__ __volatile__("lock; cmpxchgb %b1,%2"
   6.468 -                    : "=a"(prev)
   6.469 -                    : "q"(new), "m"(*__synch_xg(ptr)),
   6.470 -                    "0"(old)
   6.471 -                    : "memory");
   6.472 -            return prev;
   6.473 -        case 2:
   6.474 -            __asm__ __volatile__("lock; cmpxchgw %w1,%2"
   6.475 -                    : "=a"(prev)
   6.476 -                    : "r"(new), "m"(*__synch_xg(ptr)),
   6.477 -                    "0"(old)
   6.478 -                    : "memory");
   6.479 -            return prev;
   6.480 -#ifdef __x86_64__
   6.481 -        case 4:
   6.482 -            __asm__ __volatile__("lock; cmpxchgl %k1,%2"
   6.483 -                    : "=a"(prev)
   6.484 -                    : "r"(new), "m"(*__synch_xg(ptr)),
   6.485 -                    "0"(old)
   6.486 -                    : "memory");
   6.487 -            return prev;
   6.488 -        case 8:
   6.489 -            __asm__ __volatile__("lock; cmpxchgq %1,%2"
   6.490 -                    : "=a"(prev)
   6.491 -                    : "r"(new), "m"(*__synch_xg(ptr)),
   6.492 -                    "0"(old)
   6.493 -                    : "memory");
   6.494 -            return prev;
   6.495 -#else
   6.496 -        case 4:
   6.497 -            __asm__ __volatile__("lock; cmpxchgl %1,%2"
   6.498 -                    : "=a"(prev)
   6.499 -                    : "r"(new), "m"(*__synch_xg(ptr)),
   6.500 -                    "0"(old)
   6.501 -                    : "memory");
   6.502 -            return prev;
   6.503 -#endif
   6.504 -    }
   6.505 -    return old;
   6.506 -}
   6.507 -
   6.508 -
   6.509 -static __inline__ void synch_set_bit(int nr, volatile void * addr)
   6.510 -{
   6.511 -    __asm__ __volatile__ ( 
   6.512 -        "lock btsl %1,%0"
   6.513 -        : "=m" (ADDR) : "Ir" (nr) : "memory" );
   6.514 -}
   6.515 -
   6.516 -static __inline__ void synch_clear_bit(int nr, volatile void * addr)
   6.517 -{
   6.518 -    __asm__ __volatile__ (
   6.519 -        "lock btrl %1,%0"
   6.520 -        : "=m" (ADDR) : "Ir" (nr) : "memory" );
   6.521 -}
   6.522 -
   6.523 -static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
   6.524 -{
   6.525 -    int oldbit;
   6.526 -    __asm__ __volatile__ (
   6.527 -        "lock btsl %2,%1\n\tsbbl %0,%0"
   6.528 -        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
   6.529 -    return oldbit;
   6.530 -}
   6.531 -
   6.532 -static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
   6.533 -{
   6.534 -    int oldbit;
   6.535 -    __asm__ __volatile__ (
   6.536 -        "lock btrl %2,%1\n\tsbbl %0,%0"
   6.537 -        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
   6.538 -    return oldbit;
   6.539 -}
   6.540 -
   6.541 -static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
   6.542 -{
   6.543 -    return ((1UL << (nr & 31)) & 
   6.544 -            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
   6.545 -}
   6.546 -
   6.547 -static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
   6.548 -{
   6.549 -    int oldbit;
   6.550 -    __asm__ __volatile__ (
   6.551 -        "btl %2,%1\n\tsbbl %0,%0"
   6.552 -        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
   6.553 -    return oldbit;
   6.554 -}
   6.555 -
   6.556 -#define synch_test_bit(nr,addr) \
   6.557 -(__builtin_constant_p(nr) ? \
   6.558 - synch_const_test_bit((nr),(addr)) : \
   6.559 - synch_var_test_bit((nr),(addr)))
   6.560 -
   6.561 -
   6.562 -
   6.563 -#endif /* not assembly */
   6.564 -#endif /* _OS_H_ */
     7.1 --- a/extras/mini-os/include/spinlock.h	Thu Aug 10 10:39:58 2006 +0100
     7.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.3 @@ -1,121 +0,0 @@
     7.4 -#ifndef __ASM_SPINLOCK_H
     7.5 -#define __ASM_SPINLOCK_H
     7.6 -
     7.7 -#include <lib.h>
     7.8 -
     7.9 -/*
    7.10 - * Your basic SMP spinlocks, allowing only a single CPU anywhere
    7.11 - */
    7.12 -
    7.13 -typedef struct {
    7.14 -	volatile unsigned int slock;
    7.15 -} spinlock_t;
    7.16 -
    7.17 -#define SPINLOCK_MAGIC	0xdead4ead
    7.18 -
    7.19 -#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
    7.20 -
    7.21 -#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
    7.22 -
    7.23 -/*
    7.24 - * Simple spin lock operations.  There are two variants, one clears IRQ's
    7.25 - * on the local processor, one does not.
    7.26 - *
    7.27 - * We make no fairness assumptions. They have a cost.
    7.28 - */
    7.29 -
    7.30 -#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) <= 0)
    7.31 -#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
    7.32 -
    7.33 -#define spin_lock_string \
    7.34 -        "1:\n" \
    7.35 -	LOCK \
    7.36 -	"decb %0\n\t" \
    7.37 -	"jns 3f\n" \
    7.38 -	"2:\t" \
    7.39 -	"rep;nop\n\t" \
    7.40 -	"cmpb $0,%0\n\t" \
    7.41 -	"jle 2b\n\t" \
    7.42 -	"jmp 1b\n" \
    7.43 -	"3:\n\t"
    7.44 -
    7.45 -#define spin_lock_string_flags \
    7.46 -        "1:\n" \
    7.47 -	LOCK \
    7.48 -	"decb %0\n\t" \
    7.49 -	"jns 4f\n\t" \
    7.50 -	"2:\t" \
    7.51 -	"testl $0x200, %1\n\t" \
    7.52 -	"jz 3f\n\t" \
    7.53 -	"#sti\n\t" \
    7.54 -	"3:\t" \
    7.55 -	"rep;nop\n\t" \
    7.56 -	"cmpb $0, %0\n\t" \
    7.57 -	"jle 3b\n\t" \
    7.58 -	"#cli\n\t" \
    7.59 -	"jmp 1b\n" \
    7.60 -	"4:\n\t"
    7.61 -
    7.62 -/*
    7.63 - * This works. Despite all the confusion.
    7.64 - * (except on PPro SMP or if we are using OOSTORE)
    7.65 - * (PPro errata 66, 92)
    7.66 - */
    7.67 -
    7.68 -#define spin_unlock_string \
    7.69 -	"xchgb %b0, %1" \
    7.70 -		:"=q" (oldval), "=m" (lock->slock) \
    7.71 -		:"0" (oldval) : "memory"
    7.72 -
    7.73 -static inline void _raw_spin_unlock(spinlock_t *lock)
    7.74 -{
    7.75 -	char oldval = 1;
    7.76 -	__asm__ __volatile__(
    7.77 -		spin_unlock_string
    7.78 -	);
    7.79 -}
    7.80 -
    7.81 -static inline int _raw_spin_trylock(spinlock_t *lock)
    7.82 -{
    7.83 -	char oldval;
    7.84 -	__asm__ __volatile__(
    7.85 -		"xchgb %b0,%1\n"
    7.86 -		:"=q" (oldval), "=m" (lock->slock)
    7.87 -		:"0" (0) : "memory");
    7.88 -	return oldval > 0;
    7.89 -}
    7.90 -
    7.91 -static inline void _raw_spin_lock(spinlock_t *lock)
    7.92 -{
    7.93 -	__asm__ __volatile__(
    7.94 -		spin_lock_string
    7.95 -		:"=m" (lock->slock) : : "memory");
    7.96 -}
    7.97 -
    7.98 -static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
    7.99 -{
   7.100 -	__asm__ __volatile__(
   7.101 -		spin_lock_string_flags
   7.102 -		:"=m" (lock->slock) : "r" (flags) : "memory");
   7.103 -}
   7.104 -
   7.105 -#define _spin_trylock(lock)     ({_raw_spin_trylock(lock) ? \
   7.106 -                                1 : ({ 0;});})
   7.107 -
   7.108 -#define _spin_lock(lock)        \
   7.109 -do {                            \
   7.110 -        _raw_spin_lock(lock);   \
   7.111 -} while(0)
   7.112 -
   7.113 -#define _spin_unlock(lock)      \
   7.114 -do {                            \
   7.115 -        _raw_spin_unlock(lock); \
   7.116 -} while (0)
   7.117 -
   7.118 -
   7.119 -#define spin_lock(lock)       _spin_lock(lock)
   7.120 -#define spin_unlock(lock)       _spin_unlock(lock)
   7.121 -
   7.122 -#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
   7.123 -
   7.124 -#endif
     8.1 --- a/extras/mini-os/include/traps.h	Thu Aug 10 10:39:58 2006 +0100
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,73 +0,0 @@
     8.4 -/* 
     8.5 - ****************************************************************************
     8.6 - * (C) 2005 - Grzegorz Milos - Intel Reseach Cambridge
     8.7 - ****************************************************************************
     8.8 - *
     8.9 - *        File: traps.h
    8.10 - *      Author: Grzegorz Milos (gm281@cam.ac.uk)
    8.11 - *              
    8.12 - *        Date: Jun 2005
    8.13 - * 
    8.14 - * Environment: Xen Minimal OS
    8.15 - * Description: Deals with traps
    8.16 - *
    8.17 - ****************************************************************************
    8.18 - */
    8.19 -
    8.20 -#ifndef _TRAPS_H_
    8.21 -#define _TRAPS_H_
    8.22 -
    8.23 -#ifdef __i386__
    8.24 -struct pt_regs {
    8.25 -	long ebx;
    8.26 -	long ecx;
    8.27 -	long edx;
    8.28 -	long esi;
    8.29 -	long edi;
    8.30 -	long ebp;
    8.31 -	long eax;
    8.32 -	int  xds;
    8.33 -	int  xes;
    8.34 -	long orig_eax;
    8.35 -	long eip;
    8.36 -	int  xcs;
    8.37 -	long eflags;
    8.38 -	long esp;
    8.39 -	int  xss;
    8.40 -};
    8.41 -#elif __x86_64__
    8.42 -
    8.43 -struct pt_regs {
    8.44 -	unsigned long r15;
    8.45 -	unsigned long r14;
    8.46 -	unsigned long r13;
    8.47 -	unsigned long r12;
    8.48 -	unsigned long rbp;
    8.49 -	unsigned long rbx;
    8.50 -/* arguments: non interrupts/non tracing syscalls only save upto here*/
    8.51 - 	unsigned long r11;
    8.52 -	unsigned long r10;	
    8.53 -	unsigned long r9;
    8.54 -	unsigned long r8;
    8.55 -	unsigned long rax;
    8.56 -	unsigned long rcx;
    8.57 -	unsigned long rdx;
    8.58 -	unsigned long rsi;
    8.59 -	unsigned long rdi;
    8.60 -	unsigned long orig_rax;
    8.61 -/* end of arguments */ 	
    8.62 -/* cpu exception frame or undefined */
    8.63 -	unsigned long rip;
    8.64 -	unsigned long cs;
    8.65 -	unsigned long eflags; 
    8.66 -	unsigned long rsp; 
    8.67 -	unsigned long ss;
    8.68 -/* top of stack page */ 
    8.69 -};
    8.70 -
    8.71 -
    8.72 -#endif
    8.73 -
    8.74 -void dump_regs(struct pt_regs *regs);
    8.75 -
    8.76 -#endif /* _TRAPS_H_ */
     9.1 --- a/extras/mini-os/include/types.h	Thu Aug 10 10:39:58 2006 +0100
     9.2 +++ b/extras/mini-os/include/types.h	Thu Aug 10 10:43:20 2006 +0100
     9.3 @@ -29,7 +29,7 @@ typedef unsigned int        u32;
     9.4  #ifdef __i386__
     9.5  typedef signed long long    s64;
     9.6  typedef unsigned long long  u64;
     9.7 -#elif defined(__x86_64__)
     9.8 +#elif defined(__x86_64__) || defined(__ia64__)
     9.9  typedef signed long         s64;
    9.10  typedef unsigned long       u64;
    9.11  #endif
    9.12 @@ -49,7 +49,7 @@ typedef struct { unsigned long pte_low; 
    9.13  typedef struct { unsigned long pte_low, pte_high; } pte_t;
    9.14  #endif /* CONFIG_X86_PAE */
    9.15  
    9.16 -#elif defined(__x86_64__)
    9.17 +#elif defined(__x86_64__) || defined(__ia64__)
    9.18  typedef long                quad_t;
    9.19  typedef unsigned long       u_quad_t;
    9.20  typedef unsigned long       uintptr_t;
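Note: the types.h change above simply folds ia64 into the existing LP64 branch, on the assumption that long is 64 bits there, as on x86_64. A hedged compile-time check of that assumption (hypothetical, not part of this changeset) could look like:

    /* Fails to compile if u64 is not exactly 8 bytes on the target. */
    typedef char u64_size_check[sizeof(u64) == 8 ? 1 : -1];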
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/extras/mini-os/include/x86/os.h	Thu Aug 10 10:43:20 2006 +0100
    10.3 @@ -0,0 +1,561 @@
    10.4 +/******************************************************************************
    10.5 + * os.h
    10.6 + * 
     10.7 + * random collection of macros and definitions
    10.8 + */
    10.9 +
   10.10 +#ifndef _OS_H_
   10.11 +#define _OS_H_
   10.12 +
   10.13 +#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
   10.14 +#define __builtin_expect(x, expected_value) (x)
   10.15 +#endif
   10.16 +#define unlikely(x)  __builtin_expect((x),0)
   10.17 +
   10.18 +#define smp_processor_id() 0
   10.19 +
   10.20 +
   10.21 +#ifndef __ASSEMBLY__
   10.22 +#include <types.h>
   10.23 +#include <hypervisor.h>
   10.24 +
   10.25 +extern void do_exit(void);
   10.26 +#define BUG do_exit
   10.27 +
   10.28 +#endif
   10.29 +#include <xen/xen.h>
   10.30 +
   10.31 +
   10.32 +#define force_evtchn_callback() ((void)HYPERVISOR_xen_version(0, 0))
   10.33 +
   10.34 +#define __KERNEL_CS  FLAT_KERNEL_CS
   10.35 +#define __KERNEL_DS  FLAT_KERNEL_DS
   10.36 +#define __KERNEL_SS  FLAT_KERNEL_SS
   10.37 +
   10.38 +#define TRAP_divide_error      0
   10.39 +#define TRAP_debug             1
   10.40 +#define TRAP_nmi               2
   10.41 +#define TRAP_int3              3
   10.42 +#define TRAP_overflow          4
   10.43 +#define TRAP_bounds            5
   10.44 +#define TRAP_invalid_op        6
   10.45 +#define TRAP_no_device         7
   10.46 +#define TRAP_double_fault      8
   10.47 +#define TRAP_copro_seg         9
   10.48 +#define TRAP_invalid_tss      10
   10.49 +#define TRAP_no_segment       11
   10.50 +#define TRAP_stack_error      12
   10.51 +#define TRAP_gp_fault         13
   10.52 +#define TRAP_page_fault       14
   10.53 +#define TRAP_spurious_int     15
   10.54 +#define TRAP_copro_error      16
   10.55 +#define TRAP_alignment_check  17
   10.56 +#define TRAP_machine_check    18
   10.57 +#define TRAP_simd_error       19
   10.58 +#define TRAP_deferred_nmi     31
   10.59 +
   10.60 +/* Everything below this point is not included by assembler (.S) files. */
   10.61 +#ifndef __ASSEMBLY__
   10.62 +
   10.63 +extern shared_info_t *HYPERVISOR_shared_info;
   10.64 +
   10.65 +void trap_init(void);
   10.66 +
   10.67 +
   10.68 +
   10.69 +/* 
   10.70 + * The use of 'barrier' in the following reflects their use as local-lock
   10.71 + * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
   10.72 + * critical operations are executed. All critical operations must complete
   10.73 + * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
   10.74 + * includes these barriers, for example.
   10.75 + */
   10.76 +
   10.77 +#define __cli()								\
   10.78 +do {									\
   10.79 +	vcpu_info_t *_vcpu;						\
   10.80 +	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
   10.81 +	_vcpu->evtchn_upcall_mask = 1;					\
   10.82 +	barrier();							\
   10.83 +} while (0)
   10.84 +
   10.85 +#define __sti()								\
   10.86 +do {									\
   10.87 +	vcpu_info_t *_vcpu;						\
   10.88 +	barrier();							\
   10.89 +	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
   10.90 +	_vcpu->evtchn_upcall_mask = 0;					\
   10.91 +	barrier(); /* unmask then check (avoid races) */		\
   10.92 +	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
   10.93 +		force_evtchn_callback();				\
   10.94 +} while (0)
   10.95 +
   10.96 +#define __save_flags(x)							\
   10.97 +do {									\
   10.98 +	vcpu_info_t *_vcpu;						\
   10.99 +	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
  10.100 +	(x) = _vcpu->evtchn_upcall_mask;				\
  10.101 +} while (0)
  10.102 +
  10.103 +#define __restore_flags(x)						\
  10.104 +do {									\
  10.105 +	vcpu_info_t *_vcpu;						\
  10.106 +	barrier();							\
  10.107 +	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
  10.108 +	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
  10.109 +		barrier(); /* unmask then check (avoid races) */	\
  10.110 +		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
  10.111 +			force_evtchn_callback();			\
  10.112 +	}\
  10.113 +} while (0)
  10.114 +
  10.115 +#define safe_halt()		((void)0)
  10.116 +
  10.117 +#define __save_and_cli(x)						\
  10.118 +do {									\
  10.119 +	vcpu_info_t *_vcpu;						\
  10.120 +	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
  10.121 +	(x) = _vcpu->evtchn_upcall_mask;				\
  10.122 +	_vcpu->evtchn_upcall_mask = 1;					\
  10.123 +	barrier();							\
  10.124 +} while (0)
  10.125 +
  10.126 +#define local_irq_save(x)	__save_and_cli(x)
  10.127 +#define local_irq_restore(x)	__restore_flags(x)
  10.128 +#define local_save_flags(x)	__save_flags(x)
  10.129 +#define local_irq_disable()	__cli()
  10.130 +#define local_irq_enable()	__sti()
  10.131 +
  10.132 +#define irqs_disabled()			\
  10.133 +    HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].evtchn_upcall_mask
  10.134 +
  10.135 +/* This is a barrier for the compiler only, NOT the processor! */
  10.136 +#define barrier() __asm__ __volatile__("": : :"memory")
  10.137 +
  10.138 +#if defined(__i386__)
  10.139 +#define mb()    __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
  10.140 +#define rmb()   __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
  10.141 +#define wmb()	__asm__ __volatile__ ("": : :"memory")
  10.142 +#elif defined(__x86_64__)
  10.143 +#define mb()    __asm__ __volatile__ ("mfence":::"memory")
  10.144 +#define rmb()   __asm__ __volatile__ ("lfence":::"memory")
  10.145 +#define wmb()	__asm__ __volatile__ ("sfence" ::: "memory") /* From CONFIG_UNORDERED_IO (linux) */
  10.146 +#endif
  10.147 +
  10.148 +
  10.149 +#define LOCK_PREFIX ""
  10.150 +#define LOCK ""
  10.151 +#define ADDR (*(volatile long *) addr)
  10.152 +/*
  10.153 + * Make sure gcc doesn't try to be clever and move things around
  10.154 + * on us. We need to use _exactly_ the address the user gave us,
  10.155 + * not some alias that contains the same information.
  10.156 + */
  10.157 +typedef struct { volatile int counter; } atomic_t;
  10.158 +
  10.159 +
  10.160 +/************************** i386 *******************************/
  10.161 +#if defined (__i386__)
  10.162 +
  10.163 +#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
  10.164 +struct __xchg_dummy { unsigned long a[100]; };
  10.165 +#define __xg(x) ((struct __xchg_dummy *)(x))
  10.166 +static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
  10.167 +{
  10.168 +	switch (size) {
  10.169 +		case 1:
  10.170 +			__asm__ __volatile__("xchgb %b0,%1"
  10.171 +				:"=q" (x)
  10.172 +				:"m" (*__xg(ptr)), "0" (x)
  10.173 +				:"memory");
  10.174 +			break;
  10.175 +		case 2:
  10.176 +			__asm__ __volatile__("xchgw %w0,%1"
  10.177 +				:"=r" (x)
  10.178 +				:"m" (*__xg(ptr)), "0" (x)
  10.179 +				:"memory");
  10.180 +			break;
  10.181 +		case 4:
  10.182 +			__asm__ __volatile__("xchgl %0,%1"
  10.183 +				:"=r" (x)
  10.184 +				:"m" (*__xg(ptr)), "0" (x)
  10.185 +				:"memory");
  10.186 +			break;
  10.187 +	}
  10.188 +	return x;
  10.189 +}
  10.190 +
  10.191 +/**
  10.192 + * test_and_clear_bit - Clear a bit and return its old value
  10.193 + * @nr: Bit to clear
  10.194 + * @addr: Address to count from
  10.195 + *
   10.196 + * This operation is atomic and cannot be reordered on x86.
   10.197 + * It may be reordered on architectures other than x86.
  10.198 + * It also implies a memory barrier.
  10.199 + */
  10.200 +static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
  10.201 +{
  10.202 +	int oldbit;
  10.203 +
  10.204 +	__asm__ __volatile__( LOCK
  10.205 +		"btrl %2,%1\n\tsbbl %0,%0"
  10.206 +		:"=r" (oldbit),"=m" (ADDR)
  10.207 +		:"Ir" (nr) : "memory");
  10.208 +	return oldbit;
  10.209 +}
  10.210 +
  10.211 +static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
  10.212 +{
  10.213 +	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
  10.214 +}
  10.215 +
  10.216 +static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
  10.217 +{
  10.218 +	int oldbit;
  10.219 +
  10.220 +	__asm__ __volatile__(
  10.221 +		"btl %2,%1\n\tsbbl %0,%0"
  10.222 +		:"=r" (oldbit)
  10.223 +		:"m" (ADDR),"Ir" (nr));
  10.224 +	return oldbit;
  10.225 +}
  10.226 +
  10.227 +#define test_bit(nr,addr) \
  10.228 +(__builtin_constant_p(nr) ? \
  10.229 + constant_test_bit((nr),(addr)) : \
  10.230 + variable_test_bit((nr),(addr)))
  10.231 +
  10.232 +/**
  10.233 + * set_bit - Atomically set a bit in memory
  10.234 + * @nr: the bit to set
  10.235 + * @addr: the address to start counting from
  10.236 + *
  10.237 + * This function is atomic and may not be reordered.  See __set_bit()
  10.238 + * if you do not require the atomic guarantees.
  10.239 + *
  10.240 + * Note: there are no guarantees that this function will not be reordered
   10.241 + * on non-x86 architectures, so if you are writing portable code,
  10.242 + * make sure not to rely on its reordering guarantees.
  10.243 + *
  10.244 + * Note that @nr may be almost arbitrarily large; this function is not
  10.245 + * restricted to acting on a single-word quantity.
  10.246 + */
  10.247 +static inline void set_bit(int nr, volatile unsigned long * addr)
  10.248 +{
  10.249 +	__asm__ __volatile__( LOCK
  10.250 +		"btsl %1,%0"
  10.251 +		:"=m" (ADDR)
  10.252 +		:"Ir" (nr));
  10.253 +}
  10.254 +
  10.255 +/**
  10.256 + * clear_bit - Clears a bit in memory
  10.257 + * @nr: Bit to clear
  10.258 + * @addr: Address to start counting from
  10.259 + *
  10.260 + * clear_bit() is atomic and may not be reordered.  However, it does
  10.261 + * not contain a memory barrier, so if it is used for locking purposes,
  10.262 + * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  10.263 + * in order to ensure changes are visible on other processors.
  10.264 + */
  10.265 +static inline void clear_bit(int nr, volatile unsigned long * addr)
  10.266 +{
  10.267 +	__asm__ __volatile__( LOCK
  10.268 +		"btrl %1,%0"
  10.269 +		:"=m" (ADDR)
  10.270 +		:"Ir" (nr));
  10.271 +}
  10.272 +
  10.273 +/**
  10.274 + * __ffs - find first bit in word.
  10.275 + * @word: The word to search
  10.276 + *
  10.277 + * Undefined if no bit exists, so code should check against 0 first.
  10.278 + */
  10.279 +static inline unsigned long __ffs(unsigned long word)
  10.280 +{
  10.281 +	__asm__("bsfl %1,%0"
  10.282 +		:"=r" (word)
  10.283 +		:"rm" (word));
  10.284 +	return word;
  10.285 +}
  10.286 +
  10.287 +
  10.288 +/*
  10.289 + * These have to be done with inline assembly: that way the bit-setting
  10.290 + * is guaranteed to be atomic. All bit operations return 0 if the bit
  10.291 + * was cleared before the operation and != 0 if it was not.
  10.292 + *
  10.293 + * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  10.294 + */
  10.295 +#define ADDR (*(volatile long *) addr)
  10.296 +
  10.297 +#define rdtscll(val) \
  10.298 +     __asm__ __volatile__("rdtsc" : "=A" (val))
  10.299 +
  10.300 +
  10.301 +
  10.302 +#elif defined(__x86_64__)/* ifdef __i386__ */
   10.303 +/************************** x86_64 *******************************/
  10.304 +
  10.305 +#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
  10.306 +#define __xg(x) ((volatile long *)(x))
  10.307 +static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
  10.308 +{
  10.309 +	switch (size) {
  10.310 +		case 1:
  10.311 +			__asm__ __volatile__("xchgb %b0,%1"
  10.312 +				:"=q" (x)
  10.313 +				:"m" (*__xg(ptr)), "0" (x)
  10.314 +				:"memory");
  10.315 +			break;
  10.316 +		case 2:
  10.317 +			__asm__ __volatile__("xchgw %w0,%1"
  10.318 +				:"=r" (x)
  10.319 +				:"m" (*__xg(ptr)), "0" (x)
  10.320 +				:"memory");
  10.321 +			break;
  10.322 +		case 4:
  10.323 +			__asm__ __volatile__("xchgl %k0,%1"
  10.324 +				:"=r" (x)
  10.325 +				:"m" (*__xg(ptr)), "0" (x)
  10.326 +				:"memory");
  10.327 +			break;
  10.328 +		case 8:
  10.329 +			__asm__ __volatile__("xchgq %0,%1"
  10.330 +				:"=r" (x)
  10.331 +				:"m" (*__xg(ptr)), "0" (x)
  10.332 +				:"memory");
  10.333 +			break;
  10.334 +	}
  10.335 +	return x;
  10.336 +}
  10.337 +
  10.338 +/**
  10.339 + * test_and_clear_bit - Clear a bit and return its old value
  10.340 + * @nr: Bit to clear
  10.341 + * @addr: Address to count from
  10.342 + *
  10.343 + * This operation is atomic and cannot be reordered.  
  10.344 + * It also implies a memory barrier.
  10.345 + */
  10.346 +static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
  10.347 +{
  10.348 +	int oldbit;
  10.349 +
  10.350 +	__asm__ __volatile__( LOCK_PREFIX
  10.351 +		"btrl %2,%1\n\tsbbl %0,%0"
  10.352 +		:"=r" (oldbit),"=m" (ADDR)
  10.353 +		:"dIr" (nr) : "memory");
  10.354 +	return oldbit;
  10.355 +}
  10.356 +
  10.357 +static __inline__ int constant_test_bit(int nr, const volatile void * addr)
  10.358 +{
  10.359 +	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
  10.360 +}
  10.361 +
  10.362 +static __inline__ int variable_test_bit(int nr, volatile const void * addr)
  10.363 +{
  10.364 +	int oldbit;
  10.365 +
  10.366 +	__asm__ __volatile__(
  10.367 +		"btl %2,%1\n\tsbbl %0,%0"
  10.368 +		:"=r" (oldbit)
  10.369 +		:"m" (ADDR),"dIr" (nr));
  10.370 +	return oldbit;
  10.371 +}
  10.372 +
  10.373 +#define test_bit(nr,addr) \
  10.374 +(__builtin_constant_p(nr) ? \
  10.375 + constant_test_bit((nr),(addr)) : \
  10.376 + variable_test_bit((nr),(addr)))
  10.377 +
  10.378 +
  10.379 +/**
  10.380 + * set_bit - Atomically set a bit in memory
  10.381 + * @nr: the bit to set
  10.382 + * @addr: the address to start counting from
  10.383 + *
  10.384 + * This function is atomic and may not be reordered.  See __set_bit()
  10.385 + * if you do not require the atomic guarantees.
  10.386 + * Note that @nr may be almost arbitrarily large; this function is not
  10.387 + * restricted to acting on a single-word quantity.
  10.388 + */
  10.389 +static __inline__ void set_bit(int nr, volatile void * addr)
  10.390 +{
  10.391 +	__asm__ __volatile__( LOCK_PREFIX
  10.392 +		"btsl %1,%0"
  10.393 +		:"=m" (ADDR)
  10.394 +		:"dIr" (nr) : "memory");
  10.395 +}
  10.396 +
  10.397 +/**
  10.398 + * clear_bit - Clears a bit in memory
  10.399 + * @nr: Bit to clear
  10.400 + * @addr: Address to start counting from
  10.401 + *
  10.402 + * clear_bit() is atomic and may not be reordered.  However, it does
  10.403 + * not contain a memory barrier, so if it is used for locking purposes,
  10.404 + * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  10.405 + * in order to ensure changes are visible on other processors.
  10.406 + */
  10.407 +static __inline__ void clear_bit(int nr, volatile void * addr)
  10.408 +{
  10.409 +	__asm__ __volatile__( LOCK_PREFIX
  10.410 +		"btrl %1,%0"
  10.411 +		:"=m" (ADDR)
  10.412 +		:"dIr" (nr));
  10.413 +}
  10.414 +
  10.415 +/**
  10.416 + * __ffs - find first bit in word.
  10.417 + * @word: The word to search
  10.418 + *
  10.419 + * Undefined if no bit exists, so code should check against 0 first.
  10.420 + */
  10.421 +static __inline__ unsigned long __ffs(unsigned long word)
  10.422 +{
  10.423 +	__asm__("bsfq %1,%0"
  10.424 +		:"=r" (word)
  10.425 +		:"rm" (word));
  10.426 +	return word;
  10.427 +}
  10.428 +
  10.429 +#define ADDR (*(volatile long *) addr)
  10.430 +
  10.431 +#define rdtscll(val) do { \
  10.432 +     unsigned int __a,__d; \
  10.433 +     asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
  10.434 +     (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
  10.435 +} while(0)
  10.436 +
  10.437 +#define wrmsr(msr,val1,val2) \
  10.438 +      __asm__ __volatile__("wrmsr" \
  10.439 +                           : /* no outputs */ \
  10.440 +                           : "c" (msr), "a" (val1), "d" (val2))
  10.441 +
  10.442 +#define wrmsrl(msr,val) wrmsr(msr,(u32)((u64)(val)),((u64)(val))>>32)
  10.443 +
  10.444 +
  10.445 +#else /* ifdef __x86_64__ */
  10.446 +#error "Unsupported architecture"
  10.447 +#endif
  10.448 +
  10.449 +
  10.450 +/********************* common i386 and x86_64  ****************************/
  10.451 +struct __synch_xchg_dummy { unsigned long a[100]; };
  10.452 +#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
  10.453 +
  10.454 +#define synch_cmpxchg(ptr, old, new) \
  10.455 +((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
  10.456 +                                     (unsigned long)(old), \
  10.457 +                                     (unsigned long)(new), \
  10.458 +                                     sizeof(*(ptr))))
  10.459 +
  10.460 +static inline unsigned long __synch_cmpxchg(volatile void *ptr,
  10.461 +        unsigned long old,
  10.462 +        unsigned long new, int size)
  10.463 +{
  10.464 +    unsigned long prev;
  10.465 +    switch (size) {
  10.466 +        case 1:
  10.467 +            __asm__ __volatile__("lock; cmpxchgb %b1,%2"
  10.468 +                    : "=a"(prev)
  10.469 +                    : "q"(new), "m"(*__synch_xg(ptr)),
  10.470 +                    "0"(old)
  10.471 +                    : "memory");
  10.472 +            return prev;
  10.473 +        case 2:
  10.474 +            __asm__ __volatile__("lock; cmpxchgw %w1,%2"
  10.475 +                    : "=a"(prev)
  10.476 +                    : "r"(new), "m"(*__synch_xg(ptr)),
  10.477 +                    "0"(old)
  10.478 +                    : "memory");
  10.479 +            return prev;
  10.480 +#ifdef __x86_64__
  10.481 +        case 4:
  10.482 +            __asm__ __volatile__("lock; cmpxchgl %k1,%2"
  10.483 +                    : "=a"(prev)
  10.484 +                    : "r"(new), "m"(*__synch_xg(ptr)),
  10.485 +                    "0"(old)
  10.486 +                    : "memory");
  10.487 +            return prev;
  10.488 +        case 8:
  10.489 +            __asm__ __volatile__("lock; cmpxchgq %1,%2"
  10.490 +                    : "=a"(prev)
  10.491 +                    : "r"(new), "m"(*__synch_xg(ptr)),
  10.492 +                    "0"(old)
  10.493 +                    : "memory");
  10.494 +            return prev;
  10.495 +#else
  10.496 +        case 4:
  10.497 +            __asm__ __volatile__("lock; cmpxchgl %1,%2"
  10.498 +                    : "=a"(prev)
  10.499 +                    : "r"(new), "m"(*__synch_xg(ptr)),
  10.500 +                    "0"(old)
  10.501 +                    : "memory");
  10.502 +            return prev;
  10.503 +#endif
  10.504 +    }
  10.505 +    return old;
  10.506 +}
  10.507 +
  10.508 +
  10.509 +static __inline__ void synch_set_bit(int nr, volatile void * addr)
  10.510 +{
  10.511 +    __asm__ __volatile__ ( 
  10.512 +        "lock btsl %1,%0"
  10.513 +        : "=m" (ADDR) : "Ir" (nr) : "memory" );
  10.514 +}
  10.515 +
  10.516 +static __inline__ void synch_clear_bit(int nr, volatile void * addr)
  10.517 +{
  10.518 +    __asm__ __volatile__ (
  10.519 +        "lock btrl %1,%0"
  10.520 +        : "=m" (ADDR) : "Ir" (nr) : "memory" );
  10.521 +}
  10.522 +
  10.523 +static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
  10.524 +{
  10.525 +    int oldbit;
  10.526 +    __asm__ __volatile__ (
  10.527 +        "lock btsl %2,%1\n\tsbbl %0,%0"
  10.528 +        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
  10.529 +    return oldbit;
  10.530 +}
  10.531 +
  10.532 +static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
  10.533 +{
  10.534 +    int oldbit;
  10.535 +    __asm__ __volatile__ (
  10.536 +        "lock btrl %2,%1\n\tsbbl %0,%0"
  10.537 +        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
  10.538 +    return oldbit;
  10.539 +}
  10.540 +
  10.541 +static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
  10.542 +{
  10.543 +    return ((1UL << (nr & 31)) & 
  10.544 +            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
  10.545 +}
  10.546 +
  10.547 +static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
  10.548 +{
  10.549 +    int oldbit;
  10.550 +    __asm__ __volatile__ (
  10.551 +        "btl %2,%1\n\tsbbl %0,%0"
  10.552 +        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
  10.553 +    return oldbit;
  10.554 +}
  10.555 +
  10.556 +#define synch_test_bit(nr,addr) \
  10.557 +(__builtin_constant_p(nr) ? \
  10.558 + synch_const_test_bit((nr),(addr)) : \
  10.559 + synch_var_test_bit((nr),(addr)))
  10.560 +
  10.561 +
  10.562 +
  10.563 +#endif /* not assembly */
  10.564 +#endif /* _OS_H_ */
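Note: the __cli()/__sti() family above masks event delivery by flipping evtchn_upcall_mask in the shared info page rather than executing real cli/sti, which a paravirtualized guest must avoid. A minimal usage sketch (hypothetical caller, not part of this changeset):

    void example_critical_section(void)
    {
        unsigned long flags;

        local_irq_save(flags);    /* mask event upcalls, saving the old mask */
        /* ... touch state shared with event handlers ... */
        local_irq_restore(flags); /* restore the mask; fires any pending upcall */
    }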
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/extras/mini-os/include/x86/spinlock.h	Thu Aug 10 10:43:20 2006 +0100
    11.3 @@ -0,0 +1,121 @@
    11.4 +#ifndef __ASM_SPINLOCK_H
    11.5 +#define __ASM_SPINLOCK_H
    11.6 +
    11.7 +#include <lib.h>
    11.8 +
    11.9 +/*
   11.10 + * Your basic SMP spinlocks, allowing only a single CPU anywhere
   11.11 + */
   11.12 +
   11.13 +typedef struct {
   11.14 +	volatile unsigned int slock;
   11.15 +} spinlock_t;
   11.16 +
   11.17 +#define SPINLOCK_MAGIC	0xdead4ead
   11.18 +
   11.19 +#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
   11.20 +
   11.21 +#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
   11.22 +
   11.23 +/*
   11.24 + * Simple spin lock operations.  There are two variants, one clears IRQ's
   11.25 + * on the local processor, one does not.
   11.26 + *
   11.27 + * We make no fairness assumptions. They have a cost.
   11.28 + */
   11.29 +
   11.30 +#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) <= 0)
   11.31 +#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
   11.32 +
   11.33 +#define spin_lock_string \
   11.34 +        "1:\n" \
   11.35 +	LOCK \
   11.36 +	"decb %0\n\t" \
   11.37 +	"jns 3f\n" \
   11.38 +	"2:\t" \
   11.39 +	"rep;nop\n\t" \
   11.40 +	"cmpb $0,%0\n\t" \
   11.41 +	"jle 2b\n\t" \
   11.42 +	"jmp 1b\n" \
   11.43 +	"3:\n\t"
   11.44 +
   11.45 +#define spin_lock_string_flags \
   11.46 +        "1:\n" \
   11.47 +	LOCK \
   11.48 +	"decb %0\n\t" \
   11.49 +	"jns 4f\n\t" \
   11.50 +	"2:\t" \
   11.51 +	"testl $0x200, %1\n\t" \
   11.52 +	"jz 3f\n\t" \
   11.53 +	"#sti\n\t" \
   11.54 +	"3:\t" \
   11.55 +	"rep;nop\n\t" \
   11.56 +	"cmpb $0, %0\n\t" \
   11.57 +	"jle 3b\n\t" \
   11.58 +	"#cli\n\t" \
   11.59 +	"jmp 1b\n" \
   11.60 +	"4:\n\t"
   11.61 +
   11.62 +/*
   11.63 + * This works. Despite all the confusion.
   11.64 + * (except on PPro SMP or if we are using OOSTORE)
   11.65 + * (PPro errata 66, 92)
   11.66 + */
   11.67 +
   11.68 +#define spin_unlock_string \
   11.69 +	"xchgb %b0, %1" \
   11.70 +		:"=q" (oldval), "=m" (lock->slock) \
   11.71 +		:"0" (oldval) : "memory"
   11.72 +
   11.73 +static inline void _raw_spin_unlock(spinlock_t *lock)
   11.74 +{
   11.75 +	char oldval = 1;
   11.76 +	__asm__ __volatile__(
   11.77 +		spin_unlock_string
   11.78 +	);
   11.79 +}
   11.80 +
   11.81 +static inline int _raw_spin_trylock(spinlock_t *lock)
   11.82 +{
   11.83 +	char oldval;
   11.84 +	__asm__ __volatile__(
   11.85 +		"xchgb %b0,%1\n"
   11.86 +		:"=q" (oldval), "=m" (lock->slock)
   11.87 +		:"0" (0) : "memory");
   11.88 +	return oldval > 0;
   11.89 +}
   11.90 +
   11.91 +static inline void _raw_spin_lock(spinlock_t *lock)
   11.92 +{
   11.93 +	__asm__ __volatile__(
   11.94 +		spin_lock_string
   11.95 +		:"=m" (lock->slock) : : "memory");
   11.96 +}
   11.97 +
   11.98 +static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
   11.99 +{
  11.100 +	__asm__ __volatile__(
  11.101 +		spin_lock_string_flags
  11.102 +		:"=m" (lock->slock) : "r" (flags) : "memory");
  11.103 +}
  11.104 +
  11.105 +#define _spin_trylock(lock)     ({_raw_spin_trylock(lock) ? \
  11.106 +                                1 : ({ 0;});})
  11.107 +
  11.108 +#define _spin_lock(lock)        \
  11.109 +do {                            \
  11.110 +        _raw_spin_lock(lock);   \
  11.111 +} while(0)
  11.112 +
  11.113 +#define _spin_unlock(lock)      \
  11.114 +do {                            \
  11.115 +        _raw_spin_unlock(lock); \
  11.116 +} while (0)
  11.117 +
  11.118 +
  11.119 +#define spin_lock(lock)       _spin_lock(lock)
  11.120 +#define spin_unlock(lock)       _spin_unlock(lock)
  11.121 +
  11.122 +#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
  11.123 +
  11.124 +#endif
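Note: with LOCK defined empty in os.h, this spinlock compiles to an unlocked decb on the single-vcpu mini-os, so it acts as a cheap re-entrancy flag rather than a true SMP lock. A hedged usage sketch (names hypothetical):

    static DEFINE_SPINLOCK(demo_lock);   /* statically initialised unlocked */

    void demo(void)
    {
        spin_lock(&demo_lock);
        /* ... section needing mutual exclusion ... */
        spin_unlock(&demo_lock);
    }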
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/extras/mini-os/include/x86/traps.h	Thu Aug 10 10:43:20 2006 +0100
    12.3 @@ -0,0 +1,73 @@
    12.4 +/* 
    12.5 + ****************************************************************************
     12.6 + * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
    12.7 + ****************************************************************************
    12.8 + *
    12.9 + *        File: traps.h
   12.10 + *      Author: Grzegorz Milos (gm281@cam.ac.uk)
   12.11 + *              
   12.12 + *        Date: Jun 2005
   12.13 + * 
   12.14 + * Environment: Xen Minimal OS
   12.15 + * Description: Deals with traps
   12.16 + *
   12.17 + ****************************************************************************
   12.18 + */
   12.19 +
   12.20 +#ifndef _TRAPS_H_
   12.21 +#define _TRAPS_H_
   12.22 +
   12.23 +#ifdef __i386__
   12.24 +struct pt_regs {
   12.25 +	long ebx;
   12.26 +	long ecx;
   12.27 +	long edx;
   12.28 +	long esi;
   12.29 +	long edi;
   12.30 +	long ebp;
   12.31 +	long eax;
   12.32 +	int  xds;
   12.33 +	int  xes;
   12.34 +	long orig_eax;
   12.35 +	long eip;
   12.36 +	int  xcs;
   12.37 +	long eflags;
   12.38 +	long esp;
   12.39 +	int  xss;
   12.40 +};
   12.41 +#elif __x86_64__
   12.42 +
   12.43 +struct pt_regs {
   12.44 +	unsigned long r15;
   12.45 +	unsigned long r14;
   12.46 +	unsigned long r13;
   12.47 +	unsigned long r12;
   12.48 +	unsigned long rbp;
   12.49 +	unsigned long rbx;
    12.50 +/* arguments: non-interrupt/non-tracing syscalls only save up to here */
   12.51 + 	unsigned long r11;
   12.52 +	unsigned long r10;	
   12.53 +	unsigned long r9;
   12.54 +	unsigned long r8;
   12.55 +	unsigned long rax;
   12.56 +	unsigned long rcx;
   12.57 +	unsigned long rdx;
   12.58 +	unsigned long rsi;
   12.59 +	unsigned long rdi;
   12.60 +	unsigned long orig_rax;
   12.61 +/* end of arguments */ 	
   12.62 +/* cpu exception frame or undefined */
   12.63 +	unsigned long rip;
   12.64 +	unsigned long cs;
   12.65 +	unsigned long eflags; 
   12.66 +	unsigned long rsp; 
   12.67 +	unsigned long ss;
   12.68 +/* top of stack page */ 
   12.69 +};
   12.70 +
   12.71 +
   12.72 +#endif
   12.73 +
   12.74 +void dump_regs(struct pt_regs *regs);
   12.75 +
   12.76 +#endif /* _TRAPS_H_ */
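Note: struct pt_regs mirrors the register save order used by the entry.S stubs, so C-level handlers receive the faulting context directly. A hypothetical handler sketch, assuming only the dump_regs() declared above:

    void do_example_fault(struct pt_regs *regs)
    {
        /* Which fields exist (eip vs. rip) depends on the architecture
         * branch selected at compile time. */
        dump_regs(regs);
    }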
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h	Thu Aug 10 10:43:20 2006 +0100
    13.3 @@ -0,0 +1,326 @@
    13.4 +/******************************************************************************
    13.5 + * hypercall-x86_32.h
    13.6 + * 
    13.7 + * Copied from XenLinux.
    13.8 + * 
    13.9 + * Copyright (c) 2002-2004, K A Fraser
   13.10 + * 
   13.11 + * This file may be distributed separately from the Linux kernel, or
   13.12 + * incorporated into other software packages, subject to the following license:
   13.13 + * 
   13.14 + * Permission is hereby granted, free of charge, to any person obtaining a copy
   13.15 + * of this source file (the "Software"), to deal in the Software without
   13.16 + * restriction, including without limitation the rights to use, copy, modify,
   13.17 + * merge, publish, distribute, sublicense, and/or sell copies of the Software,
   13.18 + * and to permit persons to whom the Software is furnished to do so, subject to
   13.19 + * the following conditions:
   13.20 + * 
   13.21 + * The above copyright notice and this permission notice shall be included in
   13.22 + * all copies or substantial portions of the Software.
   13.23 + * 
   13.24 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   13.25 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   13.26 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   13.27 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   13.28 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   13.29 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
   13.30 + * IN THE SOFTWARE.
   13.31 + */
   13.32 +
   13.33 +#ifndef __HYPERCALL_X86_32_H__
   13.34 +#define __HYPERCALL_X86_32_H__
   13.35 +
   13.36 +#include <xen/xen.h>
   13.37 +#include <xen/sched.h>
   13.38 +#include <xen/nmi.h>
   13.39 +#include <mm.h>
   13.40 +
   13.41 +#define __STR(x) #x
   13.42 +#define STR(x) __STR(x)
   13.43 +
   13.44 +extern char hypercall_page[PAGE_SIZE];
   13.45 +
   13.46 +#define _hypercall0(type, name)			\
   13.47 +({						\
   13.48 +	long __res;				\
   13.49 +	asm volatile (				\
   13.50 +		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
   13.51 +		: "=a" (__res)			\
   13.52 +		:				\
   13.53 +		: "memory" );			\
   13.54 +	(type)__res;				\
   13.55 +})
   13.56 +
   13.57 +#define _hypercall1(type, name, a1)				\
   13.58 +({								\
   13.59 +	long __res, __ign1;					\
   13.60 +	asm volatile (						\
   13.61 +		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
   13.62 +		: "=a" (__res), "=b" (__ign1)			\
   13.63 +		: "1" ((long)(a1))				\
   13.64 +		: "memory" );					\
   13.65 +	(type)__res;						\
   13.66 +})
   13.67 +
   13.68 +#define _hypercall2(type, name, a1, a2)				\
   13.69 +({								\
   13.70 +	long __res, __ign1, __ign2;				\
   13.71 +	asm volatile (						\
   13.72 +		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
   13.73 +		: "=a" (__res), "=b" (__ign1), "=c" (__ign2)	\
   13.74 +		: "1" ((long)(a1)), "2" ((long)(a2))		\
   13.75 +		: "memory" );					\
   13.76 +	(type)__res;						\
   13.77 +})
   13.78 +
   13.79 +#define _hypercall3(type, name, a1, a2, a3)			\
   13.80 +({								\
   13.81 +	long __res, __ign1, __ign2, __ign3;			\
   13.82 +	asm volatile (						\
   13.83 +		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
   13.84 +		: "=a" (__res), "=b" (__ign1), "=c" (__ign2), 	\
   13.85 +		"=d" (__ign3)					\
   13.86 +		: "1" ((long)(a1)), "2" ((long)(a2)),		\
   13.87 +		"3" ((long)(a3))				\
   13.88 +		: "memory" );					\
   13.89 +	(type)__res;						\
   13.90 +})
   13.91 +
   13.92 +#define _hypercall4(type, name, a1, a2, a3, a4)			\
   13.93 +({								\
   13.94 +	long __res, __ign1, __ign2, __ign3, __ign4;		\
   13.95 +	asm volatile (						\
   13.96 +		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
   13.97 +		: "=a" (__res), "=b" (__ign1), "=c" (__ign2),	\
   13.98 +		"=d" (__ign3), "=S" (__ign4)			\
   13.99 +		: "1" ((long)(a1)), "2" ((long)(a2)),		\
  13.100 +		"3" ((long)(a3)), "4" ((long)(a4))		\
  13.101 +		: "memory" );					\
  13.102 +	(type)__res;						\
  13.103 +})
  13.104 +
  13.105 +#define _hypercall5(type, name, a1, a2, a3, a4, a5)		\
  13.106 +({								\
  13.107 +	long __res, __ign1, __ign2, __ign3, __ign4, __ign5;	\
  13.108 +	asm volatile (						\
  13.109 +		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
  13.110 +		: "=a" (__res), "=b" (__ign1), "=c" (__ign2),	\
  13.111 +		"=d" (__ign3), "=S" (__ign4), "=D" (__ign5)	\
  13.112 +		: "1" ((long)(a1)), "2" ((long)(a2)),		\
  13.113 +		"3" ((long)(a3)), "4" ((long)(a4)),		\
  13.114 +		"5" ((long)(a5))				\
  13.115 +		: "memory" );					\
  13.116 +	(type)__res;						\
  13.117 +})
  13.118 +
  13.119 +static inline int
  13.120 +HYPERVISOR_set_trap_table(
  13.121 +	trap_info_t *table)
  13.122 +{
  13.123 +	return _hypercall1(int, set_trap_table, table);
  13.124 +}
  13.125 +
  13.126 +static inline int
  13.127 +HYPERVISOR_mmu_update(
  13.128 +	mmu_update_t *req, int count, int *success_count, domid_t domid)
  13.129 +{
  13.130 +	return _hypercall4(int, mmu_update, req, count, success_count, domid);
  13.131 +}
  13.132 +
  13.133 +static inline int
  13.134 +HYPERVISOR_mmuext_op(
  13.135 +	struct mmuext_op *op, int count, int *success_count, domid_t domid)
  13.136 +{
  13.137 +	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
  13.138 +}
  13.139 +
  13.140 +static inline int
  13.141 +HYPERVISOR_set_gdt(
  13.142 +	unsigned long *frame_list, int entries)
  13.143 +{
  13.144 +	return _hypercall2(int, set_gdt, frame_list, entries);
  13.145 +}
  13.146 +
  13.147 +static inline int
  13.148 +HYPERVISOR_stack_switch(
  13.149 +	unsigned long ss, unsigned long esp)
  13.150 +{
  13.151 +	return _hypercall2(int, stack_switch, ss, esp);
  13.152 +}
  13.153 +
  13.154 +static inline int
  13.155 +HYPERVISOR_set_callbacks(
  13.156 +	unsigned long event_selector, unsigned long event_address,
  13.157 +	unsigned long failsafe_selector, unsigned long failsafe_address)
  13.158 +{
  13.159 +	return _hypercall4(int, set_callbacks,
  13.160 +			   event_selector, event_address,
  13.161 +			   failsafe_selector, failsafe_address);
  13.162 +}
  13.163 +
  13.164 +static inline int
  13.165 +HYPERVISOR_fpu_taskswitch(
  13.166 +	int set)
  13.167 +{
  13.168 +	return _hypercall1(int, fpu_taskswitch, set);
  13.169 +}
  13.170 +
  13.171 +static inline int
  13.172 +HYPERVISOR_sched_op(
  13.173 +	int cmd, unsigned long arg)
  13.174 +{
  13.175 +	return _hypercall2(int, sched_op, cmd, arg);
  13.176 +}
  13.177 +
  13.178 +static inline long
  13.179 +HYPERVISOR_set_timer_op(
  13.180 +	u64 timeout)
  13.181 +{
  13.182 +	unsigned long timeout_hi = (unsigned long)(timeout>>32);
  13.183 +	unsigned long timeout_lo = (unsigned long)timeout;
  13.184 +	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
  13.185 +}
  13.186 +
  13.187 +static inline int
  13.188 +HYPERVISOR_dom0_op(
  13.189 +	dom0_op_t *dom0_op)
  13.190 +{
  13.191 +	dom0_op->interface_version = DOM0_INTERFACE_VERSION;
  13.192 +	return _hypercall1(int, dom0_op, dom0_op);
  13.193 +}
  13.194 +
  13.195 +static inline int
  13.196 +HYPERVISOR_set_debugreg(
  13.197 +	int reg, unsigned long value)
  13.198 +{
  13.199 +	return _hypercall2(int, set_debugreg, reg, value);
  13.200 +}
  13.201 +
  13.202 +static inline unsigned long
  13.203 +HYPERVISOR_get_debugreg(
  13.204 +	int reg)
  13.205 +{
  13.206 +	return _hypercall1(unsigned long, get_debugreg, reg);
  13.207 +}
  13.208 +
  13.209 +static inline int
  13.210 +HYPERVISOR_update_descriptor(
  13.211 +	u64 ma, u64 desc)
  13.212 +{
  13.213 +	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
  13.214 +}
  13.215 +
  13.216 +static inline int
  13.217 +HYPERVISOR_memory_op(
  13.218 +	unsigned int cmd, void *arg)
  13.219 +{
  13.220 +	return _hypercall2(int, memory_op, cmd, arg);
  13.221 +}
  13.222 +
  13.223 +static inline int
  13.224 +HYPERVISOR_multicall(
  13.225 +	void *call_list, int nr_calls)
  13.226 +{
  13.227 +	return _hypercall2(int, multicall, call_list, nr_calls);
  13.228 +}
  13.229 +
  13.230 +static inline int
  13.231 +HYPERVISOR_update_va_mapping(
  13.232 +	unsigned long va, pte_t new_val, unsigned long flags)
  13.233 +{
  13.234 +	unsigned long pte_hi = 0;
  13.235 +#ifdef CONFIG_X86_PAE
  13.236 +	pte_hi = new_val.pte_high;
  13.237 +#endif
  13.238 +	return _hypercall4(int, update_va_mapping, va,
  13.239 +			   new_val.pte_low, pte_hi, flags);
  13.240 +}
  13.241 +
  13.242 +static inline int
  13.243 +HYPERVISOR_event_channel_op(
  13.244 +	void *op)
  13.245 +{
  13.246 +	return _hypercall1(int, event_channel_op, op);
  13.247 +}
  13.248 +
  13.249 +static inline int
  13.250 +HYPERVISOR_xen_version(
  13.251 +	int cmd, void *arg)
  13.252 +{
  13.253 +	return _hypercall2(int, xen_version, cmd, arg);
  13.254 +}
  13.255 +
  13.256 +static inline int
  13.257 +HYPERVISOR_console_io(
  13.258 +	int cmd, int count, char *str)
  13.259 +{
  13.260 +	return _hypercall3(int, console_io, cmd, count, str);
  13.261 +}
  13.262 +
  13.263 +static inline int
  13.264 +HYPERVISOR_physdev_op(
  13.265 +	void *physdev_op)
  13.266 +{
  13.267 +	return _hypercall1(int, physdev_op, physdev_op);
  13.268 +}
  13.269 +
  13.270 +static inline int
  13.271 +HYPERVISOR_grant_table_op(
  13.272 +	unsigned int cmd, void *uop, unsigned int count)
  13.273 +{
  13.274 +	return _hypercall3(int, grant_table_op, cmd, uop, count);
  13.275 +}
  13.276 +
  13.277 +static inline int
  13.278 +HYPERVISOR_update_va_mapping_otherdomain(
  13.279 +	unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
  13.280 +{
  13.281 +	unsigned long pte_hi = 0;
  13.282 +#ifdef CONFIG_X86_PAE
  13.283 +	pte_hi = new_val.pte_high;
  13.284 +#endif
  13.285 +	return _hypercall5(int, update_va_mapping_otherdomain, va,
  13.286 +			   new_val.pte_low, pte_hi, flags, domid);
  13.287 +}
  13.288 +
  13.289 +static inline int
  13.290 +HYPERVISOR_vm_assist(
  13.291 +	unsigned int cmd, unsigned int type)
  13.292 +{
  13.293 +	return _hypercall2(int, vm_assist, cmd, type);
  13.294 +}
  13.295 +
  13.296 +static inline int
  13.297 +HYPERVISOR_vcpu_op(
  13.298 +	int cmd, int vcpuid, void *extra_args)
  13.299 +{
  13.300 +	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
  13.301 +}
  13.302 +
  13.303 +static inline int
  13.304 +HYPERVISOR_suspend(
  13.305 +	unsigned long srec)
  13.306 +{
  13.307 +	return _hypercall3(int, sched_op, SCHEDOP_shutdown,
  13.308 +			   SHUTDOWN_suspend, srec);
  13.309 +}
  13.310 +
  13.311 +static inline int
  13.312 +HYPERVISOR_nmi_op(
  13.313 +	unsigned long op,
  13.314 +	unsigned long arg)
  13.315 +{
  13.316 +	return _hypercall2(int, nmi_op, op, arg);
  13.317 +}
  13.318 +
  13.319 +#endif /* __HYPERCALL_X86_32_H__ */
  13.320 +
  13.321 +/*
  13.322 + * Local variables:
  13.323 + *  c-file-style: "linux"
  13.324 + *  indent-tabs-mode: t
  13.325 + *  c-indent-level: 8
  13.326 + *  c-basic-offset: 8
  13.327 + *  tab-width: 8
  13.328 + * End:
  13.329 + */
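Note: each wrapper above calls into a fixed slot of the hypercall transfer page (32 bytes per hypercall number), passing arguments in ebx/ecx/edx/esi/edi on x86_32. A hedged usage sketch writing to the emergency console (CONSOLEIO_write comes from xen/xen.h; the helper name is hypothetical):

    static void say_hello(void)
    {
        static char msg[] = "hello from mini-os\n";

        (void)HYPERVISOR_console_io(CONSOLEIO_write, sizeof(msg) - 1, msg);
    }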
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h	Thu Aug 10 10:43:20 2006 +0100
    14.3 @@ -0,0 +1,326 @@
    14.4 +/******************************************************************************
    14.5 + * hypercall-x86_64.h
    14.6 + * 
    14.7 + * Copied from XenLinux.
    14.8 + * 
    14.9 + * Copyright (c) 2002-2004, K A Fraser
   14.10 + * 
   14.11 + * 64-bit updates:
   14.12 + *   Benjamin Liu <benjamin.liu@intel.com>
   14.13 + *   Jun Nakajima <jun.nakajima@intel.com>
   14.14 + * 
   14.15 + * This file may be distributed separately from the Linux kernel, or
   14.16 + * incorporated into other software packages, subject to the following license:
   14.17 + * 
   14.18 + * Permission is hereby granted, free of charge, to any person obtaining a copy
   14.19 + * of this source file (the "Software"), to deal in the Software without
   14.20 + * restriction, including without limitation the rights to use, copy, modify,
   14.21 + * merge, publish, distribute, sublicense, and/or sell copies of the Software,
   14.22 + * and to permit persons to whom the Software is furnished to do so, subject to
   14.23 + * the following conditions:
   14.24 + * 
   14.25 + * The above copyright notice and this permission notice shall be included in
   14.26 + * all copies or substantial portions of the Software.
   14.27 + * 
   14.28 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   14.29 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   14.30 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   14.31 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   14.32 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   14.33 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
   14.34 + * IN THE SOFTWARE.
   14.35 + */
   14.36 +
   14.37 +#ifndef __HYPERCALL_X86_64_H__
   14.38 +#define __HYPERCALL_X86_64_H__
   14.39 +
   14.40 +#include <xen/xen.h>
   14.41 +#include <xen/sched.h>
   14.42 +#include <mm.h>
   14.43 +
   14.44 +#define __STR(x) #x
   14.45 +#define STR(x) __STR(x)
   14.46 +
   14.47 +extern char hypercall_page[PAGE_SIZE];
   14.48 +
   14.49 +#define _hypercall0(type, name)			\
   14.50 +({						\
   14.51 +	long __res;				\
   14.52 +	asm volatile (				\
   14.53 +		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
   14.54 +		: "=a" (__res)			\
   14.55 +		:				\
   14.56 +		: "memory" );			\
   14.57 +	(type)__res;				\
   14.58 +})
   14.59 +
   14.60 +#define _hypercall1(type, name, a1)				\
   14.61 +({								\
   14.62 +	long __res, __ign1;					\
   14.63 +	asm volatile (						\
   14.64 +		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
   14.65 +		: "=a" (__res), "=D" (__ign1)			\
   14.66 +		: "1" ((long)(a1))				\
   14.67 +		: "memory" );					\
   14.68 +	(type)__res;						\
   14.69 +})
   14.70 +
   14.71 +#define _hypercall2(type, name, a1, a2)				\
   14.72 +({								\
   14.73 +	long __res, __ign1, __ign2;				\
   14.74 +	asm volatile (						\
   14.75 +		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
   14.76 +		: "=a" (__res), "=D" (__ign1), "=S" (__ign2)	\
   14.77 +		: "1" ((long)(a1)), "2" ((long)(a2))		\
   14.78 +		: "memory" );					\
   14.79 +	(type)__res;						\
   14.80 +})
   14.81 +
   14.82 +#define _hypercall3(type, name, a1, a2, a3)			\
   14.83 +({								\
   14.84 +	long __res, __ign1, __ign2, __ign3;			\
   14.85 +	asm volatile (						\
   14.86 +		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
   14.87 +		: "=a" (__res), "=D" (__ign1), "=S" (__ign2), 	\
   14.88 +		"=d" (__ign3)					\
   14.89 +		: "1" ((long)(a1)), "2" ((long)(a2)),		\
   14.90 +		"3" ((long)(a3))				\
   14.91 +		: "memory" );					\
   14.92 +	(type)__res;						\
   14.93 +})
   14.94 +
   14.95 +#define _hypercall4(type, name, a1, a2, a3, a4)			\
   14.96 +({								\
   14.97 +	long __res, __ign1, __ign2, __ign3;			\
   14.98 +	asm volatile (						\
   14.99 +		"movq %7,%%r10; "				\
  14.100 +		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
  14.101 +		: "=a" (__res), "=D" (__ign1), "=S" (__ign2),	\
  14.102 +		"=d" (__ign3)					\
  14.103 +		: "1" ((long)(a1)), "2" ((long)(a2)),		\
  14.104 +		"3" ((long)(a3)), "g" ((long)(a4))		\
  14.105 +		: "memory", "r10" );				\
  14.106 +	(type)__res;						\
  14.107 +})
  14.108 +
  14.109 +#define _hypercall5(type, name, a1, a2, a3, a4, a5)		\
  14.110 +({								\
  14.111 +	long __res, __ign1, __ign2, __ign3;			\
  14.112 +	asm volatile (						\
  14.113 +		"movq %7,%%r10; movq %8,%%r8; "			\
  14.114 +		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
  14.115 +		: "=a" (__res), "=D" (__ign1), "=S" (__ign2),	\
  14.116 +		"=d" (__ign3)					\
  14.117 +		: "1" ((long)(a1)), "2" ((long)(a2)),		\
  14.118 +		"3" ((long)(a3)), "g" ((long)(a4)),		\
  14.119 +		"g" ((long)(a5))				\
  14.120 +		: "memory", "r10", "r8" );			\
  14.121 +	(type)__res;						\
  14.122 +})
  14.123 +
  14.124 +static inline int
  14.125 +HYPERVISOR_set_trap_table(
  14.126 +	trap_info_t *table)
  14.127 +{
  14.128 +	return _hypercall1(int, set_trap_table, table);
  14.129 +}
  14.130 +
  14.131 +static inline int
  14.132 +HYPERVISOR_mmu_update(
  14.133 +	mmu_update_t *req, int count, int *success_count, domid_t domid)
  14.134 +{
  14.135 +	return _hypercall4(int, mmu_update, req, count, success_count, domid);
  14.136 +}
  14.137 +
  14.138 +static inline int
  14.139 +HYPERVISOR_mmuext_op(
  14.140 +	struct mmuext_op *op, int count, int *success_count, domid_t domid)
  14.141 +{
  14.142 +	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
  14.143 +}
  14.144 +
  14.145 +static inline int
  14.146 +HYPERVISOR_set_gdt(
  14.147 +	unsigned long *frame_list, int entries)
  14.148 +{
  14.149 +	return _hypercall2(int, set_gdt, frame_list, entries);
  14.150 +}
  14.151 +
  14.152 +static inline int
  14.153 +HYPERVISOR_stack_switch(
  14.154 +	unsigned long ss, unsigned long esp)
  14.155 +{
  14.156 +	return _hypercall2(int, stack_switch, ss, esp);
  14.157 +}
  14.158 +
  14.159 +static inline int
  14.160 +HYPERVISOR_set_callbacks(
  14.161 +	unsigned long event_address, unsigned long failsafe_address, 
  14.162 +	unsigned long syscall_address)
  14.163 +{
  14.164 +	return _hypercall3(int, set_callbacks,
  14.165 +			   event_address, failsafe_address, syscall_address);
  14.166 +}
  14.167 +
  14.168 +static inline int
  14.169 +HYPERVISOR_fpu_taskswitch(
  14.170 +	int set)
  14.171 +{
  14.172 +	return _hypercall1(int, fpu_taskswitch, set);
  14.173 +}
  14.174 +
  14.175 +static inline int
  14.176 +HYPERVISOR_sched_op(
  14.177 +	int cmd, unsigned long arg)
  14.178 +{
  14.179 +	return _hypercall2(int, sched_op, cmd, arg);
  14.180 +}
  14.181 +
  14.182 +static inline long
  14.183 +HYPERVISOR_set_timer_op(
  14.184 +	u64 timeout)
  14.185 +{
  14.186 +	return _hypercall1(long, set_timer_op, timeout);
  14.187 +}
  14.188 +
  14.189 +static inline int
  14.190 +HYPERVISOR_dom0_op(
  14.191 +	dom0_op_t *dom0_op)
  14.192 +{
  14.193 +	dom0_op->interface_version = DOM0_INTERFACE_VERSION;
  14.194 +	return _hypercall1(int, dom0_op, dom0_op);
  14.195 +}
  14.196 +
  14.197 +static inline int
  14.198 +HYPERVISOR_set_debugreg(
  14.199 +	int reg, unsigned long value)
  14.200 +{
  14.201 +	return _hypercall2(int, set_debugreg, reg, value);
  14.202 +}
  14.203 +
  14.204 +static inline unsigned long
  14.205 +HYPERVISOR_get_debugreg(
  14.206 +	int reg)
  14.207 +{
  14.208 +	return _hypercall1(unsigned long, get_debugreg, reg);
  14.209 +}
  14.210 +
  14.211 +static inline int
  14.212 +HYPERVISOR_update_descriptor(
  14.213 +	unsigned long ma, unsigned long word)
  14.214 +{
  14.215 +	return _hypercall2(int, update_descriptor, ma, word);
  14.216 +}
  14.217 +
  14.218 +static inline int
  14.219 +HYPERVISOR_memory_op(
  14.220 +	unsigned int cmd, void *arg)
  14.221 +{
  14.222 +	return _hypercall2(int, memory_op, cmd, arg);
  14.223 +}
  14.224 +
  14.225 +static inline int
  14.226 +HYPERVISOR_multicall(
  14.227 +	void *call_list, int nr_calls)
  14.228 +{
  14.229 +	return _hypercall2(int, multicall, call_list, nr_calls);
  14.230 +}
  14.231 +
  14.232 +static inline int
  14.233 +HYPERVISOR_update_va_mapping(
  14.234 +	unsigned long va, pte_t new_val, unsigned long flags)
  14.235 +{
  14.236 +	return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
  14.237 +}
  14.238 +
  14.239 +static inline int
  14.240 +HYPERVISOR_event_channel_op(
  14.241 +	void *op)
  14.242 +{
  14.243 +	return _hypercall1(int, event_channel_op, op);
  14.244 +}
  14.245 +
  14.246 +static inline int
  14.247 +HYPERVISOR_xen_version(
  14.248 +	int cmd, void *arg)
  14.249 +{
  14.250 +	return _hypercall2(int, xen_version, cmd, arg);
  14.251 +}
  14.252 +
  14.253 +static inline int
  14.254 +HYPERVISOR_console_io(
  14.255 +	int cmd, int count, char *str)
  14.256 +{
  14.257 +	return _hypercall3(int, console_io, cmd, count, str);
  14.258 +}
  14.259 +
  14.260 +static inline int
  14.261 +HYPERVISOR_physdev_op(
  14.262 +	void *physdev_op)
  14.263 +{
  14.264 +	return _hypercall1(int, physdev_op, physdev_op);
  14.265 +}
  14.266 +
  14.267 +static inline int
  14.268 +HYPERVISOR_grant_table_op(
  14.269 +	unsigned int cmd, void *uop, unsigned int count)
  14.270 +{
  14.271 +	return _hypercall3(int, grant_table_op, cmd, uop, count);
  14.272 +}
  14.273 +
  14.274 +static inline int
  14.275 +HYPERVISOR_update_va_mapping_otherdomain(
  14.276 +	unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
  14.277 +{
  14.278 +	return _hypercall4(int, update_va_mapping_otherdomain, va,
  14.279 +			   new_val.pte, flags, domid);
  14.280 +}
  14.281 +
  14.282 +static inline int
  14.283 +HYPERVISOR_vm_assist(
  14.284 +	unsigned int cmd, unsigned int type)
  14.285 +{
  14.286 +	return _hypercall2(int, vm_assist, cmd, type);
  14.287 +}
  14.288 +
  14.289 +static inline int
  14.290 +HYPERVISOR_vcpu_op(
  14.291 +	int cmd, int vcpuid, void *extra_args)
  14.292 +{
  14.293 +	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
  14.294 +}
  14.295 +
  14.296 +static inline int
  14.297 +HYPERVISOR_set_segment_base(
  14.298 +	int reg, unsigned long value)
  14.299 +{
  14.300 +	return _hypercall2(int, set_segment_base, reg, value);
  14.301 +}
  14.302 +
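          +/*
          + * suspend is just the three-argument shutdown form of sched_op;
          + * srec is presumably the machine frame number of the suspend
          + * record that the tools use when the domain is later resumed.
          + */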
  14.303 +static inline int
  14.304 +HYPERVISOR_suspend(
  14.305 +	unsigned long srec)
  14.306 +{
  14.307 +	return _hypercall3(int, sched_op, SCHEDOP_shutdown,
  14.308 +			   SHUTDOWN_suspend, srec);
  14.309 +}
  14.310 +
  14.311 +static inline int
  14.312 +HYPERVISOR_nmi_op(
  14.313 +	unsigned long op,
  14.314 +	unsigned long arg)
  14.315 +{
  14.316 +	return _hypercall2(int, nmi_op, op, arg);
  14.317 +}
  14.318 +
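          +/*
          + * Usage sketch (illustrative only, not part of this interface):
          + * writing a message to the Xen emergency console, assuming the
          + * usual CONSOLEIO_write command from the public Xen headers:
          + *
          + *	static void emergency_print(const char *s)
          + *	{
          + *		HYPERVISOR_console_io(CONSOLEIO_write, (int) strlen(s), (char *)s);
          + *	}
          + */
          +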
  14.319 +#endif /* __HYPERCALL_X86_64_H__ */
  14.320 +
  14.321 +/*
  14.322 + * Local variables:
  14.323 + *  c-file-style: "linux"
  14.324 + *  indent-tabs-mode: t
  14.325 + *  c-indent-level: 8
  14.326 + *  c-basic-offset: 8
  14.327 + *  tab-width: 8
  14.328 + * End:
  14.329 + */
    15.1 --- a/extras/mini-os/traps.c	Thu Aug 10 10:39:58 2006 +0100
    15.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.3 @@ -1,229 +0,0 @@
    15.4 -
    15.5 -#include <os.h>
    15.6 -#include <traps.h>
    15.7 -#include <hypervisor.h>
    15.8 -#include <mm.h>
    15.9 -#include <lib.h>
   15.10 -#include <sched.h>
   15.11 -
   15.12 -/*
   15.13 - * These are assembler stubs in entry.S.
   15.14 - * They are the actual entry points for virtual exceptions.
   15.15 - */
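          -/* Each stub is expected to push an error code (zero where the CPU
          -   supplies none) and then call the matching do_<name>() handler
          -   defined further down in this file. */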
   15.16 -void divide_error(void);
   15.17 -void debug(void);
   15.18 -void int3(void);
   15.19 -void overflow(void);
   15.20 -void bounds(void);
   15.21 -void invalid_op(void);
   15.22 -void device_not_available(void);
   15.23 -void coprocessor_segment_overrun(void);
   15.24 -void invalid_TSS(void);
   15.25 -void segment_not_present(void);
   15.26 -void stack_segment(void);
   15.27 -void general_protection(void);
   15.28 -void page_fault(void);
   15.29 -void coprocessor_error(void);
   15.30 -void simd_coprocessor_error(void);
   15.31 -void alignment_check(void);
   15.32 -void spurious_interrupt_bug(void);
   15.33 -void machine_check(void);
   15.34 -
   15.35 -
   15.36 -void dump_regs(struct pt_regs *regs)
   15.37 -{
   15.38 -    printk("Thread: %s\n", current->name);
   15.39 -#ifdef __i386__    
   15.40 -    printk("EIP: %x, EFLAGS %x.\n", regs->eip, regs->eflags);
   15.41 -    printk("EBX: %08x ECX: %08x EDX: %08x\n",
   15.42 -	   regs->ebx, regs->ecx, regs->edx);
   15.43 -    printk("ESI: %08x EDI: %08x EBP: %08x EAX: %08x\n",
   15.44 -	   regs->esi, regs->edi, regs->ebp, regs->eax);
   15.45 -    printk("DS: %04x ES: %04x orig_eax: %08x, eip: %08x\n",
   15.46 -	   regs->xds, regs->xes, regs->orig_eax, regs->eip);
   15.47 -    printk("CS: %04x EFLAGS: %08x esp: %08x ss: %04x\n",
   15.48 -	   regs->xcs, regs->eflags, regs->esp, regs->xss);
   15.49 -#else
   15.50 -    printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
   15.51 -    printk("\nRSP: %04lx:%016lx  EFLAGS: %08lx\n", 
   15.52 -           regs->ss, regs->rsp, regs->eflags);
   15.53 -    printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
   15.54 -           regs->rax, regs->rbx, regs->rcx);
   15.55 -    printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
   15.56 -           regs->rdx, regs->rsi, regs->rdi); 
   15.57 -    printk("RBP: %016lx R08: %016lx R09: %016lx\n",
   15.58 -           regs->rbp, regs->r8, regs->r9); 
   15.59 -    printk("R10: %016lx R11: %016lx R12: %016lx\n",
   15.60 -           regs->r10, regs->r11, regs->r12); 
   15.61 -    printk("R13: %016lx R14: %016lx R15: %016lx\n",
   15.62 -           regs->r13, regs->r14, regs->r15); 
   15.63 -#endif
   15.64 -}
   15.65 -
   15.66 -static void do_trap(int trapnr, char *str, struct pt_regs * regs, unsigned long error_code)
   15.67 -{
   15.68 -    printk("FATAL:  Unhandled Trap %d (%s), error code=0x%lx\n", trapnr, str, error_code);
   15.69 -    printk("Regs address %p\n", regs);
   15.70 -    dump_regs(regs);
   15.71 -    do_exit();
   15.72 -}
   15.73 -
   15.74 -#define DO_ERROR(trapnr, str, name) \
   15.75 -void do_##name(struct pt_regs * regs, unsigned long error_code) \
   15.76 -{ \
   15.77 -	do_trap(trapnr, str, regs, error_code); \
   15.78 -}
   15.79 -
   15.80 -#define DO_ERROR_INFO(trapnr, str, name, sicode, siaddr) \
   15.81 -void do_##name(struct pt_regs * regs, unsigned long error_code) \
   15.82 -{ \
   15.83 -	do_trap(trapnr, str, regs, error_code); \
   15.84 -}
   15.85 -
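          -/* Mini-OS delivers no signals, so the sicode/siaddr arguments of
          -   DO_ERROR_INFO are ignored and both macros collapse to do_trap(). */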
   15.86 -DO_ERROR_INFO( 0, "divide error", divide_error, FPE_INTDIV, regs->eip)
   15.87 -DO_ERROR( 3, "int3", int3)
   15.88 -DO_ERROR( 4, "overflow", overflow)
   15.89 -DO_ERROR( 5, "bounds", bounds)
   15.90 -DO_ERROR_INFO( 6, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
   15.91 -DO_ERROR( 7, "device not available", device_not_available)
   15.92 -DO_ERROR( 9, "coprocessor segment overrun", coprocessor_segment_overrun)
   15.93 -DO_ERROR(10, "invalid TSS", invalid_TSS)
   15.94 -DO_ERROR(11, "segment not present", segment_not_present)
   15.95 -DO_ERROR(12, "stack segment", stack_segment)
   15.96 -DO_ERROR_INFO(17, "alignment check", alignment_check, BUS_ADRALN, 0)
   15.97 -DO_ERROR(18, "machine check", machine_check)
   15.98 -
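          -/* Dump each pagetable level that maps virt_address; how many levels
          -   exist depends on the paging mode compiled in (see the #if blocks). */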
   15.99 -void page_walk(unsigned long virt_address)
  15.100 -{
  15.101 -        pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
  15.102 -        unsigned long addr = virt_address;
  15.103 -        printk("Pagetable walk from virt %lx, base %lx:\n", virt_address, start_info.pt_base);
  15.104 -    
  15.105 -#if defined(__x86_64__)
  15.106 -        page = tab[l4_table_offset(addr)];
  15.107 -        tab = pte_to_virt(page);
  15.108 -        printk(" L4 = %"PRIpte" (%p)  [offset = %lx]\n", page, tab, l4_table_offset(addr));
  15.109 -#endif
  15.110 -#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
  15.111 -        page = tab[l3_table_offset(addr)];
  15.112 -        tab = pte_to_virt(page);
  15.113 -        printk("  L3 = %"PRIpte" (%p)  [offset = %lx]\n", page, tab, l3_table_offset(addr));
  15.114 -#endif
  15.115 -        page = tab[l2_table_offset(addr)];
  15.116 -        tab = pte_to_virt(page);
  15.117 -        printk("   L2 = %"PRIpte" (%p)  [offset = %lx]\n", page, tab, l2_table_offset(addr));
  15.118 -        
  15.119 -        page = tab[l1_table_offset(addr)];
  15.120 -        printk("    L1 = %"PRIpte" (%p)  [offset = %lx]\n", page, tab, l1_table_offset(addr));
  15.121 -
  15.122 -}
  15.123 -
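          -/* A paravirtualised guest cannot read %cr2 directly; Xen saves the
          -   faulting address into the shared vcpu_info structure instead. */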
  15.124 -#define read_cr2() \
  15.125 -        (HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2)
  15.126 -
  15.127 -static int handling_pg_fault = 0;
  15.128 -
  15.129 -void do_page_fault(struct pt_regs *regs, unsigned long error_code)
  15.130 -{
  15.131 -    unsigned long addr = read_cr2();
   15.132 -    /* If we are already handling a page fault and get another one,
   15.133 -       that means we faulted during the pagetable walk. Continuing here
   15.134 -       would cause a recursive fault. */
  15.135 -    if(handling_pg_fault) 
  15.136 -    {
  15.137 -        printk("Page fault in pagetable walk (access to invalid memory?).\n"); 
  15.138 -        do_exit();
  15.139 -    }
  15.140 -    handling_pg_fault = 1;
  15.141 -
  15.142 -#if defined(__x86_64__)
   15.143 -    printk("Page fault at linear address %lx, rip %lx, code %lx\n",
   15.144 -           addr, regs->rip, error_code);
   15.145 -#else
   15.146 -    printk("Page fault at linear address %lx, eip %lx, code %lx\n",
   15.147 -           addr, regs->eip, error_code);
  15.148 -#endif
  15.149 -
  15.150 -    dump_regs(regs);
  15.151 -    page_walk(addr);
  15.152 -    do_exit();
  15.153 -    /* We should never get here ... but still */
  15.154 -    handling_pg_fault = 0;
  15.155 -}
  15.156 -
  15.157 -void do_general_protection(struct pt_regs *regs, long error_code)
  15.158 -{
  15.159 -#ifdef __i386__
   15.160 -    printk("GPF eip: %lx, error_code=%lx\n", regs->eip, error_code);
   15.161 -#else
   15.162 -    printk("GPF rip: %lx, error_code=%lx\n", regs->rip, error_code);
  15.163 -#endif
  15.164 -    dump_regs(regs);
  15.165 -    do_exit();
  15.166 -}
  15.167 -
  15.168 -
  15.169 -void do_debug(struct pt_regs * regs)
  15.170 -{
  15.171 -    printk("Debug exception\n");
  15.172 -#define TF_MASK 0x100
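          -    /* Clear EFLAGS.TF (0x100) so single-stepping does not retrap. */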
  15.173 -    regs->eflags &= ~TF_MASK;
  15.174 -    dump_regs(regs);
  15.175 -    do_exit();
  15.176 -}
  15.177 -
  15.178 -void do_coprocessor_error(struct pt_regs * regs)
  15.179 -{
  15.180 -    printk("Copro error\n");
  15.181 -    dump_regs(regs);
  15.182 -    do_exit();
  15.183 -}
  15.184 -
  15.185 -void simd_math_error(void *eip)
  15.186 -{
  15.187 -    printk("SIMD error\n");
  15.188 -}
  15.189 -
  15.190 -void do_simd_coprocessor_error(struct pt_regs * regs)
  15.191 -{
  15.192 -    printk("SIMD copro error\n");
  15.193 -}
  15.194 -
  15.195 -void do_spurious_interrupt_bug(struct pt_regs * regs)
  15.196 -{
  15.197 -}
  15.198 -
  15.199 -/*
   15.200 - * Submit a virtual IDT to the hypervisor. This consists of tuples
  15.201 - * (interrupt vector, privilege ring, CS:EIP of handler).
  15.202 - * The 'privilege ring' field specifies the least-privileged ring that
  15.203 - * can trap to that vector using a software-interrupt instruction (INT).
  15.204 - */
  15.205 -static trap_info_t trap_table[] = {
  15.206 -    {  0, 0, __KERNEL_CS, (unsigned long)divide_error                },
  15.207 -    {  1, 0, __KERNEL_CS, (unsigned long)debug                       },
  15.208 -    {  3, 3, __KERNEL_CS, (unsigned long)int3                        },
  15.209 -    {  4, 3, __KERNEL_CS, (unsigned long)overflow                    },
  15.210 -    {  5, 3, __KERNEL_CS, (unsigned long)bounds                      },
  15.211 -    {  6, 0, __KERNEL_CS, (unsigned long)invalid_op                  },
  15.212 -    {  7, 0, __KERNEL_CS, (unsigned long)device_not_available        },
  15.213 -    {  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
  15.214 -    { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS                 },
  15.215 -    { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present         },
  15.216 -    { 12, 0, __KERNEL_CS, (unsigned long)stack_segment               },
  15.217 -    { 13, 0, __KERNEL_CS, (unsigned long)general_protection          },
  15.218 -    { 14, 0, __KERNEL_CS, (unsigned long)page_fault                  },
  15.219 -    { 15, 0, __KERNEL_CS, (unsigned long)spurious_interrupt_bug      },
  15.220 -    { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error           },
  15.221 -    { 17, 0, __KERNEL_CS, (unsigned long)alignment_check             },
  15.222 -    { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error      },
   15.223 -    {  0, 0,           0, 0                           }  /* terminator */
  15.224 -};
  15.225 -    
  15.226 -
  15.227 -
  15.228 -void trap_init(void)
  15.229 -{
  15.230 -    HYPERVISOR_set_trap_table(trap_table);    
  15.231 -}
  15.232 -