xenbits.xensource.com Git - rumpuser-xen.git/commitdiff
Apply olive oil to the headers.
author    Antti Kantee <pooka@iki.fi>    Tue, 3 Sep 2013 00:10:41 +0000 (03:10 +0300)
committer Antti Kantee <pooka@iki.fi>    Tue, 3 Sep 2013 00:10:41 +0000 (03:10 +0300)
Put mini-os specific stuff under mini-os and toss until things build.

27 files changed:
Makefile
arch/x86/x86_32.S
arch/x86/x86_64.S
include/mini-os/arch_limits.h [new file with mode: 0644]
include/mini-os/arch_mm.h [new file with mode: 0644]
include/mini-os/arch_sched.h [new file with mode: 0644]
include/mini-os/arch_spinlock.h [new file with mode: 0644]
include/mini-os/gntmap.h
include/mini-os/hypervisor.h
include/mini-os/os.h [new file with mode: 0644]
include/mini-os/traps.h [new file with mode: 0644]
include/mini-os/x86/arch_limits.h [new file with mode: 0644]
include/mini-os/x86/arch_mm.h [new file with mode: 0644]
include/mini-os/x86/arch_sched.h [new file with mode: 0644]
include/mini-os/x86/arch_spinlock.h [new file with mode: 0644]
include/mini-os/x86/os.h [new file with mode: 0644]
include/mini-os/x86/traps.h [new file with mode: 0644]
include/mini-os/x86/x86_32/hypercall-x86_32.h [new file with mode: 0644]
include/mini-os/x86/x86_64/hypercall-x86_64.h [new file with mode: 0644]
include/x86/arch_limits.h [deleted file]
include/x86/arch_mm.h [deleted file]
include/x86/arch_sched.h [deleted file]
include/x86/arch_spinlock.h [deleted file]
include/x86/os.h [deleted file]
include/x86/traps.h [deleted file]
include/x86/x86_32/hypercall-x86_32.h [deleted file]
include/x86/x86_64/hypercall-x86_64.h [deleted file]

index 964ee901e4f49a66c8eb213c4c6b7c5823a9758d..b3fd9af6a49d183db8f459bd01f650f6db97282e 100644
--- a/Makefile
+++ b/Makefile
@@ -125,7 +125,7 @@ include/mini-os/list.h: $(XEN_ROOT)/tools/include/xen-external/bsd-sys-queue-h-s
 .PHONY: links
 links: include/mini-os/list.h $(ARCH_LINKS)
        [ -e include/xen ] || ln -sf ../../../xen/include/public include/xen
-       [ -e include/$(TARGET_ARCH_FAM)/mini-os ] || ln -sf . include/$(TARGET_ARCH_FAM)/mini-os
+       [ -e include/mini-os/machine ] || ln -sf $(TARGET_ARCH_FAM) include/mini-os/machine
 
 .PHONY: arch_lib
 arch_lib:
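
The link direction flips here: the old rule aliased include/$(TARGET_ARCH_FAM)/mini-os back onto itself so that <mini-os/...> resolved inside the arch directory; the new rule points include/mini-os/machine at the architecture directory, so generic code reaches machine headers through one stable path. As a minimal consumer sketch (hypothetical file; assumes the link resolves to x86 and -Iinclude is on the compiler command line):

/* sketch.c - hypothetical consumer; assumes "ln -sf x86 include/mini-os/machine"
 * has run, so this include resolves to include/mini-os/x86/arch_limits.h. */
#include <stdio.h>
#include <mini-os/machine/arch_limits.h>

int main(void)
{
	printf("page size %lu, stack size %lu\n",
	       (unsigned long)__PAGE_SIZE, (unsigned long)__STACK_SIZE);
	return 0;
}
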
index fb3e30a357f7130b35efe1d119e56a5f4243466f..df6fde650c73899f8a5ab67b8ec072dedfc0148d 100644
--- a/arch/x86/x86_32.S
+++ b/arch/x86/x86_32.S
@@ -1,4 +1,4 @@
-#include <mini-os/os.h>
+#include <mini-os/x86/os.h>
 #include <mini-os/x86/arch_limits.h>
 #include <xen/arch-x86_32.h>
 
index a65e5d5d8e0c12bb67342ef597fdf1e2c5e79aac..53c049677cb6d9c8df78a647a413fb52dc937833 100644
--- a/arch/x86/x86_64.S
+++ b/arch/x86/x86_64.S
@@ -1,4 +1,4 @@
-#include <mini-os/os.h>
+#include <mini-os/x86/os.h>
 #include <mini-os/x86/arch_limits.h>
 #include <xen/features.h>
 
diff --git a/include/mini-os/arch_limits.h b/include/mini-os/arch_limits.h
new file mode 100644
index 0000000..950d211
--- /dev/null
@@ -0,0 +1,3 @@
+/* XXX: only because I'm too lazy to fix the includers now */
+
+#include <mini-os/machine/arch_limits.h>
diff --git a/include/mini-os/arch_mm.h b/include/mini-os/arch_mm.h
new file mode 100644
index 0000000..d081dd3
--- /dev/null
@@ -0,0 +1,3 @@
+/* XXX: only because I'm too lazy to fix the includers now */
+
+#include <mini-os/machine/arch_mm.h>
diff --git a/include/mini-os/arch_sched.h b/include/mini-os/arch_sched.h
new file mode 100644
index 0000000..9085dc5
--- /dev/null
@@ -0,0 +1,3 @@
+/* XXX: only because I'm too lazy to fix the includers now */
+
+#include <mini-os/machine/arch_sched.h>
diff --git a/include/mini-os/arch_spinlock.h b/include/mini-os/arch_spinlock.h
new file mode 100644
index 0000000..a454bcd
--- /dev/null
@@ -0,0 +1,3 @@
+/* XXX: only because I'm too lazy to fix the includers now */
+
+#include <mini-os/machine/arch_spinlock.h>
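
The four compatibility headers above are all instances of one forwarding pattern: existing includers keep saying <mini-os/arch_mm.h> and get bounced through the machine link to the per-architecture header. A hypothetical shim for some future header would look the same (the real shims above even skip their own include guard and rely on the guard in the target header):

/* include/mini-os/arch_foo.h - hypothetical shim, same pattern as above.
 * Forwards the legacy include path to whichever per-architecture header
 * the include/mini-os/machine symlink selects at build time. */
#include <mini-os/machine/arch_foo.h>
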
index fde53f39b184ba6a12744ebca20b1643903f81d5..4df30a08748042cb9c4e31a5887d31ef556d437b 100644
--- a/include/mini-os/gntmap.h
+++ b/include/mini-os/gntmap.h
@@ -1,7 +1,7 @@
 #ifndef __GNTMAP_H__
 #define __GNTMAP_H__
 
-#include <os.h>
+#include <mini-os/os.h>
 
 /*
  * Please consider struct gntmap opaque. If instead you choose to disregard
index e299df1f73a381d7e8e31dcf1edf891987651f64..3534421ac7eb68497ce42cb404fe8e9bcb0b7225 100644
--- a/include/mini-os/hypervisor.h
+++ b/include/mini-os/hypervisor.h
 #include <mini-os/types.h>
 #include <xen/xen.h>
 #if defined(__i386__)
-#include <hypercall-x86_32.h>
+#include <mini-os/x86/x86_32/hypercall-x86_32.h>
 #elif defined(__x86_64__)
-#include <hypercall-x86_64.h>
-#elif defined(__ia64__)
-#include <hypercall-ia64.h>
+#include <mini-os/x86/x86_64/hypercall-x86_64.h>
 #else
 #error "Unsupported architecture"
 #endif
diff --git a/include/mini-os/os.h b/include/mini-os/os.h
new file mode 100644
index 0000000..77d3e9c
--- /dev/null
@@ -0,0 +1,3 @@
+/* XXX: only because I'm too lazy to fix the includers now */
+
+#include <mini-os/machine/os.h>
diff --git a/include/mini-os/traps.h b/include/mini-os/traps.h
new file mode 100644
index 0000000..d62990f
--- /dev/null
@@ -0,0 +1,3 @@
+/* XXX: only because I'm too lazy to fix the includers now */
+
+#include <mini-os/machine/traps.h>
diff --git a/include/mini-os/x86/arch_limits.h b/include/mini-os/x86/arch_limits.h
new file mode 100644
index 0000000..41f8620
--- /dev/null
@@ -0,0 +1,20 @@
+
+#ifndef __ARCH_LIMITS_H__
+#define __ARCH_LIMITS_H__
+
+#define __PAGE_SHIFT      12
+
+#ifdef __ASSEMBLY__
+#define __PAGE_SIZE       (1 << __PAGE_SHIFT)
+#else
+#ifdef __x86_64__
+#define __PAGE_SIZE       (1UL << __PAGE_SHIFT)
+#else
+#define __PAGE_SIZE       (1ULL << __PAGE_SHIFT)
+#endif
+#endif
+
+#define __STACK_SIZE_PAGE_ORDER  4
+#define __STACK_SIZE             (__PAGE_SIZE * (1 << __STACK_SIZE_PAGE_ORDER))
+          
+#endif /* __ARCH_LIMITS_H__ */
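
The arithmetic above: __PAGE_SIZE is 1 << 12 = 4096 bytes, and __STACK_SIZE is 16 pages, 64 KiB, which the stack allocator also aligns to its own size; get_current() in arch_sched.h below depends on exactly that alignment. A standalone C11 sketch (constants copied, SK_ names invented) that checks the numbers at compile time:

/* Standalone sketch: re-derive and check the constants defined above. */
#define SK_PAGE_SHIFT            12
#define SK_PAGE_SIZE             (1UL << SK_PAGE_SHIFT)
#define SK_STACK_SIZE_PAGE_ORDER 4
#define SK_STACK_SIZE            (SK_PAGE_SIZE * (1 << SK_STACK_SIZE_PAGE_ORDER))

_Static_assert(SK_PAGE_SIZE == 4096, "4 KiB pages");
_Static_assert(SK_STACK_SIZE == 64 * 1024, "16 pages = 64 KiB per stack");
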
diff --git a/include/mini-os/x86/arch_mm.h b/include/mini-os/x86/arch_mm.h
new file mode 100644
index 0000000..a95632a
--- /dev/null
@@ -0,0 +1,234 @@
+/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
+ *
+ * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
+ * Copyright (c) 2005, Keir A Fraser
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _ARCH_MM_H_
+#define _ARCH_MM_H_
+
+#ifndef __ASSEMBLY__
+#include <xen/xen.h>
+#if defined(__i386__)
+#include <xen/arch-x86_32.h>
+#elif defined(__x86_64__)
+#include <xen/arch-x86_64.h>
+#else
+#error "Unsupported architecture"
+#endif
+#endif
+
+#define L1_FRAME                1
+#define L2_FRAME                2
+#define L3_FRAME                3
+
+#define L1_PAGETABLE_SHIFT      12
+
+#if defined(__i386__)
+
+#define L2_PAGETABLE_SHIFT      21
+#define L3_PAGETABLE_SHIFT      30
+
+#define L1_PAGETABLE_ENTRIES    512
+#define L2_PAGETABLE_ENTRIES    512
+#define L3_PAGETABLE_ENTRIES    4
+
+#define PADDR_BITS              44
+#define PADDR_MASK              ((1ULL << PADDR_BITS)-1)
+
+#define L2_MASK  ((1UL << L3_PAGETABLE_SHIFT) - 1)
+
+/*
+ * If starting from virtual address greater than 0xc0000000,
+ * this value will be 2 to account for final mid-level page
+ * directory which is always mapped in at this location.
+ */
+#define NOT_L1_FRAMES           3
+#define PRIpte "016llx"
+#ifndef __ASSEMBLY__
+typedef uint64_t pgentry_t;
+#endif
+
+#elif defined(__x86_64__)
+
+#define L2_PAGETABLE_SHIFT      21
+#define L3_PAGETABLE_SHIFT      30
+#define L4_PAGETABLE_SHIFT      39
+
+#define L1_PAGETABLE_ENTRIES    512
+#define L2_PAGETABLE_ENTRIES    512
+#define L3_PAGETABLE_ENTRIES    512
+#define L4_PAGETABLE_ENTRIES    512
+
+/* These are page-table limitations. Current CPUs support only 40-bit phys. */
+#define PADDR_BITS              52
+#define VADDR_BITS              48
+#define PADDR_MASK              ((1UL << PADDR_BITS)-1)
+#define VADDR_MASK              ((1UL << VADDR_BITS)-1)
+
+#define L2_MASK  ((1UL << L3_PAGETABLE_SHIFT) - 1)
+#define L3_MASK  ((1UL << L4_PAGETABLE_SHIFT) - 1)
+
+#define NOT_L1_FRAMES           3
+#define PRIpte "016lx"
+#ifndef __ASSEMBLY__
+typedef unsigned long pgentry_t;
+#endif
+
+#endif
+
+#define L1_MASK  ((1UL << L2_PAGETABLE_SHIFT) - 1)
+
+/* Given a virtual address, get an entry offset into a page table. */
+#define l1_table_offset(_a) \
+  (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
+#define l2_table_offset(_a) \
+  (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
+#define l3_table_offset(_a) \
+  (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
+#if defined(__x86_64__)
+#define l4_table_offset(_a) \
+  (((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
+#endif
+
+#define _PAGE_PRESENT  0x001ULL
+#define _PAGE_RW       0x002ULL
+#define _PAGE_USER     0x004ULL
+#define _PAGE_PWT      0x008ULL
+#define _PAGE_PCD      0x010ULL
+#define _PAGE_ACCESSED 0x020ULL
+#define _PAGE_DIRTY    0x040ULL
+#define _PAGE_PAT      0x080ULL
+#define _PAGE_PSE      0x080ULL
+#define _PAGE_GLOBAL   0x100ULL
+
+#if defined(__i386__)
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
+#define L1_PROT_RO (_PAGE_PRESENT|_PAGE_ACCESSED)
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY |_PAGE_USER)
+#define L3_PROT (_PAGE_PRESENT)
+#elif defined(__x86_64__)
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
+#define L1_PROT_RO (_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_USER)
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#endif /* __i386__ || __x86_64__ */
+
+/* flags for ioremap */
+#define IO_PROT (L1_PROT)
+#define IO_PROT_NOCACHE (L1_PROT | _PAGE_PCD)
+
+/* for P2M */
+#define INVALID_P2M_ENTRY (~0UL)
+
+#include "arch_limits.h"
+#define PAGE_SIZE       __PAGE_SIZE
+#define PAGE_SHIFT      __PAGE_SHIFT
+#define PAGE_MASK       (~(PAGE_SIZE-1))
+
+#define PFN_UP(x)      (((x) + PAGE_SIZE-1) >> L1_PAGETABLE_SHIFT)
+#define PFN_DOWN(x)    ((x) >> L1_PAGETABLE_SHIFT)
+#define PFN_PHYS(x)    ((uint64_t)(x) << L1_PAGETABLE_SHIFT)
+#define PHYS_PFN(x)    ((x) >> L1_PAGETABLE_SHIFT)
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)        (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+#ifndef __ASSEMBLY__
+/* Definitions for machine and pseudophysical addresses. */
+#ifdef __i386__
+typedef unsigned long long paddr_t;
+typedef unsigned long long maddr_t;
+#else
+typedef unsigned long paddr_t;
+typedef unsigned long maddr_t;
+#endif
+
+extern unsigned long *phys_to_machine_mapping;
+extern char _text, _etext, _erodata, _edata, _end;
+extern unsigned long mfn_zero;
+#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
+static __inline__ maddr_t phys_to_machine(paddr_t phys)
+{
+       maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
+       machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
+       return machine;
+}
+
+#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
+static __inline__ paddr_t machine_to_phys(maddr_t machine)
+{
+       paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
+       phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
+       return phys;
+}
+#endif
+
+#define VIRT_START                 ((unsigned long)&_text)
+
+#define to_phys(x)                 ((unsigned long)(x)-VIRT_START)
+#define to_virt(x)                 ((void *)((unsigned long)(x)+VIRT_START))
+
+#define virt_to_pfn(_virt)         (PFN_DOWN(to_phys(_virt)))
+#define virt_to_mfn(_virt)         (pfn_to_mfn(virt_to_pfn(_virt)))
+#define mach_to_virt(_mach)        (to_virt(machine_to_phys(_mach)))
+#define virt_to_mach(_virt)        (phys_to_machine(to_phys(_virt)))
+#define mfn_to_virt(_mfn)          (to_virt(mfn_to_pfn(_mfn) << PAGE_SHIFT))
+#define pfn_to_virt(_pfn)          (to_virt((_pfn) << PAGE_SHIFT))
+
+/* Pagetable walking. */
+#define pte_to_mfn(_pte)           (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT)
+#define pte_to_virt(_pte)          to_virt(mfn_to_pfn(pte_to_mfn(_pte)) << PAGE_SHIFT)
+
+
+#define PT_BASE                           ((pgentry_t *)start_info.pt_base)
+
+#ifdef __x86_64__
+#define virtual_to_l3(_virt)      ((pgentry_t *)pte_to_virt(PT_BASE[l4_table_offset(_virt)]))
+#else
+#define virtual_to_l3(_virt)      PT_BASE
+#endif
+
+#define virtual_to_l2(_virt)      ({ \
+       unsigned long __virt2 = (_virt); \
+       (pgentry_t *) pte_to_virt(virtual_to_l3(__virt2)[l3_table_offset(__virt2)]); \
+})
+
+#define virtual_to_l1(_virt)      ({ \
+       unsigned long __virt1 = (_virt); \
+       (pgentry_t *) pte_to_virt(virtual_to_l2(__virt1)[l2_table_offset(__virt1)]); \
+})
+
+#define virtual_to_pte(_virt)     ({ \
+       unsigned long __virt0 = (unsigned long) (_virt); \
+       virtual_to_l1(__virt0)[l1_table_offset(__virt0)]; \
+})
+#define virtual_to_mfn(_virt)     pte_to_mfn(virtual_to_pte(_virt))
+
+#define map_frames(f, n) map_frames_ex(f, n, 1, 0, 1, DOMID_SELF, NULL, L1_PROT)
+#define map_zero(n, a) map_frames_ex(&mfn_zero, n, 0, 0, a, DOMID_SELF, NULL, L1_PROT_RO)
+#define do_map_zero(start, n) do_map_frames(start, &mfn_zero, n, 0, 0, DOMID_SELF, NULL, L1_PROT_RO)
+
+pgentry_t *need_pgt(unsigned long addr);
+int mfn_is_ram(unsigned long mfn);
+
+#endif /* _ARCH_MM_H_ */
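
On x86_64 each of the four *_table_offset() macros above extracts 9 bits of the virtual address, so the walk from PT_BASE down to the L1 entry decodes the full 48 VADDR_BITS. A standalone sketch of that offset arithmetic for one sample address (constants copied from the header above, ENTRIES invented as shorthand):

/* Sketch: decode the x86_64 paging indices of a sample virtual address
 * using the shift/mask scheme from arch_mm.h above. */
#include <stdio.h>

#define L1_PAGETABLE_SHIFT 12
#define L2_PAGETABLE_SHIFT 21
#define L3_PAGETABLE_SHIFT 30
#define L4_PAGETABLE_SHIFT 39
#define ENTRIES            512   /* entries per table on x86_64 */

int main(void)
{
	unsigned long va = 0x7f1234567000UL;

	printf("L4 %lu  L3 %lu  L2 %lu  L1 %lu\n",
	       (va >> L4_PAGETABLE_SHIFT) & (ENTRIES - 1),
	       (va >> L3_PAGETABLE_SHIFT) & (ENTRIES - 1),
	       (va >> L2_PAGETABLE_SHIFT) & (ENTRIES - 1),
	       (va >> L1_PAGETABLE_SHIFT) & (ENTRIES - 1));
	return 0;
}
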
diff --git a/include/mini-os/x86/arch_sched.h b/include/mini-os/x86/arch_sched.h
new file mode 100644
index 0000000..b494eca
--- /dev/null
@@ -0,0 +1,25 @@
+
+#ifndef __ARCH_SCHED_H__
+#define __ARCH_SCHED_H__
+
+#include "arch_limits.h"
+
+static inline struct thread* get_current(void)
+{
+    struct thread **current;
+#ifdef __i386__    
+    register unsigned long sp asm("esp");
+#else
+    register unsigned long sp asm("rsp");
+#endif 
+    current = (void *)(unsigned long)(sp & ~(__STACK_SIZE-1));
+    return *current;
+}
+
+extern void __arch_switch_threads(unsigned long *prevctx, unsigned long *nextctx);
+
+#define arch_switch_threads(prev,next) __arch_switch_threads(&(prev)->sp, &(next)->sp)
+
+
+          
+#endif /* __ARCH_SCHED_H__ */
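
get_current() works because each stack is __STACK_SIZE bytes, aligned to __STACK_SIZE, with a pointer to the owning thread stored at the stack's base; masking the stack pointer with ~(__STACK_SIZE-1) lands on that slot from anywhere in the stack. A freestanding sketch of the same trick (manual alignment via malloc stands in for the real stack allocator):

/* Sketch of the current-thread trick: the owner pointer sits at the base
 * of a size-aligned region, so any address inside it recovers the owner
 * by masking. malloc + manual alignment replaces the real allocator. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define STACK_SIZE (64 * 1024UL)

struct thread { const char *name; };

int main(void)
{
	void *raw = malloc(2 * STACK_SIZE);
	uintptr_t base = ((uintptr_t)raw + STACK_SIZE - 1) & ~(STACK_SIZE - 1);
	struct thread t = { "main" };

	*(struct thread **)base = &t;            /* slot at stack base */
	uintptr_t sp = base + STACK_SIZE - 128;  /* somewhere in the stack */
	struct thread *cur = *(struct thread **)(sp & ~(STACK_SIZE - 1));
	printf("current: %s\n", cur->name);
	free(raw);
	return 0;
}
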
diff --git a/include/mini-os/x86/arch_spinlock.h b/include/mini-os/x86/arch_spinlock.h
new file mode 100644
index 0000000..4b8faf7
--- /dev/null
@@ -0,0 +1,94 @@
+
+
+#ifndef __ARCH_ASM_SPINLOCK_H
+#define __ARCH_ASM_SPINLOCK_H
+
+#include <mini-os/lib.h>
+#include "os.h"
+
+
+#define ARCH_SPIN_LOCK_UNLOCKED { 1 }
+
+/*
+ * Simple spin lock operations.  There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0)
+#define spin_unlock_wait(x)    do { barrier(); } while(spin_is_locked(x))
+
+#define spin_lock_string \
+        "1:\n" \
+       LOCK \
+       "decb %0\n\t" \
+       "jns 3f\n" \
+       "2:\t" \
+       "rep;nop\n\t" \
+       "cmpb $0,%0\n\t" \
+       "jle 2b\n\t" \
+       "jmp 1b\n" \
+       "3:\n\t"
+
+#define spin_lock_string_flags \
+        "1:\n" \
+       LOCK \
+       "decb %0\n\t" \
+       "jns 4f\n\t" \
+       "2:\t" \
+       "testl $0x200, %1\n\t" \
+       "jz 3f\n\t" \
+       "#sti\n\t" \
+       "3:\t" \
+       "rep;nop\n\t" \
+       "cmpb $0, %0\n\t" \
+       "jle 3b\n\t" \
+       "#cli\n\t" \
+       "jmp 1b\n" \
+       "4:\n\t"
+
+/*
+ * This works. Despite all the confusion.
+ * (except on PPro SMP or if we are using OOSTORE)
+ * (PPro errata 66, 92)
+ */
+
+#define spin_unlock_string \
+       "xchgb %b0, %1" \
+               :"=q" (oldval), "=m" (lock->slock) \
+               :"0" (oldval) : "memory"
+
+static inline void _raw_spin_unlock(spinlock_t *lock)
+{
+       char oldval = 1;
+       __asm__ __volatile__(
+               spin_unlock_string
+       );
+}
+
+static inline int _raw_spin_trylock(spinlock_t *lock)
+{
+       char oldval;
+       __asm__ __volatile__(
+               "xchgb %b0,%1\n"
+               :"=q" (oldval), "=m" (lock->slock)
+               :"0" (0) : "memory");
+       return oldval > 0;
+}
+
+static inline void _raw_spin_lock(spinlock_t *lock)
+{
+       __asm__ __volatile__(
+               spin_lock_string
+               :"=m" (lock->slock) : : "memory");
+}
+
+static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+{
+       __asm__ __volatile__(
+               spin_lock_string_flags
+               :"=m" (lock->slock) : "r" (flags) : "memory");
+}
+
+#endif
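
The lock word starts at 1 (ARCH_SPIN_LOCK_UNLOCKED); decb drops it to 0 on an uncontended acquire and negative under contention, the rep;nop (pause) loop spins until a release makes it positive again, and unlock is a plain xchgb back to 1. The same protocol restated with GCC atomic builtins rather than inline assembly, as a sketch (not the commit's code; __builtin_ia32_pause is x86 GCC only):

/* Sketch: the decb/xchgb spinlock protocol above, restated portably. */
typedef struct { volatile signed char slock; } sketch_spinlock_t;

void sketch_lock(sketch_spinlock_t *l)
{
	for (;;) {
		/* atomic decrement; 1 -> 0 means we now own the lock */
		if (__atomic_sub_fetch(&l->slock, 1, __ATOMIC_ACQUIRE) == 0)
			return;
		/* contended: wait until a release makes it positive again */
		while (__atomic_load_n(&l->slock, __ATOMIC_RELAXED) <= 0)
			__builtin_ia32_pause();  /* rep;nop */
	}
}

void sketch_unlock(sketch_spinlock_t *l)
{
	__atomic_store_n(&l->slock, 1, __ATOMIC_RELEASE);  /* like xchgb 1 */
}
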
diff --git a/include/mini-os/x86/os.h b/include/mini-os/x86/os.h
new file mode 100644
index 0000000..f193865
--- /dev/null
@@ -0,0 +1,572 @@
+/******************************************************************************
+ * os.h
+ * 
+ * random collection of macros and definitions
+ */
+
+#ifndef _OS_H_
+#define _OS_H_
+
+#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
+#define __builtin_expect(x, expected_value) (x)
+#endif
+#define unlikely(x)  __builtin_expect((x),0)
+#define likely(x)  __builtin_expect((x),1)
+
+#define smp_processor_id() 0
+
+
+#ifndef __ASSEMBLY__
+#include <mini-os/types.h>
+#include <mini-os/hypervisor.h>
+#include <mini-os/kernel.h>
+
+#define USED    __attribute__ ((used))
+
+#define BUG do_exit
+
+#endif
+#include <xen/xen.h>
+
+
+
+#define __KERNEL_CS  FLAT_KERNEL_CS
+#define __KERNEL_DS  FLAT_KERNEL_DS
+#define __KERNEL_SS  FLAT_KERNEL_SS
+
+#define TRAP_divide_error      0
+#define TRAP_debug             1
+#define TRAP_nmi               2
+#define TRAP_int3              3
+#define TRAP_overflow          4
+#define TRAP_bounds            5
+#define TRAP_invalid_op        6
+#define TRAP_no_device         7
+#define TRAP_double_fault      8
+#define TRAP_copro_seg         9
+#define TRAP_invalid_tss      10
+#define TRAP_no_segment       11
+#define TRAP_stack_error      12
+#define TRAP_gp_fault         13
+#define TRAP_page_fault       14
+#define TRAP_spurious_int     15
+#define TRAP_copro_error      16
+#define TRAP_alignment_check  17
+#define TRAP_machine_check    18
+#define TRAP_simd_error       19
+#define TRAP_deferred_nmi     31
+
+/* Everything below this point is not included by assembler (.S) files. */
+#ifndef __ASSEMBLY__
+
+extern shared_info_t *HYPERVISOR_shared_info;
+
+void trap_init(void);
+void trap_fini(void);
+
+void arch_init(start_info_t *si);
+void arch_print_info(void);
+void arch_fini(void);
+
+
+
+
+
+/* 
+ * The use of 'barrier' in the following reflects their use as local-lock
+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
+ * critical operations are executed. All critical operations must complete
+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
+ * includes these barriers, for example.
+ */
+
+#define __cli()                                                                \
+do {                                                                   \
+       vcpu_info_t *_vcpu;                                             \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
+       _vcpu->evtchn_upcall_mask = 1;                                  \
+       barrier();                                                      \
+} while (0)
+
+#define __sti()                                                                \
+do {                                                                   \
+       vcpu_info_t *_vcpu;                                             \
+       barrier();                                                      \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
+       _vcpu->evtchn_upcall_mask = 0;                                  \
+       barrier(); /* unmask then check (avoid races) */                \
+       if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
+               force_evtchn_callback();                                \
+} while (0)
+
+#define __save_flags(x)                                                        \
+do {                                                                   \
+       vcpu_info_t *_vcpu;                                             \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
+       (x) = _vcpu->evtchn_upcall_mask;                                \
+} while (0)
+
+#define __restore_flags(x)                                             \
+do {                                                                   \
+       vcpu_info_t *_vcpu;                                             \
+       barrier();                                                      \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
+       if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
+               barrier(); /* unmask then check (avoid races) */        \
+               if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
+                       force_evtchn_callback();                        \
+       }\
+} while (0)
+
+#define safe_halt()            ((void)0)
+
+#define __save_and_cli(x)                                              \
+do {                                                                   \
+       vcpu_info_t *_vcpu;                                             \
+       _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
+       (x) = _vcpu->evtchn_upcall_mask;                                \
+       _vcpu->evtchn_upcall_mask = 1;                                  \
+       barrier();                                                      \
+} while (0)
+
+#define local_irq_save(x)      __save_and_cli(x)
+#define local_irq_restore(x)   __restore_flags(x)
+#define local_save_flags(x)    __save_flags(x)
+#define local_irq_disable()    __cli()
+#define local_irq_enable()     __sti()
+
+#define irqs_disabled()                        \
+    HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].evtchn_upcall_mask
+
+/* This is a barrier for the compiler only, NOT the processor! */
+#define barrier() __asm__ __volatile__("": : :"memory")
+
+#if defined(__i386__)
+#define mb()    __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#define rmb()   __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#define wmb()  __asm__ __volatile__ ("": : :"memory")
+#elif defined(__x86_64__)
+#define mb()    __asm__ __volatile__ ("mfence":::"memory")
+#define rmb()   __asm__ __volatile__ ("lfence":::"memory")
+#define wmb()  __asm__ __volatile__ ("sfence" ::: "memory") /* From CONFIG_UNORDERED_IO (linux) */
+#endif
+
+
+#define LOCK_PREFIX ""
+#define LOCK ""
+#define ADDR (*(volatile long *) addr)
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } atomic_t;
+
+
+/************************** i386 *******************************/
+#ifdef __INSIDE_MINIOS__
+#if defined (__i386__)
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+       switch (size) {
+               case 1:
+                       __asm__ __volatile__("xchgb %b0,%1"
+                               :"=q" (x)
+                               :"m" (*__xg(ptr)), "0" (x)
+                               :"memory");
+                       break;
+               case 2:
+                       __asm__ __volatile__("xchgw %w0,%1"
+                               :"=r" (x)
+                               :"m" (*__xg(ptr)), "0" (x)
+                               :"memory");
+                       break;
+               case 4:
+                       __asm__ __volatile__("xchgl %0,%1"
+                               :"=r" (x)
+                               :"m" (*__xg(ptr)), "0" (x)
+                               :"memory");
+                       break;
+       }
+       return x;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It can be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
+{
+       int oldbit;
+
+       __asm__ __volatile__( LOCK
+               "btrl %2,%1\n\tsbbl %0,%0"
+               :"=r" (oldbit),"=m" (ADDR)
+               :"Ir" (nr) : "memory");
+       return oldbit;
+}
+
+static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+{
+       return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
+}
+
+static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
+{
+       int oldbit;
+
+       __asm__ __volatile__(
+               "btl %2,%1\n\tsbbl %0,%0"
+               :"=r" (oldbit)
+               :"m" (ADDR),"Ir" (nr));
+       return oldbit;
+}
+
+#define test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ constant_test_bit((nr),(addr)) : \
+ variable_test_bit((nr),(addr)))
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered.  See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile unsigned long * addr)
+{
+       __asm__ __volatile__( LOCK
+               "btsl %1,%0"
+               :"=m" (ADDR)
+               :"Ir" (nr));
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered.  However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void clear_bit(int nr, volatile unsigned long * addr)
+{
+       __asm__ __volatile__( LOCK
+               "btrl %1,%0"
+               :"=m" (ADDR)
+               :"Ir" (nr));
+}
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+       __asm__("bsfl %1,%0"
+               :"=r" (word)
+               :"rm" (word));
+       return word;
+}
+
+
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+#define ADDR (*(volatile long *) addr)
+
+#define rdtscll(val) \
+     __asm__ __volatile__("rdtsc" : "=A" (val))
+
+
+
+#elif defined(__x86_64__)/* ifdef __i386__ */
+/************************** x86_64 *******************************/
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+#define __xg(x) ((volatile long *)(x))
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+       switch (size) {
+               case 1:
+                       __asm__ __volatile__("xchgb %b0,%1"
+                               :"=q" (x)
+                               :"m" (*__xg(ptr)), "0" (x)
+                               :"memory");
+                       break;
+               case 2:
+                       __asm__ __volatile__("xchgw %w0,%1"
+                               :"=r" (x)
+                               :"m" (*__xg(ptr)), "0" (x)
+                               :"memory");
+                       break;
+               case 4:
+                       __asm__ __volatile__("xchgl %k0,%1"
+                               :"=r" (x)
+                               :"m" (*__xg(ptr)), "0" (x)
+                               :"memory");
+                       break;
+               case 8:
+                       __asm__ __volatile__("xchgq %0,%1"
+                               :"=r" (x)
+                               :"m" (*__xg(ptr)), "0" (x)
+                               :"memory");
+                       break;
+       }
+       return x;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.  
+ * It also implies a memory barrier.
+ */
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+{
+       int oldbit;
+
+       __asm__ __volatile__( LOCK_PREFIX
+               "btrl %2,%1\n\tsbbl %0,%0"
+               :"=r" (oldbit),"=m" (ADDR)
+               :"dIr" (nr) : "memory");
+       return oldbit;
+}
+
+static __inline__ int constant_test_bit(int nr, const volatile void * addr)
+{
+       return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int variable_test_bit(int nr, volatile const void * addr)
+{
+       int oldbit;
+
+       __asm__ __volatile__(
+               "btl %2,%1\n\tsbbl %0,%0"
+               :"=r" (oldbit)
+               :"m" (ADDR),"dIr" (nr));
+       return oldbit;
+}
+
+#define test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ constant_test_bit((nr),(addr)) : \
+ variable_test_bit((nr),(addr)))
+
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered.  See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __inline__ void set_bit(int nr, volatile void * addr)
+{
+       __asm__ __volatile__( LOCK_PREFIX
+               "btsl %1,%0"
+               :"=m" (ADDR)
+               :"dIr" (nr) : "memory");
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered.  However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static __inline__ void clear_bit(int nr, volatile void * addr)
+{
+       __asm__ __volatile__( LOCK_PREFIX
+               "btrl %1,%0"
+               :"=m" (ADDR)
+               :"dIr" (nr));
+}
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __inline__ unsigned long __ffs(unsigned long word)
+{
+       __asm__("bsfq %1,%0"
+               :"=r" (word)
+               :"rm" (word));
+       return word;
+}
+
+#define ADDR (*(volatile long *) addr)
+
+#define rdtscll(val) do { \
+     unsigned int __a,__d; \
+     asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+     (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
+} while(0)
+
+#define wrmsr(msr,val1,val2) \
+      __asm__ __volatile__("wrmsr" \
+                           : /* no outputs */ \
+                           : "c" (msr), "a" (val1), "d" (val2))
+
+#define wrmsrl(msr,val) wrmsr(msr,(uint32_t)((uint64_t)(val)),((uint64_t)(val))>>32)
+
+
+#else /* ifdef __x86_64__ */
+#error "Unsupported architecture"
+#endif
+#endif /* ifdef __INSIDE_MINIOS__ */
+
+/********************* common i386 and x86_64  ****************************/
+struct __synch_xchg_dummy { unsigned long a[100]; };
+#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
+
+#define synch_cmpxchg(ptr, old, new) \
+((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
+                                     (unsigned long)(old), \
+                                     (unsigned long)(new), \
+                                     sizeof(*(ptr))))
+
+static inline unsigned long __synch_cmpxchg(volatile void *ptr,
+        unsigned long old,
+        unsigned long new, int size)
+{
+    unsigned long prev;
+    switch (size) {
+        case 1:
+            __asm__ __volatile__("lock; cmpxchgb %b1,%2"
+                    : "=a"(prev)
+                    : "q"(new), "m"(*__synch_xg(ptr)),
+                    "0"(old)
+                    : "memory");
+            return prev;
+        case 2:
+            __asm__ __volatile__("lock; cmpxchgw %w1,%2"
+                    : "=a"(prev)
+                    : "r"(new), "m"(*__synch_xg(ptr)),
+                    "0"(old)
+                    : "memory");
+            return prev;
+#ifdef __x86_64__
+        case 4:
+            __asm__ __volatile__("lock; cmpxchgl %k1,%2"
+                    : "=a"(prev)
+                    : "r"(new), "m"(*__synch_xg(ptr)),
+                    "0"(old)
+                    : "memory");
+            return prev;
+        case 8:
+            __asm__ __volatile__("lock; cmpxchgq %1,%2"
+                    : "=a"(prev)
+                    : "r"(new), "m"(*__synch_xg(ptr)),
+                    "0"(old)
+                    : "memory");
+            return prev;
+#else
+        case 4:
+            __asm__ __volatile__("lock; cmpxchgl %1,%2"
+                    : "=a"(prev)
+                    : "r"(new), "m"(*__synch_xg(ptr)),
+                    "0"(old)
+                    : "memory");
+            return prev;
+#endif
+    }
+    return old;
+}
+
+
+static __inline__ void synch_set_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ ( 
+        "lock btsl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ (
+        "lock btrl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "lock btsl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "lock btrl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
+{
+    return ((1UL << (nr & 31)) & 
+            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "btl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
+    return oldbit;
+}
+
+#define synch_test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ synch_const_test_bit((nr),(addr)) : \
+ synch_var_test_bit((nr),(addr)))
+
+
+#undef ADDR
+
+#endif /* not assembly */
+#endif /* _OS_H_ */
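
A PV guest has no real cli/sti: "interrupts" are event-channel upcalls, masked through evtchn_upcall_mask in the shared info page, which is why __sti() and __restore_flags() must re-check evtchn_upcall_pending after unmasking. Typical use is the save/restore pair; a hypothetical in-tree sketch (the counter and function are invented, and this only compiles against the header above):

/* Hypothetical usage of the event-mask macros defined above. */
static unsigned long event_counter;

static void bump_event_counter(void)
{
	unsigned long flags;

	local_irq_save(flags);    /* mask upcalls, remember previous mask */
	event_counter++;          /* cannot be interrupted by an upcall */
	local_irq_restore(flags); /* unmask; replays any pending upcall */
}
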
diff --git a/include/mini-os/x86/traps.h b/include/mini-os/x86/traps.h
new file mode 100644
index 0000000..bfb6781
--- /dev/null
@@ -0,0 +1,78 @@
+/* 
+ ****************************************************************************
+ * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
+ ****************************************************************************
+ *
+ *        File: traps.h
+ *      Author: Grzegorz Milos (gm281@cam.ac.uk)
+ *              
+ *        Date: Jun 2005
+ * 
+ * Environment: Xen Minimal OS
+ * Description: Deals with traps
+ *
+ ****************************************************************************
+ */
+
+#ifndef _TRAPS_H_
+#define _TRAPS_H_
+
+#ifdef __i386__
+struct pt_regs {
+       long ebx;
+       long ecx;
+       long edx;
+       long esi;
+       long edi;
+       long ebp;
+       long eax;
+       int  xds;
+       int  xes;
+       long orig_eax;
+       long eip;
+       int  xcs;
+       long eflags;
+       long esp;
+       int  xss;
+};
+#elif __x86_64__
+
+struct pt_regs {
+       unsigned long r15;
+       unsigned long r14;
+       unsigned long r13;
+       unsigned long r12;
+       unsigned long rbp;
+       unsigned long rbx;
+/* arguments: non-interrupt/non-tracing syscalls only save up to here */
+       unsigned long r11;
+       unsigned long r10;      
+       unsigned long r9;
+       unsigned long r8;
+       unsigned long rax;
+       unsigned long rcx;
+       unsigned long rdx;
+       unsigned long rsi;
+       unsigned long rdi;
+       unsigned long orig_rax;
+/* end of arguments */         
+/* cpu exception frame or undefined */
+       unsigned long rip;
+       unsigned long cs;
+       unsigned long eflags; 
+       unsigned long rsp; 
+       unsigned long ss;
+/* top of stack page */ 
+};
+
+
+#endif
+
+void dump_regs(struct pt_regs *regs);
+void stack_walk(void);
+
+#define TRAP_PF_PROT   0x1
+#define TRAP_PF_WRITE  0x2
+#define TRAP_PF_USER   0x4
+
+#endif /* _TRAPS_H_ */
diff --git a/include/mini-os/x86/x86_32/hypercall-x86_32.h b/include/mini-os/x86/x86_32/hypercall-x86_32.h
new file mode 100644
index 0000000..43028ee
--- /dev/null
@@ -0,0 +1,324 @@
+/******************************************************************************
+ * hypercall-x86_32.h
+ * 
+ * Copied from XenLinux.
+ * 
+ * Copyright (c) 2002-2004, K A Fraser
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __HYPERCALL_X86_32_H__
+#define __HYPERCALL_X86_32_H__
+
+#include <xen/xen.h>
+#include <xen/sched.h>
+#include <xen/nmi.h>
+#include <mini-os/mm.h>
+
+#define __STR(x) #x
+#define STR(x) __STR(x)
+
+extern char hypercall_page[PAGE_SIZE];
+
+#define _hypercall0(type, name)                        \
+({                                             \
+       long __res;                             \
+       asm volatile (                          \
+               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+               : "=a" (__res)                  \
+               :                               \
+               : "memory" );                   \
+       (type)__res;                            \
+})
+
+#define _hypercall1(type, name, a1)                            \
+({                                                             \
+       long __res, __ign1;                                     \
+       asm volatile (                                          \
+               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+               : "=a" (__res), "=b" (__ign1)                   \
+               : "1" ((long)(a1))                              \
+               : "memory" );                                   \
+       (type)__res;                                            \
+})
+
+#define _hypercall2(type, name, a1, a2)                                \
+({                                                             \
+       long __res, __ign1, __ign2;                             \
+       asm volatile (                                          \
+               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+               : "=a" (__res), "=b" (__ign1), "=c" (__ign2)    \
+               : "1" ((long)(a1)), "2" ((long)(a2))            \
+               : "memory" );                                   \
+       (type)__res;                                            \
+})
+
+#define _hypercall3(type, name, a1, a2, a3)                    \
+({                                                             \
+       long __res, __ign1, __ign2, __ign3;                     \
+       asm volatile (                                          \
+               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+               : "=a" (__res), "=b" (__ign1), "=c" (__ign2),   \
+               "=d" (__ign3)                                   \
+               : "1" ((long)(a1)), "2" ((long)(a2)),           \
+               "3" ((long)(a3))                                \
+               : "memory" );                                   \
+       (type)__res;                                            \
+})
+
+#define _hypercall4(type, name, a1, a2, a3, a4)                        \
+({                                                             \
+       long __res, __ign1, __ign2, __ign3, __ign4;             \
+       asm volatile (                                          \
+               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+               : "=a" (__res), "=b" (__ign1), "=c" (__ign2),   \
+               "=d" (__ign3), "=S" (__ign4)                    \
+               : "1" ((long)(a1)), "2" ((long)(a2)),           \
+               "3" ((long)(a3)), "4" ((long)(a4))              \
+               : "memory" );                                   \
+       (type)__res;                                            \
+})
+
+#define _hypercall5(type, name, a1, a2, a3, a4, a5)            \
+({                                                             \
+       long __res, __ign1, __ign2, __ign3, __ign4, __ign5;     \
+       asm volatile (                                          \
+               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+               : "=a" (__res), "=b" (__ign1), "=c" (__ign2),   \
+               "=d" (__ign3), "=S" (__ign4), "=D" (__ign5)     \
+               : "1" ((long)(a1)), "2" ((long)(a2)),           \
+               "3" ((long)(a3)), "4" ((long)(a4)),             \
+               "5" ((long)(a5))                                \
+               : "memory" );                                   \
+       (type)__res;                                            \
+})
+
+static inline int
+HYPERVISOR_set_trap_table(
+       trap_info_t *table)
+{
+       return _hypercall1(int, set_trap_table, table);
+}
+
+static inline int
+HYPERVISOR_mmu_update(
+       mmu_update_t *req, int count, int *success_count, domid_t domid)
+{
+       return _hypercall4(int, mmu_update, req, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_mmuext_op(
+       struct mmuext_op *op, int count, int *success_count, domid_t domid)
+{
+       return _hypercall4(int, mmuext_op, op, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_set_gdt(
+       unsigned long *frame_list, int entries)
+{
+       return _hypercall2(int, set_gdt, frame_list, entries);
+}
+
+static inline int
+HYPERVISOR_stack_switch(
+       unsigned long ss, unsigned long esp)
+{
+       return _hypercall2(int, stack_switch, ss, esp);
+}
+
+static inline int
+HYPERVISOR_set_callbacks(
+       unsigned long event_selector, unsigned long event_address,
+       unsigned long failsafe_selector, unsigned long failsafe_address)
+{
+       return _hypercall4(int, set_callbacks,
+                          event_selector, event_address,
+                          failsafe_selector, failsafe_address);
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(
+       int set)
+{
+       return _hypercall1(int, fpu_taskswitch, set);
+}
+
+static inline int
+HYPERVISOR_sched_op(
+       int cmd, void *arg)
+{
+       return _hypercall2(int, sched_op, cmd, arg);
+}
+
+static inline long
+HYPERVISOR_set_timer_op(
+       uint64_t timeout)
+{
+       unsigned long timeout_hi = (unsigned long)(timeout>>32);
+       unsigned long timeout_lo = (unsigned long)timeout;
+       return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
+}
+
+static inline int
+HYPERVISOR_set_debugreg(
+       int reg, unsigned long value)
+{
+       return _hypercall2(int, set_debugreg, reg, value);
+}
+
+static inline unsigned long
+HYPERVISOR_get_debugreg(
+       int reg)
+{
+       return _hypercall1(unsigned long, get_debugreg, reg);
+}
+
+static inline int
+HYPERVISOR_update_descriptor(
+       uint64_t ma, uint64_t desc)
+{
+       return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
+}
+
+static inline int
+HYPERVISOR_memory_op(
+       unsigned int cmd, void *arg)
+{
+       return _hypercall2(int, memory_op, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_multicall(
+       void *call_list, int nr_calls)
+{
+       return _hypercall2(int, multicall, call_list, nr_calls);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping(
+       unsigned long va, pte_t new_val, unsigned long flags)
+{
+       return _hypercall4(int, update_va_mapping, va,
+                          new_val.pte_low, new_val.pte_high, flags);
+}
+
+static inline int
+HYPERVISOR_event_channel_op(
+       int cmd, void *op)
+{
+       return _hypercall2(int, event_channel_op, cmd, op);
+}
+
+static inline int
+HYPERVISOR_xen_version(
+       int cmd, void *arg)
+{
+       return _hypercall2(int, xen_version, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_console_io(
+       int cmd, int count, char *str)
+{
+       return _hypercall3(int, console_io, cmd, count, str);
+}
+
+static inline int
+HYPERVISOR_physdev_op(
+       void *physdev_op)
+{
+       return _hypercall1(int, physdev_op, physdev_op);
+}
+
+static inline int
+HYPERVISOR_grant_table_op(
+       unsigned int cmd, void *uop, unsigned int count)
+{
+       return _hypercall3(int, grant_table_op, cmd, uop, count);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping_otherdomain(
+       unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
+{
+       return _hypercall5(int, update_va_mapping_otherdomain, va,
+                          new_val.pte_low, new_val.pte_high, flags, domid);
+}
+
+static inline int
+HYPERVISOR_vm_assist(
+       unsigned int cmd, unsigned int type)
+{
+       return _hypercall2(int, vm_assist, cmd, type);
+}
+
+static inline int
+HYPERVISOR_vcpu_op(
+       int cmd, int vcpuid, void *extra_args)
+{
+       return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
+}
+
+static inline int
+HYPERVISOR_suspend(
+       unsigned long srec)
+{
+       return _hypercall3(int, sched_op, SCHEDOP_shutdown,
+                          SHUTDOWN_suspend, srec);
+}
+
+static inline int
+HYPERVISOR_nmi_op(
+       unsigned long op,
+       unsigned long arg)
+{
+       return _hypercall2(int, nmi_op, op, arg);
+}
+
+static inline int
+HYPERVISOR_sysctl(
+       unsigned long op)
+{
+       return _hypercall1(int, sysctl, op);
+}
+
+static inline int
+HYPERVISOR_domctl(
+       unsigned long op)
+{
+       return _hypercall1(int, domctl, op);
+}
+
+#endif /* __HYPERCALL_X86_32_H__ */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
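
Each wrapper above expands to a call into a fixed 32-byte slot of hypercall_page (a page the hypervisor populates with the right trap instructions at boot), with arguments passed in ebx, ecx, edx, esi, edi on x86_32. A hypothetical call site using the wrappers (CONSOLEIO_write comes from <xen/xen.h>; the function is invented):

/* Hypothetical usage: write a banner through the console_io hypercall. */
static void say_hello(void)
{
	static char msg[] = "hello from the guest\n";

	HYPERVISOR_console_io(CONSOLEIO_write, sizeof(msg) - 1, msg);
}
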
diff --git a/include/mini-os/x86/x86_64/hypercall-x86_64.h b/include/mini-os/x86/x86_64/hypercall-x86_64.h
new file mode 100644
index 0000000..b874f03
--- /dev/null
@@ -0,0 +1,332 @@
+/******************************************************************************
+ * hypercall-x86_64.h
+ * 
+ * Copied from XenLinux.
+ * 
+ * Copyright (c) 2002-2004, K A Fraser
+ * 
+ * 64-bit updates:
+ *   Benjamin Liu <benjamin.liu@intel.com>
+ *   Jun Nakajima <jun.nakajima@intel.com>
+ * 
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __HYPERCALL_X86_64_H__
+#define __HYPERCALL_X86_64_H__
+
+#include <xen/xen.h>
+#include <xen/sched.h>
+#include <mini-os/mm.h>
+
+#define __STR(x) #x
+#define STR(x) __STR(x)
+
+extern char hypercall_page[PAGE_SIZE];
+
+#define _hypercall0(type, name)                        \
+({                                             \
+       long __res;                             \
+       asm volatile (                          \
+               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+               : "=a" (__res)                  \
+               :                               \
+               : "memory" );                   \
+       (type)__res;                            \
+})
+
+#define _hypercall1(type, name, a1)                            \
+({                                                             \
+       long __res, __ign1;                                     \
+       asm volatile (                                          \
+               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+               : "=a" (__res), "=D" (__ign1)                   \
+               : "1" ((long)(a1))                              \
+               : "memory" );                                   \
+       (type)__res;                                            \
+})
+
+#define _hypercall2(type, name, a1, a2)                                \
+({                                                             \
+       long __res, __ign1, __ign2;                             \
+       asm volatile (                                          \
+               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+               : "=a" (__res), "=D" (__ign1), "=S" (__ign2)    \
+               : "1" ((long)(a1)), "2" ((long)(a2))            \
+               : "memory" );                                   \
+       (type)__res;                                            \
+})
+
+#define _hypercall3(type, name, a1, a2, a3)                    \
+({                                                             \
+       long __res, __ign1, __ign2, __ign3;                     \
+       asm volatile (                                          \
+               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+               : "=a" (__res), "=D" (__ign1), "=S" (__ign2),   \
+               "=d" (__ign3)                                   \
+               : "1" ((long)(a1)), "2" ((long)(a2)),           \
+               "3" ((long)(a3))                                \
+               : "memory" );                                   \
+       (type)__res;                                            \
+})
+
+#define _hypercall4(type, name, a1, a2, a3, a4)                        \
+({                                                             \
+       long __res, __ign1, __ign2, __ign3;                     \
+       asm volatile (                                          \
+               "movq %7,%%r10; "                               \
+               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+               : "=a" (__res), "=D" (__ign1), "=S" (__ign2),   \
+               "=d" (__ign3)                                   \
+               : "1" ((long)(a1)), "2" ((long)(a2)),           \
+               "3" ((long)(a3)), "g" ((long)(a4))              \
+               : "memory", "r10" );                            \
+       (type)__res;                                            \
+})
+
+#define _hypercall5(type, name, a1, a2, a3, a4, a5)            \
+({                                                             \
+       long __res, __ign1, __ign2, __ign3;                     \
+       asm volatile (                                          \
+               "movq %7,%%r10; movq %8,%%r8; "                 \
+               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
+               : "=a" (__res), "=D" (__ign1), "=S" (__ign2),   \
+               "=d" (__ign3)                                   \
+               : "1" ((long)(a1)), "2" ((long)(a2)),           \
+               "3" ((long)(a3)), "g" ((long)(a4)),             \
+               "g" ((long)(a5))                                \
+               : "memory", "r10", "r8" );                      \
+       (type)__res;                                            \
+})
+
+static inline int
+HYPERVISOR_set_trap_table(
+       trap_info_t *table)
+{
+       return _hypercall1(int, set_trap_table, table);
+}
+
+static inline int
+HYPERVISOR_mmu_update(
+       mmu_update_t *req, int count, int *success_count, domid_t domid)
+{
+       return _hypercall4(int, mmu_update, req, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_mmuext_op(
+       struct mmuext_op *op, int count, int *success_count, domid_t domid)
+{
+       return _hypercall4(int, mmuext_op, op, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_set_gdt(
+       unsigned long *frame_list, int entries)
+{
+       return _hypercall2(int, set_gdt, frame_list, entries);
+}
+
+static inline int
+HYPERVISOR_stack_switch(
+       unsigned long ss, unsigned long esp)
+{
+       return _hypercall2(int, stack_switch, ss, esp);
+}
+
+static inline int
+HYPERVISOR_set_callbacks(
+       unsigned long event_address, unsigned long failsafe_address, 
+       unsigned long syscall_address)
+{
+       return _hypercall3(int, set_callbacks,
+                          event_address, failsafe_address, syscall_address);
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(
+       int set)
+{
+       return _hypercall1(int, fpu_taskswitch, set);
+}
+
+static inline int
+HYPERVISOR_sched_op(
+       int cmd, void *arg)
+{
+       return _hypercall2(int, sched_op, cmd, arg);
+}
+
+static inline long
+HYPERVISOR_set_timer_op(
+       uint64_t timeout)
+{
+       return _hypercall1(long, set_timer_op, timeout);
+}
+
+static inline int
+HYPERVISOR_set_debugreg(
+       int reg, unsigned long value)
+{
+       return _hypercall2(int, set_debugreg, reg, value);
+}
+
+static inline unsigned long
+HYPERVISOR_get_debugreg(
+       int reg)
+{
+       return _hypercall1(unsigned long, get_debugreg, reg);
+}
+
+static inline int
+HYPERVISOR_update_descriptor(
+       unsigned long ma, unsigned long word)
+{
+       return _hypercall2(int, update_descriptor, ma, word);
+}
+
+static inline int
+HYPERVISOR_memory_op(
+       unsigned int cmd, void *arg)
+{
+       return _hypercall2(int, memory_op, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_multicall(
+       void *call_list, int nr_calls)
+{
+       return _hypercall2(int, multicall, call_list, nr_calls);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping(
+       unsigned long va, pte_t new_val, unsigned long flags)
+{
+       return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
+}
+
+static inline int
+HYPERVISOR_event_channel_op(
+       int cmd, void *op)
+{
+       return _hypercall2(int, event_channel_op, cmd, op);
+}
+
+static inline int
+HYPERVISOR_xen_version(
+       int cmd, void *arg)
+{
+       return _hypercall2(int, xen_version, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_console_io(
+       int cmd, int count, char *str)
+{
+       return _hypercall3(int, console_io, cmd, count, str);
+}
+
+static inline int
+HYPERVISOR_physdev_op(
+       void *physdev_op)
+{
+       return _hypercall1(int, physdev_op, physdev_op);
+}
+
+static inline int
+HYPERVISOR_grant_table_op(
+       unsigned int cmd, void *uop, unsigned int count)
+{
+       return _hypercall3(int, grant_table_op, cmd, uop, count);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping_otherdomain(
+       unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
+{
+       return _hypercall4(int, update_va_mapping_otherdomain, va,
+                          new_val.pte, flags, domid);
+}
+
+static inline int
+HYPERVISOR_vm_assist(
+       unsigned int cmd, unsigned int type)
+{
+       return _hypercall2(int, vm_assist, cmd, type);
+}
+
+static inline int
+HYPERVISOR_vcpu_op(
+       int cmd, int vcpuid, void *extra_args)
+{
+       return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
+}
+
+static inline int
+HYPERVISOR_set_segment_base(
+       int reg, unsigned long value)
+{
+       return _hypercall2(int, set_segment_base, reg, value);
+}
+
+static inline int
+HYPERVISOR_suspend(
+       unsigned long srec)
+{
+       return _hypercall3(int, sched_op, SCHEDOP_shutdown,
+                          SHUTDOWN_suspend, srec);
+}
+
+static inline int
+HYPERVISOR_nmi_op(
+       unsigned long op,
+       unsigned long arg)
+{
+       return _hypercall2(int, nmi_op, op, arg);
+}
+
+static inline int
+HYPERVISOR_sysctl(
+       unsigned long op)
+{
+       return _hypercall1(int, sysctl, op);
+}
+
+static inline int
+HYPERVISOR_domctl(
+       unsigned long op)
+{
+       return _hypercall1(int, domctl, op);
+}
+
+#endif /* __HYPERCALL_X86_64_H__ */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
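As a quick sketch of how these wrappers are consumed (hypothetical code, not part of this commit; assumes <xen/version.h> is in scope for XENVER_version, and the mini-os printk):

    /* Hypothetical caller: write to the Xen console and query the
     * hypervisor version through the wrappers defined above. */
    static void greet_hypervisor(void)
    {
            static char msg[] = "hello from a rump kernel\n";
            int ver;

            HYPERVISOR_console_io(CONSOLEIO_write, sizeof(msg) - 1, msg);
            ver = HYPERVISOR_xen_version(XENVER_version, NULL);
            /* XENVER_version packs major<<16 | minor */
            printk("running on Xen %d.%d\n", ver >> 16, ver & 0xffff);
    }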
diff --git a/include/x86/arch_limits.h b/include/x86/arch_limits.h
deleted file mode 100644 (file)
index 41f8620..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-
-#ifndef __ARCH_LIMITS_H__
-#define __ARCH_LIMITS_H__
-
-#define __PAGE_SHIFT      12
-
-#ifdef __ASSEMBLY__
-#define __PAGE_SIZE       (1 << __PAGE_SHIFT)
-#else
-#ifdef __x86_64__
-#define __PAGE_SIZE       (1UL << __PAGE_SHIFT)
-#else
-#define __PAGE_SIZE       (1ULL << __PAGE_SHIFT)
-#endif
-#endif
-
-#define __STACK_SIZE_PAGE_ORDER  4
-#define __STACK_SIZE             (__PAGE_SIZE * (1 << __STACK_SIZE_PAGE_ORDER))
-          
-#endif /* __ARCH_LIMITS_H__ */
diff --git a/include/x86/arch_mm.h b/include/x86/arch_mm.h
deleted file mode 100644 (file)
index a95632a..0000000
+++ /dev/null
@@ -1,234 +0,0 @@
-/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
- *
- * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
- * Copyright (c) 2005, Keir A Fraser
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _ARCH_MM_H_
-#define _ARCH_MM_H_
-
-#ifndef __ASSEMBLY__
-#include <xen/xen.h>
-#if defined(__i386__)
-#include <xen/arch-x86_32.h>
-#elif defined(__x86_64__)
-#include <xen/arch-x86_64.h>
-#else
-#error "Unsupported architecture"
-#endif
-#endif
-
-#define L1_FRAME                1
-#define L2_FRAME                2
-#define L3_FRAME                3
-
-#define L1_PAGETABLE_SHIFT      12
-
-#if defined(__i386__)
-
-#define L2_PAGETABLE_SHIFT      21
-#define L3_PAGETABLE_SHIFT      30
-
-#define L1_PAGETABLE_ENTRIES    512
-#define L2_PAGETABLE_ENTRIES    512
-#define L3_PAGETABLE_ENTRIES    4
-
-#define PADDR_BITS              44
-#define PADDR_MASK              ((1ULL << PADDR_BITS)-1)
-
-#define L2_MASK  ((1UL << L3_PAGETABLE_SHIFT) - 1)
-
-/*
- * If starting from a virtual address greater than 0xc0000000,
- * this value will be 2, to account for the final mid-level page
- * directory, which is always mapped in at this location.
- */
-#define NOT_L1_FRAMES           3
-#define PRIpte "016llx"
-#ifndef __ASSEMBLY__
-typedef uint64_t pgentry_t;
-#endif
-
-#elif defined(__x86_64__)
-
-#define L2_PAGETABLE_SHIFT      21
-#define L3_PAGETABLE_SHIFT      30
-#define L4_PAGETABLE_SHIFT      39
-
-#define L1_PAGETABLE_ENTRIES    512
-#define L2_PAGETABLE_ENTRIES    512
-#define L3_PAGETABLE_ENTRIES    512
-#define L4_PAGETABLE_ENTRIES    512
-
-/* These are page-table limitations. Current CPUs support only 40-bit phys. */
-#define PADDR_BITS              52
-#define VADDR_BITS              48
-#define PADDR_MASK              ((1UL << PADDR_BITS)-1)
-#define VADDR_MASK              ((1UL << VADDR_BITS)-1)
-
-#define L2_MASK  ((1UL << L3_PAGETABLE_SHIFT) - 1)
-#define L3_MASK  ((1UL << L4_PAGETABLE_SHIFT) - 1)
-
-#define NOT_L1_FRAMES           3
-#define PRIpte "016lx"
-#ifndef __ASSEMBLY__
-typedef unsigned long pgentry_t;
-#endif
-
-#endif
-
-#define L1_MASK  ((1UL << L2_PAGETABLE_SHIFT) - 1)
-
-/* Given a virtual address, get an entry offset into a page table. */
-#define l1_table_offset(_a) \
-  (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
-#define l2_table_offset(_a) \
-  (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
-#define l3_table_offset(_a) \
-  (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
-#if defined(__x86_64__)
-#define l4_table_offset(_a) \
-  (((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
-#endif
-
-#define _PAGE_PRESENT  0x001ULL
-#define _PAGE_RW       0x002ULL
-#define _PAGE_USER     0x004ULL
-#define _PAGE_PWT      0x008ULL
-#define _PAGE_PCD      0x010ULL
-#define _PAGE_ACCESSED 0x020ULL
-#define _PAGE_DIRTY    0x040ULL
-#define _PAGE_PAT      0x080ULL
-#define _PAGE_PSE      0x080ULL
-#define _PAGE_GLOBAL   0x100ULL
-
-#if defined(__i386__)
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
-#define L1_PROT_RO (_PAGE_PRESENT|_PAGE_ACCESSED)
-#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY |_PAGE_USER)
-#define L3_PROT (_PAGE_PRESENT)
-#elif defined(__x86_64__)
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
-#define L1_PROT_RO (_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_USER)
-#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
-#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
-#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
-#endif /* __i386__ || __x86_64__ */
-
-/* flags for ioremap */
-#define IO_PROT (L1_PROT)
-#define IO_PROT_NOCACHE (L1_PROT | _PAGE_PCD)
-
-/* for P2M */
-#define INVALID_P2M_ENTRY (~0UL)
-
-#include "arch_limits.h"
-#define PAGE_SIZE       __PAGE_SIZE
-#define PAGE_SHIFT      __PAGE_SHIFT
-#define PAGE_MASK       (~(PAGE_SIZE-1))
-
-#define PFN_UP(x)      (((x) + PAGE_SIZE-1) >> L1_PAGETABLE_SHIFT)
-#define PFN_DOWN(x)    ((x) >> L1_PAGETABLE_SHIFT)
-#define PFN_PHYS(x)    ((uint64_t)(x) << L1_PAGETABLE_SHIFT)
-#define PHYS_PFN(x)    ((x) >> L1_PAGETABLE_SHIFT)
-
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr)        (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
-#ifndef __ASSEMBLY__
-/* Definitions for machine and pseudophysical addresses. */
-#ifdef __i386__
-typedef unsigned long long paddr_t;
-typedef unsigned long long maddr_t;
-#else
-typedef unsigned long paddr_t;
-typedef unsigned long maddr_t;
-#endif
-
-extern unsigned long *phys_to_machine_mapping;
-extern char _text, _etext, _erodata, _edata, _end;
-extern unsigned long mfn_zero;
-#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
-static __inline__ maddr_t phys_to_machine(paddr_t phys)
-{
-       maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
-       machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
-       return machine;
-}
-
-#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
-static __inline__ paddr_t machine_to_phys(maddr_t machine)
-{
-       paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
-       phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
-       return phys;
-}
-#endif
-
-#define VIRT_START                 ((unsigned long)&_text)
-
-#define to_phys(x)                 ((unsigned long)(x)-VIRT_START)
-#define to_virt(x)                 ((void *)((unsigned long)(x)+VIRT_START))
-
-#define virt_to_pfn(_virt)         (PFN_DOWN(to_phys(_virt)))
-#define virt_to_mfn(_virt)         (pfn_to_mfn(virt_to_pfn(_virt)))
-#define mach_to_virt(_mach)        (to_virt(machine_to_phys(_mach)))
-#define virt_to_mach(_virt)        (phys_to_machine(to_phys(_virt)))
-#define mfn_to_virt(_mfn)          (to_virt(mfn_to_pfn(_mfn) << PAGE_SHIFT))
-#define pfn_to_virt(_pfn)          (to_virt((_pfn) << PAGE_SHIFT))
-
-/* Pagetable walking. */
-#define pte_to_mfn(_pte)           (((_pte) & (PADDR_MASK&PAGE_MASK)) >> L1_PAGETABLE_SHIFT)
-#define pte_to_virt(_pte)          to_virt(mfn_to_pfn(pte_to_mfn(_pte)) << PAGE_SHIFT)
-
-
-#define PT_BASE                           ((pgentry_t *)start_info.pt_base)
-
-#ifdef __x86_64__
-#define virtual_to_l3(_virt)      ((pgentry_t *)pte_to_virt(PT_BASE[l4_table_offset(_virt)]))
-#else
-#define virtual_to_l3(_virt)      PT_BASE
-#endif
-
-#define virtual_to_l2(_virt)      ({ \
-       unsigned long __virt2 = (_virt); \
-       (pgentry_t *) pte_to_virt(virtual_to_l3(__virt2)[l3_table_offset(__virt2)]); \
-})
-
-#define virtual_to_l1(_virt)      ({ \
-       unsigned long __virt1 = (_virt); \
-       (pgentry_t *) pte_to_virt(virtual_to_l2(__virt1)[l2_table_offset(__virt1)]); \
-})
-
-#define virtual_to_pte(_virt)     ({ \
-       unsigned long __virt0 = (unsigned long) (_virt); \
-       virtual_to_l1(__virt0)[l1_table_offset(__virt0)]; \
-})
-#define virtual_to_mfn(_virt)     pte_to_mfn(virtual_to_pte(_virt))
-
-#define map_frames(f, n) map_frames_ex(f, n, 1, 0, 1, DOMID_SELF, NULL, L1_PROT)
-#define map_zero(n, a) map_frames_ex(&mfn_zero, n, 0, 0, a, DOMID_SELF, NULL, L1_PROT_RO)
-#define do_map_zero(start, n) do_map_frames(start, &mfn_zero, n, 0, 0, DOMID_SELF, NULL, L1_PROT_RO)
-
-pgentry_t *need_pgt(unsigned long addr);
-int mfn_is_ram(unsigned long mfn);
-
-#endif /* _ARCH_MM_H_ */
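A short illustration of the walk macros above: each level's table is reached by turning the previous level's PTE back into a virtual address through the M2P table, until the L1 entry yields the machine frame. A hedged sketch, assuming va is a mapped address inside mini-os:

    /* Sketch only: assumes va is mapped and we run inside mini-os. */
    unsigned long va = (unsigned long)&_text;
    pgentry_t pte = virtual_to_pte(va);     /* L4 -> L3 -> L2 -> L1 walk */
    unsigned long mfn = pte_to_mfn(pte);    /* machine frame number */
    maddr_t ma = virt_to_mach(va);          /* same mapping via the p2m */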
diff --git a/include/x86/arch_sched.h b/include/x86/arch_sched.h
deleted file mode 100644 (file)
index b494eca..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-
-#ifndef __ARCH_SCHED_H__
-#define __ARCH_SCHED_H__
-
-#include "arch_limits.h"
-
-static inline struct thread* get_current(void)
-{
-    struct thread **current;
-#ifdef __i386__    
-    register unsigned long sp asm("esp");
-#else
-    register unsigned long sp asm("rsp");
-#endif 
-    current = (void *)(unsigned long)(sp & ~(__STACK_SIZE-1));
-    return *current;
-}
-
-extern void __arch_switch_threads(unsigned long *prevctx, unsigned long *nextctx);
-
-#define arch_switch_threads(prev,next) __arch_switch_threads(&(prev)->sp, &(next)->sp)
-
-
-          
-#endif /* __ARCH_SCHED_H__ */
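get_current() above leans on an invariant that is easy to miss: every thread stack is __STACK_SIZE bytes and __STACK_SIZE-aligned, and a pointer to the owning struct thread sits at the stack's base, so masking the stack pointer recovers it. A hypothetical sketch of that invariant (the allocator name is invented):

    /* Sketch: how a scheduler would establish what get_current() reads. */
    char *stack = alloc_stack_aligned(__STACK_SIZE);   /* hypothetical */
    *(struct thread **)stack = thread;  /* thread pointer at stack base */
    /* ... later, running anywhere within that stack: */
    ASSERT(get_current() == thread);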
diff --git a/include/x86/arch_spinlock.h b/include/x86/arch_spinlock.h
deleted file mode 100644 (file)
index 4b8faf7..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-
-
-#ifndef __ARCH_ASM_SPINLOCK_H
-#define __ARCH_ASM_SPINLOCK_H
-
-#include <mini-os/lib.h>
-#include "os.h"
-
-
-#define ARCH_SPIN_LOCK_UNLOCKED { 1 }
-
-/*
- * Simple spin lock operations.  There are two variants, one clears IRQs
- * on the local processor, one does not.
- *
- * We make no fairness assumptions. They have a cost.
- */
-
-#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0)
-#define spin_unlock_wait(x)    do { barrier(); } while(spin_is_locked(x))
-
-#define spin_lock_string \
-        "1:\n" \
-       LOCK \
-       "decb %0\n\t" \
-       "jns 3f\n" \
-       "2:\t" \
-       "rep;nop\n\t" \
-       "cmpb $0,%0\n\t" \
-       "jle 2b\n\t" \
-       "jmp 1b\n" \
-       "3:\n\t"
-
-#define spin_lock_string_flags \
-        "1:\n" \
-       LOCK \
-       "decb %0\n\t" \
-       "jns 4f\n\t" \
-       "2:\t" \
-       "testl $0x200, %1\n\t" \
-       "jz 3f\n\t" \
-       "#sti\n\t" \
-       "3:\t" \
-       "rep;nop\n\t" \
-       "cmpb $0, %0\n\t" \
-       "jle 3b\n\t" \
-       "#cli\n\t" \
-       "jmp 1b\n" \
-       "4:\n\t"
-
-/*
- * This works. Despite all the confusion.
- * (except on PPro SMP or if we are using OOSTORE)
- * (PPro errata 66, 92)
- */
-
-#define spin_unlock_string \
-       "xchgb %b0, %1" \
-               :"=q" (oldval), "=m" (lock->slock) \
-               :"0" (oldval) : "memory"
-
-static inline void _raw_spin_unlock(spinlock_t *lock)
-{
-       char oldval = 1;
-       __asm__ __volatile__(
-               spin_unlock_string
-       );
-}
-
-static inline int _raw_spin_trylock(spinlock_t *lock)
-{
-       char oldval;
-       __asm__ __volatile__(
-               "xchgb %b0,%1\n"
-               :"=q" (oldval), "=m" (lock->slock)
-               :"0" (0) : "memory");
-       return oldval > 0;
-}
-
-static inline void _raw_spin_lock(spinlock_t *lock)
-{
-       __asm__ __volatile__(
-               spin_lock_string
-               :"=m" (lock->slock) : : "memory");
-}
-
-static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
-{
-       __asm__ __volatile__(
-               spin_lock_string_flags
-               :"=m" (lock->slock) : "r" (flags) : "memory");
-}
-
-#endif
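The protocol encoded in the asm strings above: slock starts at 1 (ARCH_SPIN_LOCK_UNLOCKED), lock atomically decrements and spins while the value stays <= 0, and unlock exchanges 1 back in. A minimal usage sketch of the raw primitives (assuming spinlock_t's field is named slock, as the code above implies):

    spinlock_t lock = { .slock = 1 };   /* i.e. ARCH_SPIN_LOCK_UNLOCKED */

    _raw_spin_lock(&lock);
    /* ... critical section ... */
    _raw_spin_unlock(&lock);

    if (_raw_spin_trylock(&lock)) {     /* nonblocking variant */
            _raw_spin_unlock(&lock);
    }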
diff --git a/include/x86/os.h b/include/x86/os.h
deleted file mode 100644 (file)
index f193865..0000000
+++ /dev/null
@@ -1,572 +0,0 @@
-/******************************************************************************
- * os.h
- * 
- * random collection of macros and definitions
- */
-
-#ifndef _OS_H_
-#define _OS_H_
-
-#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
-#define __builtin_expect(x, expected_value) (x)
-#endif
-#define unlikely(x)  __builtin_expect((x),0)
-#define likely(x)  __builtin_expect((x),1)
-
-#define smp_processor_id() 0
-
-
-#ifndef __ASSEMBLY__
-#include <mini-os/types.h>
-#include <mini-os/hypervisor.h>
-#include <mini-os/kernel.h>
-
-#define USED    __attribute__ ((used))
-
-#define BUG do_exit
-
-#endif
-#include <xen/xen.h>
-
-
-
-#define __KERNEL_CS  FLAT_KERNEL_CS
-#define __KERNEL_DS  FLAT_KERNEL_DS
-#define __KERNEL_SS  FLAT_KERNEL_SS
-
-#define TRAP_divide_error      0
-#define TRAP_debug             1
-#define TRAP_nmi               2
-#define TRAP_int3              3
-#define TRAP_overflow          4
-#define TRAP_bounds            5
-#define TRAP_invalid_op        6
-#define TRAP_no_device         7
-#define TRAP_double_fault      8
-#define TRAP_copro_seg         9
-#define TRAP_invalid_tss      10
-#define TRAP_no_segment       11
-#define TRAP_stack_error      12
-#define TRAP_gp_fault         13
-#define TRAP_page_fault       14
-#define TRAP_spurious_int     15
-#define TRAP_copro_error      16
-#define TRAP_alignment_check  17
-#define TRAP_machine_check    18
-#define TRAP_simd_error       19
-#define TRAP_deferred_nmi     31
-
-/* Everything below this point is not included by assembler (.S) files. */
-#ifndef __ASSEMBLY__
-
-extern shared_info_t *HYPERVISOR_shared_info;
-
-void trap_init(void);
-void trap_fini(void);
-
-void arch_init(start_info_t *si);
-void arch_print_info(void);
-void arch_fini(void);
-
-
-
-
-
-/* 
- * The use of 'barrier' in the following reflects their use as local-lock
- * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
- * critical operations are executed. All critical operations must complete
- * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
- * includes these barriers, for example.
- */
-
-#define __cli()                                                                \
-do {                                                                   \
-       vcpu_info_t *_vcpu;                                             \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
-       _vcpu->evtchn_upcall_mask = 1;                                  \
-       barrier();                                                      \
-} while (0)
-
-#define __sti()                                                                \
-do {                                                                   \
-       vcpu_info_t *_vcpu;                                             \
-       barrier();                                                      \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
-       _vcpu->evtchn_upcall_mask = 0;                                  \
-       barrier(); /* unmask then check (avoid races) */                \
-       if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
-               force_evtchn_callback();                                \
-} while (0)
-
-#define __save_flags(x)                                                        \
-do {                                                                   \
-       vcpu_info_t *_vcpu;                                             \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
-       (x) = _vcpu->evtchn_upcall_mask;                                \
-} while (0)
-
-#define __restore_flags(x)                                             \
-do {                                                                   \
-       vcpu_info_t *_vcpu;                                             \
-       barrier();                                                      \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
-       if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
-               barrier(); /* unmask then check (avoid races) */        \
-               if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
-                       force_evtchn_callback();                        \
-       }\
-} while (0)
-
-#define safe_halt()            ((void)0)
-
-#define __save_and_cli(x)                                              \
-do {                                                                   \
-       vcpu_info_t *_vcpu;                                             \
-       _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
-       (x) = _vcpu->evtchn_upcall_mask;                                \
-       _vcpu->evtchn_upcall_mask = 1;                                  \
-       barrier();                                                      \
-} while (0)
-
-#define local_irq_save(x)      __save_and_cli(x)
-#define local_irq_restore(x)   __restore_flags(x)
-#define local_save_flags(x)    __save_flags(x)
-#define local_irq_disable()    __cli()
-#define local_irq_enable()     __sti()
-
-#define irqs_disabled()                        \
-    HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].evtchn_upcall_mask
-
-/* This is a barrier for the compiler only, NOT the processor! */
-#define barrier() __asm__ __volatile__("": : :"memory")
-
-#if defined(__i386__)
-#define mb()    __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#define rmb()   __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#define wmb()  __asm__ __volatile__ ("": : :"memory")
-#elif defined(__x86_64__)
-#define mb()    __asm__ __volatile__ ("mfence":::"memory")
-#define rmb()   __asm__ __volatile__ ("lfence":::"memory")
-#define wmb()  __asm__ __volatile__ ("sfence" ::: "memory") /* From CONFIG_UNORDERED_IO (linux) */
-#endif
-
-
-#define LOCK_PREFIX ""
-#define LOCK ""
-#define ADDR (*(volatile long *) addr)
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-typedef struct { volatile int counter; } atomic_t;
-
-
-/************************** i386 *******************************/
-#ifdef __INSIDE_MINIOS__
-#if defined (__i386__)
-
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-       switch (size) {
-               case 1:
-                       __asm__ __volatile__("xchgb %b0,%1"
-                               :"=q" (x)
-                               :"m" (*__xg(ptr)), "0" (x)
-                               :"memory");
-                       break;
-               case 2:
-                       __asm__ __volatile__("xchgw %w0,%1"
-                               :"=r" (x)
-                               :"m" (*__xg(ptr)), "0" (x)
-                               :"memory");
-                       break;
-               case 4:
-                       __asm__ __volatile__("xchgl %0,%1"
-                               :"=r" (x)
-                               :"m" (*__xg(ptr)), "0" (x)
-                               :"memory");
-                       break;
-       }
-       return x;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It can be reordered on architectures other than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
-{
-       int oldbit;
-
-       __asm__ __volatile__( LOCK
-               "btrl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit),"=m" (ADDR)
-               :"Ir" (nr) : "memory");
-       return oldbit;
-}
-
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
-{
-       return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
-}
-
-static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
-{
-       int oldbit;
-
-       __asm__ __volatile__(
-               "btl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit)
-               :"m" (ADDR),"Ir" (nr));
-       return oldbit;
-}
-
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered.  See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non-x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(int nr, volatile unsigned long * addr)
-{
-       __asm__ __volatile__( LOCK
-               "btsl %1,%0"
-               :"=m" (ADDR)
-               :"Ir" (nr));
-}
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered.  However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile unsigned long * addr)
-{
-       __asm__ __volatile__( LOCK
-               "btrl %1,%0"
-               :"=m" (ADDR)
-               :"Ir" (nr));
-}
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long __ffs(unsigned long word)
-{
-       __asm__("bsfl %1,%0"
-               :"=r" (word)
-               :"rm" (word));
-       return word;
-}
-
-
-/*
- * These have to be done with inline assembly: that way the bit-setting
- * is guaranteed to be atomic. All bit operations return 0 if the bit
- * was cleared before the operation and != 0 if it was not.
- *
- * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
- */
-#define ADDR (*(volatile long *) addr)
-
-#define rdtscll(val) \
-     __asm__ __volatile__("rdtsc" : "=A" (val))
-
-
-
-#elif defined(__x86_64__)/* ifdef __i386__ */
-/************************** x86_64 *******************************/
-
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-#define __xg(x) ((volatile long *)(x))
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-       switch (size) {
-               case 1:
-                       __asm__ __volatile__("xchgb %b0,%1"
-                               :"=q" (x)
-                               :"m" (*__xg(ptr)), "0" (x)
-                               :"memory");
-                       break;
-               case 2:
-                       __asm__ __volatile__("xchgw %w0,%1"
-                               :"=r" (x)
-                               :"m" (*__xg(ptr)), "0" (x)
-                               :"memory");
-                       break;
-               case 4:
-                       __asm__ __volatile__("xchgl %k0,%1"
-                               :"=r" (x)
-                               :"m" (*__xg(ptr)), "0" (x)
-                               :"memory");
-                       break;
-               case 8:
-                       __asm__ __volatile__("xchgq %0,%1"
-                               :"=r" (x)
-                               :"m" (*__xg(ptr)), "0" (x)
-                               :"memory");
-                       break;
-       }
-       return x;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.  
- * It also implies a memory barrier.
- */
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
-{
-       int oldbit;
-
-       __asm__ __volatile__( LOCK_PREFIX
-               "btrl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit),"=m" (ADDR)
-               :"dIr" (nr) : "memory");
-       return oldbit;
-}
-
-static __inline__ int constant_test_bit(int nr, const volatile void * addr)
-{
-       return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int variable_test_bit(int nr, volatile const void * addr)
-{
-       int oldbit;
-
-       __asm__ __volatile__(
-               "btl %2,%1\n\tsbbl %0,%0"
-               :"=r" (oldbit)
-               :"m" (ADDR),"dIr" (nr));
-       return oldbit;
-}
-
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
-
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered.  See __set_bit()
- * if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __inline__ void set_bit(int nr, volatile void * addr)
-{
-       __asm__ __volatile__( LOCK_PREFIX
-               "btsl %1,%0"
-               :"=m" (ADDR)
-               :"dIr" (nr) : "memory");
-}
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered.  However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static __inline__ void clear_bit(int nr, volatile void * addr)
-{
-       __asm__ __volatile__( LOCK_PREFIX
-               "btrl %1,%0"
-               :"=m" (ADDR)
-               :"dIr" (nr));
-}
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static __inline__ unsigned long __ffs(unsigned long word)
-{
-       __asm__("bsfq %1,%0"
-               :"=r" (word)
-               :"rm" (word));
-       return word;
-}
-
-#define ADDR (*(volatile long *) addr)
-
-#define rdtscll(val) do { \
-     unsigned int __a,__d; \
-     asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
-     (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
-} while(0)
-
-#define wrmsr(msr,val1,val2) \
-      __asm__ __volatile__("wrmsr" \
-                           : /* no outputs */ \
-                           : "c" (msr), "a" (val1), "d" (val2))
-
-#define wrmsrl(msr,val) wrmsr(msr,(uint32_t)((uint64_t)(val)),((uint64_t)(val))>>32)
-
-
-#else /* ifdef __x86_64__ */
-#error "Unsupported architecture"
-#endif
-#endif /* ifdef __INSIDE_MINIOS__ */
-
-/********************* common i386 and x86_64  ****************************/
-struct __synch_xchg_dummy { unsigned long a[100]; };
-#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
-
-#define synch_cmpxchg(ptr, old, new) \
-((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
-                                     (unsigned long)(old), \
-                                     (unsigned long)(new), \
-                                     sizeof(*(ptr))))
-
-static inline unsigned long __synch_cmpxchg(volatile void *ptr,
-        unsigned long old,
-        unsigned long new, int size)
-{
-    unsigned long prev;
-    switch (size) {
-        case 1:
-            __asm__ __volatile__("lock; cmpxchgb %b1,%2"
-                    : "=a"(prev)
-                    : "q"(new), "m"(*__synch_xg(ptr)),
-                    "0"(old)
-                    : "memory");
-            return prev;
-        case 2:
-            __asm__ __volatile__("lock; cmpxchgw %w1,%2"
-                    : "=a"(prev)
-                    : "r"(new), "m"(*__synch_xg(ptr)),
-                    "0"(old)
-                    : "memory");
-            return prev;
-#ifdef __x86_64__
-        case 4:
-            __asm__ __volatile__("lock; cmpxchgl %k1,%2"
-                    : "=a"(prev)
-                    : "r"(new), "m"(*__synch_xg(ptr)),
-                    "0"(old)
-                    : "memory");
-            return prev;
-        case 8:
-            __asm__ __volatile__("lock; cmpxchgq %1,%2"
-                    : "=a"(prev)
-                    : "r"(new), "m"(*__synch_xg(ptr)),
-                    "0"(old)
-                    : "memory");
-            return prev;
-#else
-        case 4:
-            __asm__ __volatile__("lock; cmpxchgl %1,%2"
-                    : "=a"(prev)
-                    : "r"(new), "m"(*__synch_xg(ptr)),
-                    "0"(old)
-                    : "memory");
-            return prev;
-#endif
-    }
-    return old;
-}
-
-
-static __inline__ void synch_set_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ ( 
-        "lock btsl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-{
-    __asm__ __volatile__ (
-        "lock btrl %1,%0"
-        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-}
-
-static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "lock btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "lock btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-    return oldbit;
-}
-
-static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
-{
-    return ((1UL << (nr & 31)) & 
-            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    __asm__ __volatile__ (
-        "btl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
-    return oldbit;
-}
-
-#define synch_test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- synch_const_test_bit((nr),(addr)) : \
- synch_var_test_bit((nr),(addr)))
-
-
-#undef ADDR
-
-#endif /* not assembly */
-#endif /* _OS_H_ */
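Worth spelling out for the event-channel macros above: a PV guest never toggles the real interrupt flag; __cli()/__sti() flip evtchn_upcall_mask in the shared-info page, and __sti() must re-check evtchn_upcall_pending after unmasking, since an event may have arrived while masked. The canonical pattern, as a hedged sketch:

    unsigned long flags;

    local_irq_save(flags);    /* mask upcalls, remember previous state */
    /* ... touch state shared with event handlers ... */
    local_irq_restore(flags); /* unmask; forces the callback if events
                               * became pending while we were masked */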
diff --git a/include/x86/traps.h b/include/x86/traps.h
deleted file mode 100644 (file)
index bfb6781..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-/* 
- ****************************************************************************
- * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
- ****************************************************************************
- *
- *        File: traps.h
- *      Author: Grzegorz Milos (gm281@cam.ac.uk)
- *              
- *        Date: Jun 2005
- * 
- * Environment: Xen Minimal OS
- * Description: Deals with traps
- *
- ****************************************************************************
- */
-
-#ifndef _TRAPS_H_
-#define _TRAPS_H_
-
-#ifdef __i386__
-struct pt_regs {
-       long ebx;
-       long ecx;
-       long edx;
-       long esi;
-       long edi;
-       long ebp;
-       long eax;
-       int  xds;
-       int  xes;
-       long orig_eax;
-       long eip;
-       int  xcs;
-       long eflags;
-       long esp;
-       int  xss;
-};
-#elif __x86_64__
-
-struct pt_regs {
-       unsigned long r15;
-       unsigned long r14;
-       unsigned long r13;
-       unsigned long r12;
-       unsigned long rbp;
-       unsigned long rbx;
-/* arguments: non-interrupt/non-tracing syscalls only save up to here */
-       unsigned long r11;
-       unsigned long r10;      
-       unsigned long r9;
-       unsigned long r8;
-       unsigned long rax;
-       unsigned long rcx;
-       unsigned long rdx;
-       unsigned long rsi;
-       unsigned long rdi;
-       unsigned long orig_rax;
-/* end of arguments */         
-/* cpu exception frame or undefined */
-       unsigned long rip;
-       unsigned long cs;
-       unsigned long eflags; 
-       unsigned long rsp; 
-       unsigned long ss;
-/* top of stack page */ 
-};
-
-
-#endif
-
-void dump_regs(struct pt_regs *regs);
-void stack_walk(void);
-
-#define TRAP_PF_PROT   0x1
-#define TRAP_PF_WRITE  0x2
-#define TRAP_PF_USER   0x4
-
-#endif /* _TRAPS_H_ */
diff --git a/include/x86/x86_32/hypercall-x86_32.h b/include/x86/x86_32/hypercall-x86_32.h
deleted file mode 100644 (file)
index 43028ee..0000000
+++ /dev/null
@@ -1,324 +0,0 @@
-/******************************************************************************
- * hypercall-x86_32.h
- * 
- * Copied from XenLinux.
- * 
- * Copyright (c) 2002-2004, K A Fraser
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __HYPERCALL_X86_32_H__
-#define __HYPERCALL_X86_32_H__
-
-#include <xen/xen.h>
-#include <xen/sched.h>
-#include <xen/nmi.h>
-#include <mini-os/mm.h>
-
-#define __STR(x) #x
-#define STR(x) __STR(x)
-
-extern char hypercall_page[PAGE_SIZE];
-
-#define _hypercall0(type, name)                        \
-({                                             \
-       long __res;                             \
-       asm volatile (                          \
-               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-               : "=a" (__res)                  \
-               :                               \
-               : "memory" );                   \
-       (type)__res;                            \
-})
-
-#define _hypercall1(type, name, a1)                            \
-({                                                             \
-       long __res, __ign1;                                     \
-       asm volatile (                                          \
-               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-               : "=a" (__res), "=b" (__ign1)                   \
-               : "1" ((long)(a1))                              \
-               : "memory" );                                   \
-       (type)__res;                                            \
-})
-
-#define _hypercall2(type, name, a1, a2)                                \
-({                                                             \
-       long __res, __ign1, __ign2;                             \
-       asm volatile (                                          \
-               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-               : "=a" (__res), "=b" (__ign1), "=c" (__ign2)    \
-               : "1" ((long)(a1)), "2" ((long)(a2))            \
-               : "memory" );                                   \
-       (type)__res;                                            \
-})
-
-#define _hypercall3(type, name, a1, a2, a3)                    \
-({                                                             \
-       long __res, __ign1, __ign2, __ign3;                     \
-       asm volatile (                                          \
-               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-               : "=a" (__res), "=b" (__ign1), "=c" (__ign2),   \
-               "=d" (__ign3)                                   \
-               : "1" ((long)(a1)), "2" ((long)(a2)),           \
-               "3" ((long)(a3))                                \
-               : "memory" );                                   \
-       (type)__res;                                            \
-})
-
-#define _hypercall4(type, name, a1, a2, a3, a4)                        \
-({                                                             \
-       long __res, __ign1, __ign2, __ign3, __ign4;             \
-       asm volatile (                                          \
-               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-               : "=a" (__res), "=b" (__ign1), "=c" (__ign2),   \
-               "=d" (__ign3), "=S" (__ign4)                    \
-               : "1" ((long)(a1)), "2" ((long)(a2)),           \
-               "3" ((long)(a3)), "4" ((long)(a4))              \
-               : "memory" );                                   \
-       (type)__res;                                            \
-})
-
-#define _hypercall5(type, name, a1, a2, a3, a4, a5)            \
-({                                                             \
-       long __res, __ign1, __ign2, __ign3, __ign4, __ign5;     \
-       asm volatile (                                          \
-               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-               : "=a" (__res), "=b" (__ign1), "=c" (__ign2),   \
-               "=d" (__ign3), "=S" (__ign4), "=D" (__ign5)     \
-               : "1" ((long)(a1)), "2" ((long)(a2)),           \
-               "3" ((long)(a3)), "4" ((long)(a4)),             \
-               "5" ((long)(a5))                                \
-               : "memory" );                                   \
-       (type)__res;                                            \
-})
-
-static inline int
-HYPERVISOR_set_trap_table(
-       trap_info_t *table)
-{
-       return _hypercall1(int, set_trap_table, table);
-}
-
-static inline int
-HYPERVISOR_mmu_update(
-       mmu_update_t *req, int count, int *success_count, domid_t domid)
-{
-       return _hypercall4(int, mmu_update, req, count, success_count, domid);
-}
-
-static inline int
-HYPERVISOR_mmuext_op(
-       struct mmuext_op *op, int count, int *success_count, domid_t domid)
-{
-       return _hypercall4(int, mmuext_op, op, count, success_count, domid);
-}
-
-static inline int
-HYPERVISOR_set_gdt(
-       unsigned long *frame_list, int entries)
-{
-       return _hypercall2(int, set_gdt, frame_list, entries);
-}
-
-static inline int
-HYPERVISOR_stack_switch(
-       unsigned long ss, unsigned long esp)
-{
-       return _hypercall2(int, stack_switch, ss, esp);
-}
-
-static inline int
-HYPERVISOR_set_callbacks(
-       unsigned long event_selector, unsigned long event_address,
-       unsigned long failsafe_selector, unsigned long failsafe_address)
-{
-       return _hypercall4(int, set_callbacks,
-                          event_selector, event_address,
-                          failsafe_selector, failsafe_address);
-}
-
-static inline int
-HYPERVISOR_fpu_taskswitch(
-       int set)
-{
-       return _hypercall1(int, fpu_taskswitch, set);
-}
-
-static inline int
-HYPERVISOR_sched_op(
-       int cmd, void *arg)
-{
-       return _hypercall2(int, sched_op, cmd, arg);
-}
-
-static inline long
-HYPERVISOR_set_timer_op(
-       uint64_t timeout)
-{
-       unsigned long timeout_hi = (unsigned long)(timeout>>32);
-       unsigned long timeout_lo = (unsigned long)timeout;
-       return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
-}
-
-static inline int
-HYPERVISOR_set_debugreg(
-       int reg, unsigned long value)
-{
-       return _hypercall2(int, set_debugreg, reg, value);
-}
-
-static inline unsigned long
-HYPERVISOR_get_debugreg(
-       int reg)
-{
-       return _hypercall1(unsigned long, get_debugreg, reg);
-}
-
-static inline int
-HYPERVISOR_update_descriptor(
-       uint64_t ma, uint64_t desc)
-{
-       return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
-}
-
-static inline int
-HYPERVISOR_memory_op(
-       unsigned int cmd, void *arg)
-{
-       return _hypercall2(int, memory_op, cmd, arg);
-}
-
-static inline int
-HYPERVISOR_multicall(
-       void *call_list, int nr_calls)
-{
-       return _hypercall2(int, multicall, call_list, nr_calls);
-}
-
-static inline int
-HYPERVISOR_update_va_mapping(
-       unsigned long va, pte_t new_val, unsigned long flags)
-{
-       return _hypercall4(int, update_va_mapping, va,
-                          new_val.pte_low, new_val.pte_high, flags);
-}
-
-static inline int
-HYPERVISOR_event_channel_op(
-       int cmd, void *op)
-{
-       return _hypercall2(int, event_channel_op, cmd, op);
-}
-
-static inline int
-HYPERVISOR_xen_version(
-       int cmd, void *arg)
-{
-       return _hypercall2(int, xen_version, cmd, arg);
-}
-
-static inline int
-HYPERVISOR_console_io(
-       int cmd, int count, char *str)
-{
-       return _hypercall3(int, console_io, cmd, count, str);
-}
-
-static inline int
-HYPERVISOR_physdev_op(
-       void *physdev_op)
-{
-       return _hypercall1(int, physdev_op, physdev_op);
-}
-
-static inline int
-HYPERVISOR_grant_table_op(
-       unsigned int cmd, void *uop, unsigned int count)
-{
-       return _hypercall3(int, grant_table_op, cmd, uop, count);
-}
-
-static inline int
-HYPERVISOR_update_va_mapping_otherdomain(
-       unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
-{
-       return _hypercall5(int, update_va_mapping_otherdomain, va,
-                          new_val.pte_low, new_val.pte_high, flags, domid);
-}
-
-static inline int
-HYPERVISOR_vm_assist(
-       unsigned int cmd, unsigned int type)
-{
-       return _hypercall2(int, vm_assist, cmd, type);
-}
-
-static inline int
-HYPERVISOR_vcpu_op(
-       int cmd, int vcpuid, void *extra_args)
-{
-       return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
-}
-
-static inline int
-HYPERVISOR_suspend(
-       unsigned long srec)
-{
-       return _hypercall3(int, sched_op, SCHEDOP_shutdown,
-                          SHUTDOWN_suspend, srec);
-}
-
-static inline int
-HYPERVISOR_nmi_op(
-       unsigned long op,
-       unsigned long arg)
-{
-       return _hypercall2(int, nmi_op, op, arg);
-}
-
-static inline int
-HYPERVISOR_sysctl(
-       unsigned long op)
-{
-       return _hypercall1(int, sysctl, op);
-}
-
-static inline int
-HYPERVISOR_domctl(
-       unsigned long op)
-{
-       return _hypercall1(int, domctl, op);
-}
-
-#endif /* __HYPERCALL_X86_32_H__ */
-
-/*
- * Local variables:
- *  c-file-style: "linux"
- *  indent-tabs-mode: t
- *  c-indent-level: 8
- *  c-basic-offset: 8
- *  tab-width: 8
- * End:
- */
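One contrast with the 64-bit header above: the 32-bit ABI passes arguments in %ebx, %ecx, %edx, %esi and %edi, so 64-bit quantities have to be split into register-sized halves, which is exactly what HYPERVISOR_set_timer_op and HYPERVISOR_update_descriptor do. Callers are insulated from the split (sketch; NOW()/MILLISECS() are mini-os time helpers assumed to be in scope):

    uint64_t deadline = NOW() + MILLISECS(10);
    HYPERVISOR_set_timer_op(deadline);  /* becomes _hypercall2(lo, hi) */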
diff --git a/include/x86/x86_64/hypercall-x86_64.h b/include/x86/x86_64/hypercall-x86_64.h
deleted file mode 100644 (file)
index b874f03..0000000
+++ /dev/null
@@ -1,332 +0,0 @@
-/******************************************************************************
- * hypercall-x86_64.h
- * 
- * Copied from XenLinux.
- * 
- * Copyright (c) 2002-2004, K A Fraser
- * 
- * 64-bit updates:
- *   Benjamin Liu <benjamin.liu@intel.com>
- *   Jun Nakajima <jun.nakajima@intel.com>
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __HYPERCALL_X86_64_H__
-#define __HYPERCALL_X86_64_H__
-
-#include <xen/xen.h>
-#include <xen/sched.h>
-#include <mini-os/mm.h>
-
-#define __STR(x) #x
-#define STR(x) __STR(x)
-
-extern char hypercall_page[PAGE_SIZE];
-
-#define _hypercall0(type, name)                        \
-({                                             \
-       long __res;                             \
-       asm volatile (                          \
-               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-               : "=a" (__res)                  \
-               :                               \
-               : "memory" );                   \
-       (type)__res;                            \
-})
-
-#define _hypercall1(type, name, a1)                            \
-({                                                             \
-       long __res, __ign1;                                     \
-       asm volatile (                                          \
-               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-               : "=a" (__res), "=D" (__ign1)                   \
-               : "1" ((long)(a1))                              \
-               : "memory" );                                   \
-       (type)__res;                                            \
-})
-
-#define _hypercall2(type, name, a1, a2)                                \
-({                                                             \
-       long __res, __ign1, __ign2;                             \
-       asm volatile (                                          \
-               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-               : "=a" (__res), "=D" (__ign1), "=S" (__ign2)    \
-               : "1" ((long)(a1)), "2" ((long)(a2))            \
-               : "memory" );                                   \
-       (type)__res;                                            \
-})
-
-#define _hypercall3(type, name, a1, a2, a3)                    \
-({                                                             \
-       long __res, __ign1, __ign2, __ign3;                     \
-       asm volatile (                                          \
-               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-               : "=a" (__res), "=D" (__ign1), "=S" (__ign2),   \
-               "=d" (__ign3)                                   \
-               : "1" ((long)(a1)), "2" ((long)(a2)),           \
-               "3" ((long)(a3))                                \
-               : "memory" );                                   \
-       (type)__res;                                            \
-})
-
-#define _hypercall4(type, name, a1, a2, a3, a4)                        \
-({                                                             \
-       long __res, __ign1, __ign2, __ign3;                     \
-       asm volatile (                                          \
-               "movq %7,%%r10; "                               \
-               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-               : "=a" (__res), "=D" (__ign1), "=S" (__ign2),   \
-               "=d" (__ign3)                                   \
-               : "1" ((long)(a1)), "2" ((long)(a2)),           \
-               "3" ((long)(a3)), "g" ((long)(a4))              \
-               : "memory", "r10" );                            \
-       (type)__res;                                            \
-})
-
-#define _hypercall5(type, name, a1, a2, a3, a4, a5)            \
-({                                                             \
-       long __res, __ign1, __ign2, __ign3;                     \
-       asm volatile (                                          \
-               "movq %7,%%r10; movq %8,%%r8; "                 \
-               "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-               : "=a" (__res), "=D" (__ign1), "=S" (__ign2),   \
-               "=d" (__ign3)                                   \
-               : "1" ((long)(a1)), "2" ((long)(a2)),           \
-               "3" ((long)(a3)), "g" ((long)(a4)),             \
-               "g" ((long)(a5))                                \
-               : "memory", "r10", "r8" );                      \
-       (type)__res;                                            \
-})
-
-static inline int
-HYPERVISOR_set_trap_table(
-       trap_info_t *table)
-{
-       return _hypercall1(int, set_trap_table, table);
-}
-
-static inline int
-HYPERVISOR_mmu_update(
-       mmu_update_t *req, int count, int *success_count, domid_t domid)
-{
-       return _hypercall4(int, mmu_update, req, count, success_count, domid);
-}
-
-static inline int
-HYPERVISOR_mmuext_op(
-       struct mmuext_op *op, int count, int *success_count, domid_t domid)
-{
-       return _hypercall4(int, mmuext_op, op, count, success_count, domid);
-}
-
-static inline int
-HYPERVISOR_set_gdt(
-       unsigned long *frame_list, int entries)
-{
-       return _hypercall2(int, set_gdt, frame_list, entries);
-}
-
-static inline int
-HYPERVISOR_stack_switch(
-       unsigned long ss, unsigned long esp)
-{
-       return _hypercall2(int, stack_switch, ss, esp);
-}
-
-static inline int
-HYPERVISOR_set_callbacks(
-       unsigned long event_address, unsigned long failsafe_address, 
-       unsigned long syscall_address)
-{
-       return _hypercall3(int, set_callbacks,
-                          event_address, failsafe_address, syscall_address);
-}
-
-static inline int
-HYPERVISOR_fpu_taskswitch(
-       int set)
-{
-       return _hypercall1(int, fpu_taskswitch, set);
-}
-
-static inline int
-HYPERVISOR_sched_op(
-       int cmd, void *arg)
-{
-       return _hypercall2(int, sched_op, cmd, arg);
-}
-
-static inline long
-HYPERVISOR_set_timer_op(
-       uint64_t timeout)
-{
-       return _hypercall1(long, set_timer_op, timeout);
-}
-
-static inline int
-HYPERVISOR_set_debugreg(
-       int reg, unsigned long value)
-{
-       return _hypercall2(int, set_debugreg, reg, value);
-}
-
-static inline unsigned long
-HYPERVISOR_get_debugreg(
-       int reg)
-{
-       return _hypercall1(unsigned long, get_debugreg, reg);
-}
-
-static inline int
-HYPERVISOR_update_descriptor(
-       unsigned long ma, unsigned long word)
-{
-       return _hypercall2(int, update_descriptor, ma, word);
-}
-
-static inline int
-HYPERVISOR_memory_op(
-       unsigned int cmd, void *arg)
-{
-       return _hypercall2(int, memory_op, cmd, arg);
-}
-
-static inline int
-HYPERVISOR_multicall(
-       void *call_list, int nr_calls)
-{
-       return _hypercall2(int, multicall, call_list, nr_calls);
-}
-
-static inline int
-HYPERVISOR_update_va_mapping(
-       unsigned long va, pte_t new_val, unsigned long flags)
-{
-       return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
-}
-
-static inline int
-HYPERVISOR_event_channel_op(
-       int cmd, void *op)
-{
-       return _hypercall2(int, event_channel_op, cmd, op);
-}
-
-static inline int
-HYPERVISOR_xen_version(
-       int cmd, void *arg)
-{
-       return _hypercall2(int, xen_version, cmd, arg);
-}
-
-static inline int
-HYPERVISOR_console_io(
-       int cmd, int count, char *str)
-{
-       return _hypercall3(int, console_io, cmd, count, str);
-}
-
-static inline int
-HYPERVISOR_physdev_op(
-       void *physdev_op)
-{
-       return _hypercall1(int, physdev_op, physdev_op);
-}
-
-static inline int
-HYPERVISOR_grant_table_op(
-       unsigned int cmd, void *uop, unsigned int count)
-{
-       return _hypercall3(int, grant_table_op, cmd, uop, count);
-}
-
-static inline int
-HYPERVISOR_update_va_mapping_otherdomain(
-       unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
-{
-       return _hypercall4(int, update_va_mapping_otherdomain, va,
-                          new_val.pte, flags, domid);
-}
-
-static inline int
-HYPERVISOR_vm_assist(
-       unsigned int cmd, unsigned int type)
-{
-       return _hypercall2(int, vm_assist, cmd, type);
-}
-
-static inline int
-HYPERVISOR_vcpu_op(
-       int cmd, int vcpuid, void *extra_args)
-{
-       return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
-}
-
-static inline int
-HYPERVISOR_set_segment_base(
-       int reg, unsigned long value)
-{
-       return _hypercall2(int, set_segment_base, reg, value);
-}
-
-static inline int
-HYPERVISOR_suspend(
-       unsigned long srec)
-{
-       return _hypercall3(int, sched_op, SCHEDOP_shutdown,
-                          SHUTDOWN_suspend, srec);
-}
-
-static inline int
-HYPERVISOR_nmi_op(
-       unsigned long op,
-       unsigned long arg)
-{
-       return _hypercall2(int, nmi_op, op, arg);
-}
-
-static inline int
-HYPERVISOR_sysctl(
-       unsigned long op)
-{
-       return _hypercall1(int, sysctl, op);
-}
-
-static inline int
-HYPERVISOR_domctl(
-       unsigned long op)
-{
-       return _hypercall1(int, domctl, op);
-}
-
-#endif /* __HYPERCALL_X86_64_H__ */
-
-/*
- * Local variables:
- *  c-file-style: "linux"
- *  indent-tabs-mode: t
- *  c-indent-level: 8
- *  c-basic-offset: 8
- *  tab-width: 8
- * End:
- */