ia64/xen-unstable

changeset 13464:26c75e0e48ed

merge with xen-unstable.hg
author awilliam@xenbuild2.aw
date Wed Jan 17 21:31:22 2007 -0700 (2007-01-17)
parents 7e9077dd4010 dd0989523d17
children 58637a0a7c7e
files extras/mini-os/minios-x86_32.lds extras/mini-os/minios-x86_64.lds extras/mini-os/x86_32.S extras/mini-os/x86_64.S
line diff
     1.1 --- a/Makefile	Wed Jan 17 19:55:48 2007 -0700
     1.2 +++ b/Makefile	Wed Jan 17 21:31:22 2007 -0700
     1.3 @@ -2,18 +2,15 @@
     1.4  # Grand Unified Makefile for Xen.
     1.5  #
     1.6  
     1.7 -# Export target architecture overrides to Xen and Linux sub-trees.
     1.8 -ifneq ($(XEN_TARGET_ARCH),)
     1.9 -SUBARCH := $(subst x86_32,i386,$(XEN_TARGET_ARCH))
    1.10 -export XEN_TARGET_ARCH SUBARCH XEN_SYSTYPE
    1.11 -endif
    1.12 -
    1.13  # Default target must appear before any include lines
    1.14  .PHONY: all
    1.15  all: dist
    1.16  
    1.17  export XEN_ROOT=$(CURDIR)
    1.18  include Config.mk
    1.19 +
    1.20 +SUBARCH := $(subst x86_32,i386,$(XEN_TARGET_ARCH))
    1.21 +export XEN_TARGET_ARCH SUBARCH XEN_SYSTYPE
    1.22  include buildconfigs/Rules.mk
    1.23  
    1.24  ifeq ($(XEN_TARGET_X86_PAE),y)
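
The hunk above moves the SUBARCH computation after the include of
Config.mk, so XEN_TARGET_ARCH is already set when the substitution is
evaluated. A minimal stand-alone sketch of the derivation (a hypothetical
demo Makefile, not part of the tree; the recipe line needs a leading tab):

    XEN_TARGET_ARCH ?= x86_32
    # x86_32 maps to the Linux sub-tree name i386; all other values
    # (x86_64, ia64, ...) pass through unchanged.
    SUBARCH := $(subst x86_32,i386,$(XEN_TARGET_ARCH))

    .PHONY: show
    show:
    	@echo '$(XEN_TARGET_ARCH) -> $(SUBARCH)'

    # $ make show                          -> x86_32 -> i386
    # $ make show XEN_TARGET_ARCH=x86_64   -> x86_64 -> x86_64
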
     2.1 --- a/docs/man/xm.pod.1	Wed Jan 17 19:55:48 2007 -0700
     2.2 +++ b/docs/man/xm.pod.1	Wed Jan 17 21:31:22 2007 -0700
     2.3 @@ -451,6 +451,7 @@ make the man page more readable):
     2.4   xen_minor              : 0
     2.5   xen_extra              : -devel
     2.6   xen_caps               : xen-3.0-x86_32
     2.7 + xen_scheduler          : credit
     2.8   xen_pagesize           : 4096
     2.9   platform_params        : virt_start=0xfc000000
    2.10   xen_changeset          : Mon Nov 14 18:13:38 2005 +0100 
    2.11 @@ -460,7 +461,7 @@ make the man page more readable):
    2.12   cc_compile_by          : sdague
    2.13   cc_compile_domain      : (none)
    2.14   cc_compile_date        : Mon Nov 14 14:16:48 EST 2005
    2.15 - xend_config_format     : 2
    2.16 + xend_config_format     : 3
    2.17  
    2.18  B<FIELDS>
    2.19  
     3.1 --- a/docs/man/xmdomain.cfg.pod.5	Wed Jan 17 19:55:48 2007 -0700
     3.2 +++ b/docs/man/xmdomain.cfg.pod.5	Wed Jan 17 21:31:22 2007 -0700
     3.3 @@ -135,6 +135,55 @@ one will be randomly chosen by xen with 
     3.4  
     3.5  =back
     3.6  
     3.7 +=item B<vfb>
     3.8 +
     3.9 +A virtual frame buffer stanza in the form:
    3.10 +
    3.11 +    vfb = [ "stanza" ]
    3.12 +
    3.13 +The stanza specifies a set of I<name = value> options separated by
    3.14 +commas, in the form: "name1=value1, name2=value2, ..."
    3.15 +
    3.16 +B<OPTIONS>
    3.17 +
    3.18 +=over 4
    3.19 +
    3.20 +=item I<type>
    3.21 +
     3.22 +There are currently two valid options: I<vnc> starts a VNC server to
     3.23 +which you can connect an external VNC viewer, and I<sdl> starts an
     3.24 +internal viewer.
    3.25 +
    3.26 +=item I<vncdisplay>
    3.27 +
    3.28 +The VNC display number to use, defaults to the domain ID.  The
    3.29 +VNC server listens on port 5900 + display number.
    3.30 +
    3.31 +=item I<vnclisten>
    3.32 +
     3.33 +The listening address for the VNC server, defaults to 127.0.0.1.
    3.34 +
    3.35 +=item I<vncunused>
    3.36 +
    3.37 +If non-zero, the VNC server listens on the first unused port above
    3.38 +5900.
    3.39 +
    3.40 +=item I<vncpasswd>
    3.41 +
     3.42 +Overrides the XenD-configured default password.
    3.43 +
    3.44 +=item I<display>
    3.45 +
     3.46 +Display to use for the internal viewer, defaults to the environment
    3.47 +variable I<DISPLAY>.
    3.48 +
    3.49 +=item I<xauthority>
    3.50 +
     3.51 +Authority file to use for the internal viewer, defaults to the
    3.52 +variable I<XAUTHORITY>.
    3.53 +
    3.54 +=back
    3.55 +
    3.56  =back
    3.57  
    3.58  =head1 ADDITIONAL OPTIONS
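
Taken together, the vfb options documented above combine into a single
stanza in xmdomain.cfg. Two illustrative lines (hypothetical values):

    # VNC server on the first unused port above 5900, reachable from
    # other hosts, protected by a per-domain password:
    vfb = [ "type=vnc, vncunused=1, vnclisten=0.0.0.0, vncpasswd=secret" ]

    # Internal SDL viewer on the local X display:
    vfb = [ "type=sdl, display=:0.0" ]
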
     4.1 --- a/extras/mini-os/Makefile	Wed Jan 17 19:55:48 2007 -0700
     4.2 +++ b/extras/mini-os/Makefile	Wed Jan 17 21:31:22 2007 -0700
     4.3 @@ -1,112 +1,88 @@
     4.4 -debug ?= y
     4.5 +# Common Makefile for mini-os.
     4.6 +#
     4.7 +# Every architecture directory below mini-os/arch has to have a
      4.8 +# Makefile and an arch.mk.
     4.9 +#
    4.10 +
    4.11  pae ?= n
    4.12  
    4.13  XEN_ROOT = ../..
    4.14  include $(XEN_ROOT)/Config.mk
    4.15  
    4.16 -# Set TARGET_ARCH
    4.17 -override TARGET_ARCH     := $(XEN_TARGET_ARCH)
    4.18 -
    4.19  XEN_INTERFACE_VERSION := 0x00030204
    4.20 +export XEN_INTERFACE_VERSION
    4.21  
    4.22 -# NB. '-Wcast-qual' is nasty, so I omitted it.
    4.23 -CFLAGS := -fno-builtin -Wall -Werror -Wredundant-decls -Wno-format
    4.24 -CFLAGS += -Wstrict-prototypes -Wnested-externs -Wpointer-arith -Winline
    4.25 -CFLAGS += -D__XEN_INTERFACE_VERSION__=$(XEN_INTERFACE_VERSION)
    4.26 +# Set TARGET_ARCH
    4.27 +override TARGET_ARCH := $(XEN_TARGET_ARCH)
    4.28  
    4.29 -ASFLAGS = -D__ASSEMBLY__
     4.30 +# Set mini-os root path, used in minios.mk.
    4.31 +MINI-OS_ROOT=$(PWD)
    4.32 +export MINI-OS_ROOT
    4.33  
    4.34 -LDLIBS =  -L. -lminios
    4.35 -LDFLAGS_FINAL := -N -T minios-$(TARGET_ARCH).lds
    4.36 -LDFLAGS :=
     4.37 +# Determine the architecture family TARGET_ARCH_FAM.
     4.38 +# First check whether the name contains x86_ (covers x86_32, x86_32y, x86_64).
     4.39 +# Otherwise use $(TARGET_ARCH) itself (ia64, ...).
    4.40 +ifeq ($(findstring x86_,$(TARGET_ARCH)),x86_)
    4.41 +TARGET_ARCH_FAM = x86
    4.42 +else
    4.43 +TARGET_ARCH_FAM = $(TARGET_ARCH)
    4.44 +endif
    4.45 +
    4.46 +# The architecture family directory below mini-os.
    4.47 +TARGET_ARCH_DIR := arch/$(TARGET_ARCH_FAM)
    4.48 +
    4.49 +# Export these variables for possible use in architecture dependent makefiles.
    4.50 +export TARGET_ARCH
    4.51 +export TARGET_ARCH_DIR
    4.52 +export TARGET_ARCH_FAM
    4.53 +
    4.54 +# This is used for architecture specific links.
     4.55 +# It can be overridden by arch specific rules.
    4.56 +ARCH_LINKS =
    4.57 +
    4.58 +# For possible special header directories.
     4.59 +# It can be overridden by arch specific rules.
    4.60 +EXTRA_INC =
    4.61 +
    4.62 +# Special build dependencies.
     4.63 +# Rebuild everything after touching this/these file(s) (see minios.mk).
    4.64 +SPEC_DEPENDS = minios.mk
    4.65 +
    4.66 +# Include the architecture family's special makerules.
     4.67 +# This must come before the include of minios.mk!
    4.68 +include $(TARGET_ARCH_DIR)/arch.mk
    4.69 +
    4.70 +# Include common mini-os makerules.
    4.71 +include minios.mk
    4.72 +
    4.73 +# Define some default flags for linking.
    4.74 +LDLIBS := 
    4.75 +LDFLAGS := 
    4.76 +LDARCHLIB := -L$(TARGET_ARCH_DIR) -l$(ARCH_LIB_NAME)
    4.77 +LDFLAGS_FINAL := -N -T $(TARGET_ARCH_DIR)/minios-$(TARGET_ARCH).lds
    4.78  
    4.79  # Prefix for global API names. All other symbols are localised before
    4.80  # linking with EXTRA_OBJS.
    4.81  GLOBAL_PREFIX := xenos_
    4.82  EXTRA_OBJS =
    4.83  
    4.84 -# For possible special source directories.
    4.85 -EXTRA_SRC =
    4.86 -# For possible special header directories.
    4.87 -EXTRA_INC =
    4.88 -
    4.89 -# Standard name for architecture specific subdirectories.
    4.90 -TARGET_ARCH_DIR = $(TARGET_ARCH)
    4.91 -# This is used for architecture specific links.
    4.92 -ARCH_LINKS =
    4.93 -
    4.94 -ifeq ($(TARGET_ARCH),x86_32)
    4.95 -CFLAGS += -m32 -march=i686
    4.96 -LDFLAGS += -m elf_i386
    4.97 -TARGET_ARCH_DIR = x86
    4.98 -EXTRA_INC += $(TARGET_ARCH_DIR)/$(TARGET_ARCH)
    4.99 -EXTRA_SRC += arch/$(EXTRA_INC)
   4.100 -endif
   4.101 -
   4.102 -ifeq ($(TARGET_ARCH)$(pae),x86_32y)
   4.103 -CFLAGS  += -DCONFIG_X86_PAE=1
   4.104 -ASFLAGS += -DCONFIG_X86_PAE=1
   4.105 -TARGET_ARCH_DIR = x86
   4.106 -EXTRA_INC += $(TARGET_ARCH_DIR)/$(TARGET_ARCH)
   4.107 -EXTRA_SRC += arch/$(EXTRA_INC)
   4.108 -endif
   4.109 -
   4.110 -ifeq ($(TARGET_ARCH),x86_64)
   4.111 -CFLAGS += -m64 -mno-red-zone -fpic -fno-reorder-blocks
   4.112 -CFLAGS += -fno-asynchronous-unwind-tables
   4.113 -LDFLAGS += -m elf_x86_64
   4.114 -TARGET_ARCH_DIR = x86
   4.115 -EXTRA_INC += $(TARGET_ARCH_DIR)/$(TARGET_ARCH)
   4.116 -EXTRA_SRC += arch/$(EXTRA_INC)
   4.117 -endif
   4.118 -
   4.119 -ifeq ($(TARGET_ARCH),ia64)
   4.120 -CFLAGS += -mfixed-range=f2-f5,f12-f15,f32-f127 -mconstant-gp
   4.121 -ASFLAGS += -x assembler-with-cpp -Wall
   4.122 -ASFLAGS += -mfixed-range=f2-f5,f12-f15,f32-f127 -fomit-frame-pointer
   4.123 -ASFLAGS += -fno-builtin -fno-common -fno-strict-aliasing -mconstant-gp
   4.124 -ARCH_LINKS = IA64_LINKS		# Special link on ia64 needed
   4.125 -define arch_links
   4.126 -[ -e include/ia64/asm-xsi-offsets.h ] || ln -sf ../../../../xen/include/asm-ia64/asm-xsi-offsets.h include/ia64/asm-xsi-offsets.h
   4.127 -endef
   4.128 -endif
   4.129 -
   4.130 -ifeq ($(debug),y)
   4.131 -CFLAGS += -g
   4.132 -else
   4.133 -CFLAGS += -O3
   4.134 -endif
   4.135 -
   4.136 -# Add the special header directories to the include paths.
   4.137 -extra_incl := $(foreach dir,$(EXTRA_INC),-Iinclude/$(dir))
   4.138 -override CPPFLAGS := -Iinclude $(CPPFLAGS) -Iinclude/$(TARGET_ARCH_DIR)	$(extra_incl)
   4.139 -
   4.140  TARGET := mini-os
   4.141  
   4.142 -HEAD := $(TARGET_ARCH).o
   4.143 +# Subdirectories common to mini-os
   4.144 +SUBDIRS := lib xenbus console
   4.145 +
   4.146 +# The common mini-os objects to build.
   4.147  OBJS := $(patsubst %.c,%.o,$(wildcard *.c))
   4.148  OBJS += $(patsubst %.c,%.o,$(wildcard lib/*.c))
   4.149  OBJS += $(patsubst %.c,%.o,$(wildcard xenbus/*.c))
   4.150  OBJS += $(patsubst %.c,%.o,$(wildcard console/*.c))
   4.151 -OBJS += $(patsubst %.S,%.o,$(wildcard arch/$(TARGET_ARCH_DIR)/*.S))
   4.152 -OBJS += $(patsubst %.c,%.o,$(wildcard arch/$(TARGET_ARCH_DIR)/*.c))
   4.153 -# For special wanted source directories.
   4.154 -extra_objs := $(foreach dir,$(EXTRA_SRC),$(patsubst %.c,%.o,$(wildcard $(dir)/*.c)))
   4.155 -OBJS += $(extra_objs)
   4.156 -extra_objs := $(foreach dir,$(EXTRA_SRC),$(patsubst %.S,%.o,$(wildcard $(dir)/*.S)))
   4.157 -OBJS += $(extra_objs)
   4.158  
   4.159 -HDRS := $(wildcard include/*.h)
   4.160 -HDRS += $(wildcard include/xen/*.h)
   4.161 -HDRS += $(wildcard include/$(TARGET_ARCH_DIR)/*.h)
   4.162 -# For special wanted header directories.
   4.163 -extra_heads := $(foreach dir,$(EXTRA_INC),$(wildcard $(dir)/*.h))
   4.164 -HDRS += $(extra_heads)
   4.165  
   4.166  .PHONY: default
   4.167  default: $(TARGET)
   4.168  
   4.169 -# Create special architecture specific links.
   4.170 +# Create special architecture specific links. The function arch_links
   4.171 +# has to be defined in arch.mk (see include above).
   4.172  ifneq ($(ARCH_LINKS),)
   4.173  $(ARCH_LINKS):
   4.174  	$(arch_links)
   4.175 @@ -116,24 +92,29 @@ endif
   4.176  links:	$(ARCH_LINKS)
   4.177  	[ -e include/xen ] || ln -sf ../../../xen/include/public include/xen
   4.178  
   4.179 -$(TARGET): links $(OBJS) $(HEAD)
   4.180 -	$(LD) -r $(LDFLAGS) $(HEAD) $(OBJS) -o $@.o
   4.181 +.PHONY: arch_lib
   4.182 +arch_lib:
   4.183 +	$(MAKE) --directory=$(TARGET_ARCH_DIR) || exit 1;
   4.184 +
   4.185 +$(TARGET): links $(OBJS) arch_lib
   4.186 +	$(LD) -r $(LDFLAGS) $(HEAD_OBJ) $(OBJS) $(LDARCHLIB) -o $@.o
   4.187  	$(OBJCOPY) -w -G $(GLOBAL_PREFIX)* -G _start $@.o $@.o
   4.188  	$(LD) $(LDFLAGS) $(LDFLAGS_FINAL) $@.o $(EXTRA_OBJS) -o $@
   4.189  	gzip -f -9 -c $@ >$@.gz
   4.190  
   4.191 -.PHONY: clean
   4.192 -clean:
   4.193 -	find . -type f -name '*.o' | xargs rm -f
   4.194 -	rm -f *.o *~ core $(TARGET) $(TARGET).gz
   4.195 +.PHONY: clean arch_clean
   4.196 +
   4.197 +arch_clean:
   4.198 +	$(MAKE) --directory=$(TARGET_ARCH_DIR) clean || exit 1;
   4.199 +
   4.200 +clean:	arch_clean
   4.201 +	for dir in $(SUBDIRS); do \
   4.202 +		rm -f $$dir/*.o; \
   4.203 +	done
   4.204 +	rm -f *.o *~ core $(TARGET).elf $(TARGET).raw $(TARGET) $(TARGET).gz
   4.205  	find . -type l | xargs rm -f
   4.206  	rm -f tags TAGS
   4.207  
   4.208 -%.o: %.c $(HDRS) Makefile
   4.209 -	$(CC) $(CFLAGS) $(CPPFLAGS) -c $< -o $@
   4.210 -
   4.211 -%.o: %.S $(HDRS) Makefile
   4.212 -	$(CC) $(ASFLAGS) $(CPPFLAGS) -c $< -o $@
   4.213  
   4.214  define all_sources
   4.215       ( find . -follow -name SCCS -prune -o -name '*.[chS]' -print )
   4.216 @@ -147,3 +128,4 @@ cscope:
   4.217  .PHONY: tags
   4.218  tags:
   4.219  	$(all_sources) | xargs ctags
   4.220 +
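
The TARGET_ARCH_FAM derivation above keys off the x86_ prefix; every
x86_* target shares arch/x86, while any other target name is its own
family. A hypothetical stand-alone Makefile exercising the same logic:

    TARGET_ARCH := x86_32                # try also x86_32y, x86_64, ia64
    ifeq ($(findstring x86_,$(TARGET_ARCH)),x86_)
    TARGET_ARCH_FAM = x86                # all x86_* targets -> arch/x86
    else
    TARGET_ARCH_FAM = $(TARGET_ARCH)     # e.g. ia64 -> arch/ia64
    endif

    .PHONY: show
    show:
    	@echo '$(TARGET_ARCH) -> arch/$(TARGET_ARCH_FAM)'
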
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/extras/mini-os/arch/x86/Makefile	Wed Jan 17 21:31:22 2007 -0700
     5.3 @@ -0,0 +1,29 @@
     5.4 +#
     5.5 +# x86 architecture specific makefiles.
      5.6 +# It is used for x86_32, x86_32y and x86_64.
     5.7 +#
     5.8 +
      5.9 +# Rebuild all after touching this/these extra file(s) (see minios.mk).
     5.10 +SPEC_DEPENDS = arch.mk
    5.11 +
     5.12 +# arch.mk has to be included before minios.mk!
    5.13 +include arch.mk
    5.14 +include ../../minios.mk
    5.15 +
     5.16 +# Sources here are all *.c files; $(TARGET_ARCH).S is handled
     5.17 +# separately via $(HEAD_ARCH_OBJ).
    5.18 +ARCH_SRCS := $(wildcard *.c)
    5.19 +
    5.20 +# The objects built from the sources.
    5.21 +ARCH_OBJS := $(patsubst %.c,%.o,$(ARCH_SRCS))
    5.22 +
    5.23 +all: $(ARCH_LIB)
    5.24 +
     5.25 +# $(HEAD_ARCH_OBJ) is only built here; it is needed when linking
    5.26 +# in ../../Makefile.
    5.27 +$(ARCH_LIB): $(ARCH_OBJS) $(HEAD_ARCH_OBJ)
    5.28 +	$(AR) rv $(ARCH_LIB) $(ARCH_OBJS)
    5.29 +
    5.30 +clean:
    5.31 +	rm -f $(ARCH_LIB) $(ARCH_OBJS)
    5.32 +
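
The arch library built above follows the usual static-archive naming
convention, so the top-level link in ../../Makefile can pick it up with
-L/-l. A hypothetical trace of the names for x86_32:

    TARGET_ARCH   := x86_32
    ARCH_LIB_NAME := $(TARGET_ARCH)            # set in minios.mk
    ARCH_LIB      := lib$(ARCH_LIB_NAME).a     # -> libx86_32.a
    # LDARCHLIB := -L$(TARGET_ARCH_DIR) -l$(ARCH_LIB_NAME)
    # i.e. the final link passes: -Larch/x86 -lx86_32
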
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/extras/mini-os/arch/x86/arch.mk	Wed Jan 17 21:31:22 2007 -0700
     6.3 @@ -0,0 +1,28 @@
     6.4 +#
      6.5 +# Architecture specific makerules for the x86 family
     6.6 +# (including x86_32, x86_32y and x86_64).
     6.7 +#
     6.8 +
     6.9 +ifeq ($(TARGET_ARCH),x86_32)
    6.10 +ARCH_CFLAGS  := -m32 -march=i686
    6.11 +ARCH_LDFLAGS := -m elf_i386
    6.12 +EXTRA_INC += $(TARGET_ARCH_FAM)/$(TARGET_ARCH)
    6.13 +EXTRA_SRC += arch/$(EXTRA_INC)
    6.14 +endif
    6.15 +
    6.16 +ifeq ($(TARGET_ARCH)$(pae),x86_32y)
     6.17 +ARCH_CFLAGS  += -DCONFIG_X86_PAE=1
     6.18 +ARCH_ASFLAGS += -DCONFIG_X86_PAE=1
    6.19 +EXTRA_INC += $(TARGET_ARCH_FAM)/$(TARGET_ARCH)
    6.20 +EXTRA_SRC += arch/$(EXTRA_INC)
    6.21 +endif
    6.22 +
    6.23 +ifeq ($(TARGET_ARCH),x86_64)
    6.24 +ARCH_CFLAGS := -m64 -mno-red-zone -fpic -fno-reorder-blocks
     6.25 +ARCH_CFLAGS += -fno-asynchronous-unwind-tables
    6.26 +ARCH_LDFLAGS := -m elf_x86_64
    6.27 +EXTRA_INC += $(TARGET_ARCH_FAM)/$(TARGET_ARCH)
    6.28 +EXTRA_SRC += arch/$(EXTRA_INC)
    6.29 +endif
    6.30 +
    6.31 +
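
The x86_32y case above works by string concatenation: $(TARGET_ARCH)$(pae)
only equals "x86_32y" when TARGET_ARCH is x86_32 and pae=y, in which case
both the x86_32 block and the PAE block match, so the PAE flags must append
to (not replace) the x86_32 flags. A hypothetical stand-alone illustration:

    TARGET_ARCH := x86_32
    pae         := y
    ifeq ($(TARGET_ARCH)$(pae),x86_32y)    # "x86_32" plus "y"
    ARCH_CFLAGS += -DCONFIG_X86_PAE=1
    endif
    # pae=n gives "x86_32n" and the block is skipped; TARGET_ARCH=x86_64
    # gives "x86_64y" and the block is skipped as well.
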
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/extras/mini-os/arch/x86/minios-x86_32.lds	Wed Jan 17 21:31:22 2007 -0700
     7.3 @@ -0,0 +1,45 @@
     7.4 +OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
     7.5 +OUTPUT_ARCH(i386)
     7.6 +ENTRY(_start)
     7.7 +SECTIONS
     7.8 +{
     7.9 +  . = 0x0;
    7.10 +  _text = .;			/* Text and read-only data */
    7.11 +  .text : {
    7.12 +	*(.text)
    7.13 +	*(.gnu.warning)
    7.14 +	} = 0x9090
    7.15 +
    7.16 +  _etext = .;			/* End of text section */
    7.17 +
    7.18 +  .rodata : { *(.rodata) *(.rodata.*) }
    7.19 +
    7.20 +  .data : {			/* Data */
    7.21 +	*(.data)
    7.22 +	CONSTRUCTORS
    7.23 +	}
    7.24 +
    7.25 +  _edata = .;			/* End of data section */
    7.26 +
    7.27 +  __bss_start = .;		/* BSS */
    7.28 +  .bss : {
    7.29 +	*(.bss)
    7.30 +	}
    7.31 +  _end = . ;
    7.32 +
    7.33 +  /* Sections to be discarded */
    7.34 +  /DISCARD/ : {
    7.35 +	*(.text.exit)
    7.36 +	*(.data.exit)
    7.37 +	*(.exitcall.exit)
    7.38 +	}
    7.39 +
    7.40 +  /* Stabs debugging sections.  */
    7.41 +  .stab 0 : { *(.stab) }
    7.42 +  .stabstr 0 : { *(.stabstr) }
    7.43 +  .stab.excl 0 : { *(.stab.excl) }
    7.44 +  .stab.exclstr 0 : { *(.stab.exclstr) }
    7.45 +  .stab.index 0 : { *(.stab.index) }
    7.46 +  .stab.indexstr 0 : { *(.stab.indexstr) }
    7.47 +  .comment 0 : { *(.comment) }
    7.48 +}
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/extras/mini-os/arch/x86/minios-x86_64.lds	Wed Jan 17 21:31:22 2007 -0700
     8.3 @@ -0,0 +1,54 @@
     8.4 +OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
     8.5 +OUTPUT_ARCH(i386:x86-64)
     8.6 +ENTRY(_start)
     8.7 +SECTIONS
     8.8 +{
     8.9 +  . = 0x0;
    8.10 +  _text = .;			/* Text and read-only data */
    8.11 +  .text : {
    8.12 +	*(.text)
    8.13 +	*(.gnu.warning)
    8.14 +	} = 0x9090
    8.15 +
    8.16 +  _etext = .;			/* End of text section */
    8.17 +
    8.18 +  .rodata : { *(.rodata) *(.rodata.*) }
    8.19 +
    8.20 +  .data : {			/* Data */
    8.21 +	*(.data)
    8.22 +	CONSTRUCTORS
    8.23 +	}
    8.24 +
    8.25 +  _edata = .;			/* End of data section */
    8.26 +
    8.27 +  . = ALIGN(8192);		/* init_task */
    8.28 +  .data.init_task : { *(.data.init_task) }
    8.29 +
    8.30 +  . = ALIGN(4096);
    8.31 +  .data.page_aligned : { *(.data.idt) }
    8.32 +
    8.33 +  . = ALIGN(32);
    8.34 +  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
    8.35 +
    8.36 +  __bss_start = .;		/* BSS */
    8.37 +  .bss : {
    8.38 +	*(.bss)
    8.39 +	}
    8.40 +  _end = . ;
    8.41 +
    8.42 +  /* Sections to be discarded */
    8.43 +  /DISCARD/ : {
    8.44 +	*(.text.exit)
    8.45 +	*(.data.exit)
    8.46 +	*(.exitcall.exit)
    8.47 +	}
    8.48 +
    8.49 +  /* Stabs debugging sections.  */
    8.50 +  .stab 0 : { *(.stab) }
    8.51 +  .stabstr 0 : { *(.stabstr) }
    8.52 +  .stab.excl 0 : { *(.stab.excl) }
    8.53 +  .stab.exclstr 0 : { *(.stab.exclstr) }
    8.54 +  .stab.index 0 : { *(.stab.index) }
    8.55 +  .stab.indexstr 0 : { *(.stab.indexstr) }
    8.56 +  .comment 0 : { *(.comment) }
    8.57 +}
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/extras/mini-os/arch/x86/x86_32.S	Wed Jan 17 21:31:22 2007 -0700
     9.3 @@ -0,0 +1,287 @@
     9.4 +#include <os.h>
     9.5 +#include <xen/arch-x86_32.h>
     9.6 +
     9.7 +.section __xen_guest
     9.8 +	.ascii	"GUEST_OS=Mini-OS"
     9.9 +	.ascii	",XEN_VER=xen-3.0"
    9.10 +	.ascii	",VIRT_BASE=0x0" /* &_text from minios_x86_32.lds */
    9.11 +	.ascii	",ELF_PADDR_OFFSET=0x0"
    9.12 +	.ascii	",HYPERCALL_PAGE=0x2"
    9.13 +#ifdef CONFIG_X86_PAE
    9.14 +	.ascii	",PAE=yes"
    9.15 +#else
    9.16 +	.ascii	",PAE=no"
    9.17 +#endif
    9.18 +	.ascii	",LOADER=generic"
    9.19 +	.byte	0
    9.20 +.text
    9.21 +
    9.22 +.globl _start, shared_info, hypercall_page
    9.23 +                        
    9.24 +_start:
    9.25 +        cld
    9.26 +        lss stack_start,%esp
    9.27 +        push %esi 
    9.28 +        call start_kernel
    9.29 +
    9.30 +stack_start:
    9.31 +	.long stack+8192, __KERNEL_SS
    9.32 +
    9.33 +        /* Unpleasant -- the PTE that maps this page is actually overwritten */
    9.34 +        /* to map the real shared-info page! :-)                             */
    9.35 +        .org 0x1000
    9.36 +shared_info:
    9.37 +        .org 0x2000
    9.38 +
    9.39 +hypercall_page:
    9.40 +        .org 0x3000
    9.41 +
    9.42 +ES		= 0x20
    9.43 +ORIG_EAX	= 0x24
    9.44 +EIP		= 0x28
    9.45 +CS		= 0x2C
    9.46 +
    9.47 +#define ENTRY(X) .globl X ; X :
    9.48 +
    9.49 +#define SAVE_ALL \
    9.50 +	cld; \
    9.51 +	pushl %es; \
    9.52 +	pushl %ds; \
    9.53 +	pushl %eax; \
    9.54 +	pushl %ebp; \
    9.55 +	pushl %edi; \
    9.56 +	pushl %esi; \
    9.57 +	pushl %edx; \
    9.58 +	pushl %ecx; \
    9.59 +	pushl %ebx; \
    9.60 +	movl $(__KERNEL_DS),%edx; \
    9.61 +	movl %edx,%ds; \
    9.62 +	movl %edx,%es;
    9.63 +
    9.64 +#define RESTORE_ALL	\
    9.65 +	popl %ebx;	\
    9.66 +	popl %ecx;	\
    9.67 +	popl %edx;	\
    9.68 +	popl %esi;	\
    9.69 +	popl %edi;	\
    9.70 +	popl %ebp;	\
    9.71 +	popl %eax;	\
    9.72 +	popl %ds;	\
    9.73 +	popl %es;	\
    9.74 +	addl $4,%esp;	\
    9.75 +	iret;		\
    9.76 +
    9.77 +ENTRY(divide_error)
    9.78 +	pushl $0		# no error code
    9.79 +	pushl $do_divide_error
    9.80 +do_exception:
    9.81 +    pushl %ds
    9.82 +	pushl %eax
    9.83 +	xorl %eax, %eax
    9.84 +	pushl %ebp
    9.85 +	pushl %edi
    9.86 +	pushl %esi
    9.87 +	pushl %edx
    9.88 +	decl %eax			# eax = -1
    9.89 +	pushl %ecx
    9.90 +	pushl %ebx
    9.91 +	cld
    9.92 +	movl %es, %ecx
    9.93 +	movl ES(%esp), %edi		# get the function address
    9.94 +	movl ORIG_EAX(%esp), %edx	# get the error code
    9.95 +	movl %eax, ORIG_EAX(%esp)
    9.96 +	movl %ecx, ES(%esp)
    9.97 +	movl $(__KERNEL_DS), %ecx
    9.98 +	movl %ecx, %ds
    9.99 +	movl %ecx, %es
   9.100 +	movl %esp,%eax			# pt_regs pointer
   9.101 +    pushl %edx
   9.102 +    pushl %eax
   9.103 +	call *%edi
   9.104 +    jmp ret_from_exception
   9.105 +    
   9.106 +ret_from_exception:
   9.107 +        movb CS(%esp),%cl
   9.108 +	test $2,%cl          # slow return to ring 2 or 3
   9.109 +	jne  safesti
   9.110 +        RESTORE_ALL
   9.111 +
   9.112 +# A note on the "critical region" in our callback handler.
   9.113 +# We want to avoid stacking callback handlers due to events occurring
   9.114 +# during handling of the last event. To do this, we keep events disabled
    9.115 +# until we've done all processing. HOWEVER, we must enable events before
    9.116 +# popping the stack frame (can't be done atomically) and so it would still
    9.117 +# be possible to get enough handler activations to overflow the stack.
    9.118 +# Although unlikely, bugs of that kind are hard to track down, so we'd
   9.119 +# like to avoid the possibility.
   9.120 +# So, on entry to the handler we detect whether we interrupted an
   9.121 +# existing activation in its critical region -- if so, we pop the current
   9.122 +# activation and restart the handler using the previous one.
   9.123 +ENTRY(hypervisor_callback)
   9.124 +        pushl %eax
   9.125 +        SAVE_ALL
   9.126 +        movl EIP(%esp),%eax
   9.127 +        cmpl $scrit,%eax
   9.128 +        jb   11f
   9.129 +        cmpl $ecrit,%eax
   9.130 +        jb   critical_region_fixup
   9.131 +11:     push %esp
   9.132 +        call do_hypervisor_callback
   9.133 +        add  $4,%esp
   9.134 +        movl HYPERVISOR_shared_info,%esi
   9.135 +        xorl %eax,%eax
   9.136 +        movb CS(%esp),%cl
   9.137 +    	test $2,%cl          # slow return to ring 2 or 3
   9.138 +        jne  safesti
   9.139 +safesti:movb $0,1(%esi)     # reenable event callbacks
   9.140 +scrit:  /**** START OF CRITICAL REGION ****/
   9.141 +        testb $0xFF,(%esi)
   9.142 +        jnz  14f              # process more events if necessary...
   9.143 +        RESTORE_ALL
   9.144 +14:     movb $1,1(%esi)
   9.145 +        jmp  11b
   9.146 +ecrit:  /**** END OF CRITICAL REGION ****/
   9.147 +# [How we do the fixup]. We want to merge the current stack frame with the
   9.148 +# just-interrupted frame. How we do this depends on where in the critical
   9.149 +# region the interrupted handler was executing, and so how many saved
   9.150 +# registers are in each frame. We do this quickly using the lookup table
   9.151 +# 'critical_fixup_table'. For each byte offset in the critical region, it
   9.152 +# provides the number of bytes which have already been popped from the
   9.153 +# interrupted stack frame. 
   9.154 +critical_region_fixup:
   9.155 +        addl $critical_fixup_table-scrit,%eax
   9.156 +        movzbl (%eax),%eax    # %eax contains num bytes popped
   9.157 +        mov  %esp,%esi
   9.158 +        add  %eax,%esi        # %esi points at end of src region
   9.159 +        mov  %esp,%edi
   9.160 +        add  $0x34,%edi       # %edi points at end of dst region
   9.161 +        mov  %eax,%ecx
    9.162 +        shr  $2,%ecx          # convert bytes to words
   9.163 +        je   16f              # skip loop if nothing to copy
   9.164 +15:     subl $4,%esi          # pre-decrementing copy loop
   9.165 +        subl $4,%edi
   9.166 +        movl (%esi),%eax
   9.167 +        movl %eax,(%edi)
   9.168 +        loop 15b
   9.169 +16:     movl %edi,%esp        # final %edi is top of merged stack
   9.170 +        jmp  11b
   9.171 +         
   9.172 +critical_fixup_table:        
   9.173 +        .byte 0x00,0x00,0x00                  # testb $0xff,(%esi)
   9.174 +        .byte 0x00,0x00                       # jne  14f
   9.175 +        .byte 0x00                            # pop  %ebx
   9.176 +        .byte 0x04                            # pop  %ecx
   9.177 +        .byte 0x08                            # pop  %edx
   9.178 +        .byte 0x0c                            # pop  %esi
   9.179 +        .byte 0x10                            # pop  %edi
   9.180 +        .byte 0x14                            # pop  %ebp
   9.181 +        .byte 0x18                            # pop  %eax
   9.182 +        .byte 0x1c                            # pop  %ds
   9.183 +        .byte 0x20                            # pop  %es
   9.184 +        .byte 0x24,0x24,0x24                  # add  $4,%esp
   9.185 +        .byte 0x28                            # iret
   9.186 +        .byte 0x00,0x00,0x00,0x00             # movb $1,1(%esi)
   9.187 +        .byte 0x00,0x00                       # jmp  11b
   9.188 +       
   9.189 +# Hypervisor uses this for application faults while it executes.
   9.190 +ENTRY(failsafe_callback)
   9.191 +      pop  %ds
   9.192 +      pop  %es
   9.193 +      pop  %fs
   9.194 +      pop  %gs
   9.195 +      iret
   9.196 +                
   9.197 +ENTRY(coprocessor_error)
   9.198 +	pushl $0
   9.199 +	pushl $do_coprocessor_error
   9.200 +	jmp do_exception
   9.201 +
   9.202 +ENTRY(simd_coprocessor_error)
   9.203 +	pushl $0
   9.204 +	pushl $do_simd_coprocessor_error
   9.205 +	jmp do_exception
   9.206 +
   9.207 +ENTRY(device_not_available)
   9.208 +        iret
   9.209 +
   9.210 +ENTRY(debug)
   9.211 +	pushl $0
   9.212 +	pushl $do_debug
   9.213 +	jmp do_exception
   9.214 +
   9.215 +ENTRY(int3)
   9.216 +	pushl $0
   9.217 +	pushl $do_int3
   9.218 +	jmp do_exception
   9.219 +
   9.220 +ENTRY(overflow)
   9.221 +	pushl $0
   9.222 +	pushl $do_overflow
   9.223 +	jmp do_exception
   9.224 +
   9.225 +ENTRY(bounds)
   9.226 +	pushl $0
   9.227 +	pushl $do_bounds
   9.228 +	jmp do_exception
   9.229 +
   9.230 +ENTRY(invalid_op)
   9.231 +	pushl $0
   9.232 +	pushl $do_invalid_op
   9.233 +	jmp do_exception
   9.234 +
   9.235 +
   9.236 +ENTRY(coprocessor_segment_overrun)
   9.237 +	pushl $0
   9.238 +	pushl $do_coprocessor_segment_overrun
   9.239 +	jmp do_exception
   9.240 +
   9.241 +
   9.242 +ENTRY(invalid_TSS)
   9.243 +	pushl $do_invalid_TSS
   9.244 +	jmp do_exception
   9.245 +
   9.246 +
   9.247 +ENTRY(segment_not_present)
   9.248 +	pushl $do_segment_not_present
   9.249 +	jmp do_exception
   9.250 +
   9.251 +
   9.252 +ENTRY(stack_segment)
   9.253 +	pushl $do_stack_segment
   9.254 +	jmp do_exception
   9.255 +
   9.256 +
   9.257 +ENTRY(general_protection)
   9.258 +	pushl $do_general_protection
   9.259 +	jmp do_exception
   9.260 +
   9.261 +
   9.262 +ENTRY(alignment_check)
   9.263 +	pushl $do_alignment_check
   9.264 +	jmp do_exception
   9.265 +
   9.266 +
   9.267 +ENTRY(page_fault)
   9.268 +    pushl $do_page_fault
   9.269 +    jmp do_exception
   9.270 +    
   9.271 +ENTRY(machine_check)
   9.272 +	pushl $0
   9.273 +	pushl $do_machine_check
   9.274 +	jmp do_exception
   9.275 +
   9.276 +
   9.277 +ENTRY(spurious_interrupt_bug)
   9.278 +	pushl $0
   9.279 +	pushl $do_spurious_interrupt_bug
   9.280 +	jmp do_exception
   9.281 +
   9.282 +
   9.283 +
   9.284 +ENTRY(thread_starter)
   9.285 +    popl %eax
   9.286 +    popl %ebx
   9.287 +    pushl %eax
   9.288 +    call *%ebx
   9.289 +    call exit_thread 
   9.290 +    
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/extras/mini-os/arch/x86/x86_64.S	Wed Jan 17 21:31:22 2007 -0700
    10.3 @@ -0,0 +1,385 @@
    10.4 +#include <os.h>
    10.5 +#include <xen/features.h>
    10.6 +
    10.7 +.section __xen_guest
    10.8 +	.ascii	"GUEST_OS=Mini-OS"
    10.9 +	.ascii	",XEN_VER=xen-3.0"
   10.10 +	.ascii	",VIRT_BASE=0x0" /* &_text from minios_x86_64.lds */
   10.11 +	.ascii	",ELF_PADDR_OFFSET=0x0"
   10.12 +	.ascii	",HYPERCALL_PAGE=0x2"
   10.13 +	.ascii	",LOADER=generic"
   10.14 +	.byte	0
   10.15 +.text
   10.16 +
   10.17 +#define ENTRY(X) .globl X ; X :
   10.18 +.globl _start, shared_info, hypercall_page
   10.19 +
   10.20 +
   10.21 +_start:
   10.22 +        cld
   10.23 +        movq stack_start(%rip),%rsp
   10.24 +        movq %rsi,%rdi
   10.25 +        call start_kernel
   10.26 +
   10.27 +stack_start:
   10.28 +        .quad stack+8192
   10.29 +
   10.30 +        /* Unpleasant -- the PTE that maps this page is actually overwritten */
   10.31 +        /* to map the real shared-info page! :-)                             */
   10.32 +        .org 0x1000
   10.33 +shared_info:
   10.34 +        .org 0x2000
   10.35 +
   10.36 +hypercall_page:
   10.37 +        .org 0x3000
   10.38 +
   10.39 +
   10.40 +/* Offsets into shared_info_t. */                
   10.41 +#define evtchn_upcall_pending		/* 0 */
   10.42 +#define evtchn_upcall_mask		1
   10.43 +
   10.44 +NMI_MASK = 0x80000000
   10.45 +
   10.46 +#define RDI 112
   10.47 +#define ORIG_RAX 120       /* + error_code */ 
   10.48 +#define EFLAGS 144
   10.49 +
   10.50 +#define REST_SKIP 6*8			
   10.51 +.macro SAVE_REST
   10.52 +	subq $REST_SKIP,%rsp
   10.53 +#	CFI_ADJUST_CFA_OFFSET	REST_SKIP
   10.54 +	movq %rbx,5*8(%rsp) 
   10.55 +#	CFI_REL_OFFSET	rbx,5*8
   10.56 +	movq %rbp,4*8(%rsp) 
   10.57 +#	CFI_REL_OFFSET	rbp,4*8
   10.58 +	movq %r12,3*8(%rsp) 
   10.59 +#	CFI_REL_OFFSET	r12,3*8
   10.60 +	movq %r13,2*8(%rsp) 
   10.61 +#	CFI_REL_OFFSET	r13,2*8
   10.62 +	movq %r14,1*8(%rsp) 
   10.63 +#	CFI_REL_OFFSET	r14,1*8
   10.64 +	movq %r15,(%rsp) 
   10.65 +#	CFI_REL_OFFSET	r15,0*8
   10.66 +.endm		
   10.67 +
   10.68 +
   10.69 +.macro RESTORE_REST
   10.70 +	movq (%rsp),%r15
   10.71 +#	CFI_RESTORE r15
   10.72 +	movq 1*8(%rsp),%r14
   10.73 +#	CFI_RESTORE r14
   10.74 +	movq 2*8(%rsp),%r13
   10.75 +#	CFI_RESTORE r13
   10.76 +	movq 3*8(%rsp),%r12
   10.77 +#	CFI_RESTORE r12
   10.78 +	movq 4*8(%rsp),%rbp
   10.79 +#	CFI_RESTORE rbp
   10.80 +	movq 5*8(%rsp),%rbx
   10.81 +#	CFI_RESTORE rbx
   10.82 +	addq $REST_SKIP,%rsp
   10.83 +#	CFI_ADJUST_CFA_OFFSET	-(REST_SKIP)
   10.84 +.endm
   10.85 +
   10.86 +
   10.87 +#define ARG_SKIP 9*8
   10.88 +.macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0
   10.89 +	.if \skipr11
   10.90 +	.else
   10.91 +	movq (%rsp),%r11
   10.92 +#	CFI_RESTORE r11
   10.93 +	.endif
   10.94 +	.if \skipr8910
   10.95 +	.else
   10.96 +	movq 1*8(%rsp),%r10
   10.97 +#	CFI_RESTORE r10
   10.98 +	movq 2*8(%rsp),%r9
   10.99 +#	CFI_RESTORE r9
  10.100 +	movq 3*8(%rsp),%r8
  10.101 +#	CFI_RESTORE r8
  10.102 +	.endif
  10.103 +	.if \skiprax
  10.104 +	.else
  10.105 +	movq 4*8(%rsp),%rax
  10.106 +#	CFI_RESTORE rax
  10.107 +	.endif
  10.108 +	.if \skiprcx
  10.109 +	.else
  10.110 +	movq 5*8(%rsp),%rcx
  10.111 +#	CFI_RESTORE rcx
  10.112 +	.endif
  10.113 +	.if \skiprdx
  10.114 +	.else
  10.115 +	movq 6*8(%rsp),%rdx
  10.116 +#	CFI_RESTORE rdx
  10.117 +	.endif
  10.118 +	movq 7*8(%rsp),%rsi
  10.119 +#	CFI_RESTORE rsi
  10.120 +	movq 8*8(%rsp),%rdi
  10.121 +#	CFI_RESTORE rdi
  10.122 +	.if ARG_SKIP+\addskip > 0
  10.123 +	addq $ARG_SKIP+\addskip,%rsp
  10.124 +#	CFI_ADJUST_CFA_OFFSET	-(ARG_SKIP+\addskip)
  10.125 +	.endif
  10.126 +.endm	
  10.127 +
  10.128 +
  10.129 +.macro HYPERVISOR_IRET flag
  10.130 +#    testb $3,1*8(%rsp)    /* Don't need to do that in Mini-os, as */
  10.131 +#	jnz   2f               /* there is no userspace? */
  10.132 +	testl $NMI_MASK,2*8(%rsp)
  10.133 +	jnz   2f
  10.134 +
  10.135 +	testb $1,(xen_features+XENFEAT_supervisor_mode_kernel)
  10.136 +	jnz   1f
  10.137 +
  10.138 +	/* Direct iret to kernel space. Correct CS and SS. */
  10.139 +	orb   $3,1*8(%rsp)
  10.140 +	orb   $3,4*8(%rsp)
  10.141 +1:	iretq
  10.142 +
  10.143 +2:	/* Slow iret via hypervisor. */
  10.144 +	andl  $~NMI_MASK, 16(%rsp)
  10.145 +	pushq $\flag
  10.146 +	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
  10.147 +.endm
  10.148 +
  10.149 +/*
  10.150 + * Exception entry point. This expects an error code/orig_rax on the stack
  10.151 + * and the exception handler in %rax.	
  10.152 + */ 		  				
  10.153 +ENTRY(error_entry)
  10.154 +#	_frame RDI
  10.155 +	/* rdi slot contains rax, oldrax contains error code */
  10.156 +	cld	
  10.157 +	subq  $14*8,%rsp
  10.158 +#	CFI_ADJUST_CFA_OFFSET	(14*8)
  10.159 +	movq %rsi,13*8(%rsp)
  10.160 +#	CFI_REL_OFFSET	rsi,RSI
  10.161 +	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
  10.162 +	movq %rdx,12*8(%rsp)
  10.163 +#	CFI_REL_OFFSET	rdx,RDX
  10.164 +	movq %rcx,11*8(%rsp)
  10.165 +#	CFI_REL_OFFSET	rcx,RCX
  10.166 +	movq %rsi,10*8(%rsp)	/* store rax */ 
  10.167 +#	CFI_REL_OFFSET	rax,RAX
  10.168 +	movq %r8, 9*8(%rsp)
  10.169 +#	CFI_REL_OFFSET	r8,R8
  10.170 +	movq %r9, 8*8(%rsp)
  10.171 +#	CFI_REL_OFFSET	r9,R9
  10.172 +	movq %r10,7*8(%rsp)
  10.173 +#	CFI_REL_OFFSET	r10,R10
  10.174 +	movq %r11,6*8(%rsp)
  10.175 +#	CFI_REL_OFFSET	r11,R11
  10.176 +	movq %rbx,5*8(%rsp) 
  10.177 +#	CFI_REL_OFFSET	rbx,RBX
  10.178 +	movq %rbp,4*8(%rsp) 
  10.179 +#	CFI_REL_OFFSET	rbp,RBP
  10.180 +	movq %r12,3*8(%rsp) 
  10.181 +#	CFI_REL_OFFSET	r12,R12
  10.182 +	movq %r13,2*8(%rsp) 
  10.183 +#	CFI_REL_OFFSET	r13,R13
  10.184 +	movq %r14,1*8(%rsp) 
  10.185 +#	CFI_REL_OFFSET	r14,R14
  10.186 +	movq %r15,(%rsp) 
  10.187 +#	CFI_REL_OFFSET	r15,R15
  10.188 +#if 0        
  10.189 +	cmpl $__KERNEL_CS,CS(%rsp)
  10.190 +	je  error_kernelspace
  10.191 +#endif        
  10.192 +error_call_handler:
  10.193 +	movq %rdi, RDI(%rsp)            
  10.194 +	movq %rsp,%rdi
  10.195 +	movq ORIG_RAX(%rsp),%rsi	# get error code 
  10.196 +	movq $-1,ORIG_RAX(%rsp)
  10.197 +	call *%rax
  10.198 +
  10.199 +.macro zeroentry sym
  10.200 +#	INTR_FRAME
  10.201 +    movq (%rsp),%rcx
  10.202 +    movq 8(%rsp),%r11
  10.203 +    addq $0x10,%rsp /* skip rcx and r11 */
  10.204 +	pushq $0	/* push error code/oldrax */ 
  10.205 +#	CFI_ADJUST_CFA_OFFSET 8
  10.206 +	pushq %rax	/* push real oldrax to the rdi slot */ 
  10.207 +#	CFI_ADJUST_CFA_OFFSET 8
  10.208 +	leaq  \sym(%rip),%rax
  10.209 +	jmp error_entry
  10.210 +#	CFI_ENDPROC
  10.211 +.endm	
  10.212 +
  10.213 +.macro errorentry sym
  10.214 +#	XCPT_FRAME
  10.215 +        movq (%rsp),%rcx
  10.216 +        movq 8(%rsp),%r11
  10.217 +        addq $0x10,%rsp /* rsp points to the error code */
  10.218 +	pushq %rax
  10.219 +#	CFI_ADJUST_CFA_OFFSET 8
  10.220 +	leaq  \sym(%rip),%rax
  10.221 +	jmp error_entry
  10.222 +#	CFI_ENDPROC
  10.223 +.endm
  10.224 +
  10.225 +#define XEN_GET_VCPU_INFO(reg)	movq HYPERVISOR_shared_info,reg
  10.226 +#define XEN_PUT_VCPU_INFO(reg)
  10.227 +#define XEN_PUT_VCPU_INFO_fixup
  10.228 +#define XEN_LOCKED_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
  10.229 +#define XEN_LOCKED_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
  10.230 +#define XEN_TEST_PENDING(reg)	testb $0xFF,evtchn_upcall_pending(reg)
  10.231 +
  10.232 +#define XEN_BLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
  10.233 +                    			XEN_LOCKED_BLOCK_EVENTS(reg)	; \
  10.234 +    				            XEN_PUT_VCPU_INFO(reg)
  10.235 +
  10.236 +#define XEN_UNBLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
  10.237 +                				XEN_LOCKED_UNBLOCK_EVENTS(reg)	; \
  10.238 +    			            	XEN_PUT_VCPU_INFO(reg)
  10.239 +
  10.240 +
  10.241 +
  10.242 +ENTRY(hypervisor_callback)
  10.243 +    zeroentry hypervisor_callback2
  10.244 +
  10.245 +ENTRY(hypervisor_callback2)
  10.246 +        movq %rdi, %rsp 
  10.247 +11:     movq %gs:8,%rax
  10.248 +        incl %gs:0
  10.249 +        cmovzq %rax,%rsp
  10.250 +        pushq %rdi
  10.251 +        call do_hypervisor_callback 
  10.252 +        popq %rsp
  10.253 +        decl %gs:0
  10.254 +        jmp error_exit
  10.255 +
  10.256 +#        ALIGN
  10.257 +restore_all_enable_events:  
  10.258 +	XEN_UNBLOCK_EVENTS(%rsi)        # %rsi is already set up...
  10.259 +
  10.260 +scrit:	/**** START OF CRITICAL REGION ****/
  10.261 +	XEN_TEST_PENDING(%rsi)
  10.262 +	jnz  14f			# process more events if necessary...
  10.263 +	XEN_PUT_VCPU_INFO(%rsi)
  10.264 +        RESTORE_ARGS 0,8,0
  10.265 +        HYPERVISOR_IRET 0
  10.266 +        
  10.267 +14:	XEN_LOCKED_BLOCK_EVENTS(%rsi)
  10.268 +	XEN_PUT_VCPU_INFO(%rsi)
  10.269 +	SAVE_REST
  10.270 +        movq %rsp,%rdi                  # set the argument again
  10.271 +	jmp  11b
  10.272 +ecrit:  /**** END OF CRITICAL REGION ****/
  10.273 +
  10.274 +
  10.275 +retint_kernel:
  10.276 +retint_restore_args:
  10.277 +	movl EFLAGS-REST_SKIP(%rsp), %eax
  10.278 +	shr $9, %eax			# EAX[0] == IRET_EFLAGS.IF
  10.279 +	XEN_GET_VCPU_INFO(%rsi)
  10.280 +	andb evtchn_upcall_mask(%rsi),%al
  10.281 +	andb $1,%al			# EAX[0] == IRET_EFLAGS.IF & event_mask
  10.282 +	jnz restore_all_enable_events	#        != 0 => enable event delivery
  10.283 +	XEN_PUT_VCPU_INFO(%rsi)
  10.284 +		
  10.285 +	RESTORE_ARGS 0,8,0
  10.286 +	HYPERVISOR_IRET 0
  10.287 +
  10.288 +
  10.289 +error_exit:		
  10.290 +	RESTORE_REST
  10.291 +/*	cli */
  10.292 +	XEN_BLOCK_EVENTS(%rsi)		
  10.293 +	jmp retint_kernel
  10.294 +
  10.295 +
  10.296 +
  10.297 +ENTRY(failsafe_callback)
  10.298 +        popq  %rcx
  10.299 +        popq  %r11
  10.300 +        iretq
  10.301 +
  10.302 +
  10.303 +ENTRY(coprocessor_error)
  10.304 +        zeroentry do_coprocessor_error
  10.305 +
  10.306 +
  10.307 +ENTRY(simd_coprocessor_error)
  10.308 +        zeroentry do_simd_coprocessor_error
  10.309 +
  10.310 +
  10.311 +ENTRY(device_not_available)
  10.312 +        zeroentry do_device_not_available
  10.313 +
  10.314 +
  10.315 +ENTRY(debug)
  10.316 +#       INTR_FRAME
  10.317 +#       CFI_ADJUST_CFA_OFFSET 8 */
  10.318 +        zeroentry do_debug
  10.319 +#       CFI_ENDPROC
  10.320 +
  10.321 +
  10.322 +ENTRY(int3)
  10.323 +#       INTR_FRAME
  10.324 +#       CFI_ADJUST_CFA_OFFSET 8 */
  10.325 +        zeroentry do_int3
  10.326 +#       CFI_ENDPROC
  10.327 +
  10.328 +ENTRY(overflow)
  10.329 +        zeroentry do_overflow
  10.330 +
  10.331 +
  10.332 +ENTRY(bounds)
  10.333 +        zeroentry do_bounds
  10.334 +    
  10.335 +    
  10.336 +ENTRY(invalid_op)
  10.337 +        zeroentry do_invalid_op
  10.338 +
  10.339 +
  10.340 +ENTRY(coprocessor_segment_overrun)
  10.341 +        zeroentry do_coprocessor_segment_overrun
  10.342 +
  10.343 +
  10.344 +ENTRY(invalid_TSS)
  10.345 +        errorentry do_invalid_TSS
  10.346 +
  10.347 +
  10.348 +ENTRY(segment_not_present)
  10.349 +        errorentry do_segment_not_present
  10.350 +
  10.351 +
  10.352 +/* runs on exception stack */
  10.353 +ENTRY(stack_segment)
  10.354 +#       XCPT_FRAME
  10.355 +        errorentry do_stack_segment
  10.356 +#       CFI_ENDPROC
  10.357 +                    
  10.358 +
  10.359 +ENTRY(general_protection)
  10.360 +        errorentry do_general_protection
  10.361 +
  10.362 +
  10.363 +ENTRY(alignment_check)
  10.364 +        errorentry do_alignment_check
  10.365 +
  10.366 +
  10.367 +ENTRY(divide_error)
  10.368 +        zeroentry do_divide_error
  10.369 +
  10.370 +
  10.371 +ENTRY(spurious_interrupt_bug)
  10.372 +        zeroentry do_spurious_interrupt_bug
  10.373 +            
  10.374 +
  10.375 +ENTRY(page_fault)
  10.376 +        errorentry do_page_fault
  10.377 +
  10.378 +
  10.379 +
  10.380 +
  10.381 +
  10.382 +ENTRY(thread_starter)
  10.383 +        popq %rdi
  10.384 +        popq %rbx
  10.385 +        call *%rbx
  10.386 +        call exit_thread 
  10.387 +        
  10.388 +
    11.1 --- a/extras/mini-os/minios-x86_32.lds	Wed Jan 17 19:55:48 2007 -0700
    11.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.3 @@ -1,45 +0,0 @@
    11.4 -OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
    11.5 -OUTPUT_ARCH(i386)
    11.6 -ENTRY(_start)
    11.7 -SECTIONS
    11.8 -{
    11.9 -  . = 0x0;
   11.10 -  _text = .;			/* Text and read-only data */
   11.11 -  .text : {
   11.12 -	*(.text)
   11.13 -	*(.gnu.warning)
   11.14 -	} = 0x9090
   11.15 -
   11.16 -  _etext = .;			/* End of text section */
   11.17 -
   11.18 -  .rodata : { *(.rodata) *(.rodata.*) }
   11.19 -
   11.20 -  .data : {			/* Data */
   11.21 -	*(.data)
   11.22 -	CONSTRUCTORS
   11.23 -	}
   11.24 -
   11.25 -  _edata = .;			/* End of data section */
   11.26 -
   11.27 -  __bss_start = .;		/* BSS */
   11.28 -  .bss : {
   11.29 -	*(.bss)
   11.30 -	}
   11.31 -  _end = . ;
   11.32 -
   11.33 -  /* Sections to be discarded */
   11.34 -  /DISCARD/ : {
   11.35 -	*(.text.exit)
   11.36 -	*(.data.exit)
   11.37 -	*(.exitcall.exit)
   11.38 -	}
   11.39 -
   11.40 -  /* Stabs debugging sections.  */
   11.41 -  .stab 0 : { *(.stab) }
   11.42 -  .stabstr 0 : { *(.stabstr) }
   11.43 -  .stab.excl 0 : { *(.stab.excl) }
   11.44 -  .stab.exclstr 0 : { *(.stab.exclstr) }
   11.45 -  .stab.index 0 : { *(.stab.index) }
   11.46 -  .stab.indexstr 0 : { *(.stab.indexstr) }
   11.47 -  .comment 0 : { *(.comment) }
   11.48 -}
    12.1 --- a/extras/mini-os/minios-x86_64.lds	Wed Jan 17 19:55:48 2007 -0700
    12.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.3 @@ -1,54 +0,0 @@
    12.4 -OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
    12.5 -OUTPUT_ARCH(i386:x86-64)
    12.6 -ENTRY(_start)
    12.7 -SECTIONS
    12.8 -{
    12.9 -  . = 0x0;
   12.10 -  _text = .;			/* Text and read-only data */
   12.11 -  .text : {
   12.12 -	*(.text)
   12.13 -	*(.gnu.warning)
   12.14 -	} = 0x9090
   12.15 -
   12.16 -  _etext = .;			/* End of text section */
   12.17 -
   12.18 -  .rodata : { *(.rodata) *(.rodata.*) }
   12.19 -
   12.20 -  .data : {			/* Data */
   12.21 -	*(.data)
   12.22 -	CONSTRUCTORS
   12.23 -	}
   12.24 -
   12.25 -  _edata = .;			/* End of data section */
   12.26 -
   12.27 -  . = ALIGN(8192);		/* init_task */
   12.28 -  .data.init_task : { *(.data.init_task) }
   12.29 -
   12.30 -  . = ALIGN(4096);
   12.31 -  .data.page_aligned : { *(.data.idt) }
   12.32 -
   12.33 -  . = ALIGN(32);
   12.34 -  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
   12.35 -
   12.36 -  __bss_start = .;		/* BSS */
   12.37 -  .bss : {
   12.38 -	*(.bss)
   12.39 -	}
   12.40 -  _end = . ;
   12.41 -
   12.42 -  /* Sections to be discarded */
   12.43 -  /DISCARD/ : {
   12.44 -	*(.text.exit)
   12.45 -	*(.data.exit)
   12.46 -	*(.exitcall.exit)
   12.47 -	}
   12.48 -
   12.49 -  /* Stabs debugging sections.  */
   12.50 -  .stab 0 : { *(.stab) }
   12.51 -  .stabstr 0 : { *(.stabstr) }
   12.52 -  .stab.excl 0 : { *(.stab.excl) }
   12.53 -  .stab.exclstr 0 : { *(.stab.exclstr) }
   12.54 -  .stab.index 0 : { *(.stab.index) }
   12.55 -  .stab.indexstr 0 : { *(.stab.indexstr) }
   12.56 -  .comment 0 : { *(.comment) }
   12.57 -}
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/extras/mini-os/minios.mk	Wed Jan 17 21:31:22 2007 -0700
    13.3 @@ -0,0 +1,62 @@
    13.4 +#
     13.5 +# This file contains the common make rules for building mini-os.
    13.6 +#
    13.7 +
     13.8 +debug ?= y
    13.9 +
   13.10 +# Define some default flags.
   13.11 +# NB. '-Wcast-qual' is nasty, so I omitted it.
   13.12 +DEF_CFLAGS := -fno-builtin -Wall -Werror -Wredundant-decls -Wno-format
   13.13 +DEF_CFLAGS += -Wstrict-prototypes -Wnested-externs -Wpointer-arith -Winline
   13.14 +DEF_CFLAGS += -D__XEN_INTERFACE_VERSION__=$(XEN_INTERFACE_VERSION)
   13.15 +
   13.16 +DEF_ASFLAGS = -D__ASSEMBLY__
   13.17 +
   13.18 +ifeq ($(debug),y)
   13.19 +DEF_CFLAGS += -g
   13.20 +else
   13.21 +DEF_CFLAGS += -O3
   13.22 +endif
   13.23 +
   13.24 +# Build the CFLAGS and ASFLAGS for compiling and assembling.
   13.25 +# DEF_... flags are the common mini-os flags,
    13.26 +# ARCH_... flags may be defined in arch/$(TARGET_ARCH_FAM)/arch.mk.
   13.27 +CFLAGS := $(DEF_CFLAGS) $(ARCH_CFLAGS)
   13.28 +ASFLAGS := $(DEF_ASFLAGS) $(ARCH_ASFLAGS)
   13.29 +
   13.30 +# The path pointing to the architecture specific header files.
   13.31 +ARCH_SPEC_INC := $(MINI-OS_ROOT)/include/$(TARGET_ARCH_FAM)
   13.32 +
   13.33 +# Find all header files for checking dependencies.
   13.34 +HDRS := $(wildcard $(MINI-OS_ROOT)/include/*.h)
   13.35 +HDRS += $(wildcard $(MINI-OS_ROOT)/include/xen/*.h)
   13.36 +HDRS += $(wildcard $(ARCH_SPEC_INC)/*.h)
   13.37 +# For special wanted header directories.
   13.38 +extra_heads := $(foreach dir,$(EXTRA_INC),$(wildcard $(dir)/*.h))
   13.39 +HDRS += $(extra_heads)
   13.40 +
   13.41 +# Add the special header directories to the include paths.
   13.42 +extra_incl := $(foreach dir,$(EXTRA_INC),-I$(MINI-OS_ROOT)/include/$(dir))
   13.43 +override CPPFLAGS := -I$(MINI-OS_ROOT)/include $(CPPFLAGS) -I$(ARCH_SPEC_INC)	$(extra_incl)
   13.44 +
   13.45 +# The name of the architecture specific library.
    13.46 +# On x86_32, for example, this is libx86_32.a.
    13.47 +# $(ARCH_LIB) has to be built in the architecture specific directory.
   13.48 +ARCH_LIB_NAME = $(TARGET_ARCH)
   13.49 +ARCH_LIB := lib$(ARCH_LIB_NAME).a
   13.50 +
   13.51 +# This object contains the entrypoint for startup from Xen.
   13.52 +# $(HEAD_ARCH_OBJ) has to be built in the architecture specific directory.
   13.53 +HEAD_ARCH_OBJ := $(TARGET_ARCH).o
   13.54 +HEAD_OBJ := $(TARGET_ARCH_DIR)/$(HEAD_ARCH_OBJ)
   13.55 +
   13.56 +
   13.57 +%.o: %.c $(HDRS) Makefile $(SPEC_DEPENDS)
   13.58 +	$(CC) $(CFLAGS) $(CPPFLAGS) -c $< -o $@
   13.59 +
   13.60 +%.o: %.S $(HDRS) Makefile $(SPEC_DEPENDS)
   13.61 +	$(CC) $(ASFLAGS) $(CPPFLAGS) -c $< -o $@
   13.62 +
   13.63 +
   13.64 +
   13.65 +
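
minios.mk layers the common DEF_ flags and the per-architecture ARCH_
flags into the final CFLAGS and ASFLAGS. A hypothetical trace for a
debug x86_32 build:

    DEF_CFLAGS  := -fno-builtin -Wall -Werror   # common flags (abridged)
    DEF_CFLAGS  += -g                           # because debug=y
    ARCH_CFLAGS := -m32 -march=i686             # from arch/x86/arch.mk
    CFLAGS      := $(DEF_CFLAGS) $(ARCH_CFLAGS)
    # CFLAGS = -fno-builtin -Wall -Werror -g -m32 -march=i686
    # ASFLAGS is assembled the same way from DEF_ASFLAGS and ARCH_ASFLAGS.
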
    14.1 --- a/extras/mini-os/x86_32.S	Wed Jan 17 19:55:48 2007 -0700
    14.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.3 @@ -1,287 +0,0 @@
    14.4 -#include <os.h>
    14.5 -#include <xen/arch-x86_32.h>
    14.6 -
    14.7 -.section __xen_guest
    14.8 -	.ascii	"GUEST_OS=Mini-OS"
    14.9 -	.ascii	",XEN_VER=xen-3.0"
   14.10 -	.ascii	",VIRT_BASE=0x0" /* &_text from minios_x86_32.lds */
   14.11 -	.ascii	",ELF_PADDR_OFFSET=0x0"
   14.12 -	.ascii	",HYPERCALL_PAGE=0x2"
   14.13 -#ifdef CONFIG_X86_PAE
   14.14 -	.ascii	",PAE=yes"
   14.15 -#else
   14.16 -	.ascii	",PAE=no"
   14.17 -#endif
   14.18 -	.ascii	",LOADER=generic"
   14.19 -	.byte	0
   14.20 -.text
   14.21 -
   14.22 -.globl _start, shared_info, hypercall_page
   14.23 -                        
   14.24 -_start:
   14.25 -        cld
   14.26 -        lss stack_start,%esp
   14.27 -        push %esi 
   14.28 -        call start_kernel
   14.29 -
   14.30 -stack_start:
   14.31 -	.long stack+8192, __KERNEL_SS
   14.32 -
   14.33 -        /* Unpleasant -- the PTE that maps this page is actually overwritten */
   14.34 -        /* to map the real shared-info page! :-)                             */
   14.35 -        .org 0x1000
   14.36 -shared_info:
   14.37 -        .org 0x2000
   14.38 -
   14.39 -hypercall_page:
   14.40 -        .org 0x3000
   14.41 -
   14.42 -ES		= 0x20
   14.43 -ORIG_EAX	= 0x24
   14.44 -EIP		= 0x28
   14.45 -CS		= 0x2C
   14.46 -
   14.47 -#define ENTRY(X) .globl X ; X :
   14.48 -
   14.49 -#define SAVE_ALL \
   14.50 -	cld; \
   14.51 -	pushl %es; \
   14.52 -	pushl %ds; \
   14.53 -	pushl %eax; \
   14.54 -	pushl %ebp; \
   14.55 -	pushl %edi; \
   14.56 -	pushl %esi; \
   14.57 -	pushl %edx; \
   14.58 -	pushl %ecx; \
   14.59 -	pushl %ebx; \
   14.60 -	movl $(__KERNEL_DS),%edx; \
   14.61 -	movl %edx,%ds; \
   14.62 -	movl %edx,%es;
   14.63 -
   14.64 -#define RESTORE_ALL	\
   14.65 -	popl %ebx;	\
   14.66 -	popl %ecx;	\
   14.67 -	popl %edx;	\
   14.68 -	popl %esi;	\
   14.69 -	popl %edi;	\
   14.70 -	popl %ebp;	\
   14.71 -	popl %eax;	\
   14.72 -	popl %ds;	\
   14.73 -	popl %es;	\
   14.74 -	addl $4,%esp;	\
   14.75 -	iret;		\
   14.76 -
   14.77 -ENTRY(divide_error)
   14.78 -	pushl $0		# no error code
   14.79 -	pushl $do_divide_error
   14.80 -do_exception:
   14.81 -    pushl %ds
   14.82 -	pushl %eax
   14.83 -	xorl %eax, %eax
   14.84 -	pushl %ebp
   14.85 -	pushl %edi
   14.86 -	pushl %esi
   14.87 -	pushl %edx
   14.88 -	decl %eax			# eax = -1
   14.89 -	pushl %ecx
   14.90 -	pushl %ebx
   14.91 -	cld
   14.92 -	movl %es, %ecx
   14.93 -	movl ES(%esp), %edi		# get the function address
   14.94 -	movl ORIG_EAX(%esp), %edx	# get the error code
   14.95 -	movl %eax, ORIG_EAX(%esp)
   14.96 -	movl %ecx, ES(%esp)
   14.97 -	movl $(__KERNEL_DS), %ecx
   14.98 -	movl %ecx, %ds
   14.99 -	movl %ecx, %es
  14.100 -	movl %esp,%eax			# pt_regs pointer
  14.101 -    pushl %edx
  14.102 -    pushl %eax
  14.103 -	call *%edi
  14.104 -    jmp ret_from_exception
  14.105 -    
  14.106 -ret_from_exception:
  14.107 -        movb CS(%esp),%cl
  14.108 -	test $2,%cl          # slow return to ring 2 or 3
  14.109 -	jne  safesti
  14.110 -        RESTORE_ALL
  14.111 -
  14.112 -# A note on the "critical region" in our callback handler.
  14.113 -# We want to avoid stacking callback handlers due to events occurring
  14.114 -# during handling of the last event. To do this, we keep events disabled
  14.115 -# until weve done all processing. HOWEVER, we must enable events before
  14.116 -# popping the stack frame (cant be done atomically) and so it would still
  14.117 -# be possible to get enough handler activations to overflow the stack.
  14.118 -# Although unlikely, bugs of that kind are hard to track down, so wed
  14.119 -# like to avoid the possibility.
  14.120 -# So, on entry to the handler we detect whether we interrupted an
  14.121 -# existing activation in its critical region -- if so, we pop the current
  14.122 -# activation and restart the handler using the previous one.
  14.123 -ENTRY(hypervisor_callback)
  14.124 -        pushl %eax
  14.125 -        SAVE_ALL
  14.126 -        movl EIP(%esp),%eax
  14.127 -        cmpl $scrit,%eax
  14.128 -        jb   11f
  14.129 -        cmpl $ecrit,%eax
  14.130 -        jb   critical_region_fixup
  14.131 -11:     push %esp
  14.132 -        call do_hypervisor_callback
  14.133 -        add  $4,%esp
  14.134 -        movl HYPERVISOR_shared_info,%esi
  14.135 -        xorl %eax,%eax
  14.136 -        movb CS(%esp),%cl
  14.137 -    	test $2,%cl          # slow return to ring 2 or 3
  14.138 -        jne  safesti
  14.139 -safesti:movb $0,1(%esi)     # reenable event callbacks
  14.140 -scrit:  /**** START OF CRITICAL REGION ****/
  14.141 -        testb $0xFF,(%esi)
  14.142 -        jnz  14f              # process more events if necessary...
  14.143 -        RESTORE_ALL
  14.144 -14:     movb $1,1(%esi)
  14.145 -        jmp  11b
  14.146 -ecrit:  /**** END OF CRITICAL REGION ****/
  14.147 -# [How we do the fixup]. We want to merge the current stack frame with the
  14.148 -# just-interrupted frame. How we do this depends on where in the critical
  14.149 -# region the interrupted handler was executing, and so how many saved
  14.150 -# registers are in each frame. We do this quickly using the lookup table
  14.151 -# 'critical_fixup_table'. For each byte offset in the critical region, it
  14.152 -# provides the number of bytes which have already been popped from the
  14.153 -# interrupted stack frame. 
  14.154 -critical_region_fixup:
  14.155 -        addl $critical_fixup_table-scrit,%eax
  14.156 -        movzbl (%eax),%eax    # %eax contains num bytes popped
  14.157 -        mov  %esp,%esi
  14.158 -        add  %eax,%esi        # %esi points at end of src region
  14.159 -        mov  %esp,%edi
  14.160 -        add  $0x34,%edi       # %edi points at end of dst region
  14.161 -        mov  %eax,%ecx
  14.162 -        shr  $2,%ecx          # convert words to bytes
  14.163 -        je   16f              # skip loop if nothing to copy
  14.164 -15:     subl $4,%esi          # pre-decrementing copy loop
  14.165 -        subl $4,%edi
  14.166 -        movl (%esi),%eax
  14.167 -        movl %eax,(%edi)
  14.168 -        loop 15b
  14.169 -16:     movl %edi,%esp        # final %edi is top of merged stack
  14.170 -        jmp  11b
  14.171 -         
  14.172 -critical_fixup_table:        
  14.173 -        .byte 0x00,0x00,0x00                  # testb $0xff,(%esi)
  14.174 -        .byte 0x00,0x00                       # jne  14f
  14.175 -        .byte 0x00                            # pop  %ebx
  14.176 -        .byte 0x04                            # pop  %ecx
  14.177 -        .byte 0x08                            # pop  %edx
  14.178 -        .byte 0x0c                            # pop  %esi
  14.179 -        .byte 0x10                            # pop  %edi
  14.180 -        .byte 0x14                            # pop  %ebp
  14.181 -        .byte 0x18                            # pop  %eax
  14.182 -        .byte 0x1c                            # pop  %ds
  14.183 -        .byte 0x20                            # pop  %es
  14.184 -        .byte 0x24,0x24,0x24                  # add  $4,%esp
  14.185 -        .byte 0x28                            # iret
  14.186 -        .byte 0x00,0x00,0x00,0x00             # movb $1,1(%esi)
  14.187 -        .byte 0x00,0x00                       # jmp  11b
  14.188 -       
  14.189 -# Hypervisor uses this for application faults while it executes.
  14.190 -ENTRY(failsafe_callback)
  14.191 -      pop  %ds
  14.192 -      pop  %es
  14.193 -      pop  %fs
  14.194 -      pop  %gs
  14.195 -      iret
  14.196 -                
  14.197 -ENTRY(coprocessor_error)
  14.198 -	pushl $0
  14.199 -	pushl $do_coprocessor_error
  14.200 -	jmp do_exception
  14.201 -
  14.202 -ENTRY(simd_coprocessor_error)
  14.203 -	pushl $0
  14.204 -	pushl $do_simd_coprocessor_error
  14.205 -	jmp do_exception
  14.206 -
  14.207 -ENTRY(device_not_available)
  14.208 -        iret
  14.209 -
  14.210 -ENTRY(debug)
  14.211 -	pushl $0
  14.212 -	pushl $do_debug
  14.213 -	jmp do_exception
  14.214 -
  14.215 -ENTRY(int3)
  14.216 -	pushl $0
  14.217 -	pushl $do_int3
  14.218 -	jmp do_exception
  14.219 -
  14.220 -ENTRY(overflow)
  14.221 -	pushl $0
  14.222 -	pushl $do_overflow
  14.223 -	jmp do_exception
  14.224 -
  14.225 -ENTRY(bounds)
  14.226 -	pushl $0
  14.227 -	pushl $do_bounds
  14.228 -	jmp do_exception
  14.229 -
  14.230 -ENTRY(invalid_op)
  14.231 -	pushl $0
  14.232 -	pushl $do_invalid_op
  14.233 -	jmp do_exception
  14.234 -
  14.235 -
  14.236 -ENTRY(coprocessor_segment_overrun)
  14.237 -	pushl $0
  14.238 -	pushl $do_coprocessor_segment_overrun
  14.239 -	jmp do_exception
  14.240 -
  14.241 -
  14.242 -ENTRY(invalid_TSS)
  14.243 -	pushl $do_invalid_TSS
  14.244 -	jmp do_exception
  14.245 -
  14.246 -
  14.247 -ENTRY(segment_not_present)
  14.248 -	pushl $do_segment_not_present
  14.249 -	jmp do_exception
  14.250 -
  14.251 -
  14.252 -ENTRY(stack_segment)
  14.253 -	pushl $do_stack_segment
  14.254 -	jmp do_exception
  14.255 -
  14.256 -
  14.257 -ENTRY(general_protection)
  14.258 -	pushl $do_general_protection
  14.259 -	jmp do_exception
  14.260 -
  14.261 -
  14.262 -ENTRY(alignment_check)
  14.263 -	pushl $do_alignment_check
  14.264 -	jmp do_exception
  14.265 -
  14.266 -
  14.267 -ENTRY(page_fault)
  14.268 -    pushl $do_page_fault
  14.269 -    jmp do_exception
  14.270 -    
  14.271 -ENTRY(machine_check)
  14.272 -	pushl $0
  14.273 -	pushl $do_machine_check
  14.274 -	jmp do_exception
  14.275 -
  14.276 -
  14.277 -ENTRY(spurious_interrupt_bug)
  14.278 -	pushl $0
  14.279 -	pushl $do_spurious_interrupt_bug
  14.280 -	jmp do_exception
  14.281 -
  14.282 -
  14.283 -
  14.284 -ENTRY(thread_starter)
  14.285 -    popl %eax
  14.286 -    popl %ebx
  14.287 -    pushl %eax
  14.288 -    call *%ebx
  14.289 -    call exit_thread 
  14.290 -    
    15.1 --- a/extras/mini-os/x86_64.S	Wed Jan 17 19:55:48 2007 -0700
    15.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.3 @@ -1,385 +0,0 @@
    15.4 -#include <os.h>
    15.5 -#include <xen/features.h>
    15.6 -
    15.7 -.section __xen_guest
    15.8 -	.ascii	"GUEST_OS=Mini-OS"
    15.9 -	.ascii	",XEN_VER=xen-3.0"
   15.10 -	.ascii	",VIRT_BASE=0x0" /* &_text from minios_x86_64.lds */
   15.11 -	.ascii	",ELF_PADDR_OFFSET=0x0"
   15.12 -	.ascii	",HYPERCALL_PAGE=0x2"
   15.13 -	.ascii	",LOADER=generic"
   15.14 -	.byte	0
   15.15 -.text
   15.16 -
   15.17 -#define ENTRY(X) .globl X ; X :
   15.18 -.globl _start, shared_info, hypercall_page
   15.19 -
   15.20 -
   15.21 -_start:
   15.22 -        cld
   15.23 -        movq stack_start(%rip),%rsp
   15.24 -        movq %rsi,%rdi
   15.25 -        call start_kernel
   15.26 -
   15.27 -stack_start:
   15.28 -        .quad stack+8192
   15.29 -
   15.30 -        /* Unpleasant -- the PTE that maps this page is actually overwritten */
   15.31 -        /* to map the real shared-info page! :-)                             */
   15.32 -        .org 0x1000
   15.33 -shared_info:
   15.34 -        .org 0x2000
   15.35 -
   15.36 -hypercall_page:
   15.37 -        .org 0x3000
   15.38 -
   15.39 -
   15.40 -/* Offsets into shared_info_t. */                
   15.41 -#define evtchn_upcall_pending		/* 0 */
   15.42 -#define evtchn_upcall_mask		1
   15.43 -
   15.44 -NMI_MASK = 0x80000000
   15.45 -
   15.46 -#define RDI 112
   15.47 -#define ORIG_RAX 120       /* + error_code */ 
   15.48 -#define EFLAGS 144
   15.49 -
   15.50 -#define REST_SKIP 6*8			
   15.51 -.macro SAVE_REST
   15.52 -	subq $REST_SKIP,%rsp
   15.53 -#	CFI_ADJUST_CFA_OFFSET	REST_SKIP
   15.54 -	movq %rbx,5*8(%rsp) 
   15.55 -#	CFI_REL_OFFSET	rbx,5*8
   15.56 -	movq %rbp,4*8(%rsp) 
   15.57 -#	CFI_REL_OFFSET	rbp,4*8
   15.58 -	movq %r12,3*8(%rsp) 
   15.59 -#	CFI_REL_OFFSET	r12,3*8
   15.60 -	movq %r13,2*8(%rsp) 
   15.61 -#	CFI_REL_OFFSET	r13,2*8
   15.62 -	movq %r14,1*8(%rsp) 
   15.63 -#	CFI_REL_OFFSET	r14,1*8
   15.64 -	movq %r15,(%rsp) 
   15.65 -#	CFI_REL_OFFSET	r15,0*8
   15.66 -.endm		
   15.67 -
   15.68 -
   15.69 -.macro RESTORE_REST
   15.70 -	movq (%rsp),%r15
   15.71 -#	CFI_RESTORE r15
   15.72 -	movq 1*8(%rsp),%r14
   15.73 -#	CFI_RESTORE r14
   15.74 -	movq 2*8(%rsp),%r13
   15.75 -#	CFI_RESTORE r13
   15.76 -	movq 3*8(%rsp),%r12
   15.77 -#	CFI_RESTORE r12
   15.78 -	movq 4*8(%rsp),%rbp
   15.79 -#	CFI_RESTORE rbp
   15.80 -	movq 5*8(%rsp),%rbx
   15.81 -#	CFI_RESTORE rbx
   15.82 -	addq $REST_SKIP,%rsp
   15.83 -#	CFI_ADJUST_CFA_OFFSET	-(REST_SKIP)
   15.84 -.endm
   15.85 -
   15.86 -
   15.87 -#define ARG_SKIP 9*8
   15.88 -.macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0
   15.89 -	.if \skipr11
   15.90 -	.else
   15.91 -	movq (%rsp),%r11
   15.92 -#	CFI_RESTORE r11
   15.93 -	.endif
   15.94 -	.if \skipr8910
   15.95 -	.else
   15.96 -	movq 1*8(%rsp),%r10
   15.97 -#	CFI_RESTORE r10
   15.98 -	movq 2*8(%rsp),%r9
   15.99 -#	CFI_RESTORE r9
  15.100 -	movq 3*8(%rsp),%r8
  15.101 -#	CFI_RESTORE r8
  15.102 -	.endif
  15.103 -	.if \skiprax
  15.104 -	.else
  15.105 -	movq 4*8(%rsp),%rax
  15.106 -#	CFI_RESTORE rax
  15.107 -	.endif
  15.108 -	.if \skiprcx
  15.109 -	.else
  15.110 -	movq 5*8(%rsp),%rcx
  15.111 -#	CFI_RESTORE rcx
  15.112 -	.endif
  15.113 -	.if \skiprdx
  15.114 -	.else
  15.115 -	movq 6*8(%rsp),%rdx
  15.116 -#	CFI_RESTORE rdx
  15.117 -	.endif
  15.118 -	movq 7*8(%rsp),%rsi
  15.119 -#	CFI_RESTORE rsi
  15.120 -	movq 8*8(%rsp),%rdi
  15.121 -#	CFI_RESTORE rdi
  15.122 -	.if ARG_SKIP+\addskip > 0
  15.123 -	addq $ARG_SKIP+\addskip,%rsp
  15.124 -#	CFI_ADJUST_CFA_OFFSET	-(ARG_SKIP+\addskip)
  15.125 -	.endif
  15.126 -.endm	
  15.127 -
  15.128 -
  15.129 -.macro HYPERVISOR_IRET flag
  15.130 -#    testb $3,1*8(%rsp)    /* Don't need to do that in Mini-os, as */
  15.131 -#	jnz   2f               /* there is no userspace? */
  15.132 -	testl $NMI_MASK,2*8(%rsp)
  15.133 -	jnz   2f
  15.134 -
  15.135 -	testb $1,(xen_features+XENFEAT_supervisor_mode_kernel)
  15.136 -	jnz   1f
  15.137 -
  15.138 -	/* Direct iret to kernel space. Correct CS and SS. */
  15.139 -	orb   $3,1*8(%rsp)
  15.140 -	orb   $3,4*8(%rsp)
  15.141 -1:	iretq
  15.142 -
  15.143 -2:	/* Slow iret via hypervisor. */
  15.144 -	andl  $~NMI_MASK, 16(%rsp)
  15.145 -	pushq $\flag
  15.146 -	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
  15.147 -.endm
  15.148 -
  15.149 -/*
  15.150 - * Exception entry point. This expects an error code/orig_rax on the stack
  15.151 - * and the exception handler in %rax.	
  15.152 - */ 		  				
  15.153 -ENTRY(error_entry)
  15.154 -#	_frame RDI
  15.155 -	/* rdi slot contains rax, oldrax contains error code */
  15.156 -	cld	
  15.157 -	subq  $14*8,%rsp
  15.158 -#	CFI_ADJUST_CFA_OFFSET	(14*8)
  15.159 -	movq %rsi,13*8(%rsp)
  15.160 -#	CFI_REL_OFFSET	rsi,RSI
  15.161 -	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
  15.162 -	movq %rdx,12*8(%rsp)
  15.163 -#	CFI_REL_OFFSET	rdx,RDX
  15.164 -	movq %rcx,11*8(%rsp)
  15.165 -#	CFI_REL_OFFSET	rcx,RCX
  15.166 -	movq %rsi,10*8(%rsp)	/* store rax */ 
  15.167 -#	CFI_REL_OFFSET	rax,RAX
  15.168 -	movq %r8, 9*8(%rsp)
  15.169 -#	CFI_REL_OFFSET	r8,R8
  15.170 -	movq %r9, 8*8(%rsp)
  15.171 -#	CFI_REL_OFFSET	r9,R9
  15.172 -	movq %r10,7*8(%rsp)
  15.173 -#	CFI_REL_OFFSET	r10,R10
  15.174 -	movq %r11,6*8(%rsp)
  15.175 -#	CFI_REL_OFFSET	r11,R11
  15.176 -	movq %rbx,5*8(%rsp) 
  15.177 -#	CFI_REL_OFFSET	rbx,RBX
  15.178 -	movq %rbp,4*8(%rsp) 
  15.179 -#	CFI_REL_OFFSET	rbp,RBP
  15.180 -	movq %r12,3*8(%rsp) 
  15.181 -#	CFI_REL_OFFSET	r12,R12
  15.182 -	movq %r13,2*8(%rsp) 
  15.183 -#	CFI_REL_OFFSET	r13,R13
  15.184 -	movq %r14,1*8(%rsp) 
  15.185 -#	CFI_REL_OFFSET	r14,R14
  15.186 -	movq %r15,(%rsp) 
  15.187 -#	CFI_REL_OFFSET	r15,R15
  15.188 -#if 0        
  15.189 -	cmpl $__KERNEL_CS,CS(%rsp)
  15.190 -	je  error_kernelspace
  15.191 -#endif        
  15.192 -error_call_handler:
  15.193 -	movq %rdi, RDI(%rsp)            
  15.194 -	movq %rsp,%rdi
  15.195 -	movq ORIG_RAX(%rsp),%rsi	# get error code 
  15.196 -	movq $-1,ORIG_RAX(%rsp)
  15.197 -	call *%rax
  15.198 -
  15.199 -.macro zeroentry sym
  15.200 -#	INTR_FRAME
  15.201 -    movq (%rsp),%rcx
  15.202 -    movq 8(%rsp),%r11
  15.203 -    addq $0x10,%rsp /* skip rcx and r11 */
  15.204 -	pushq $0	/* push error code/oldrax */ 
  15.205 -#	CFI_ADJUST_CFA_OFFSET 8
  15.206 -	pushq %rax	/* push real oldrax to the rdi slot */ 
  15.207 -#	CFI_ADJUST_CFA_OFFSET 8
  15.208 -	leaq  \sym(%rip),%rax
  15.209 -	jmp error_entry
  15.210 -#	CFI_ENDPROC
  15.211 -.endm	
  15.212 -
  15.213 -.macro errorentry sym
  15.214 -#	XCPT_FRAME
  15.215 -        movq (%rsp),%rcx
  15.216 -        movq 8(%rsp),%r11
  15.217 -        addq $0x10,%rsp /* rsp points to the error code */
  15.218 -	pushq %rax
  15.219 -#	CFI_ADJUST_CFA_OFFSET 8
  15.220 -	leaq  \sym(%rip),%rax
  15.221 -	jmp error_entry
  15.222 -#	CFI_ENDPROC
  15.223 -.endm
  15.224 -
  15.225 -#define XEN_GET_VCPU_INFO(reg)	movq HYPERVISOR_shared_info,reg
  15.226 -#define XEN_PUT_VCPU_INFO(reg)
  15.227 -#define XEN_PUT_VCPU_INFO_fixup
  15.228 -#define XEN_LOCKED_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
  15.229 -#define XEN_LOCKED_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
  15.230 -#define XEN_TEST_PENDING(reg)	testb $0xFF,evtchn_upcall_pending(reg)
  15.231 -
  15.232 -#define XEN_BLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
  15.233 -                    			XEN_LOCKED_BLOCK_EVENTS(reg)	; \
  15.234 -    				            XEN_PUT_VCPU_INFO(reg)
  15.235 -
  15.236 -#define XEN_UNBLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
  15.237 -                				XEN_LOCKED_UNBLOCK_EVENTS(reg)	; \
  15.238 -    			            	XEN_PUT_VCPU_INFO(reg)
  15.239 -
  15.240 -
  15.241 -
  15.242 -ENTRY(hypervisor_callback)
  15.243 -    zeroentry hypervisor_callback2
  15.244 -
  15.245 -ENTRY(hypervisor_callback2)
  15.246 -        movq %rdi, %rsp 
  15.247 -11:     movq %gs:8,%rax
  15.248 -        incl %gs:0
  15.249 -        cmovzq %rax,%rsp
  15.250 -        pushq %rdi
  15.251 -        call do_hypervisor_callback 
  15.252 -        popq %rsp
  15.253 -        decl %gs:0
  15.254 -        jmp error_exit
  15.255 -
  15.256 -#        ALIGN
  15.257 -restore_all_enable_events:  
  15.258 -	XEN_UNBLOCK_EVENTS(%rsi)        # %rsi is already set up...
  15.259 -
  15.260 -scrit:	/**** START OF CRITICAL REGION ****/
  15.261 -	XEN_TEST_PENDING(%rsi)
  15.262 -	jnz  14f			# process more events if necessary...
  15.263 -	XEN_PUT_VCPU_INFO(%rsi)
  15.264 -        RESTORE_ARGS 0,8,0
  15.265 -        HYPERVISOR_IRET 0
  15.266 -        
  15.267 -14:	XEN_LOCKED_BLOCK_EVENTS(%rsi)
  15.268 -	XEN_PUT_VCPU_INFO(%rsi)
  15.269 -	SAVE_REST
  15.270 -        movq %rsp,%rdi                  # set the argument again
  15.271 -	jmp  11b
  15.272 -ecrit:  /**** END OF CRITICAL REGION ****/
  15.273 -
  15.274 -
  15.275 -retint_kernel:
  15.276 -retint_restore_args:
  15.277 -	movl EFLAGS-REST_SKIP(%rsp), %eax
  15.278 -	shr $9, %eax			# EAX[0] == IRET_EFLAGS.IF
  15.279 -	XEN_GET_VCPU_INFO(%rsi)
  15.280 -	andb evtchn_upcall_mask(%rsi),%al
  15.281 -	andb $1,%al			# EAX[0] == IRET_EFLAGS.IF & event_mask
  15.282 -	jnz restore_all_enable_events	#        != 0 => enable event delivery
  15.283 -	XEN_PUT_VCPU_INFO(%rsi)
  15.284 -		
  15.285 -	RESTORE_ARGS 0,8,0
  15.286 -	HYPERVISOR_IRET 0
  15.287 -
  15.288 -
  15.289 -error_exit:		
  15.290 -	RESTORE_REST
  15.291 -/*	cli */
  15.292 -	XEN_BLOCK_EVENTS(%rsi)		
  15.293 -	jmp retint_kernel
  15.294 -
  15.295 -
  15.296 -
  15.297 -ENTRY(failsafe_callback)
  15.298 -        popq  %rcx
  15.299 -        popq  %r11
  15.300 -        iretq
  15.301 -
  15.302 -
  15.303 -ENTRY(coprocessor_error)
  15.304 -        zeroentry do_coprocessor_error
  15.305 -
  15.306 -
  15.307 -ENTRY(simd_coprocessor_error)
  15.308 -        zeroentry do_simd_coprocessor_error
  15.309 -
  15.310 -
  15.311 -ENTRY(device_not_available)
  15.312 -        zeroentry do_device_not_available
  15.313 -
  15.314 -
  15.315 -ENTRY(debug)
  15.316 -#       INTR_FRAME
  15.317 -#       CFI_ADJUST_CFA_OFFSET 8 */
  15.318 -        zeroentry do_debug
  15.319 -#       CFI_ENDPROC
  15.320 -
  15.321 -
  15.322 -ENTRY(int3)
  15.323 -#       INTR_FRAME
  15.324 -#       CFI_ADJUST_CFA_OFFSET 8 */
  15.325 -        zeroentry do_int3
  15.326 -#       CFI_ENDPROC
  15.327 -
  15.328 -ENTRY(overflow)
  15.329 -        zeroentry do_overflow
  15.330 -
  15.331 -
  15.332 -ENTRY(bounds)
  15.333 -        zeroentry do_bounds
  15.334 -    
  15.335 -    
  15.336 -ENTRY(invalid_op)
  15.337 -        zeroentry do_invalid_op
  15.338 -
  15.339 -
  15.340 -ENTRY(coprocessor_segment_overrun)
  15.341 -        zeroentry do_coprocessor_segment_overrun
  15.342 -
  15.343 -
  15.344 -ENTRY(invalid_TSS)
  15.345 -        errorentry do_invalid_TSS
  15.346 -
  15.347 -
  15.348 -ENTRY(segment_not_present)
  15.349 -        errorentry do_segment_not_present
  15.350 -
  15.351 -
  15.352 -/* runs on exception stack */
  15.353 -ENTRY(stack_segment)
  15.354 -#       XCPT_FRAME
  15.355 -        errorentry do_stack_segment
  15.356 -#       CFI_ENDPROC
  15.357 -                    
  15.358 -
  15.359 -ENTRY(general_protection)
  15.360 -        errorentry do_general_protection
  15.361 -
  15.362 -
  15.363 -ENTRY(alignment_check)
  15.364 -        errorentry do_alignment_check
  15.365 -
  15.366 -
  15.367 -ENTRY(divide_error)
  15.368 -        zeroentry do_divide_error
  15.369 -
  15.370 -
  15.371 -ENTRY(spurious_interrupt_bug)
  15.372 -        zeroentry do_spurious_interrupt_bug
  15.373 -            
  15.374 -
  15.375 -ENTRY(page_fault)
  15.376 -        errorentry do_page_fault
  15.377 -
  15.378 -
  15.379 -
  15.380 -
  15.381 -
  15.382 -ENTRY(thread_starter)
  15.383 -        popq %rdi
  15.384 -        popq %rbx
  15.385 -        call *%rbx
  15.386 -        call exit_thread 
  15.387 -        
  15.388 -
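
The restore_all_enable_events path is the subtle part of the deleted x86_64 file: re-enabling event delivery races with a freshly arriving upcall, so pending events must be re-checked inside the scrit/ecrit critical region before the final iret. A loose pseudo-C rendition of that loop (vcpu stands in for the shared-info vcpu_info fields; this is a sketch, not the literal control flow):

    for (;;) {
            vcpu->evtchn_upcall_mask = 0;       /* unblock events       */
            if (!vcpu->evtchn_upcall_pending)   /* re-check: a new one  */
                    break;                      /* may have raced in    */
            vcpu->evtchn_upcall_mask = 1;       /* block again and      */
            do_hypervisor_callback(regs);       /* handle the arrival   */
    }
    /* RESTORE_ARGS; HYPERVISOR_IRET */
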
    16.1 --- a/linux-2.6-xen-sparse/arch/i386/kernel/microcode-xen.c	Wed Jan 17 19:55:48 2007 -0700
    16.2 +++ b/linux-2.6-xen-sparse/arch/i386/kernel/microcode-xen.c	Wed Jan 17 21:31:22 2007 -0700
    16.3 @@ -85,7 +85,7 @@ static ssize_t microcode_write (struct f
    16.4  {
    16.5  	ssize_t ret;
    16.6  
    16.7 -	if (len < DEFAULT_UCODE_TOTALSIZE) {
    16.8 +	if (len < MC_HEADER_SIZE) {
    16.9  		printk(KERN_ERR "microcode: not enough data\n"); 
   16.10  		return -EINVAL;
   16.11  	}
    17.1 --- a/linux-2.6-xen-sparse/arch/i386/mm/fault-xen.c	Wed Jan 17 19:55:48 2007 -0700
    17.2 +++ b/linux-2.6-xen-sparse/arch/i386/mm/fault-xen.c	Wed Jan 17 21:31:22 2007 -0700
    17.3 @@ -232,9 +232,12 @@ static void dump_fault_path(unsigned lon
    17.4  		p += (address >> 21) * 2;
    17.5  		printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n", 
    17.6  		       page, p[1], p[0]);
    17.7 -#ifndef CONFIG_HIGHPTE
    17.8 +		mfn  = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
    17.9 +#ifdef CONFIG_HIGHPTE
   17.10 +		if (mfn_to_pfn(mfn) >= highstart_pfn)
   17.11 +			return;
   17.12 +#endif
   17.13  		if (p[0] & 1) {
   17.14 -			mfn  = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
   17.15  			page = mfn_to_pfn(mfn) << PAGE_SHIFT; 
   17.16  			p  = (unsigned long *) __va(page);
   17.17  			address &= 0x001fffff;
   17.18 @@ -242,7 +245,6 @@ static void dump_fault_path(unsigned lon
   17.19  			printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
   17.20  			       page, p[1], p[0]);
   17.21  		}
   17.22 -#endif
   17.23  	}
   17.24  }
   17.25  #else
   17.26 @@ -254,13 +256,16 @@ static void dump_fault_path(unsigned lon
   17.27  	page = ((unsigned long *) __va(page))[address >> 22];
   17.28  	printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
   17.29  	       machine_to_phys(page));
   17.30 +#ifdef CONFIG_HIGHPTE
   17.31  	/*
   17.32  	 * We must not directly access the pte in the highpte
   17.33 -	 * case, the page table might be allocated in highmem.
   17.34 +	 * case if the page table is located in highmem.
   17.35  	 * And lets rather not kmap-atomic the pte, just in case
   17.36  	 * it's allocated already.
   17.37  	 */
   17.38 -#ifndef CONFIG_HIGHPTE
   17.39 +	if ((page >> PAGE_SHIFT) >= highstart_pfn)
   17.40 +		return;
   17.41 +#endif
   17.42  	if (page & 1) {
   17.43  		page &= PAGE_MASK;
   17.44  		address &= 0x003ff000;
   17.45 @@ -269,7 +274,6 @@ static void dump_fault_path(unsigned lon
   17.46  		printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
   17.47  		       machine_to_phys(page));
   17.48  	}
   17.49 -#endif
   17.50  }
   17.51  #endif
   17.52  
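
The dump_fault_path() change replaces a blanket "#ifndef CONFIG_HIGHPTE" compile-out with a runtime test, so page tables that happen to live in lowmem are still dumped on HIGHPTE kernels. The shape of the new guard, assuming highstart_pfn marks the first highmem page as usual:

    #ifdef CONFIG_HIGHPTE
            if (mfn_to_pfn(mfn) >= highstart_pfn)
                    return;   /* __va() is invalid for highmem PTE pages */
    #endif
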
    18.1 --- a/tools/libfsimage/common/Makefile	Wed Jan 17 19:55:48 2007 -0700
    18.2 +++ b/tools/libfsimage/common/Makefile	Wed Jan 17 21:31:22 2007 -0700
    18.3 @@ -1,7 +1,7 @@
    18.4  XEN_ROOT = ../../..
    18.5  include $(XEN_ROOT)/tools/Rules.mk
    18.6  
    18.7 -MAJOR = 1.0
    18.8 +MAJOR = 1.1
    18.9  MINOR = 0
   18.10  
   18.11  CFLAGS += -Werror -Wp,-MD,.$(@F).d
    19.1 --- a/tools/libfsimage/common/fsimage.c	Wed Jan 17 19:55:48 2007 -0700
    19.2 +++ b/tools/libfsimage/common/fsimage.c	Wed Jan 17 21:31:22 2007 -0700
    19.3 @@ -74,7 +74,7 @@ void fsi_close_fsimage(fsi_t *fsi)
    19.4  	pthread_mutex_lock(&fsi_lock);
    19.5          fsi->f_plugin->fp_ops->fpo_umount(fsi);
    19.6          (void) close(fsi->f_fd);
    19.7 -	fsip_fs_free(fsi);
    19.8 +	free(fsi);
    19.9  	pthread_mutex_unlock(&fsi_lock);
   19.10  }
   19.11  
    20.1 --- a/tools/libfsimage/common/fsimage_grub.c	Wed Jan 17 19:55:48 2007 -0700
    20.2 +++ b/tools/libfsimage/common/fsimage_grub.c	Wed Jan 17 21:31:22 2007 -0700
    20.3 @@ -193,6 +193,7 @@ fsig_mount(fsi_t *fsi, const char *path)
    20.4  static int
    20.5  fsig_umount(fsi_t *fsi)
    20.6  {
    20.7 +	free(fsi->f_data);
    20.8  	return (0);
    20.9  }
   20.10  
   20.11 @@ -250,6 +251,7 @@ fsig_read(fsi_file_t *ffi, void *buf, si
   20.12  static int
   20.13  fsig_close(fsi_file_t *ffi)
   20.14  {
   20.15 +	free(ffi->ff_data);
   20.16  	fsip_file_free(ffi);
   20.17  	return (0);
   20.18  }
    21.1 --- a/tools/libfsimage/common/fsimage_plugin.c	Wed Jan 17 19:55:48 2007 -0700
    21.2 +++ b/tools/libfsimage/common/fsimage_plugin.c	Wed Jan 17 21:31:22 2007 -0700
    21.3 @@ -40,13 +40,6 @@ fsip_fs_set_data(fsi_t *fsi, void *data)
    21.4  	fsi->f_data = data;
    21.5  }
    21.6  
    21.7 -void
    21.8 -fsip_fs_free(fsi_t *fsi)
    21.9 -{
   21.10 -	free(fsi->f_data);
   21.11 -	free(fsi);
   21.12 -}
   21.13 -
   21.14  fsi_file_t *
   21.15  fsip_file_alloc(fsi_t *fsi, void *data)
   21.16  {
   21.17 @@ -64,7 +57,6 @@ fsip_file_alloc(fsi_t *fsi, void *data)
   21.18  void
   21.19  fsip_file_free(fsi_file_t *ffi)
   21.20  {
   21.21 -	free(ffi->ff_data);
   21.22  	free(ffi);
   21.23  }
   21.24  
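
With fsip_fs_free() removed, ownership of plugin-private data moves out of the common layer: each plugin now frees its own f_data/ff_data in its umount and close hooks (see the fsig_umount, fsig_close and ext2lib_umount hunks), while the common layer frees only the handle itself. A sketch of the contract for a hypothetical plugin:

    /* Hypothetical plugin hook under the new contract: the plugin frees
     * what it registered via fsip_fs_set_data(); the common layer then
     * just does free(fsi). */
    static int
    myfs_umount(fsi_t *fsi)
    {
            free(fsip_fs_data(fsi));        /* plugin-owned mount data */
            return (0);
    }
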
    22.1 --- a/tools/libfsimage/common/fsimage_plugin.h	Wed Jan 17 19:55:48 2007 -0700
    22.2 +++ b/tools/libfsimage/common/fsimage_plugin.h	Wed Jan 17 21:31:22 2007 -0700
    22.3 @@ -50,11 +50,10 @@ typedef fsi_plugin_ops_t *
    22.4      (*fsi_plugin_init_t)(int, fsi_plugin_t *, const char **);
    22.5  
    22.6  void fsip_fs_set_data(fsi_t *, void *);
    22.7 -void fsip_fs_free(fsi_t *);
    22.8  fsi_file_t *fsip_file_alloc(fsi_t *, void *);
    22.9  void fsip_file_free(fsi_file_t *);
   22.10 -fsi_t * fsip_fs(fsi_file_t *ffi);
   22.11 -uint64_t fsip_fs_offset(fsi_t *fsi);
   22.12 +fsi_t *fsip_fs(fsi_file_t *);
   22.13 +uint64_t fsip_fs_offset(fsi_t *);
   22.14  void *fsip_fs_data(fsi_t *);
   22.15  void *fsip_file_data(fsi_file_t *);
   22.16  
    23.1 --- a/tools/libfsimage/common/mapfile-GNU	Wed Jan 17 19:55:48 2007 -0700
    23.2 +++ b/tools/libfsimage/common/mapfile-GNU	Wed Jan 17 21:31:22 2007 -0700
    23.3 @@ -1,5 +1,5 @@
    23.4  VERSION {
    23.5 -	libfsimage.so.1.1 {
    23.6 +	libfsimage.so.1.0 {
    23.7  		global:
    23.8  			fsi_open_fsimage;
    23.9  			fsi_close_fsimage;
   23.10 @@ -10,7 +10,6 @@ VERSION {
   23.11  			fsi_pread_file;
   23.12  	
   23.13  			fsip_fs_set_data;
   23.14 -			fsip_fs_free;
   23.15  			fsip_file_alloc;
   23.16  			fsip_file_free;
   23.17  			fsip_fs;
    24.1 --- a/tools/libfsimage/common/mapfile-SunOS	Wed Jan 17 19:55:48 2007 -0700
    24.2 +++ b/tools/libfsimage/common/mapfile-SunOS	Wed Jan 17 21:31:22 2007 -0700
    24.3 @@ -1,4 +1,4 @@
    24.4 -libfsimage.so.1.1 {
    24.5 +libfsimage.so.1.0 {
    24.6  	global:
    24.7  		fsi_open_fsimage;
    24.8  		fsi_close_fsimage;
    24.9 @@ -9,7 +9,6 @@ libfsimage.so.1.1 {
   24.10  		fsi_pread_file;
   24.11  
   24.12  		fsip_fs_set_data;
   24.13 -		fsip_fs_free;
   24.14  		fsip_file_alloc;
   24.15  		fsip_file_free;
   24.16  		fsip_fs;
    25.1 --- a/tools/libfsimage/ext2fs-lib/ext2fs-lib.c	Wed Jan 17 19:55:48 2007 -0700
    25.2 +++ b/tools/libfsimage/ext2fs-lib/ext2fs-lib.c	Wed Jan 17 21:31:22 2007 -0700
    25.3 @@ -58,9 +58,11 @@ ext2lib_umount(fsi_t *fsi)
    25.4  {
    25.5  	ext2_filsys *fs = fsip_fs_data(fsi);
    25.6  	if (ext2fs_close(*fs) != 0) {
    25.7 +		free(fs);
    25.8  		errno = EINVAL;
    25.9  		return (-1);
   25.10  	}
   25.11 +	free(fs);
   25.12  	return (0);
   25.13  }
   25.14  
    26.1 --- a/tools/libxc/xc_linux_build.c	Wed Jan 17 19:55:48 2007 -0700
    26.2 +++ b/tools/libxc/xc_linux_build.c	Wed Jan 17 21:31:22 2007 -0700
    26.3 @@ -741,7 +741,7 @@ static int setup_guest(int xc_handle,
    26.4          /*
    26.5           * Enable shadow translate mode. This must happen after
    26.6           * populate physmap because the p2m reservation is based on
    26.7 -         * the domains current memory allocation.
    26.8 +         * the domain's current memory allocation.
    26.9           */
   26.10          if ( xc_shadow_control(xc_handle, dom,
   26.11                             XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE,
    27.1 --- a/tools/libxc/xc_linux_restore.c	Wed Jan 17 19:55:48 2007 -0700
    27.2 +++ b/tools/libxc/xc_linux_restore.c	Wed Jan 17 21:31:22 2007 -0700
    27.3 @@ -12,7 +12,7 @@
    27.4  #include "xg_private.h"
    27.5  #include "xg_save_restore.h"
    27.6  
    27.7 -/* max mfn of the whole machine */
    27.8 +/* max mfn of the current host machine */
    27.9  static unsigned long max_mfn;
   27.10  
   27.11  /* virtual starting address of the hypervisor */
   27.12 @@ -30,6 +30,9 @@ static xen_pfn_t *live_p2m = NULL;
   27.13  /* A table mapping each PFN to its new MFN. */
   27.14  static xen_pfn_t *p2m = NULL;
   27.15  
   27.16 +/* A table of P2M mappings in the current region */
   27.17 +static xen_pfn_t *p2m_batch = NULL;
   27.18 +
   27.19  
   27.20  static ssize_t
   27.21  read_exact(int fd, void *buf, size_t count)
   27.22 @@ -57,46 +60,78 @@ read_exact(int fd, void *buf, size_t cou
   27.23  ** This function inverts that operation, replacing the pfn values with
   27.24  ** the (now known) appropriate mfn values.
   27.25  */
   27.26 -static int uncanonicalize_pagetable(unsigned long type, void *page)
   27.27 +static int uncanonicalize_pagetable(int xc_handle, uint32_t dom, 
   27.28 +                                    unsigned long type, void *page)
   27.29  {
   27.30      int i, pte_last;
   27.31      unsigned long pfn;
   27.32      uint64_t pte;
   27.33 +    int nr_mfns = 0; 
   27.34  
   27.35      pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8);
   27.36  
   27.37 -    /* Now iterate through the page table, uncanonicalizing each PTE */
   27.38 +    /* First pass: work out how many (if any) MFNs we need to alloc */
   27.39 +    for(i = 0; i < pte_last; i++) {
   27.40 +        
   27.41 +        if(pt_levels == 2)
   27.42 +            pte = ((uint32_t *)page)[i];
   27.43 +        else
   27.44 +            pte = ((uint64_t *)page)[i];
   27.45 +        
   27.46 +        /* XXX SMH: below needs fixing for PROT_NONE etc */
   27.47 +        if(!(pte & _PAGE_PRESENT))
   27.48 +            continue; 
   27.49 +        
   27.50 +        pfn = (pte >> PAGE_SHIFT) & 0xffffffff;
   27.51 +        
   27.52 +        if(pfn >= max_pfn) {
   27.53 +            /* This "page table page" is probably not one; bail. */
   27.54 +            ERROR("Frame number in type %lu page table is out of range: "
   27.55 +                  "i=%d pfn=0x%lx max_pfn=%lu",
   27.56 +                  type >> 28, i, pfn, max_pfn);
   27.57 +            return 0;
   27.58 +        }
   27.59 +        
   27.60 +        if(p2m[pfn] == INVALID_P2M_ENTRY) {
   27.61 +            /* Have a 'valid' PFN without a matching MFN - need to alloc */
   27.62 +            p2m_batch[nr_mfns++] = pfn; 
   27.63 +        }
   27.64 +    }
   27.65 +    
   27.66 +    
    27.67 +    /* Allocate the requisite number of mfns */
   27.68 +    if (nr_mfns && xc_domain_memory_populate_physmap(
   27.69 +            xc_handle, dom, nr_mfns, 0, 0, p2m_batch) != 0) { 
    27.70 +        ERROR("Failed to allocate memory for batch!\n"); 
   27.71 +        errno = ENOMEM;
   27.72 +        return 0; 
   27.73 +    }
   27.74 +    
   27.75 +    /* Second pass: uncanonicalize each present PTE */
   27.76 +    nr_mfns = 0;
   27.77      for(i = 0; i < pte_last; i++) {
   27.78  
   27.79          if(pt_levels == 2)
   27.80              pte = ((uint32_t *)page)[i];
   27.81          else
   27.82              pte = ((uint64_t *)page)[i];
   27.83 -
   27.84 -        if(pte & _PAGE_PRESENT) {
   27.85 -
   27.86 -            pfn = (pte >> PAGE_SHIFT) & 0xffffffff;
   27.87 +        
   27.88 +        /* XXX SMH: below needs fixing for PROT_NONE etc */
   27.89 +        if(!(pte & _PAGE_PRESENT))
   27.90 +            continue;
   27.91 +        
   27.92 +        pfn = (pte >> PAGE_SHIFT) & 0xffffffff;
   27.93 +        
   27.94 +        if(p2m[pfn] == INVALID_P2M_ENTRY)
   27.95 +            p2m[pfn] = p2m_batch[nr_mfns++];
   27.96  
   27.97 -            if(pfn >= max_pfn) {
   27.98 -                /* This "page table page" is probably not one; bail. */
   27.99 -                ERROR("Frame number in type %lu page table is out of range: "
  27.100 -                    "i=%d pfn=0x%lx max_pfn=%lu",
  27.101 -                    type >> 28, i, pfn, max_pfn);
  27.102 -                return 0;
  27.103 -            }
  27.104 -
  27.105 +        pte &= 0xffffff0000000fffULL;
  27.106 +        pte |= (uint64_t)p2m[pfn] << PAGE_SHIFT;
  27.107  
  27.108 -            pte &= 0xffffff0000000fffULL;
  27.109 -            pte |= (uint64_t)p2m[pfn] << PAGE_SHIFT;
  27.110 -
  27.111 -            if(pt_levels == 2)
  27.112 -                ((uint32_t *)page)[i] = (uint32_t)pte;
  27.113 -            else
  27.114 -                ((uint64_t *)page)[i] = (uint64_t)pte;
  27.115 -
  27.116 -
  27.117 -
  27.118 -        }
  27.119 +        if(pt_levels == 2)
  27.120 +            ((uint32_t *)page)[i] = (uint32_t)pte;
  27.121 +        else
  27.122 +            ((uint64_t *)page)[i] = (uint64_t)pte;
  27.123      }
  27.124  
  27.125      return 1;
  27.126 @@ -140,6 +175,7 @@ int xc_linux_restore(int xc_handle, int 
  27.127      /* A temporary mapping of the guest's start_info page. */
  27.128      start_info_t *start_info;
  27.129  
  27.130 +    /* Our mapping of the current region (batch) */
  27.131      char *region_base;
  27.132  
  27.133      xc_mmu_t *mmu = NULL;
  27.134 @@ -244,8 +280,10 @@ int xc_linux_restore(int xc_handle, int 
  27.135      p2m        = calloc(max_pfn, sizeof(xen_pfn_t));
  27.136      pfn_type   = calloc(max_pfn, sizeof(unsigned long));
  27.137      region_mfn = calloc(MAX_BATCH_SIZE, sizeof(xen_pfn_t));
  27.138 +    p2m_batch  = calloc(MAX_BATCH_SIZE, sizeof(xen_pfn_t));
  27.139  
  27.140 -    if ((p2m == NULL) || (pfn_type == NULL) || (region_mfn == NULL)) {
  27.141 +    if ((p2m == NULL) || (pfn_type == NULL) ||
  27.142 +        (region_mfn == NULL) || (p2m_batch == NULL)) {
  27.143          ERROR("memory alloc failed");
  27.144          errno = ENOMEM;
  27.145          goto out;
  27.146 @@ -256,6 +294,11 @@ int xc_linux_restore(int xc_handle, int 
  27.147          goto out;
  27.148      }
  27.149  
  27.150 +    if (lock_pages(p2m_batch, sizeof(xen_pfn_t) * MAX_BATCH_SIZE)) {
  27.151 +        ERROR("Could not lock p2m_batch");
  27.152 +        goto out;
  27.153 +    }
  27.154 +
  27.155      /* Get the domain's shared-info frame. */
  27.156      domctl.cmd = XEN_DOMCTL_getdomaininfo;
  27.157      domctl.domain = (domid_t)dom;
  27.158 @@ -270,17 +313,9 @@ int xc_linux_restore(int xc_handle, int 
  27.159          goto out;
  27.160      }
  27.161  
  27.162 +    /* Mark all PFNs as invalid; we allocate on demand */
  27.163      for ( pfn = 0; pfn < max_pfn; pfn++ )
  27.164 -        p2m[pfn] = pfn;
  27.165 -
  27.166 -    if (xc_domain_memory_populate_physmap(xc_handle, dom, max_pfn,
  27.167 -                                          0, 0, p2m) != 0) {
  27.168 -        ERROR("Failed to increase reservation by %lx KB", PFN_TO_KB(max_pfn));
  27.169 -        errno = ENOMEM;
  27.170 -        goto out;
  27.171 -    }
  27.172 -
  27.173 -    DPRINTF("Increased domain reservation by %lx KB\n", PFN_TO_KB(max_pfn));
  27.174 +        p2m[pfn] = INVALID_P2M_ENTRY;
  27.175  
  27.176      if(!(mmu = xc_init_mmu_updates(xc_handle, dom))) {
  27.177          ERROR("Could not initialise for MMU updates");
  27.178 @@ -298,7 +333,7 @@ int xc_linux_restore(int xc_handle, int 
  27.179      n = 0;
  27.180      while (1) {
  27.181  
  27.182 -        int j;
  27.183 +        int j, nr_mfns = 0; 
  27.184  
  27.185          this_pc = (n * 100) / max_pfn;
  27.186          if ( (this_pc - prev_pc) >= 5 )
  27.187 @@ -333,6 +368,33 @@ int xc_linux_restore(int xc_handle, int 
  27.188              goto out;
  27.189          }
  27.190  
  27.191 +        /* First pass for this batch: work out how much memory to alloc */
  27.192 +        nr_mfns = 0; 
  27.193 +        for ( i = 0; i < j; i++ )
  27.194 +        {
  27.195 +            unsigned long pfn, pagetype;
  27.196 +            pfn      = region_pfn_type[i] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
  27.197 +            pagetype = region_pfn_type[i] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
  27.198 +
  27.199 +            if ( (pagetype != XEN_DOMCTL_PFINFO_XTAB) && 
  27.200 +                 (p2m[pfn] == INVALID_P2M_ENTRY) )
  27.201 +            {
  27.202 +                /* Have a live PFN which hasn't had an MFN allocated */
  27.203 +                p2m_batch[nr_mfns++] = pfn; 
  27.204 +            }
  27.205 +        } 
  27.206 +
  27.207 +
  27.208 +        /* Now allocate a bunch of mfns for this batch */
  27.209 +        if (nr_mfns && xc_domain_memory_populate_physmap(
  27.210 +                xc_handle, dom, nr_mfns, 0, 0, p2m_batch) != 0) { 
  27.211 +            ERROR("Failed to allocate memory for batch!\n"); 
  27.212 +            errno = ENOMEM;
  27.213 +            goto out;
  27.214 +        }
  27.215 +
  27.216 +        /* Second pass for this batch: update p2m[] and region_mfn[] */
  27.217 +        nr_mfns = 0; 
  27.218          for ( i = 0; i < j; i++ )
  27.219          {
  27.220              unsigned long pfn, pagetype;
  27.221 @@ -340,13 +402,23 @@ int xc_linux_restore(int xc_handle, int 
  27.222              pagetype = region_pfn_type[i] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
  27.223  
  27.224              if ( pagetype == XEN_DOMCTL_PFINFO_XTAB)
  27.225 -                region_mfn[i] = 0; /* we know map will fail, but don't care */
  27.226 -            else
  27.227 -                region_mfn[i] = p2m[pfn];
  27.228 -        }
  27.229 +                region_mfn[i] = ~0UL; /* map will fail but we don't care */
  27.230 +            else 
  27.231 +            {
  27.232 +                if (p2m[pfn] == INVALID_P2M_ENTRY) {
  27.233 +                    /* We just allocated a new mfn above; update p2m */
  27.234 +                    p2m[pfn] = p2m_batch[nr_mfns++]; 
  27.235 +                }
  27.236  
  27.237 +                /* setup region_mfn[] for batch map */
  27.238 +                region_mfn[i] = p2m[pfn]; 
  27.239 +            }
  27.240 +        } 
  27.241 +
  27.242 +        /* Map relevant mfns */
  27.243          region_base = xc_map_foreign_batch(
  27.244              xc_handle, dom, PROT_WRITE, region_mfn, j);
  27.245 +
  27.246          if ( region_base == NULL )
  27.247          {
  27.248              ERROR("map batch failed");
  27.249 @@ -401,7 +473,8 @@ int xc_linux_restore(int xc_handle, int 
  27.250                      pae_extended_cr3 ||
  27.251                      (pagetype != XEN_DOMCTL_PFINFO_L1TAB)) {
  27.252  
  27.253 -                    if (!uncanonicalize_pagetable(pagetype, page)) {
  27.254 +                    if (!uncanonicalize_pagetable(xc_handle, dom, 
  27.255 +                                                  pagetype, page)) {
  27.256                          /*
  27.257                          ** Failing to uncanonicalize a page table can be ok
  27.258                          ** under live migration since the pages type may have
  27.259 @@ -411,10 +484,8 @@ int xc_linux_restore(int xc_handle, int 
  27.260                                  pagetype >> 28, pfn, mfn);
  27.261                          nraces++;
  27.262                          continue;
  27.263 -                    }
  27.264 -
  27.265 +                    } 
  27.266                  }
  27.267 -
  27.268              }
  27.269              else if ( pagetype != XEN_DOMCTL_PFINFO_NOTAB )
  27.270              {
  27.271 @@ -486,7 +557,7 @@ int xc_linux_restore(int xc_handle, int 
  27.272          */
  27.273  
  27.274          int j, k;
  27.275 -
  27.276 +        
  27.277          /* First pass: find all L3TABs current in > 4G mfns and get new mfns */
  27.278          for ( i = 0; i < max_pfn; i++ )
  27.279          {
  27.280 @@ -555,7 +626,8 @@ int xc_linux_restore(int xc_handle, int 
  27.281                  }
  27.282  
  27.283                  for(k = 0; k < j; k++) {
  27.284 -                    if(!uncanonicalize_pagetable(XEN_DOMCTL_PFINFO_L1TAB,
  27.285 +                    if(!uncanonicalize_pagetable(xc_handle, dom, 
  27.286 +                                                 XEN_DOMCTL_PFINFO_L1TAB,
  27.287                                                   region_base + k*PAGE_SIZE)) {
  27.288                          ERROR("failed uncanonicalize pt!");
  27.289                          goto out;
  27.290 @@ -631,7 +703,7 @@ int xc_linux_restore(int xc_handle, int 
  27.291      {
  27.292          unsigned int count;
  27.293          unsigned long *pfntab;
  27.294 -        int rc;
  27.295 +        int nr_frees, rc;
  27.296  
  27.297          if (!read_exact(io_fd, &count, sizeof(count))) {
  27.298              ERROR("Error when reading pfn count");
  27.299 @@ -648,29 +720,30 @@ int xc_linux_restore(int xc_handle, int 
  27.300              goto out;
  27.301          }
  27.302  
  27.303 +        nr_frees = 0; 
  27.304          for (i = 0; i < count; i++) {
  27.305  
  27.306              unsigned long pfn = pfntab[i];
  27.307  
  27.308 -            if(pfn > max_pfn)
  27.309 -                /* shouldn't happen - continue optimistically */
  27.310 -                continue;
  27.311 -
  27.312 -            pfntab[i] = p2m[pfn];
  27.313 -            p2m[pfn]  = INVALID_P2M_ENTRY; // not in pseudo-physical map
  27.314 +            if(p2m[pfn] != INVALID_P2M_ENTRY) {
  27.315 +                /* pfn is not in physmap now, but was at some point during 
  27.316 +                   the save/migration process - need to free it */
  27.317 +                pfntab[nr_frees++] = p2m[pfn];
  27.318 +                p2m[pfn]  = INVALID_P2M_ENTRY; // not in pseudo-physical map
  27.319 +            }
  27.320          }
  27.321  
  27.322 -        if (count > 0) {
  27.323 +        if (nr_frees > 0) {
  27.324  
  27.325              struct xen_memory_reservation reservation = {
  27.326 -                .nr_extents   = count,
  27.327 +                .nr_extents   = nr_frees,
  27.328                  .extent_order = 0,
  27.329                  .domid        = dom
  27.330              };
  27.331              set_xen_guest_handle(reservation.extent_start, pfntab);
  27.332  
  27.333              if ((rc = xc_memory_op(xc_handle, XENMEM_decrease_reservation,
  27.334 -                                   &reservation)) != count) {
  27.335 +                                   &reservation)) != nr_frees) {
  27.336                  ERROR("Could not decrease reservation : %d", rc);
  27.337                  goto out;
  27.338              } else
  27.339 @@ -791,6 +864,6 @@ int xc_linux_restore(int xc_handle, int 
  27.340      free(pfn_type);
  27.341  
  27.342      DPRINTF("Restore exit with rc=%d\n", rc);
  27.343 -
  27.344 +    
  27.345      return rc;
  27.346  }
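
The xc_linux_restore.c rework above replaces a single up-front physmap population with on-demand allocation, one batch at a time, so a guest that ballooned down before the save is no longer forced back to its full reservation on restore. Each batch is handled in two passes; condensed, with needs_mfn()/pfn_of() as illustrative stand-ins for the PFN/type extraction in the real loops (and assuming each PFN appears at most once per batch):

    /* Pass 1: collect PFNs that still lack an MFN. */
    nr_mfns = 0;
    for (i = 0; i < j; i++)
        if (needs_mfn(i) && p2m[pfn_of(i)] == INVALID_P2M_ENTRY)
            p2m_batch[nr_mfns++] = pfn_of(i);

    /* One hypercall rewrites p2m_batch[] in place with fresh MFNs. */
    if (nr_mfns && xc_domain_memory_populate_physmap(
            xc_handle, dom, nr_mfns, 0, 0, p2m_batch) != 0)
        goto out;

    /* Pass 2: hand the new MFNs out in the same order. */
    nr_mfns = 0;
    for (i = 0; i < j; i++)
        if (needs_mfn(i) && p2m[pfn_of(i)] == INVALID_P2M_ENTRY)
            p2m[pfn_of(i)] = p2m_batch[nr_mfns++];
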
    28.1 --- a/tools/libxc/xc_linux_save.c	Wed Jan 17 19:55:48 2007 -0700
    28.2 +++ b/tools/libxc/xc_linux_save.c	Wed Jan 17 21:31:22 2007 -0700
    28.3 @@ -660,13 +660,6 @@ int xc_linux_save(int xc_handle, int io_
    28.4          goto out;
    28.5      }
    28.6  
    28.7 -   /* cheesy sanity check */
    28.8 -    if ((info.max_memkb >> (PAGE_SHIFT - 10)) > max_mfn) {
    28.9 -        ERROR("Invalid state record -- pfn count out of range: %lu",
   28.10 -            (info.max_memkb >> (PAGE_SHIFT - 10)));
   28.11 -        goto out;
   28.12 -     }
   28.13 -
   28.14      /* Map the shared info frame */
   28.15      if(!(live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
   28.16                                              PROT_READ, shared_info_frame))) {
    29.1 --- a/tools/pygrub/src/pygrub	Wed Jan 17 19:55:48 2007 -0700
    29.2 +++ b/tools/pygrub/src/pygrub	Wed Jan 17 21:31:22 2007 -0700
    29.3 @@ -405,6 +405,9 @@ class Grub:
    29.4              c = self.screen.getch()
    29.5              if mytime != -1:
    29.6                  mytime += 1
    29.7 +                if mytime >= int(timeout):
    29.8 +                    self.isdone = True
    29.9 +                    break
   29.10  
   29.11              # handle keypresses
   29.12              if c == ord('c'):
    30.1 --- a/tools/python/xen/xend/XendCheckpoint.py	Wed Jan 17 19:55:48 2007 -0700
    30.2 +++ b/tools/python/xen/xend/XendCheckpoint.py	Wed Jan 17 21:31:22 2007 -0700
    30.3 @@ -147,18 +147,20 @@ def restore(xd, fd, dominfo = None, paus
    30.4      assert store_port
    30.5      assert console_port
    30.6  
    30.7 +    nr_pfns = (dominfo.getMemoryTarget() + 3) / 4 
    30.8 +
    30.9      try:
   30.10          l = read_exact(fd, sizeof_unsigned_long,
   30.11                         "not a valid guest state file: pfn count read")
   30.12 -        nr_pfns = unpack("L", l)[0]    # native sizeof long
   30.13 -        if nr_pfns > 16*1024*1024:     # XXX 
   30.14 +        max_pfn = unpack("L", l)[0]    # native sizeof long
   30.15 +        if max_pfn > 16*1024*1024:     # XXX 
   30.16              raise XendError(
   30.17                  "not a valid guest state file: pfn count out of range")
   30.18  
   30.19          balloon.free(xc.pages_to_kib(nr_pfns))
   30.20  
   30.21          cmd = map(str, [xen.util.auxbin.pathTo(XC_RESTORE),
   30.22 -                        fd, dominfo.getDomid(), nr_pfns,
   30.23 +                        fd, dominfo.getDomid(), max_pfn,
   30.24                          store_port, console_port])
   30.25          log.debug("[xc_restore]: %s", string.join(cmd))
   30.26  
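
The restore helper now distinguishes two sizes the old code conflated: max_pfn, read from the state file, sizes the p2m map, while nr_pfns, derived from the domain's memory target, decides how much memory to balloon free. Assuming the target is reported in KiB, as xend does, the rounding simply turns KiB into 4 KiB pages:

    nr_pfns = (memory_target_KiB + 3) / 4    # e.g. 8193 KiB -> 2049 pages
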
    31.1 --- a/tools/python/xen/xend/XendConfig.py	Wed Jan 17 19:55:48 2007 -0700
    31.2 +++ b/tools/python/xen/xend/XendConfig.py	Wed Jan 17 21:31:22 2007 -0700
    31.3 @@ -126,6 +126,7 @@ XENAPI_CFG_TYPES = {
    31.4      'memory_dynamic_min': int,
    31.5      'memory_dynamic_max': int,
    31.6      'memory_actual': int,
    31.7 +    'cpus': list,
    31.8      'vcpus_policy': str,
    31.9      'vcpus_params': str,
   31.10      'vcpus_number': int,
    32.1 --- a/tools/python/xen/xend/XendNode.py	Wed Jan 17 19:55:48 2007 -0700
    32.2 +++ b/tools/python/xen/xend/XendNode.py	Wed Jan 17 21:31:22 2007 -0700
    32.3 @@ -365,14 +365,24 @@ class XendNode:
    32.4  
    32.5          return [[k, info[k]] for k in ITEM_ORDER]
    32.6  
    32.7 +    def xenschedinfo(self):
    32.8 +        sched_id = self.xc.sched_id_get()
    32.9 +        if sched_id == xen.lowlevel.xc.XEN_SCHEDULER_SEDF:
   32.10 +            return 'sedf'
   32.11 +        elif sched_id == xen.lowlevel.xc.XEN_SCHEDULER_CREDIT:
   32.12 +            return 'credit'
   32.13 +        else:
   32.14 +            return 'unknown'
   32.15  
   32.16      def xeninfo(self):
   32.17          info = self.xc.xeninfo()
   32.18 +        info['xen_scheduler'] = self.xenschedinfo()
   32.19  
   32.20          ITEM_ORDER = ['xen_major',
   32.21                        'xen_minor',
   32.22                        'xen_extra',
   32.23                        'xen_caps',
   32.24 +                      'xen_scheduler',
   32.25                        'xen_pagesize',
   32.26                        'platform_params',
   32.27                        'xen_changeset',
    33.1 --- a/tools/tests/test_x86_emulator.c	Wed Jan 17 19:55:48 2007 -0700
    33.2 +++ b/tools/tests/test_x86_emulator.c	Wed Jan 17 21:31:22 2007 -0700
    33.3 @@ -118,7 +118,8 @@ int main(int argc, char **argv)
    33.4  #endif
    33.5  
    33.6      ctxt.regs = &regs;
    33.7 -    ctxt.address_bytes = 4;
    33.8 +    ctxt.addr_size = 32;
    33.9 +    ctxt.sp_size   = 32;
   33.10  
   33.11      res = mmap((void *)0x100000, MMAP_SZ, PROT_READ|PROT_WRITE,
   33.12                 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
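
The context-structure change visible here replaces the single byte-count address_bytes with two bit widths, because address size and stack size are independent on x86: a protected-mode segment pair can run 32-bit code (CS.D set) on a 16-bit stack (SS.B clear). Filling in the new fields, with the 16-bit stack as an illustrative variation rather than something from the patch:

    struct x86_emulate_ctxt ctxt;
    ctxt.regs      = &regs;
    ctxt.addr_size = 32;    /* from CS.D, or 64 when CS.L is set    */
    ctxt.sp_size   = 16;    /* from SS.B: 32-bit code, 16-bit stack */
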
    34.1 --- a/xen/arch/x86/hvm/i8254.c	Wed Jan 17 19:55:48 2007 -0700
    34.2 +++ b/xen/arch/x86/hvm/i8254.c	Wed Jan 17 21:31:22 2007 -0700
    34.3 @@ -182,11 +182,9 @@ void pit_time_fired(struct vcpu *v, void
    34.4      s->count_load_time = hvm_get_guest_time(v);
    34.5  }
    34.6  
    34.7 -static inline void pit_load_count(PITChannelState *s, int val)
    34.8 +static inline void pit_load_count(PITChannelState *s, int channel, int val)
    34.9  {
   34.10      u32 period;
   34.11 -    PITChannelState *ch0 =
   34.12 -        &current->domain->arch.hvm_domain.pl_time.vpit.channels[0];
   34.13  
   34.14      if (val == 0)
   34.15          val = 0x10000;
   34.16 @@ -194,7 +192,7 @@ static inline void pit_load_count(PITCha
   34.17      s->count = val;
   34.18      period = DIV_ROUND((val * 1000000000ULL), PIT_FREQ);
   34.19  
   34.20 -    if (s != ch0)
   34.21 +    if (channel != 0)
   34.22          return;
   34.23  
   34.24  #ifdef DEBUG_PIT
   34.25 @@ -282,17 +280,17 @@ static void pit_ioport_write(void *opaqu
   34.26          switch(s->write_state) {
   34.27          default:
   34.28          case RW_STATE_LSB:
   34.29 -            pit_load_count(s, val);
   34.30 +            pit_load_count(s, addr, val);
   34.31              break;
   34.32          case RW_STATE_MSB:
   34.33 -            pit_load_count(s, val << 8);
   34.34 +            pit_load_count(s, addr, val << 8);
   34.35              break;
   34.36          case RW_STATE_WORD0:
   34.37              s->write_latch = val;
   34.38              s->write_state = RW_STATE_WORD1;
   34.39              break;
   34.40          case RW_STATE_WORD1:
   34.41 -            pit_load_count(s, s->write_latch | (val << 8));
   34.42 +            pit_load_count(s, addr, s->write_latch | (val << 8));
   34.43              s->write_state = RW_STATE_WORD0;
   34.44              break;
   34.45          }
   34.46 @@ -369,7 +367,7 @@ static void pit_reset(void *opaque)
   34.47          destroy_periodic_time(&s->pt);
   34.48          s->mode = 0xff; /* the init mode */
   34.49          s->gate = (i != 2);
   34.50 -        pit_load_count(s, 0);
   34.51 +        pit_load_count(s, i, 0);
   34.52      }
   34.53  }
   34.54  
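
Passing the channel index into pit_load_count() fixes a latent bug: the old code located "channel 0" through current->domain, which is the wrong PIT whenever the channel being programmed does not belong to the currently running vcpu's domain (pit_reset(), for instance, need not run on a vcpu of the owning domain). With the explicit index the test is domain-independent:

    /* The periodic-timer setup only applies to channel 0,
     * regardless of which domain happens to be current. */
    if (channel != 0)
            return;
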
    35.1 --- a/xen/arch/x86/hvm/svm/svm.c	Wed Jan 17 19:55:48 2007 -0700
    35.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Jan 17 21:31:22 2007 -0700
    35.3 @@ -482,8 +482,8 @@ static int svm_guest_x86_mode(struct vcp
    35.4  {
    35.5      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    35.6  
    35.7 -    if ( vmcb->efer & EFER_LMA )
    35.8 -        return (vmcb->cs.attr.fields.l ? 8 : 4);
    35.9 +    if ( (vmcb->efer & EFER_LMA) && vmcb->cs.attr.fields.l )
   35.10 +        return 8;
   35.11  
   35.12      if ( svm_realmode(v) )
   35.13          return 2;
    36.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Jan 17 19:55:48 2007 -0700
    36.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Jan 17 21:31:22 2007 -0700
    36.3 @@ -491,8 +491,7 @@ static unsigned long vmx_get_segment_bas
    36.4      ASSERT(v == current);
    36.5  
    36.6  #ifdef __x86_64__
    36.7 -    if ( vmx_long_mode_enabled(v) &&
    36.8 -         (__vmread(GUEST_CS_AR_BYTES) & (1u<<13)) )
    36.9 +    if ( vmx_long_mode_enabled(v) && (__vmread(GUEST_CS_AR_BYTES) & (1u<<13)) )
   36.10          long_mode = 1;
   36.11  #endif
   36.12  
   36.13 @@ -667,8 +666,8 @@ static int vmx_guest_x86_mode(struct vcp
   36.14  
   36.15      cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
   36.16  
   36.17 -    if ( vmx_long_mode_enabled(v) )
   36.18 -        return ((cs_ar_bytes & (1u<<13)) ? 8 : 4);
   36.19 +    if ( vmx_long_mode_enabled(v) && (cs_ar_bytes & (1u<<13)) )
   36.20 +        return 8;
   36.21  
   36.22      if ( vmx_realmode(v) )
   36.23          return 2;
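
The SVM and VMX versions of guest_x86_mode() shared the same bug: EFER.LMA alone does not imply 8-byte mode, because a long-mode guest executing a compatibility-mode segment has CS.L clear and must be sized like ordinary protected mode. The corrected decision tree, as a sketch assuming the fall-through ends with the usual CS default-size test:

    if (long_mode_active && cs.l)
            return 8;               /* 64-bit code segment             */
    if (realmode)
            return 2;
    return cs.db ? 4 : 2;           /* protected or compatibility mode */
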
    37.1 --- a/xen/arch/x86/microcode.c	Wed Jan 17 19:55:48 2007 -0700
    37.2 +++ b/xen/arch/x86/microcode.c	Wed Jan 17 21:31:22 2007 -0700
    37.3 @@ -249,14 +249,14 @@ static int find_matching_ucodes (void)
    37.4  		}
    37.5  
    37.6  		total_size = get_totalsize(&mc_header);
    37.7 -		if ((cursor + total_size > user_buffer_size) || (total_size < DEFAULT_UCODE_TOTALSIZE)) {
    37.8 +		if (cursor + total_size > user_buffer_size) {
    37.9  			printk(KERN_ERR "microcode: error! Bad data in microcode data file\n");
   37.10  			error = -EINVAL;
   37.11  			goto out;
   37.12  		}
   37.13  
   37.14  		data_size = get_datasize(&mc_header);
   37.15 -		if ((data_size + MC_HEADER_SIZE > total_size) || (data_size < DEFAULT_UCODE_DATASIZE)) {
   37.16 +		if (data_size + MC_HEADER_SIZE > total_size) {
   37.17  			printk(KERN_ERR "microcode: error! Bad data in microcode data file\n");
   37.18  			error = -EINVAL;
   37.19  			goto out;
   37.20 @@ -459,11 +459,6 @@ int microcode_update(XEN_GUEST_HANDLE(vo
   37.21  {
   37.22  	int ret;
   37.23  
   37.24 -	if (len < DEFAULT_UCODE_TOTALSIZE) {
   37.25 -		printk(KERN_ERR "microcode: not enough data\n");
   37.26 -		return -EINVAL;
   37.27 -	}
   37.28 -
   37.29  	if (len != (typeof(user_buffer_size))len) {
   37.30  		printk(KERN_ERR "microcode: too much data\n");
   37.31  		return -E2BIG;
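
Both the sparse-tree driver (microcode-xen.c above) and the hypervisor copy drop the DEFAULT_UCODE_TOTALSIZE/DEFAULT_UCODE_DATASIZE lower bounds, presumably because those defaults are conventional rather than architecturally guaranteed minima; the header is the only thing whose size can be checked before parsing. What survives, in outline:

    if (len < MC_HEADER_SIZE)
            return -EINVAL;         /* too short to even hold a header */
    total_size = get_totalsize(&mc_header);
    if (cursor + total_size > user_buffer_size)
            return -EINVAL;         /* header claims more than we have */
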
    38.1 --- a/xen/arch/x86/mm.c	Wed Jan 17 19:55:48 2007 -0700
    38.2 +++ b/xen/arch/x86/mm.c	Wed Jan 17 21:31:22 2007 -0700
    38.3 @@ -3236,15 +3236,14 @@ static int ptwr_emulated_update(
    38.4      if ( unlikely(!get_page_from_l1e(gl1e_to_ml1e(d, nl1e), d)) )
    38.5      {
    38.6          if ( (CONFIG_PAGING_LEVELS == 3 || IS_COMPAT(d)) &&
    38.7 -             (bytes == 4) &&
    38.8 -             !do_cmpxchg &&
    38.9 +             (bytes == 4) && (addr & 4) && !do_cmpxchg &&
   38.10               (l1e_get_flags(nl1e) & _PAGE_PRESENT) )
   38.11          {
   38.12              /*
   38.13 -             * If this is a half-write to a PAE PTE then we assume that the
   38.14 -             * guest has simply got the two writes the wrong way round. We
   38.15 -             * zap the PRESENT bit on the assumption the bottom half will be
   38.16 -             * written immediately after we return to the guest.
   38.17 +             * If this is an upper-half write to a PAE PTE then we assume that
   38.18 +             * the guest has simply got the two writes the wrong way round. We
   38.19 +             * zap the PRESENT bit on the assumption that the bottom half will
   38.20 +             * be written immediately after we return to the guest.
   38.21               */
   38.22              MEM_LOG("ptwr_emulate: fixing up invalid PAE PTE %"PRIpte,
   38.23                      l1e_get_intpte(nl1e));
   38.24 @@ -3375,8 +3374,9 @@ int ptwr_do_page_fault(struct vcpu *v, u
   38.25           (page_get_owner(page) != d) )
   38.26          goto bail;
   38.27  
   38.28 -    ptwr_ctxt.ctxt.regs = guest_cpu_user_regs();
   38.29 -    ptwr_ctxt.ctxt.address_bytes = IS_COMPAT(d) ? 4 : sizeof(long);
   38.30 +    ptwr_ctxt.ctxt.regs = regs;
   38.31 +    ptwr_ctxt.ctxt.addr_size = ptwr_ctxt.ctxt.sp_size =
   38.32 +        IS_COMPAT(d) ? 32 : BITS_PER_LONG;
   38.33      ptwr_ctxt.cr2 = addr;
   38.34      ptwr_ctxt.pte = pte;
   38.35      if ( x86_emulate(&ptwr_ctxt.ctxt, &ptwr_emulate_ops) )
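
The extra "(addr & 4)" condition narrows the ptwr PAE heuristic to upper-half writes. A PAE PTE is 8 bytes wide and a 32-bit guest updates it as two 4-byte stores, with bit 2 of the address selecting the half; only a present upper half written ahead of its lower half matches the "two writes the wrong way round" pattern that the code repairs by zapping the PRESENT bit. A tiny sketch of the classification:

    /* Which half of an 8-byte PAE PTE does a 4-byte store touch?
     * Lower half: PRESENT bit and low frame-address bits;
     * upper half: high frame-address bits and NX. */
    static inline int pae_pte_upper_half(unsigned long addr)
    {
            return (addr & 4) != 0;   /* bit 2 selects the half */
    }
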
    39.1 --- a/xen/arch/x86/mm/shadow/common.c	Wed Jan 17 19:55:48 2007 -0700
    39.2 +++ b/xen/arch/x86/mm/shadow/common.c	Wed Jan 17 21:31:22 2007 -0700
    39.3 @@ -110,7 +110,7 @@ static int hvm_translate_linear_addr(
    39.4      unsigned long limit, addr = offset;
    39.5      uint32_t last_byte;
    39.6  
    39.7 -    if ( sh_ctxt->ctxt.address_bytes != 8 )
    39.8 +    if ( sh_ctxt->ctxt.addr_size != 64 )
    39.9      {
   39.10          /*
   39.11           * COMPATIBILITY MODE: Apply segment checks and add base.
   39.12 @@ -399,7 +399,7 @@ static struct x86_emulate_ops pv_shadow_
   39.13  struct x86_emulate_ops *shadow_init_emulation(
   39.14      struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs)
   39.15  {
   39.16 -    struct segment_register *creg;
   39.17 +    struct segment_register *creg, *sreg;
   39.18      struct vcpu *v = current;
   39.19      unsigned long addr;
   39.20  
   39.21 @@ -407,7 +407,7 @@ struct x86_emulate_ops *shadow_init_emul
   39.22  
   39.23      if ( !is_hvm_vcpu(v) )
   39.24      {
   39.25 -        sh_ctxt->ctxt.address_bytes = sizeof(long);
   39.26 +        sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = BITS_PER_LONG;
   39.27          return &pv_shadow_emulator_ops;
   39.28      }
   39.29  
   39.30 @@ -416,12 +416,20 @@ struct x86_emulate_ops *shadow_init_emul
   39.31      creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
   39.32  
   39.33      /* Work out the emulation mode. */
   39.34 -    if ( hvm_long_mode_enabled(v) )
   39.35 -        sh_ctxt->ctxt.address_bytes = creg->attr.fields.l ? 8 : 4;
   39.36 +    if ( hvm_long_mode_enabled(v) && creg->attr.fields.l )
   39.37 +    {
   39.38 +        sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = 64;
   39.39 +    }
   39.40      else if ( regs->eflags & X86_EFLAGS_VM )
   39.41 -        sh_ctxt->ctxt.address_bytes = 2;
   39.42 +    {
   39.43 +        sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = 16;
   39.44 +    }
   39.45      else
   39.46 -        sh_ctxt->ctxt.address_bytes = creg->attr.fields.db ? 4 : 2;
   39.47 +    {
   39.48 +        sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
   39.49 +        sh_ctxt->ctxt.addr_size = creg->attr.fields.db ? 32 : 16;
   39.50 +        sh_ctxt->ctxt.sp_size   = sreg->attr.fields.db ? 32 : 16;
   39.51 +    }
   39.52  
   39.53      /* Attempt to prefetch whole instruction. */
   39.54      sh_ctxt->insn_buf_bytes =
    40.1 --- a/xen/arch/x86/mm/shadow/multi.c	Wed Jan 17 19:55:48 2007 -0700
    40.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Wed Jan 17 21:31:22 2007 -0700
    40.3 @@ -3944,7 +3944,7 @@ sh_x86_emulate_write(struct vcpu *v, uns
    40.4      if ( !skip ) sh_validate_guest_pt_write(v, mfn, addr, bytes);
    40.5  
    40.6      /* If we are writing zeros to this page, might want to unshadow */
    40.7 -    if ( likely(bytes >= 4) && (*(u32 *)addr == 0) )
    40.8 +    if ( likely(bytes >= 4) && (*(u32 *)addr == 0) && is_lo_pte(vaddr) )
    40.9          check_for_early_unshadow(v, mfn);
   40.10  
   40.11      sh_unmap_domain_page(addr);
   40.12 @@ -3996,7 +3996,7 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
   40.13                    vaddr, prev, old, new, *(unsigned long *)addr, bytes);
   40.14  
   40.15      /* If we are writing zeros to this page, might want to unshadow */
   40.16 -    if ( likely(bytes >= 4) && (*(u32 *)addr == 0) )
   40.17 +    if ( likely(bytes >= 4) && (*(u32 *)addr == 0) && is_lo_pte(vaddr) )
   40.18          check_for_early_unshadow(v, mfn);
   40.19  
   40.20      sh_unmap_domain_page(addr);
    41.1 --- a/xen/arch/x86/mm/shadow/private.h	Wed Jan 17 19:55:48 2007 -0700
    41.2 +++ b/xen/arch/x86/mm/shadow/private.h	Wed Jan 17 21:31:22 2007 -0700
    41.3 @@ -427,6 +427,11 @@ extern int sh_remove_write_access(struct
    41.4  #undef mfn_valid
    41.5  #define mfn_valid(_mfn) (mfn_x(_mfn) < max_page)
    41.6  
    41.7 +#if GUEST_PAGING_LEVELS >= 3
    41.8 +# define is_lo_pte(_vaddr) (((_vaddr)&0x4)==0)
    41.9 +#else
   41.10 +# define is_lo_pte(_vaddr) (1)
   41.11 +#endif
   41.12  
   41.13  static inline int
   41.14  sh_mfn_is_a_page_table(mfn_t gmfn)
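
is_lo_pte() captures the same bit-2 trick for the shadow code: with three or more guest paging levels a PTE is 8 bytes, so an emulated 4-byte write of zero to an address with bit 2 set only clears the upper half of an entry and says nothing about the guest dismantling its page tables. The heuristic in multi.c therefore now fires only on lower-half writes:

    if ( likely(bytes >= 4) && (*(u32 *)addr == 0) && is_lo_pte(vaddr) )
            check_for_early_unshadow(v, mfn);
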
    42.1 --- a/xen/arch/x86/x86_emulate.c	Wed Jan 17 19:55:48 2007 -0700
    42.2 +++ b/xen/arch/x86/x86_emulate.c	Wed Jan 17 21:31:22 2007 -0700
    42.3 @@ -443,10 +443,11 @@ do{ __asm__ __volatile__ (              
    42.4  })
    42.5  #define insn_fetch_type(_type) ((_type)insn_fetch_bytes(sizeof(_type)))
    42.6  
    42.7 -#define _truncate_ea(ea, byte_width)                    \
    42.8 -({  unsigned long __ea = (ea);                          \
    42.9 -    (((byte_width) == sizeof(unsigned long)) ? __ea :   \
   42.10 -     (__ea & ((1UL << ((byte_width) << 3)) - 1)));      \
   42.11 +#define _truncate_ea(ea, byte_width)            \
   42.12 +({  unsigned long __ea = (ea);                  \
   42.13 +    unsigned int _width = (byte_width);         \
   42.14 +    ((_width == sizeof(unsigned long)) ? __ea : \
   42.15 +     (__ea & ((1UL << (_width << 3)) - 1)));    \
   42.16  })
   42.17  #define truncate_ea(ea) _truncate_ea((ea), ad_bytes)
   42.18  
   42.19 @@ -473,17 +474,28 @@ static int even_parity(uint8_t v)
   42.20  #define _register_address_increment(reg, inc, byte_width)               \
   42.21  do {                                                                    \
   42.22      int _inc = (inc); /* signed type ensures sign extension to long */  \
   42.23 -    if ( (byte_width) == sizeof(unsigned long) )                        \
   42.24 +    unsigned int _width = (byte_width);                                 \
   42.25 +    if ( _width == sizeof(unsigned long) )                              \
   42.26          (reg) += _inc;                                                  \
   42.27      else if ( mode_64bit() )                                            \
   42.28 -        (reg) = ((reg) + _inc) & ((1UL << ((byte_width) << 3)) - 1);    \
   42.29 +        (reg) = ((reg) + _inc) & ((1UL << (_width << 3)) - 1);          \
   42.30      else                                                                \
   42.31 -        (reg) = ((reg) & ~((1UL << ((byte_width) << 3)) - 1)) |         \
   42.32 -                (((reg) + _inc) & ((1UL << ((byte_width) << 3)) - 1));  \
   42.33 +        (reg) = ((reg) & ~((1UL << (_width << 3)) - 1)) |               \
   42.34 +                (((reg) + _inc) & ((1UL << (_width << 3)) - 1));        \
   42.35  } while (0)
   42.36  #define register_address_increment(reg, inc) \
   42.37      _register_address_increment((reg), (inc), ad_bytes)
   42.38  
   42.39 +#define sp_pre_dec(dec) ({                                              \
   42.40 +    _register_address_increment(_regs.esp, -(dec), ctxt->sp_size/8);    \
   42.41 +    _truncate_ea(_regs.esp, ctxt->sp_size/8);                           \
   42.42 +})
   42.43 +#define sp_post_inc(inc) ({                                             \
   42.44 +    unsigned long __esp = _truncate_ea(_regs.esp, ctxt->sp_size/8);     \
   42.45 +    _register_address_increment(_regs.esp, (inc), ctxt->sp_size/8);     \
   42.46 +    __esp;                                                              \
   42.47 +})
   42.48 +
   42.49  #define jmp_rel(rel)                                                    \
   42.50  do {                                                                    \
   42.51      _regs.eip += (int)(rel);                                            \
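
sp_pre_dec() and sp_post_inc() bundle what the open-coded push/pop sites below used to get subtly wrong: the stack pointer must be adjusted, and the effective address truncated, using the stack width (ctxt->sp_size/8), which is not necessarily ad_bytes. A runnable sketch of the semantics, simplified to the legacy-mode "preserve the untouched upper register bits" behaviour:

    static unsigned long trunc_w(unsigned long ea, unsigned int w)
    {
        return (w == sizeof(unsigned long)) ? ea
                                            : ea & ((1UL << (w * 8)) - 1);
    }

    /* Returns the EA to store to; esp is decremented first (push). */
    static unsigned long sp_pre_dec(unsigned long *esp, int n, unsigned int w)
    {
        unsigned long mask = trunc_w(~0UL, w);
        *esp = (*esp & ~mask) | ((*esp - n) & mask);
        return *esp & mask;
    }

    /* Returns the EA to load from; esp is incremented after (pop). */
    static unsigned long sp_post_inc(unsigned long *esp, int n, unsigned int w)
    {
        unsigned long mask = trunc_w(~0UL, w);
        unsigned long ea = *esp & mask;
        *esp = (*esp & ~mask) | ((*esp + n) & mask);
        return ea;
    }
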
   42.52 @@ -679,7 +691,7 @@ x86_emulate(
   42.53      ea.mem.seg = x86_seg_ds;
   42.54      ea.mem.off = 0;
   42.55  
   42.56 -    op_bytes = def_op_bytes = ad_bytes = def_ad_bytes = ctxt->address_bytes;
   42.57 +    op_bytes = def_op_bytes = ad_bytes = def_ad_bytes = ctxt->addr_size/8;
   42.58      if ( op_bytes == 8 )
   42.59      {
   42.60          op_bytes = def_op_bytes = 4;
   42.61 @@ -1144,7 +1156,9 @@ x86_emulate(
   42.62          break;
   42.63      }
   42.64  
   42.65 -    case 0x80 ... 0x83: /* Grp1 */
   42.66 +    case 0x82: /* Grp1 (x86/32 only) */
   42.67 +        generate_exception_if(mode_64bit(), EXC_UD);
   42.68 +    case 0x80: case 0x81: case 0x83: /* Grp1 */
   42.69          switch ( modrm_reg & 7 )
   42.70          {
   42.71          case 0: goto add;
   42.72 @@ -1194,10 +1208,9 @@ x86_emulate(
   42.73          /* 64-bit mode: POP defaults to a 64-bit operand. */
   42.74          if ( mode_64bit() && (dst.bytes == 4) )
   42.75              dst.bytes = 8;
   42.76 -        if ( (rc = ops->read(x86_seg_ss, truncate_ea(_regs.esp),
   42.77 +        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes),
   42.78                               &dst.val, dst.bytes, ctxt)) != 0 )
   42.79              goto done;
   42.80 -        register_address_increment(_regs.esp, dst.bytes);
   42.81          break;
   42.82  
   42.83      case 0xb0 ... 0xb7: /* mov imm8,r8 */
   42.84 @@ -1466,7 +1479,7 @@ x86_emulate(
   42.85              emulate_1op("dec", dst, _regs.eflags);
   42.86              break;
   42.87          case 2: /* call (near) */
   42.88 -        case 3: /* jmp (near) */
   42.89 +        case 4: /* jmp (near) */
   42.90              if ( ((op_bytes = dst.bytes) != 8) && mode_64bit() )
   42.91              {
   42.92                  dst.bytes = op_bytes = 8;
   42.93 @@ -1488,8 +1501,7 @@ x86_emulate(
   42.94                                       &dst.val, 8, ctxt)) != 0 )
   42.95                      goto done;
   42.96              }
   42.97 -            register_address_increment(_regs.esp, -dst.bytes);
   42.98 -            if ( (rc = ops->write(x86_seg_ss, truncate_ea(_regs.esp),
   42.99 +            if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
  42.100                                    dst.val, dst.bytes, ctxt)) != 0 )
  42.101                  goto done;
  42.102              dst.type = OP_NONE;
  42.103 @@ -1644,10 +1656,9 @@ x86_emulate(
  42.104          dst.bytes = op_bytes;
  42.105          if ( mode_64bit() && (dst.bytes == 4) )
  42.106              dst.bytes = 8;
  42.107 -        if ( (rc = ops->read(x86_seg_ss, truncate_ea(_regs.esp),
  42.108 +        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes),
  42.109                               &dst.val, dst.bytes, ctxt)) != 0 )
  42.110              goto done;
  42.111 -        register_address_increment(_regs.esp, dst.bytes);
  42.112          break;
  42.113  
  42.114      case 0x60: /* pusha */ {
  42.115 @@ -1657,11 +1668,9 @@ x86_emulate(
  42.116              _regs.esp, _regs.ebp, _regs.esi, _regs.edi };
  42.117          generate_exception_if(mode_64bit(), EXC_UD);
  42.118          for ( i = 0; i < 8; i++ )
  42.119 -            if ( (rc = ops->write(x86_seg_ss,
  42.120 -                                  truncate_ea(_regs.esp-(i+1)*op_bytes),
  42.121 +            if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
  42.122                                    regs[i], op_bytes, ctxt)) != 0 )
  42.123              goto done;
  42.124 -        register_address_increment(_regs.esp, -8*op_bytes);
  42.125          break;
  42.126      }
  42.127  
  42.128 @@ -1674,11 +1683,9 @@ x86_emulate(
  42.129              (unsigned long *)&_regs.ecx, (unsigned long *)&_regs.eax };
  42.130          generate_exception_if(mode_64bit(), EXC_UD);
  42.131          for ( i = 0; i < 8; i++ )
  42.132 -            if ( (rc = ops->read(x86_seg_ss,
  42.133 -                                 truncate_ea(_regs.esp+i*op_bytes),
  42.134 +            if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
  42.135                                   regs[i], op_bytes, ctxt)) != 0 )
  42.136              goto done;
  42.137 -        register_address_increment(_regs.esp, 8*op_bytes);
  42.138          break;
  42.139      }
  42.140  
  42.141 @@ -1697,9 +1704,8 @@ x86_emulate(
  42.142          if ( mode_64bit() && (dst.bytes == 4) )
  42.143              dst.bytes = 8;
  42.144          dst.val = src.val;
  42.145 -        register_address_increment(_regs.esp, -dst.bytes);
  42.146          dst.mem.seg = x86_seg_ss;
  42.147 -        dst.mem.off = truncate_ea(_regs.esp);
  42.148 +        dst.mem.off = sp_pre_dec(dst.bytes);
  42.149          break;
  42.150  
  42.151      case 0x70 ... 0x7f: /* jcc (short) */ {
  42.152 @@ -1813,11 +1819,10 @@ x86_emulate(
  42.153      case 0xc3: /* ret (near) */ {
  42.154          int offset = (b == 0xc2) ? insn_fetch_type(uint16_t) : 0;
  42.155          op_bytes = mode_64bit() ? 8 : op_bytes;
  42.156 -        if ( (rc = ops->read(x86_seg_ss, truncate_ea(_regs.esp),
  42.157 +        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes + offset),
  42.158                               &dst.val, op_bytes, ctxt)) != 0 )
  42.159              goto done;
  42.160          _regs.eip = dst.val;
  42.161 -        register_address_increment(_regs.esp, op_bytes + offset);
  42.162          break;
  42.163      }
  42.164  
  42.165 @@ -2019,7 +2024,7 @@ x86_emulate(
  42.166  
  42.167      case 0xbc: /* bsf */ {
  42.168          int zf;
  42.169 -        asm ( "bsf %2,%0; setc %b1"
  42.170 +        asm ( "bsf %2,%0; setz %b1"
  42.171                : "=r" (dst.val), "=q" (zf)
  42.172                : "r" (src.val), "1" (0) );
  42.173          _regs.eflags &= ~EFLG_ZF;
  42.174 @@ -2029,7 +2034,7 @@ x86_emulate(
  42.175  
  42.176      case 0xbd: /* bsr */ {
  42.177          int zf;
  42.178 -        asm ( "bsr %2,%0; setc %b1"
  42.179 +        asm ( "bsr %2,%0; setz %b1"
  42.180                : "=r" (dst.val), "=q" (zf)
  42.181                : "r" (src.val), "1" (0) );
  42.182          _regs.eflags &= ~EFLG_ZF;
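
A note on the bsf/bsr hunks just above: bsf and bsr set ZF, not CF, when the source operand is zero, so latching the flag with setc read garbage; the fix switches to setz. A minimal check of the corrected pattern, as a hedged sketch (GCC inline asm, x86 host assumed; names are illustrative):

    #include <stdio.h>

    /* Returns ZF as set by bsf; on zero input the destination register is
     * architecturally undefined, so only the flag result is meaningful. */
    static int bsf_zf(unsigned long src, unsigned long *idx)
    {
        int zf;
        asm ( "bsf %2,%0; setz %b1"
              : "=r" (*idx), "=q" (zf)
              : "r" (src), "1" (0) );
        return zf;
    }

    int main(void)
    {
        unsigned long idx = 0;
        printf("zf=%d\n", bsf_zf(0, &idx));                 /* zf=1 */
        printf("zf=%d idx=%lu\n", bsf_zf(0x8, &idx), idx);  /* zf=0 idx=3 */
        return 0;
    }
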
  42.183 @@ -2046,12 +2051,13 @@ x86_emulate(
  42.184          break;
  42.185  
  42.186      case 0xba: /* Grp8 */
  42.187 -        switch ( modrm_reg & 3 )
  42.188 +        switch ( modrm_reg & 7 )
  42.189          {
  42.190 -        case 0: goto bt;
  42.191 -        case 1: goto bts;
  42.192 -        case 2: goto btr;
  42.193 -        case 3: goto btc;
  42.194 +        case 4: goto bt;
  42.195 +        case 5: goto bts;
  42.196 +        case 6: goto btr;
  42.197 +        case 7: goto btc;
  42.198 +        default: generate_exception_if(1, EXC_UD);
  42.199          }
  42.200          break;
  42.201  
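
The Grp8 hunk above fixes the ModRM decode for opcode 0F BA: the full 3-bit reg field selects the operation, BT/BTS/BTR/BTC live at /4../7, and /0../3 are undefined and must raise #UD; masking with &3 silently aliased them onto the defined forms. A standalone sketch of the corrected decode, with hypothetical names:

    #include <stdio.h>

    enum grp8_op { GRP8_UD, GRP8_BT, GRP8_BTS, GRP8_BTR, GRP8_BTC };

    /* Decode the reg field of a ModRM byte for the 0F BA (Grp8) group. */
    static enum grp8_op decode_grp8(unsigned char modrm)
    {
        switch ( (modrm >> 3) & 7 )
        {
        case 4: return GRP8_BT;
        case 5: return GRP8_BTS;
        case 6: return GRP8_BTR;
        case 7: return GRP8_BTC;
        default: return GRP8_UD;    /* /0../3: undefined opcode */
        }
    }

    int main(void)
    {
        printf("%d %d\n", decode_grp8(0xe0), decode_grp8(0x00));
        /* prints "1 0": 0xe0 encodes /4 (BT), 0x00 encodes /0 (#UD) */
        return 0;
    }
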
  42.202 @@ -2100,6 +2106,7 @@ x86_emulate(
  42.203  #if defined(__i386__)
  42.204      {
  42.205          unsigned long old_lo, old_hi;
  42.206 +        generate_exception_if((modrm_reg & 7) != 1, EXC_UD);
  42.207          if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0, &old_lo, 4, ctxt)) ||
  42.208               (rc = ops->read(ea.mem.seg, ea.mem.off+4, &old_hi, 4, ctxt)) )
  42.209              goto done;
  42.210 @@ -2126,6 +2133,7 @@ x86_emulate(
  42.211  #elif defined(__x86_64__)
  42.212      {
  42.213          unsigned long old, new;
  42.214 +        generate_exception_if((modrm_reg & 7) != 1, EXC_UD);
  42.215          if ( (rc = ops->read(ea.mem.seg, ea.mem.off, &old, 8, ctxt)) != 0 )
  42.216              goto done;
  42.217          if ( ((uint32_t)(old>>0) != (uint32_t)_regs.eax) ||
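
The sp_pre_dec()/sp_post_inc() macros introduced at the top of this file replace the scattered truncate_ea()/register_address_increment() pairs, so every push and pop honours ctxt->sp_size: with a 16-bit stack segment SP wraps at 64KiB and the upper register bits are preserved. A rough portable model of their semantics -- names and types are illustrative, not the emulator's:

    #include <stdint.h>
    #include <stdio.h>

    /* Keep only the low width bits of an effective address. */
    static uint64_t truncate_ea(uint64_t ea, unsigned int width_bits)
    {
        return (width_bits == 64) ? ea : (ea & ((1ULL << width_bits) - 1));
    }

    /* Adjust only the low width bits of a register, preserving the rest. */
    static void reg_increment(uint64_t *reg, int64_t inc, unsigned int width_bits)
    {
        if ( width_bits == 64 )
            *reg += inc;
        else
        {
            uint64_t mask = (1ULL << width_bits) - 1;
            *reg = (*reg & ~mask) | ((*reg + inc) & mask);
        }
    }

    /* Push: decrement SP first, then return the truncated store address. */
    static uint64_t sp_pre_dec(uint64_t *sp, unsigned int sp_bits, int64_t dec)
    {
        reg_increment(sp, -dec, sp_bits);
        return truncate_ea(*sp, sp_bits);
    }

    /* Pop: return the truncated load address, then increment SP. */
    static uint64_t sp_post_inc(uint64_t *sp, unsigned int sp_bits, int64_t inc)
    {
        uint64_t ea = truncate_ea(*sp, sp_bits);
        reg_increment(sp, inc, sp_bits);
        return ea;
    }

    int main(void)
    {
        uint64_t esp = 0x12340000;   /* 16-bit stack: SP == 0x0000 */
        printf("push at %#llx\n", (unsigned long long)sp_pre_dec(&esp, 16, 4));
        printf("pop  at %#llx\n", (unsigned long long)sp_post_inc(&esp, 16, 4));
        /* both print 0xfffc: SP wrapped, and esp keeps its 0x1234xxxx bits */
        return 0;
    }
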
    43.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    43.2 +++ b/xen/include/asm-powerpc/byteorder.h	Wed Jan 17 21:31:22 2007 -0700
    43.3 @@ -0,0 +1,80 @@
    43.4 +#ifndef _ASM_POWERPC_BYTEORDER_H
    43.5 +#define _ASM_POWERPC_BYTEORDER_H
    43.6 +
    43.7 +/*
    43.8 + * This program is free software; you can redistribute it and/or
    43.9 + * modify it under the terms of the GNU General Public License
   43.10 + * as published by the Free Software Foundation; either version
   43.11 + * 2 of the License, or (at your option) any later version.
   43.12 + */
   43.13 +
   43.14 +#include <asm/types.h>
   43.15 +#include <xen/compiler.h>
   43.16 +
   43.17 +static inline __u16 ld_le16(const volatile __u16 *addr)
   43.18 +{
   43.19 +    __u16 val;
   43.20 +
   43.21 +    asm volatile ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
   43.22 +    return val;
   43.23 +}
   43.24 +
   43.25 +static inline void st_le16(volatile __u16 *addr, const __u16 val)
   43.26 +{
   43.27 +    asm volatile ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
   43.28 +}
   43.29 +
   43.30 +static inline __u32 ld_le32(const volatile __u32 *addr)
   43.31 +{
   43.32 +    __u32 val;
   43.33 +
   43.34 +    asm volatile ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
   43.35 +    return val;
   43.36 +}
   43.37 +
   43.38 +static inline void st_le32(volatile __u32 *addr, const __u32 val)
   43.39 +{
   43.40 +    asm volatile ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
   43.41 +}
   43.42 +
   43.43 +static inline __attribute_const__ __u16 ___arch__swab16(__u16 value)
   43.44 +{
   43.45 +    __u16 result;
   43.46 +
   43.47 +    asm("rlwimi %0,%1,8,16,23"
   43.48 +        : "=r" (result)
   43.49 +        : "r" (value), "0" (value >> 8));
   43.50 +    return result;
   43.51 +}
   43.52 +
   43.53 +static inline __attribute_const__ __u32 ___arch__swab32(__u32 value)
   43.54 +{
   43.55 +    __u32 result;
   43.56 +
   43.57 +    asm("rlwimi %0,%1,24,16,23\n\t"
   43.58 +        "rlwimi %0,%1,8,8,15\n\t"
   43.59 +        "rlwimi %0,%1,24,0,7"
   43.60 +        : "=r" (result)
   43.61 +        : "r" (value), "0" (value >> 24));
   43.62 +    return result;
   43.63 +}
   43.64 +
   43.65 +#define __arch__swab16(x) ___arch__swab16(x)
   43.66 +#define __arch__swab32(x) ___arch__swab32(x)
   43.67 +
    43.68 +/* The same, but returns the converted value from the location pointed to by addr. */
   43.69 +#define __arch__swab16p(addr) ld_le16(addr)
   43.70 +#define __arch__swab32p(addr) ld_le32(addr)
   43.71 +
    43.72 +/* The same, but does the conversion in situ, i.e. puts the value back at addr. */
   43.73 +#define __arch__swab16s(addr) st_le16(addr,*addr)
   43.74 +#define __arch__swab32s(addr) st_le32(addr,*addr)
   43.75 +
   43.76 +#define __BYTEORDER_HAS_U64__
   43.77 +#ifndef __powerpc64__
   43.78 +#define __SWAB_64_THRU_32__
   43.79 +#endif /* __powerpc64__ */
   43.80 +
   43.81 +#include <xen/byteorder/big_endian.h>
   43.82 +
   43.83 +#endif /* _ASM_POWERPC_BYTEORDER_H */
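
The ld_le*/st_le* helpers above wrap PowerPC's byte-reversed load/store instructions (lhbrx/lwbrx and sthbrx/stwbrx), which access memory in little-endian order on a big-endian machine. A portable C model of what lwbrx computes -- illustration only, not part of the header:

    #include <stdint.h>
    #include <stdio.h>

    /* Read a 32-bit value least-significant byte first, i.e. as
     * little-endian, independent of host byte order. */
    static uint32_t ld_le32_model(const void *addr)
    {
        const uint8_t *p = addr;
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
        const uint8_t bytes[4] = { 0x78, 0x56, 0x34, 0x12 };
        printf("%#x\n", ld_le32_model(bytes));   /* 0x12345678 on any host */
        return 0;
    }
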
    44.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    44.2 +++ b/xen/include/asm-x86/byteorder.h	Wed Jan 17 21:31:22 2007 -0700
    44.3 @@ -0,0 +1,36 @@
    44.4 +#ifndef __ASM_X86_BYTEORDER_H__
    44.5 +#define __ASM_X86_BYTEORDER_H__
    44.6 +
    44.7 +#include <asm/types.h>
    44.8 +#include <xen/compiler.h>
    44.9 +
   44.10 +static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
   44.11 +{
   44.12 +    asm("bswap %0" : "=r" (x) : "0" (x));
   44.13 +    return x;
   44.14 +}
   44.15 +
   44.16 +static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
   44.17 +{ 
   44.18 +    union { 
   44.19 +        struct { __u32 a,b; } s;
   44.20 +        __u64 u;
   44.21 +    } v;
   44.22 +    v.u = val;
   44.23 +    asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" 
   44.24 +        : "=r" (v.s.a), "=r" (v.s.b) 
   44.25 +        : "0" (v.s.a), "1" (v.s.b)); 
   44.26 +    return v.u;	
   44.27 +} 
   44.28 +
    44.29 +/* Do not define swab16.  GCC is smart enough to recognize the "C" version
    44.30 +   and convert it into a rotation or exchange.  */
   44.31 +
   44.32 +#define __arch__swab64(x) ___arch__swab64(x)
   44.33 +#define __arch__swab32(x) ___arch__swab32(x)
   44.34 +
   44.35 +#define __BYTEORDER_HAS_U64__
   44.36 +
   44.37 +#include <xen/byteorder/little_endian.h>
   44.38 +
   44.39 +#endif /* __ASM_X86_BYTEORDER_H__ */
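
32-bit x86 has no single 64-bit bswap, so ___arch__swab64() above byte-swaps each 32-bit half and exchanges them (bswapl ; bswapl ; xchgl). The same computation in plain C, as a sketch:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t bswap32(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
               ((x << 8) & 0x00ff0000u) | (x << 24);
    }

    /* Byte-swap both halves, then swap their positions. */
    static uint64_t swab64_via_halves(uint64_t v)
    {
        uint32_t lo = (uint32_t)v, hi = (uint32_t)(v >> 32);
        return ((uint64_t)bswap32(lo) << 32) | bswap32(hi);
    }

    int main(void)
    {
        printf("%#llx\n",
               (unsigned long long)swab64_via_halves(0x0102030405060708ULL));
        /* prints 0x807060504030201 */
        return 0;
    }
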
    45.1 --- a/xen/include/asm-x86/x86_emulate.h	Wed Jan 17 19:55:48 2007 -0700
    45.2 +++ b/xen/include/asm-x86/x86_emulate.h	Wed Jan 17 21:31:22 2007 -0700
    45.3 @@ -150,8 +150,11 @@ struct x86_emulate_ctxt
    45.4      /* Register state before/after emulation. */
    45.5      struct cpu_user_regs *regs;
    45.6  
    45.7 -    /* Default address size in current execution mode (2, 4, or 8). */
    45.8 -    int                   address_bytes;
    45.9 +    /* Default address size in current execution mode (16, 32, or 64). */
   45.10 +    unsigned int addr_size;
   45.11 +
   45.12 +    /* Stack pointer width in bits (16, 32 or 64). */
   45.13 +    unsigned int sp_size;
   45.14  };
   45.15  
   45.16  /*
    46.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    46.2 +++ b/xen/include/xen/byteorder/big_endian.h	Wed Jan 17 21:31:22 2007 -0700
    46.3 @@ -0,0 +1,106 @@
    46.4 +#ifndef __XEN_BYTEORDER_BIG_ENDIAN_H__
    46.5 +#define __XEN_BYTEORDER_BIG_ENDIAN_H__
    46.6 +
    46.7 +#ifndef __BIG_ENDIAN
    46.8 +#define __BIG_ENDIAN 4321
    46.9 +#endif
   46.10 +#ifndef __BIG_ENDIAN_BITFIELD
   46.11 +#define __BIG_ENDIAN_BITFIELD
   46.12 +#endif
   46.13 +
   46.14 +#include <xen/types.h>
   46.15 +#include <xen/byteorder/swab.h>
   46.16 +
   46.17 +#define __constant_htonl(x) ((__force __be32)(__u32)(x))
   46.18 +#define __constant_ntohl(x) ((__force __u32)(__be32)(x))
   46.19 +#define __constant_htons(x) ((__force __be16)(__u16)(x))
   46.20 +#define __constant_ntohs(x) ((__force __u16)(__be16)(x))
   46.21 +#define __constant_cpu_to_le64(x) ((__force __le64)___constant_swab64((x)))
   46.22 +#define __constant_le64_to_cpu(x) ___constant_swab64((__force __u64)(__le64)(x))
   46.23 +#define __constant_cpu_to_le32(x) ((__force __le32)___constant_swab32((x)))
   46.24 +#define __constant_le32_to_cpu(x) ___constant_swab32((__force __u32)(__le32)(x))
   46.25 +#define __constant_cpu_to_le16(x) ((__force __le16)___constant_swab16((x)))
   46.26 +#define __constant_le16_to_cpu(x) ___constant_swab16((__force __u16)(__le16)(x))
   46.27 +#define __constant_cpu_to_be64(x) ((__force __be64)(__u64)(x))
   46.28 +#define __constant_be64_to_cpu(x) ((__force __u64)(__be64)(x))
   46.29 +#define __constant_cpu_to_be32(x) ((__force __be32)(__u32)(x))
   46.30 +#define __constant_be32_to_cpu(x) ((__force __u32)(__be32)(x))
   46.31 +#define __constant_cpu_to_be16(x) ((__force __be16)(__u16)(x))
   46.32 +#define __constant_be16_to_cpu(x) ((__force __u16)(__be16)(x))
   46.33 +#define __cpu_to_le64(x) ((__force __le64)__swab64((x)))
   46.34 +#define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x))
   46.35 +#define __cpu_to_le32(x) ((__force __le32)__swab32((x)))
   46.36 +#define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
   46.37 +#define __cpu_to_le16(x) ((__force __le16)__swab16((x)))
   46.38 +#define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x))
   46.39 +#define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
   46.40 +#define __be64_to_cpu(x) ((__force __u64)(__be64)(x))
   46.41 +#define __cpu_to_be32(x) ((__force __be32)(__u32)(x))
   46.42 +#define __be32_to_cpu(x) ((__force __u32)(__be32)(x))
   46.43 +#define __cpu_to_be16(x) ((__force __be16)(__u16)(x))
   46.44 +#define __be16_to_cpu(x) ((__force __u16)(__be16)(x))
   46.45 +
   46.46 +static inline __le64 __cpu_to_le64p(const __u64 *p)
   46.47 +{
   46.48 +    return (__force __le64)__swab64p(p);
   46.49 +}
   46.50 +static inline __u64 __le64_to_cpup(const __le64 *p)
   46.51 +{
   46.52 +    return __swab64p((__u64 *)p);
   46.53 +}
   46.54 +static inline __le32 __cpu_to_le32p(const __u32 *p)
   46.55 +{
   46.56 +    return (__force __le32)__swab32p(p);
   46.57 +}
   46.58 +static inline __u32 __le32_to_cpup(const __le32 *p)
   46.59 +{
   46.60 +    return __swab32p((__u32 *)p);
   46.61 +}
   46.62 +static inline __le16 __cpu_to_le16p(const __u16 *p)
   46.63 +{
   46.64 +    return (__force __le16)__swab16p(p);
   46.65 +}
   46.66 +static inline __u16 __le16_to_cpup(const __le16 *p)
   46.67 +{
   46.68 +    return __swab16p((__u16 *)p);
   46.69 +}
   46.70 +static inline __be64 __cpu_to_be64p(const __u64 *p)
   46.71 +{
   46.72 +    return (__force __be64)*p;
   46.73 +}
   46.74 +static inline __u64 __be64_to_cpup(const __be64 *p)
   46.75 +{
   46.76 +    return (__force __u64)*p;
   46.77 +}
   46.78 +static inline __be32 __cpu_to_be32p(const __u32 *p)
   46.79 +{
   46.80 +    return (__force __be32)*p;
   46.81 +}
   46.82 +static inline __u32 __be32_to_cpup(const __be32 *p)
   46.83 +{
   46.84 +    return (__force __u32)*p;
   46.85 +}
   46.86 +static inline __be16 __cpu_to_be16p(const __u16 *p)
   46.87 +{
   46.88 +    return (__force __be16)*p;
   46.89 +}
   46.90 +static inline __u16 __be16_to_cpup(const __be16 *p)
   46.91 +{
   46.92 +    return (__force __u16)*p;
   46.93 +}
   46.94 +#define __cpu_to_le64s(x) __swab64s((x))
   46.95 +#define __le64_to_cpus(x) __swab64s((x))
   46.96 +#define __cpu_to_le32s(x) __swab32s((x))
   46.97 +#define __le32_to_cpus(x) __swab32s((x))
   46.98 +#define __cpu_to_le16s(x) __swab16s((x))
   46.99 +#define __le16_to_cpus(x) __swab16s((x))
  46.100 +#define __cpu_to_be64s(x) do {} while (0)
  46.101 +#define __be64_to_cpus(x) do {} while (0)
  46.102 +#define __cpu_to_be32s(x) do {} while (0)
  46.103 +#define __be32_to_cpus(x) do {} while (0)
  46.104 +#define __cpu_to_be16s(x) do {} while (0)
  46.105 +#define __be16_to_cpus(x) do {} while (0)
  46.106 +
  46.107 +#include <xen/byteorder/generic.h>
  46.108 +
  46.109 +#endif /* __XEN_BYTEORDER_BIG_ENDIAN_H__ */
    47.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    47.2 +++ b/xen/include/xen/byteorder/generic.h	Wed Jan 17 21:31:22 2007 -0700
    47.3 @@ -0,0 +1,68 @@
    47.4 +#ifndef __XEN_BYTEORDER_GENERIC_H__
    47.5 +#define __XEN_BYTEORDER_GENERIC_H__
    47.6 +
    47.7 +/*
    47.8 + * Generic Byte-reordering support
    47.9 + *
   47.10 + * The "... p" macros, like le64_to_cpup, can be used with pointers
   47.11 + * to unaligned data, but there will be a performance penalty on 
   47.12 + * some architectures.  Use get_unaligned for unaligned data.
   47.13 + *
   47.14 + * The following macros are to be defined by <asm/byteorder.h>:
   47.15 + *
    47.16 + * Conversion of XX-bit integers (16-, 32- or 64-bit)
   47.17 + * between native CPU format and little/big endian format
   47.18 + * 64-bit stuff only defined for proper architectures
   47.19 + *     cpu_to_[bl]eXX(__uXX x)
   47.20 + *     [bl]eXX_to_cpu(__uXX x)
   47.21 + *
   47.22 + * The same, but takes a pointer to the value to convert
   47.23 + *     cpu_to_[bl]eXXp(__uXX x)
   47.24 + *     [bl]eXX_to_cpup(__uXX x)
   47.25 + *
   47.26 + * The same, but change in situ
   47.27 + *     cpu_to_[bl]eXXs(__uXX x)
   47.28 + *     [bl]eXX_to_cpus(__uXX x)
   47.29 + *
   47.30 + * See asm-foo/byteorder.h for examples of how to provide
   47.31 + * architecture-optimized versions
   47.32 + */
   47.33 +
   47.34 +#define cpu_to_le64 __cpu_to_le64
   47.35 +#define le64_to_cpu __le64_to_cpu
   47.36 +#define cpu_to_le32 __cpu_to_le32
   47.37 +#define le32_to_cpu __le32_to_cpu
   47.38 +#define cpu_to_le16 __cpu_to_le16
   47.39 +#define le16_to_cpu __le16_to_cpu
   47.40 +#define cpu_to_be64 __cpu_to_be64
   47.41 +#define be64_to_cpu __be64_to_cpu
   47.42 +#define cpu_to_be32 __cpu_to_be32
   47.43 +#define be32_to_cpu __be32_to_cpu
   47.44 +#define cpu_to_be16 __cpu_to_be16
   47.45 +#define be16_to_cpu __be16_to_cpu
   47.46 +#define cpu_to_le64p __cpu_to_le64p
   47.47 +#define le64_to_cpup __le64_to_cpup
   47.48 +#define cpu_to_le32p __cpu_to_le32p
   47.49 +#define le32_to_cpup __le32_to_cpup
   47.50 +#define cpu_to_le16p __cpu_to_le16p
   47.51 +#define le16_to_cpup __le16_to_cpup
   47.52 +#define cpu_to_be64p __cpu_to_be64p
   47.53 +#define be64_to_cpup __be64_to_cpup
   47.54 +#define cpu_to_be32p __cpu_to_be32p
   47.55 +#define be32_to_cpup __be32_to_cpup
   47.56 +#define cpu_to_be16p __cpu_to_be16p
   47.57 +#define be16_to_cpup __be16_to_cpup
   47.58 +#define cpu_to_le64s __cpu_to_le64s
   47.59 +#define le64_to_cpus __le64_to_cpus
   47.60 +#define cpu_to_le32s __cpu_to_le32s
   47.61 +#define le32_to_cpus __le32_to_cpus
   47.62 +#define cpu_to_le16s __cpu_to_le16s
   47.63 +#define le16_to_cpus __le16_to_cpus
   47.64 +#define cpu_to_be64s __cpu_to_be64s
   47.65 +#define be64_to_cpus __be64_to_cpus
   47.66 +#define cpu_to_be32s __cpu_to_be32s
   47.67 +#define be32_to_cpus __be32_to_cpus
   47.68 +#define cpu_to_be16s __cpu_to_be16s
   47.69 +#define be16_to_cpus __be16_to_cpus
   47.70 +
   47.71 +#endif /* __XEN_BYTEORDER_GENERIC_H__ */
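
The unprefixed names defined here are the ones callers use; the endian-specific headers decide which direction actually swaps. A hedged usage sketch -- the struct, field names and values are invented, and it assumes a Xen translation unit where <asm/byteorder.h> resolves to one of the headers in this changeset:

    #include <asm/byteorder.h>

    /* A fixed-format header: storage order is part of the format, so each
     * field is annotated and converted explicitly. */
    struct demo_hdr {
        __le32 magic;   /* stored little-endian on every architecture */
        __be16 port;    /* stored big-endian (network order) */
    };

    static void demo_fill(struct demo_hdr *h)
    {
        h->magic = cpu_to_le32(0xfeedbeef); /* identity on x86, swap on PowerPC */
        h->port  = cpu_to_be16(5900);       /* swap on x86, identity on PowerPC */
    }

    static unsigned int demo_port(const struct demo_hdr *h)
    {
        return be16_to_cpu(h->port);        /* back to host order for use */
    }
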
    48.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    48.2 +++ b/xen/include/xen/byteorder/little_endian.h	Wed Jan 17 21:31:22 2007 -0700
    48.3 @@ -0,0 +1,106 @@
    48.4 +#ifndef __XEN_BYTEORDER_LITTLE_ENDIAN_H__
    48.5 +#define __XEN_BYTEORDER_LITTLE_ENDIAN_H__
    48.6 +
    48.7 +#ifndef __LITTLE_ENDIAN
    48.8 +#define __LITTLE_ENDIAN 1234
    48.9 +#endif
   48.10 +#ifndef __LITTLE_ENDIAN_BITFIELD
   48.11 +#define __LITTLE_ENDIAN_BITFIELD
   48.12 +#endif
   48.13 +
   48.14 +#include <xen/types.h>
   48.15 +#include <xen/byteorder/swab.h>
   48.16 +
   48.17 +#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
   48.18 +#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
   48.19 +#define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
   48.20 +#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
   48.21 +#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
   48.22 +#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
   48.23 +#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
   48.24 +#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
   48.25 +#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
   48.26 +#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
   48.27 +#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
   48.28 +#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
   48.29 +#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
   48.30 +#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x))
   48.31 +#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
   48.32 +#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x))
   48.33 +#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
   48.34 +#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
   48.35 +#define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
   48.36 +#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
   48.37 +#define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
   48.38 +#define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
   48.39 +#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
   48.40 +#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
   48.41 +#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
   48.42 +#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
   48.43 +#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
   48.44 +#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
   48.45 +
   48.46 +static inline __le64 __cpu_to_le64p(const __u64 *p)
   48.47 +{
   48.48 +    return (__force __le64)*p;
   48.49 +}
   48.50 +static inline __u64 __le64_to_cpup(const __le64 *p)
   48.51 +{
   48.52 +    return (__force __u64)*p;
   48.53 +}
   48.54 +static inline __le32 __cpu_to_le32p(const __u32 *p)
   48.55 +{
   48.56 +    return (__force __le32)*p;
   48.57 +}
   48.58 +static inline __u32 __le32_to_cpup(const __le32 *p)
   48.59 +{
   48.60 +    return (__force __u32)*p;
   48.61 +}
   48.62 +static inline __le16 __cpu_to_le16p(const __u16 *p)
   48.63 +{
   48.64 +    return (__force __le16)*p;
   48.65 +}
   48.66 +static inline __u16 __le16_to_cpup(const __le16 *p)
   48.67 +{
   48.68 +    return (__force __u16)*p;
   48.69 +}
   48.70 +static inline __be64 __cpu_to_be64p(const __u64 *p)
   48.71 +{
   48.72 +    return (__force __be64)__swab64p(p);
   48.73 +}
   48.74 +static inline __u64 __be64_to_cpup(const __be64 *p)
   48.75 +{
   48.76 +    return __swab64p((__u64 *)p);
   48.77 +}
   48.78 +static inline __be32 __cpu_to_be32p(const __u32 *p)
   48.79 +{
   48.80 +    return (__force __be32)__swab32p(p);
   48.81 +}
   48.82 +static inline __u32 __be32_to_cpup(const __be32 *p)
   48.83 +{
   48.84 +    return __swab32p((__u32 *)p);
   48.85 +}
   48.86 +static inline __be16 __cpu_to_be16p(const __u16 *p)
   48.87 +{
   48.88 +    return (__force __be16)__swab16p(p);
   48.89 +}
   48.90 +static inline __u16 __be16_to_cpup(const __be16 *p)
   48.91 +{
   48.92 +    return __swab16p((__u16 *)p);
   48.93 +}
   48.94 +#define __cpu_to_le64s(x) do {} while (0)
   48.95 +#define __le64_to_cpus(x) do {} while (0)
   48.96 +#define __cpu_to_le32s(x) do {} while (0)
   48.97 +#define __le32_to_cpus(x) do {} while (0)
   48.98 +#define __cpu_to_le16s(x) do {} while (0)
   48.99 +#define __le16_to_cpus(x) do {} while (0)
  48.100 +#define __cpu_to_be64s(x) __swab64s((x))
  48.101 +#define __be64_to_cpus(x) __swab64s((x))
  48.102 +#define __cpu_to_be32s(x) __swab32s((x))
  48.103 +#define __be32_to_cpus(x) __swab32s((x))
  48.104 +#define __cpu_to_be16s(x) __swab16s((x))
  48.105 +#define __be16_to_cpus(x) __swab16s((x))
  48.106 +
  48.107 +#include <xen/byteorder/generic.h>
  48.108 +
  48.109 +#endif /* __XEN_BYTEORDER_LITTLE_ENDIAN_H__ */
    49.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    49.2 +++ b/xen/include/xen/byteorder/swab.h	Wed Jan 17 21:31:22 2007 -0700
    49.3 @@ -0,0 +1,185 @@
    49.4 +#ifndef __XEN_BYTEORDER_SWAB_H__
    49.5 +#define __XEN_BYTEORDER_SWAB_H__
    49.6 +
    49.7 +/*
    49.8 + * Byte-swapping, independently from CPU endianness
    49.9 + *     swabXX[ps]?(foo)
   49.10 + *
   49.11 + * Francois-Rene Rideau <fare@tunes.org> 19971205
   49.12 + *    separated swab functions from cpu_to_XX,
   49.13 + *    to clean up support for bizarre-endian architectures.
   49.14 + */
   49.15 +
   49.16 +#include <xen/compiler.h>
   49.17 +
    49.18 +/* Casts are necessary for constants, because we never know for sure
   49.19 + * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
   49.20 + */
   49.21 +#define ___swab16(x)                                    \
   49.22 +({                                                      \
   49.23 +    __u16 __x = (x);                                    \
   49.24 +    ((__u16)(                                           \
   49.25 +        (((__u16)(__x) & (__u16)0x00ffU) << 8) |        \
   49.26 +        (((__u16)(__x) & (__u16)0xff00U) >> 8) ));      \
   49.27 +})
   49.28 +
   49.29 +#define ___swab32(x)                                            \
   49.30 +({                                                              \
   49.31 +    __u32 __x = (x);                                            \
   49.32 +    ((__u32)(                                                   \
   49.33 +        (((__u32)(__x) & (__u32)0x000000ffUL) << 24) |          \
   49.34 +        (((__u32)(__x) & (__u32)0x0000ff00UL) <<  8) |          \
   49.35 +        (((__u32)(__x) & (__u32)0x00ff0000UL) >>  8) |          \
   49.36 +        (((__u32)(__x) & (__u32)0xff000000UL) >> 24) ));        \
   49.37 +})
   49.38 +
   49.39 +#define ___swab64(x)                                                       \
   49.40 +({                                                                         \
   49.41 +    __u64 __x = (x);                                                       \
   49.42 +    ((__u64)(                                                              \
   49.43 +        (__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) |     \
   49.44 +        (__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) |     \
   49.45 +        (__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) |     \
   49.46 +        (__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) <<  8) |     \
    49.47 +        (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >>  8) |    \
   49.48 +        (__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) |     \
   49.49 +        (__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) |     \
   49.50 +        (__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) ));   \
   49.51 +})
   49.52 +
   49.53 +#define ___constant_swab16(x)                   \
   49.54 +    ((__u16)(                                   \
   49.55 +        (((__u16)(x) & (__u16)0x00ffU) << 8) |  \
   49.56 +        (((__u16)(x) & (__u16)0xff00U) >> 8) ))
   49.57 +#define ___constant_swab32(x)                           \
   49.58 +    ((__u32)(                                           \
   49.59 +        (((__u32)(x) & (__u32)0x000000ffUL) << 24) |    \
   49.60 +        (((__u32)(x) & (__u32)0x0000ff00UL) <<  8) |    \
   49.61 +        (((__u32)(x) & (__u32)0x00ff0000UL) >>  8) |    \
   49.62 +        (((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
   49.63 +#define ___constant_swab64(x)                                            \
   49.64 +    ((__u64)(                                                            \
   49.65 +        (__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) |     \
   49.66 +        (__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) |     \
   49.67 +        (__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) |     \
   49.68 +        (__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) |     \
    49.69 +        (__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) |    \
   49.70 +        (__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) |     \
   49.71 +        (__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) |     \
   49.72 +        (__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
   49.73 +
   49.74 +/*
   49.75 + * provide defaults when no architecture-specific optimization is detected
   49.76 + */
   49.77 +#ifndef __arch__swab16
   49.78 +#  define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
   49.79 +#endif
   49.80 +#ifndef __arch__swab32
   49.81 +#  define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
   49.82 +#endif
   49.83 +#ifndef __arch__swab64
   49.84 +#  define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
   49.85 +#endif
   49.86 +
   49.87 +#ifndef __arch__swab16p
   49.88 +#  define __arch__swab16p(x) __arch__swab16(*(x))
   49.89 +#endif
   49.90 +#ifndef __arch__swab32p
   49.91 +#  define __arch__swab32p(x) __arch__swab32(*(x))
   49.92 +#endif
   49.93 +#ifndef __arch__swab64p
   49.94 +#  define __arch__swab64p(x) __arch__swab64(*(x))
   49.95 +#endif
   49.96 +
   49.97 +#ifndef __arch__swab16s
   49.98 +#  define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
   49.99 +#endif
  49.100 +#ifndef __arch__swab32s
  49.101 +#  define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
  49.102 +#endif
  49.103 +#ifndef __arch__swab64s
  49.104 +#  define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
  49.105 +#endif
  49.106 +
  49.107 +
  49.108 +/*
  49.109 + * Allow constant folding
  49.110 + */
  49.111 +#if defined(__GNUC__) && defined(__OPTIMIZE__)
  49.112 +#  define __swab16(x) \
  49.113 +(__builtin_constant_p((__u16)(x)) ? \
  49.114 + ___swab16((x)) : \
  49.115 + __fswab16((x)))
  49.116 +#  define __swab32(x) \
  49.117 +(__builtin_constant_p((__u32)(x)) ? \
  49.118 + ___swab32((x)) : \
  49.119 + __fswab32((x)))
  49.120 +#  define __swab64(x) \
  49.121 +(__builtin_constant_p((__u64)(x)) ? \
  49.122 + ___swab64((x)) : \
  49.123 + __fswab64((x)))
  49.124 +#else
  49.125 +#  define __swab16(x) __fswab16(x)
  49.126 +#  define __swab32(x) __fswab32(x)
  49.127 +#  define __swab64(x) __fswab64(x)
  49.128 +#endif /* OPTIMIZE */
  49.129 +
  49.130 +
  49.131 +static inline __attribute_const__ __u16 __fswab16(__u16 x)
  49.132 +{
  49.133 +    return __arch__swab16(x);
  49.134 +}
  49.135 +static inline __u16 __swab16p(const __u16 *x)
  49.136 +{
  49.137 +    return __arch__swab16p(x);
  49.138 +}
  49.139 +static inline void __swab16s(__u16 *addr)
  49.140 +{
  49.141 +    __arch__swab16s(addr);
  49.142 +}
  49.143 +
  49.144 +static inline __attribute_const__ __u32 __fswab32(__u32 x)
  49.145 +{
  49.146 +    return __arch__swab32(x);
  49.147 +}
  49.148 +static inline __u32 __swab32p(const __u32 *x)
  49.149 +{
  49.150 +    return __arch__swab32p(x);
  49.151 +}
  49.152 +static inline void __swab32s(__u32 *addr)
  49.153 +{
  49.154 +    __arch__swab32s(addr);
  49.155 +}
  49.156 +
  49.157 +#ifdef __BYTEORDER_HAS_U64__
  49.158 +static inline __attribute_const__ __u64 __fswab64(__u64 x)
  49.159 +{
  49.160 +#  ifdef __SWAB_64_THRU_32__
  49.161 +    __u32 h = x >> 32;
   49.162 +    __u32 l = x & ((1ULL<<32)-1);
   49.163 +    return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
  49.164 +#  else
  49.165 +    return __arch__swab64(x);
  49.166 +#  endif
  49.167 +}
  49.168 +static inline __u64 __swab64p(const __u64 *x)
  49.169 +{
  49.170 +    return __arch__swab64p(x);
  49.171 +}
  49.172 +static inline void __swab64s(__u64 *addr)
  49.173 +{
  49.174 +    __arch__swab64s(addr);
  49.175 +}
  49.176 +#endif /* __BYTEORDER_HAS_U64__ */
  49.177 +
  49.178 +#define swab16 __swab16
  49.179 +#define swab32 __swab32
  49.180 +#define swab64 __swab64
  49.181 +#define swab16p __swab16p
  49.182 +#define swab32p __swab32p
  49.183 +#define swab64p __swab64p
  49.184 +#define swab16s __swab16s
  49.185 +#define swab32s __swab32s
  49.186 +#define swab64s __swab64s
  49.187 +
  49.188 +#endif /* __XEN_BYTEORDER_SWAB_H__ */
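
The __builtin_constant_p() test above lets a swab of a compile-time constant fold to a literal, while runtime values go through the (possibly arch-optimised) __fswabXX helper. A self-contained illustration of the same trick, assuming GCC or Clang with optimisation enabled:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fswab32(uint32_t x)     /* runtime fallback */
    {
        return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
               ((x << 8) & 0x00ff0000u) | (x << 24);
    }

    #define const_swab32(x)                                     \
        ((uint32_t)((((uint32_t)(x) & 0x000000ffu) << 24) |     \
                    (((uint32_t)(x) & 0x0000ff00u) <<  8) |     \
                    (((uint32_t)(x) & 0x00ff0000u) >>  8) |     \
                    (((uint32_t)(x) & 0xff000000u) >> 24)))

    #define swab32(x)                                           \
        (__builtin_constant_p((uint32_t)(x)) ? const_swab32(x) : fswab32(x))

    int main(void)
    {
        volatile uint32_t v = 0x11223344;   /* defeats constant propagation */
        printf("%#x %#x\n", swab32(0x11223344), swab32(v));
        /* both print 0x44332211; only the first folds at compile time */
        return 0;
    }
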
    50.1 --- a/xen/include/xen/config.h	Wed Jan 17 19:55:48 2007 -0700
    50.2 +++ b/xen/include/xen/config.h	Wed Jan 17 21:31:22 2007 -0700
    50.3 @@ -63,6 +63,8 @@
    50.4  /* Linux 'checker' project. */
    50.5  #define __iomem
    50.6  #define __user
    50.7 +#define __force
    50.8 +#define __bitwise
    50.9  
   50.10  #ifndef __ASSEMBLY__
   50.11  
    51.1 --- a/xen/include/xen/types.h	Wed Jan 17 19:55:48 2007 -0700
    51.2 +++ b/xen/include/xen/types.h	Wed Jan 17 21:31:22 2007 -0700
    51.3 @@ -51,4 +51,11 @@ typedef         __s64           int64_t;
    51.4  struct domain;
    51.5  struct vcpu;
    51.6  
    51.7 +typedef __u16 __le16;
    51.8 +typedef __u16 __be16;
    51.9 +typedef __u32 __le32;
   51.10 +typedef __u32 __be32;
   51.11 +typedef __u64 __le64;
   51.12 +typedef __u64 __be64;
   51.13 +
   51.14  #endif /* __TYPES_H__ */