ia64/xen-unstable

changeset 3088:7ef582b6c9c4

bitkeeper revision 1.1159.183.6 (41a27165T3EZZHjH8ZvXRFHh8WInHQ)

Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-2.0-testing.bk
into labyrinth.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
author iap10@labyrinth.cl.cam.ac.uk
date Mon Nov 22 23:08:21 2004 +0000 (2004-11-22)
parents 3cfac953da9e f0d6fa2867c5
children f068615fc588
files .rootkeys linux-2.6.9-xen-sparse/Makefile xen/arch/ia64/privop.c xen/arch/ia64/process.c xen/arch/ia64/regionreg.c xen/arch/ia64/vcpu.c xen/arch/ia64/xenasm.S
line diff
     1.1 --- a/.rootkeys	Mon Nov 22 22:41:45 2004 +0000
     1.2 +++ b/.rootkeys	Mon Nov 22 23:08:21 2004 +0000
     1.3 @@ -127,6 +127,7 @@ 409ba2e7akOFqQUg6Qyg2s28xcXiMg linux-2.4
     1.4  3e5a4e683HKVU-sxtagrDasRB8eBVw linux-2.4.27-xen-sparse/mm/swapfile.c
     1.5  41180721bNns9Na7w1nJ0ZVt8bhUNA linux-2.4.27-xen-sparse/mm/vmalloc.c
     1.6  41505c57WAd5l1rlfCLNSCpx9J13vA linux-2.4.27-xen-sparse/net/core/skbuff.c
     1.7 +41a270bfmh73b5G5UMWfoUZhxoIjTg linux-2.6.9-xen-sparse/Makefile
     1.8  40f562372u3A7_kfbYYixPHJJxYUxA linux-2.6.9-xen-sparse/arch/xen/Kconfig
     1.9  40f56237utH41NPukqHksuNf29IC9A linux-2.6.9-xen-sparse/arch/xen/Kconfig.drivers
    1.10  40f56237penAAlWVBVDpeQZNFIg8CA linux-2.6.9-xen-sparse/arch/xen/Makefile
    1.11 @@ -640,6 +641,11 @@ 40e9808eHXvs_5eggj9McD_J90mhNw tools/xfr
    1.12  3f72f1bdJPsV3JCnBqs9ddL9tr6D2g xen/COPYING
    1.13  3ddb79bcbOVHh38VJzc97-JEGD4dJQ xen/Makefile
    1.14  3ddb79bcWnTwYsQRWl_PaneJfa6p0w xen/Rules.mk
    1.15 +41a26ebcqaSGVQ8qTMwpPwOJSJ7qSw xen/arch/ia64/privop.c
    1.16 +41a26ebc4BOHDUsT0TSnryPeV2xfRA xen/arch/ia64/process.c
    1.17 +41a26ebcJ30TFl1v2kR8rqpEBvOtVw xen/arch/ia64/regionreg.c
    1.18 +41a26ebc--sjlYZQxmIxyCx3jw70qA xen/arch/ia64/vcpu.c
    1.19 +41a26ebc4jSBGQOuyNIPDST58mNbBw xen/arch/ia64/xenasm.S
    1.20  3ddb79bcZbRBzT3elFWSX7u6NtMagQ xen/arch/x86/Makefile
    1.21  3ddb79bcBQF85CfLS4i1WGZ4oLLaCA xen/arch/x86/Rules.mk
    1.22  3e5636e5FAYZ5_vQnmgwFJfSdmO5Mw xen/arch/x86/acpi.c
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/linux-2.6.9-xen-sparse/Makefile	Mon Nov 22 23:08:21 2004 +0000
     2.3 @@ -0,0 +1,1293 @@
     2.4 +VERSION = 2
     2.5 +PATCHLEVEL = 6
     2.6 +SUBLEVEL = 9
     2.7 +EXTRAVERSION =
     2.8 +NAME=Zonked Quokka
     2.9 +
    2.10 +# *DOCUMENTATION*
    2.11 +# To see a list of typical targets execute "make help"
    2.12 +# More info can be located in ./README
    2.13 +# Comments in this file are targeted only to the developer, do not
    2.14 +# expect to learn how to build the kernel reading this file.
    2.15 +
    2.16 +# Do not print "Entering directory ..."
    2.17 +MAKEFLAGS += --no-print-directory
    2.18 +
    2.19 +# We are using a recursive build, so we need to do a little thinking
    2.20 +# to get the ordering right.
    2.21 +#
    2.22 +# Most importantly: sub-Makefiles should only ever modify files in
    2.23 +# their own directory. If in some directory we have a dependency on
     2.24 +# a file in another dir (which doesn't happen often, but is
    2.25 +# unavoidable when linking the built-in.o targets which finally
    2.26 +# turn into vmlinux), we will call a sub make in that other dir, and
    2.27 +# after that we are sure that everything which is in that other dir
    2.28 +# is now up to date.
    2.29 +#
    2.30 +# The only cases where we need to modify files which have global
    2.31 +# effects are thus separated out and done before the recursive
    2.32 +# descending is started. They are now explicitly listed as the
    2.33 +# prepare rule.
    2.34 +
    2.35 +# To put more focus on warnings, be less verbose as default
    2.36 +# Use 'make V=1' to see the full commands
    2.37 +
    2.38 +ifdef V
    2.39 +  ifeq ("$(origin V)", "command line")
    2.40 +    KBUILD_VERBOSE = $(V)
    2.41 +  endif
    2.42 +endif
    2.43 +ifndef KBUILD_VERBOSE
    2.44 +  KBUILD_VERBOSE = 0
    2.45 +endif
    2.46 +
    2.47 +# Call sparse as part of compilation of C files
    2.48 +# Use 'make C=1' to enable sparse checking
    2.49 +
    2.50 +ifdef C
    2.51 +  ifeq ("$(origin C)", "command line")
    2.52 +    KBUILD_CHECKSRC = $(C)
    2.53 +  endif
    2.54 +endif
    2.55 +ifndef KBUILD_CHECKSRC
    2.56 +  KBUILD_CHECKSRC = 0
    2.57 +endif
    2.58 +
    2.59 +# Use make M=dir to specify directory of external module to build
    2.60 +# Old syntax make ... SUBDIRS=$PWD is still supported
     2.61 +# Setting the environment variable KBUILD_EXTMOD takes precedence
    2.62 +ifdef SUBDIRS
    2.63 +  KBUILD_EXTMOD ?= $(SUBDIRS)
    2.64 +endif
    2.65 +ifdef M
    2.66 +  ifeq ("$(origin M)", "command line")
    2.67 +    KBUILD_EXTMOD := $(M)
    2.68 +  endif
    2.69 +endif
    2.70 +
    2.71 +
    2.72 +# kbuild supports saving output files in a separate directory.
     2.73 +# To locate output files in a separate directory, two syntaxes are supported.
    2.74 +# In both cases the working directory must be the root of the kernel src.
    2.75 +# 1) O=
    2.76 +# Use "make O=dir/to/store/output/files/"
    2.77 +# 
    2.78 +# 2) Set KBUILD_OUTPUT
    2.79 +# Set the environment variable KBUILD_OUTPUT to point to the directory
    2.80 +# where the output files shall be placed.
    2.81 +# export KBUILD_OUTPUT=dir/to/store/output/files/
    2.82 +# make
    2.83 +#
     2.84 +# The O= assignment takes precedence over the KBUILD_OUTPUT environment variable.
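
For instance, a hypothetical out-of-tree build (the output directory must
already exist, as the check further down enforces):

    mkdir -p /tmp/kbuild-out
    make O=/tmp/kbuild-out oldconfig
    make O=/tmp/kbuild-out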
    2.85 +
    2.86 +
    2.87 +# KBUILD_SRC is set on invocation of make in OBJ directory
    2.88 +# KBUILD_SRC is not intended to be used by the regular user (for now)
    2.89 +ifeq ($(KBUILD_SRC),)
    2.90 +
    2.91 +# OK, Make called in directory where kernel src resides
    2.92 +# Do we want to locate output files in a separate directory?
    2.93 +ifdef O
    2.94 +  ifeq ("$(origin O)", "command line")
    2.95 +    KBUILD_OUTPUT := $(O)
    2.96 +  endif
    2.97 +endif
    2.98 +
    2.99 +# That's our default target when none is given on the command line
   2.100 +.PHONY: _all
   2.101 +_all:
   2.102 +
   2.103 +ifneq ($(KBUILD_OUTPUT),)
   2.104 +# Invoke a second make in the output directory, passing relevant variables
   2.105 +# check that the output directory actually exists
   2.106 +saved-output := $(KBUILD_OUTPUT)
   2.107 +KBUILD_OUTPUT := $(shell cd $(KBUILD_OUTPUT) && /bin/pwd)
   2.108 +$(if $(KBUILD_OUTPUT),, \
   2.109 +     $(error output directory "$(saved-output)" does not exist))
   2.110 +
   2.111 +.PHONY: $(MAKECMDGOALS)
   2.112 +
   2.113 +$(filter-out _all,$(MAKECMDGOALS)) _all:
   2.114 +	$(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT)		\
   2.115 +	KBUILD_SRC=$(CURDIR)	     KBUILD_VERBOSE=$(KBUILD_VERBOSE)	\
    2.116 +	KBUILD_CHECKSRC=$(KBUILD_CHECKSRC) KBUILD_EXTMOD="$(KBUILD_EXTMOD)"	\
   2.117 +        -f $(CURDIR)/Makefile $@
   2.118 +
   2.119 +# Leave processing to above invocation of make
   2.120 +skip-makefile := 1
   2.121 +endif # ifneq ($(KBUILD_OUTPUT),)
   2.122 +endif # ifeq ($(KBUILD_SRC),)
   2.123 +
   2.124 +# We process the rest of the Makefile if this is the final invocation of make
   2.125 +ifeq ($(skip-makefile),)
   2.126 +
   2.127 +# If building an external module we do not care about the all: rule
    2.128 +# but instead _all depends on modules
   2.129 +.PHONY: all
   2.130 +ifeq ($(KBUILD_EXTMOD),)
   2.131 +_all: all
   2.132 +else
   2.133 +_all: modules
   2.134 +endif
   2.135 +
   2.136 +srctree		:= $(if $(KBUILD_SRC),$(KBUILD_SRC),$(CURDIR))
   2.137 +TOPDIR		:= $(srctree)
   2.138 +# FIXME - TOPDIR is obsolete, use srctree/objtree
   2.139 +objtree		:= $(CURDIR)
   2.140 +src		:= $(srctree)
   2.141 +obj		:= $(objtree)
   2.142 +
   2.143 +VPATH		:= $(srctree)
   2.144 +
   2.145 +export srctree objtree VPATH TOPDIR
   2.146 +
   2.147 +nullstring :=
   2.148 +space      := $(nullstring) # end of line
   2.149 +
   2.150 +# Take the contents of any files called localversion* and the config
   2.151 +# variable CONFIG_LOCALVERSION and append them to KERNELRELEASE. Be
   2.152 +# careful not to include files twice if building in the source
    2.153 +# directory. LOCALVERSION from the command line overrides all of this
   2.154 +
   2.155 +ifeq ($(objtree),$(srctree))
   2.156 +localversion-files := $(wildcard $(srctree)/localversion*)
   2.157 +else
   2.158 +localversion-files := $(wildcard $(objtree)/localversion* $(srctree)/localversion*)
   2.159 +endif
   2.160 +
   2.161 +LOCALVERSION = $(subst $(space),, \
   2.162 +	       $(shell cat /dev/null $(localversion-files)) \
   2.163 +	       $(subst ",,$(CONFIG_LOCALVERSION)))
   2.164 +
   2.165 +KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)$(LOCALVERSION)
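
A worked example of the two assignments above: with the 2.6.9 values at the
top of this file, an empty EXTRAVERSION, and a hypothetical localversion-xen
file containing "-xen", the space-stripping subst gives

    LOCALVERSION  -> -xen
    KERNELRELEASE -> 2.6.9-xen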
   2.166 +
   2.167 +# SUBARCH tells the usermode build what the underlying arch is.  That is set
   2.168 +# first, and if a usermode build is happening, the "ARCH=um" on the command
   2.169 +# line overrides the setting of ARCH below.  If a native build is happening,
   2.170 +# then ARCH is assigned, getting whatever value it gets normally, and 
   2.171 +# SUBARCH is subsequently ignored.
   2.172 +
   2.173 +SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
   2.174 +				  -e s/arm.*/arm/ -e s/sa110/arm/ \
   2.175 +				  -e s/s390x/s390/ -e s/parisc64/parisc/ )
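
For example, on a Pentium-class host the sed mapping above resolves as

    uname -m -> i686 -> SUBARCH = i386

while names with no matching pattern (e.g. ia64) pass through unchanged.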
   2.176 +
    2.177 +# Cross compiling and selecting a different set of gcc/bin-utils
   2.178 +# ---------------------------------------------------------------------------
   2.179 +#
   2.180 +# When performing cross compilation for other architectures ARCH shall be set
   2.181 +# to the target architecture. (See arch/* for the possibilities).
   2.182 +# ARCH can be set during invocation of make:
   2.183 +# make ARCH=ia64
   2.184 +# Another way is to have ARCH set in the environment.
   2.185 +# The default ARCH is the host where make is executed.
   2.186 +
    2.187 +# CROSS_COMPILE specifies the prefix used for all executables used
   2.188 +# during compilation. Only gcc and related bin-utils executables
   2.189 +# are prefixed with $(CROSS_COMPILE).
   2.190 +# CROSS_COMPILE can be set on the command line
   2.191 +# make CROSS_COMPILE=ia64-linux-
   2.192 +# Alternatively CROSS_COMPILE can be set in the environment.
   2.193 +# Default value for CROSS_COMPILE is not to prefix executables
   2.194 +# Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
   2.195 +
   2.196 +ARCH		?= $(SUBARCH)
   2.197 +CROSS_COMPILE	?=
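
Combining the two variables, a hypothetical ia64 cross build is invoked as

    make ARCH=ia64 CROSS_COMPILE=ia64-linux-

so that CC expands to ia64-linux-gcc, LD to ia64-linux-ld, and so on (see the
tool assignments further down).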
   2.198 +
   2.199 +# Architecture as present in compile.h
   2.200 +UTS_MACHINE := $(ARCH)
   2.201 +
   2.202 +# SHELL used by kbuild
   2.203 +CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
   2.204 +	  else if [ -x /bin/bash ]; then echo /bin/bash; \
   2.205 +	  else echo sh; fi ; fi)
   2.206 +
   2.207 +HOSTCC  	= gcc
   2.208 +HOSTCXX  	= g++
   2.209 +HOSTCFLAGS	= -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer
   2.210 +HOSTCXXFLAGS	= -O2
   2.211 +
   2.212 +# 	Decide whether to build built-in, modular, or both.
   2.213 +#	Normally, just do built-in.
   2.214 +
   2.215 +KBUILD_MODULES :=
   2.216 +KBUILD_BUILTIN := 1
   2.217 +
   2.218 +#	If we have only "make modules", don't compile built-in objects.
   2.219 +#	When we're building modules with modversions, we need to consider
   2.220 +#	the built-in objects during the descend as well, in order to
    2.221 +#	make sure the checksums are up to date before we record them.
   2.222 +
   2.223 +ifeq ($(MAKECMDGOALS),modules)
   2.224 +  KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1)
   2.225 +endif
   2.226 +
   2.227 +#	If we have "make <whatever> modules", compile modules
   2.228 +#	in addition to whatever we do anyway.
   2.229 +#	Just "make" or "make all" shall build modules as well
   2.230 +
   2.231 +ifneq ($(filter all _all modules,$(MAKECMDGOALS)),)
   2.232 +  KBUILD_MODULES := 1
   2.233 +endif
   2.234 +
   2.235 +ifeq ($(MAKECMDGOALS),)
   2.236 +  KBUILD_MODULES := 1
   2.237 +endif
   2.238 +
   2.239 +export KBUILD_MODULES KBUILD_BUILTIN KBUILD_VERBOSE
   2.240 +export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD
   2.241 +
   2.242 +# Beautify output
   2.243 +# ---------------------------------------------------------------------------
   2.244 +#
   2.245 +# Normally, we echo the whole command before executing it. By making
   2.246 +# that echo $($(quiet)$(cmd)), we now have the possibility to set
   2.247 +# $(quiet) to choose other forms of output instead, e.g.
   2.248 +#
   2.249 +#         quiet_cmd_cc_o_c = Compiling $(RELDIR)/$@
   2.250 +#         cmd_cc_o_c       = $(CC) $(c_flags) -c -o $@ $<
   2.251 +#
   2.252 +# If $(quiet) is empty, the whole command will be printed.
   2.253 +# If it is set to "quiet_", only the short version will be printed. 
    2.254 +# If it is set to "silent_", nothing will be printed at all, since
   2.255 +# the variable $(silent_cmd_cc_o_c) doesn't exist.
   2.256 +#
    2.257 +# A simple variant is to prefix commands with $(Q) - that's useful
   2.258 +# for commands that shall be hidden in non-verbose mode.
   2.259 +#
   2.260 +#	$(Q)ln $@ :<
   2.261 +#
   2.262 +# If KBUILD_VERBOSE equals 0 then the above command will be hidden.
   2.263 +# If KBUILD_VERBOSE equals 1 then the above command is displayed.
   2.264 +
   2.265 +ifeq ($(KBUILD_VERBOSE),1)
   2.266 +  quiet =
   2.267 +  Q =
   2.268 +else
   2.269 +  quiet=quiet_
   2.270 +  Q = @
   2.271 +endif
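
Tracing the cc_o_c example above through this conditional:

    $($(quiet)cmd_cc_o_c) -> $(quiet_cmd_cc_o_c)   (V=0: short form)
    $($(quiet)cmd_cc_o_c) -> $(cmd_cc_o_c)         (V=1: full command line)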
   2.272 +
   2.273 +# If the user is running make -s (silent mode), suppress echoing of
   2.274 +# commands
   2.275 +
   2.276 +ifneq ($(findstring s,$(MAKEFLAGS)),)
   2.277 +  quiet=silent_
   2.278 +endif
   2.279 +
   2.280 +export quiet Q KBUILD_VERBOSE
   2.281 +
   2.282 +######
   2.283 +# cc support functions to be used (only) in arch/$(ARCH)/Makefile
   2.284 +# See documentation in Documentation/kbuild/makefiles.txt
   2.285 +
   2.286 +# cc-option
    2.287 +# Usage: cflags-y += $(call cc-option, -march=winchip-c6, -march=i586)
   2.288 +
   2.289 +cc-option = $(shell if $(CC) $(CFLAGS) $(1) -S -o /dev/null -xc /dev/null \
   2.290 +             > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi ;)
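
A minimal usage sketch (the flag is illustrative): append an option only if
$(CC) accepts it, otherwise fall back to the (here empty) second argument:

    CFLAGS += $(call cc-option,-fno-stack-protector,)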
   2.291 +
   2.292 +# For backward compatibility
   2.293 +check_gcc = $(warning check_gcc is deprecated - use cc-option) \
   2.294 +            $(call cc-option, $(1),$(2))
   2.295 +
   2.296 +# cc-option-yn
    2.297 +# Usage: flag := $(call cc-option-yn, -march=winchip-c6)
   2.298 +cc-option-yn = $(shell if $(CC) $(CFLAGS) $(1) -S -o /dev/null -xc /dev/null \
   2.299 +                > /dev/null 2>&1; then echo "y"; else echo "n"; fi;)
   2.300 +
   2.301 +# cc-version
    2.302 +# Usage: gcc-ver := $(call cc-version,$(CC))
   2.303 +cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh \
   2.304 +              $(if $(1), $(1), $(CC)))
   2.305 +
   2.306 +
   2.307 +# Look for make include files relative to root of kernel src
   2.308 +MAKEFLAGS += --include-dir=$(srctree)
   2.309 +
    2.310 +# For maximum performance (+ possibly random breakage), uncomment
    2.311 +# the following:
   2.312 +
   2.313 +#MAKEFLAGS += -rR
   2.314 +
   2.315 +# Make variables (CC, etc...)
   2.316 +
   2.317 +AS		= $(CROSS_COMPILE)as
   2.318 +LD		= $(CROSS_COMPILE)ld
   2.319 +CC		= $(CROSS_COMPILE)gcc
   2.320 +CPP		= $(CC) -E
   2.321 +AR		= $(CROSS_COMPILE)ar
   2.322 +NM		= $(CROSS_COMPILE)nm
   2.323 +STRIP		= $(CROSS_COMPILE)strip
   2.324 +OBJCOPY		= $(CROSS_COMPILE)objcopy
   2.325 +OBJDUMP		= $(CROSS_COMPILE)objdump
   2.326 +AWK		= awk
   2.327 +GENKSYMS	= scripts/genksyms/genksyms
   2.328 +DEPMOD		= /sbin/depmod
   2.329 +KALLSYMS	= scripts/kallsyms
   2.330 +PERL		= perl
   2.331 +CHECK		= sparse
   2.332 +CHECKFLAGS     := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__
   2.333 +MODFLAGS	= -DMODULE
   2.334 +CFLAGS_MODULE   = $(MODFLAGS)
   2.335 +AFLAGS_MODULE   = $(MODFLAGS)
   2.336 +LDFLAGS_MODULE  = -r
   2.337 +CFLAGS_KERNEL	=
   2.338 +AFLAGS_KERNEL	=
   2.339 +
   2.340 +NOSTDINC_FLAGS  = -nostdinc -iwithprefix include
   2.341 +
   2.342 +# Use LINUXINCLUDE when you must reference the include/ directory.
   2.343 +# Needed to be compatible with the O= option
   2.344 +LINUXINCLUDE    := -Iinclude \
   2.345 +                   $(if $(KBUILD_SRC),-Iinclude2 -I$(srctree)/include)
   2.346 +
   2.347 +CPPFLAGS        := -D__KERNEL__ $(LINUXINCLUDE)
   2.348 +
   2.349 +CFLAGS 		:= -Wall -Wstrict-prototypes -Wno-trigraphs \
   2.350 +	  	   -fno-strict-aliasing -fno-common
   2.351 +AFLAGS		:= -D__ASSEMBLY__
   2.352 +
   2.353 +export	VERSION PATCHLEVEL SUBLEVEL EXTRAVERSION LOCALVERSION KERNELRELEASE \
   2.354 +	ARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC \
   2.355 +	CPP AR NM STRIP OBJCOPY OBJDUMP MAKE AWK GENKSYMS PERL UTS_MACHINE \
   2.356 +	HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
   2.357 +
   2.358 +export CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
   2.359 +export CFLAGS CFLAGS_KERNEL CFLAGS_MODULE 
   2.360 +export AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
   2.361 +
   2.362 +# When compiling out-of-tree modules, put MODVERDIR in the module
   2.363 +# tree rather than in the kernel tree. The kernel tree might
   2.364 +# even be read-only.
   2.365 +export MODVERDIR := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_versions
   2.366 +
   2.367 +# The temporary file to save gcc -MD generated dependencies must not
   2.368 +# contain a comma
   2.369 +comma := ,
   2.370 +depfile = $(subst $(comma),_,$(@D)/.$(@F).d)
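
Worked example: for a target fs/ext2/inode.o, $(@D) is fs/ext2 and $(@F) is
inode.o, so

    depfile -> fs/ext2/.inode.o.d

and any comma in the name would be rewritten to '_' by the subst.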
   2.371 +
   2.372 +# Files to ignore in find ... statements
   2.373 +
   2.374 +RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS -o -name .pc \) -prune -o
   2.375 +RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exclude CVS --exclude .pc
   2.376 +
   2.377 +# ===========================================================================
   2.378 +# Rules shared between *config targets and build targets
   2.379 +
   2.380 +# Basic helpers built in scripts/
   2.381 +.PHONY: scripts_basic
   2.382 +scripts_basic:
   2.383 +	$(Q)$(MAKE) $(build)=scripts/basic
   2.384 +
   2.385 +# To make sure we do not include .config for any of the *config targets
   2.386 +# catch them early, and hand them over to scripts/kconfig/Makefile
   2.387 +# It is allowed to specify more targets when calling make, including
   2.388 +# mixing *config targets and build targets.
   2.389 +# For example 'make oldconfig all'. 
    2.390 +# Detect when mixed targets are specified, and make a second invocation
   2.391 +# of make so .config is not included in this case either (for *config).
   2.392 +
   2.393 +no-dot-config-targets := clean mrproper distclean \
   2.394 +			 cscope TAGS tags help %docs check%
   2.395 +
   2.396 +config-targets := 0
   2.397 +mixed-targets  := 0
   2.398 +dot-config     := 1
   2.399 +
   2.400 +ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),)
   2.401 +	ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
   2.402 +		dot-config := 0
   2.403 +	endif
   2.404 +endif
   2.405 +
   2.406 +ifeq ($(KBUILD_EXTMOD),)
   2.407 +        ifneq ($(filter config %config,$(MAKECMDGOALS)),)
   2.408 +                config-targets := 1
   2.409 +                ifneq ($(filter-out config %config,$(MAKECMDGOALS)),)
   2.410 +                        mixed-targets := 1
   2.411 +                endif
   2.412 +        endif
   2.413 +endif
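
Walking the 'make oldconfig all' example through the filters above:

    $(filter config %config, oldconfig all)     -> oldconfig  (config-targets := 1)
    $(filter-out config %config, oldconfig all) -> all        (mixed-targets := 1)

so the mixed-targets branch below re-invokes make once per goal.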
   2.414 +
   2.415 +ifeq ($(mixed-targets),1)
   2.416 +# ===========================================================================
   2.417 +# We're called with mixed targets (*config and build targets).
   2.418 +# Handle them one by one.
   2.419 +
   2.420 +%:: FORCE
   2.421 +	$(Q)$(MAKE) -C $(srctree) KBUILD_SRC= $@
   2.422 +
   2.423 +else
   2.424 +ifeq ($(config-targets),1)
   2.425 +# ===========================================================================
   2.426 +# *config targets only - make sure prerequisites are updated, and descend
   2.427 +# in scripts/kconfig to make the *config target
   2.428 +
   2.429 +config: scripts_basic FORCE
   2.430 +	$(Q)$(MAKE) $(build)=scripts/kconfig $@
   2.431 +%config: scripts_basic FORCE
   2.432 +	$(Q)$(MAKE) $(build)=scripts/kconfig $@
   2.433 +
   2.434 +else
   2.435 +# ===========================================================================
   2.436 +# Build targets only - this includes vmlinux, arch specific targets, clean
   2.437 +# targets and others. In general all targets except *config targets.
   2.438 +
   2.439 +ifeq ($(KBUILD_EXTMOD),)
   2.440 +# Additional helpers built in scripts/
   2.441 +# Carefully list dependencies so we do not try to build scripts twice
    2.442 +# in parallel
   2.443 +.PHONY: scripts
   2.444 +scripts: scripts_basic include/config/MARKER
   2.445 +	$(Q)$(MAKE) $(build)=$(@)
   2.446 +
   2.447 +scripts_basic: include/linux/autoconf.h
   2.448 +
   2.449 +# Objects we will link into vmlinux / subdirs we need to visit
   2.450 +init-y		:= init/
   2.451 +drivers-y	:= drivers/ sound/
   2.452 +net-y		:= net/
   2.453 +libs-y		:= lib/
   2.454 +core-y		:= usr/
   2.455 +endif # KBUILD_EXTMOD
   2.456 +
   2.457 +ifeq ($(dot-config),1)
   2.458 +# In this section, we need .config
   2.459 +
   2.460 +# Read in dependencies to all Kconfig* files, make sure to run
   2.461 +# oldconfig if changes are detected.
   2.462 +-include .config.cmd
   2.463 +
   2.464 +include .config
   2.465 +
   2.466 +# If .config needs to be updated, it will be done via the dependency
   2.467 +# that autoconf has on .config.
   2.468 +# To avoid any implicit rule to kick in, define an empty command
   2.469 +.config: ;
   2.470 +
   2.471 +# If .config is newer than include/linux/autoconf.h, someone tinkered
   2.472 +# with it and forgot to run make oldconfig
   2.473 +include/linux/autoconf.h: .config
   2.474 +	$(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig
   2.475 +else
   2.476 +# Dummy target needed, because used as prerequisite
   2.477 +include/linux/autoconf.h: ;
   2.478 +endif
   2.479 +
   2.480 +# The all: target is the default when no target is given on the
   2.481 +# command line.
    2.482 +# This allows a user to issue only 'make' to build a kernel including modules
    2.483 +# Defaults to vmlinux, but it is usually overridden in the arch makefile
   2.484 +all: vmlinux
   2.485 +
   2.486 +ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
   2.487 +CFLAGS		+= -Os
   2.488 +else
   2.489 +CFLAGS		+= -O2
   2.490 +endif
   2.491 +
   2.492 +ifndef CONFIG_FRAME_POINTER
   2.493 +CFLAGS		+= -fomit-frame-pointer
   2.494 +endif
   2.495 +
   2.496 +ifdef CONFIG_DEBUG_INFO
   2.497 +CFLAGS		+= -g
   2.498 +endif
   2.499 +
   2.500 +# warn about C99 declaration after statement
   2.501 +CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
   2.502 +
   2.503 +include $(srctree)/arch/$(ARCH)/Makefile
   2.504 +
   2.505 +# Default kernel image to build when no specific target is given.
    2.506 +# KBUILD_IMAGE may be overruled on the command line or
    2.507 +# set in the environment
    2.508 +# Also any assignments in arch/$(ARCH)/Makefile take precedence over
   2.509 +# this default value
   2.510 +export KBUILD_IMAGE ?= vmlinux
   2.511 +
   2.512 +#
   2.513 +# INSTALL_PATH specifies where to place the updated kernel and system map
   2.514 +# images.  Uncomment if you want to place them anywhere other than root.
   2.515 +#
   2.516 +
   2.517 +#export	INSTALL_PATH=/boot
   2.518 +
   2.519 +#
   2.520 +# INSTALL_MOD_PATH specifies a prefix to MODLIB for module directory
   2.521 +# relocations required by build roots.  This is not defined in the
    2.522 +# makefile but the argument can be passed to make if needed.
   2.523 +#
   2.524 +
   2.525 +MODLIB	:= $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE)
   2.526 +export MODLIB
   2.527 +
   2.528 +
   2.529 +ifeq ($(KBUILD_EXTMOD),)
   2.530 +core-y		+= kernel/ mm/ fs/ ipc/ security/ crypto/
   2.531 +
   2.532 +vmlinux-dirs	:= $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
   2.533 +		     $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
   2.534 +		     $(net-y) $(net-m) $(libs-y) $(libs-m)))
   2.535 +
   2.536 +vmlinux-alldirs	:= $(sort $(vmlinux-dirs) $(patsubst %/,%,$(filter %/, \
   2.537 +		     $(init-n) $(init-) \
   2.538 +		     $(core-n) $(core-) $(drivers-n) $(drivers-) \
   2.539 +		     $(net-n)  $(net-)  $(libs-n)    $(libs-))))
   2.540 +
   2.541 +init-y		:= $(patsubst %/, %/built-in.o, $(init-y))
   2.542 +core-y		:= $(patsubst %/, %/built-in.o, $(core-y))
   2.543 +drivers-y	:= $(patsubst %/, %/built-in.o, $(drivers-y))
   2.544 +net-y		:= $(patsubst %/, %/built-in.o, $(net-y))
   2.545 +libs-y1		:= $(patsubst %/, %/lib.a, $(libs-y))
   2.546 +libs-y2		:= $(patsubst %/, %/built-in.o, $(libs-y))
   2.547 +libs-y		:= $(libs-y1) $(libs-y2)
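
Worked through with the defaults above, the directory lists become object
lists, e.g.

    init-y := init/  ->  init/built-in.o
    libs-y := lib/   ->  lib/lib.a lib/built-in.o

while vmlinux-dirs keeps the bare directory names for the recursive descent.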
   2.548 +
   2.549 +# Build vmlinux
   2.550 +# ---------------------------------------------------------------------------
    2.551 +# vmlinux is built from the objects selected by $(vmlinux-init) and
    2.552 +# $(vmlinux-main). Most are built-in.o files from top-level directories
    2.553 +# in the kernel tree, others are specified in arch/$(ARCH)/Makefile.
   2.554 +# Ordering when linking is important, and $(vmlinux-init) must be first.
   2.555 +#
   2.556 +# vmlinux
   2.557 +#   ^
   2.558 +#   |
   2.559 +#   +-< $(vmlinux-init)
   2.560 +#   |   +--< init/version.o + more
   2.561 +#   |
   2.562 +#   +--< $(vmlinux-main)
   2.563 +#   |    +--< driver/built-in.o mm/built-in.o + more
   2.564 +#   |
   2.565 +#   +-< kallsyms.o (see description in CONFIG_KALLSYMS section)
   2.566 +#
    2.567 +# vmlinux version (uname -v) cannot be updated during the normal
   2.568 +# descending-into-subdirs phase since we do not yet know if we need to
   2.569 +# update vmlinux.
   2.570 +# Therefore this step is delayed until just before final link of vmlinux -
   2.571 +# except in the kallsyms case where it is done just before adding the
   2.572 +# symbols to the kernel.
   2.573 +#
   2.574 +# System.map is generated to document addresses of all kernel symbols
   2.575 +
   2.576 +vmlinux-init := $(head-y) $(init-y)
   2.577 +vmlinux-main := $(core-y) $(libs-y) $(drivers-y) $(net-y)
   2.578 +vmlinux-all  := $(vmlinux-init) $(vmlinux-main)
   2.579 +vmlinux-lds  := arch/$(ARCH)/kernel/vmlinux.lds
   2.580 +
   2.581 +# Rule to link vmlinux - also used during CONFIG_KALLSYMS
   2.582 +# May be overridden by arch/$(ARCH)/Makefile
   2.583 +quiet_cmd_vmlinux__ ?= LD      $@
   2.584 +      cmd_vmlinux__ ?= $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) -o $@ \
   2.585 +      -T $(vmlinux-lds) $(vmlinux-init)                          \
   2.586 +      --start-group $(vmlinux-main) --end-group                  \
   2.587 +      $(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) FORCE ,$^)
   2.588 +
   2.589 +# Generate new vmlinux version
   2.590 +quiet_cmd_vmlinux_version = GEN     .version
   2.591 +      cmd_vmlinux_version = set -e;                     \
   2.592 +	. $(srctree)/scripts/mkversion > .tmp_version;	\
   2.593 +	mv -f .tmp_version .version;			\
   2.594 +	$(MAKE) $(build)=init
   2.595 +
   2.596 +# Generate System.map
   2.597 +quiet_cmd_sysmap = SYSMAP 
   2.598 +      cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap
   2.599 +
   2.600 +# Link of vmlinux
   2.601 +# If CONFIG_KALLSYMS is set .version is already updated
   2.602 +# Generate System.map and verify that the content is consistent
   2.603 +
   2.604 +define rule_vmlinux__
   2.605 +	$(if $(CONFIG_KALLSYMS),,+$(call cmd,vmlinux_version))
   2.606 +
   2.607 +	$(call cmd,vmlinux__)
   2.608 +	$(Q)echo 'cmd_$@ := $(cmd_vmlinux__)' > $(@D)/.$(@F).cmd
   2.609 +
   2.610 +	$(Q)$(if $($(quiet)cmd_sysmap),                 \
   2.611 +	  echo '  $($(quiet)cmd_sysmap) System.map' &&) \
   2.612 +	$(cmd_sysmap) $@ System.map;                    \
   2.613 +	if [ $$? -ne 0 ]; then                          \
   2.614 +		rm -f $@;                               \
   2.615 +		/bin/false;                             \
   2.616 +	fi;
   2.617 +	$(verify_kallsyms)
   2.618 +endef
   2.619 +
   2.620 +
   2.621 +ifdef CONFIG_KALLSYMS
   2.622 +# Generate section listing all symbols and add it into vmlinux $(kallsyms.o)
   2.623 +# It's a three stage process:
   2.624 +# o .tmp_vmlinux1 has all symbols and sections, but __kallsyms is
   2.625 +#   empty
   2.626 +#   Running kallsyms on that gives us .tmp_kallsyms1.o with
   2.627 +#   the right size - vmlinux version (uname -v) is updated during this step
   2.628 +# o .tmp_vmlinux2 now has a __kallsyms section of the right size,
   2.629 +#   but due to the added section, some addresses have shifted.
   2.630 +#   From here, we generate a correct .tmp_kallsyms2.o
   2.631 +# o The correct .tmp_kallsyms2.o is linked into the final vmlinux.
   2.632 +# o Verify that the System.map from vmlinux matches the map from
   2.633 +#   .tmp_vmlinux2, just in case we did not generate kallsyms correctly.
   2.634 +# o If CONFIG_KALLSYMS_EXTRA_PASS is set, do an extra pass using
   2.635 +#   .tmp_vmlinux3 and .tmp_kallsyms3.o.  This is only meant as a
   2.636 +#   temporary bypass to allow the kernel to be built while the
   2.637 +#   maintainers work out what went wrong with kallsyms.
   2.638 +
   2.639 +ifdef CONFIG_KALLSYMS_EXTRA_PASS
   2.640 +last_kallsyms := 3
   2.641 +else
   2.642 +last_kallsyms := 2
   2.643 +endif
   2.644 +
   2.645 +kallsyms.o := .tmp_kallsyms$(last_kallsyms).o
   2.646 +
   2.647 +define verify_kallsyms
   2.648 +	$(Q)$(if $($(quiet)cmd_sysmap),                       \
   2.649 +	  echo '  $($(quiet)cmd_sysmap) .tmp_System.map' &&)  \
   2.650 +	  $(cmd_sysmap) .tmp_vmlinux$(last_kallsyms) .tmp_System.map
   2.651 +	$(Q)cmp -s System.map .tmp_System.map ||              \
   2.652 +		(echo Inconsistent kallsyms data;             \
   2.653 +		 echo Try setting CONFIG_KALLSYMS_EXTRA_PASS; \
   2.654 +		 rm .tmp_kallsyms* ; /bin/false )
   2.655 +endef
   2.656 +
   2.657 +# Update vmlinux version before link
    2.658 +# Use + in front of this rule to silence the warning about make -j1
   2.659 +cmd_ksym_ld = $(cmd_vmlinux__)
   2.660 +define rule_ksym_ld
   2.661 +	+$(call cmd,vmlinux_version)
   2.662 +	$(call cmd,vmlinux__)
   2.663 +	$(Q)echo 'cmd_$@ := $(cmd_vmlinux__)' > $(@D)/.$(@F).cmd
   2.664 +endef
   2.665 +
   2.666 +# Generate .S file with all kernel symbols
   2.667 +quiet_cmd_kallsyms = KSYM    $@
   2.668 +      cmd_kallsyms = $(NM) -n $< | $(KALLSYMS) \
   2.669 +                     $(if $(CONFIG_KALLSYMS_ALL),--all-symbols) > $@
   2.670 +
   2.671 +.tmp_kallsyms1.o .tmp_kallsyms2.o .tmp_kallsyms3.o: %.o: %.S scripts FORCE
   2.672 +	$(call if_changed_dep,as_o_S)
   2.673 +
   2.674 +.tmp_kallsyms%.S: .tmp_vmlinux% $(KALLSYMS)
   2.675 +	$(call cmd,kallsyms)
   2.676 +
    2.677 +# .tmp_vmlinux1 must be complete except for kallsyms, so update vmlinux version
   2.678 +.tmp_vmlinux1: $(vmlinux-lds) $(vmlinux-all) FORCE
   2.679 +	$(call if_changed_rule,ksym_ld)
   2.680 +
   2.681 +.tmp_vmlinux2: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms1.o FORCE
   2.682 +	$(call if_changed,vmlinux__)
   2.683 +
   2.684 +.tmp_vmlinux3: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms2.o FORCE
   2.685 +	$(call if_changed,vmlinux__)
   2.686 +
   2.687 +# Needs to visit scripts/ before $(KALLSYMS) can be used.
   2.688 +$(KALLSYMS): scripts ;
   2.689 +
   2.690 +endif # ifdef CONFIG_KALLSYMS
   2.691 +
   2.692 +# vmlinux image - including updated kernel symbols
   2.693 +vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) $(kallsyms.o) FORCE
   2.694 +	$(call if_changed_rule,vmlinux__)
   2.695 +
   2.696 +# The actual objects are generated when descending, 
   2.697 +# make sure no implicit rule kicks in
   2.698 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
   2.699 +
   2.700 +# Handle descending into subdirectories listed in $(vmlinux-dirs)
   2.701 +# Preset locale variables to speed up the build process. Limit locale
   2.702 +# tweaks to this spot to avoid wrong language settings when running
   2.703 +# make menuconfig etc.
    2.704 +# Error messages still appear in the original language
   2.705 +
   2.706 +.PHONY: $(vmlinux-dirs)
   2.707 +$(vmlinux-dirs): prepare-all scripts
   2.708 +	$(Q)$(MAKE) $(build)=$@
   2.709 +
   2.710 +# Things we need to do before we recursively start building the kernel
   2.711 +# or the modules are listed in "prepare-all".
   2.712 +# A multi level approach is used. prepare1 is updated first, then prepare0.
   2.713 +# prepare-all is the collection point for the prepare targets.
   2.714 +
   2.715 +.PHONY: prepare-all prepare prepare0 prepare1 prepare2
   2.716 +
    2.717 +# prepare2 generates a Makefile to be placed in the output directory, if
    2.718 +# using a separate output directory. This allows convenient use
    2.719 +# of make in the output directory
   2.720 +prepare2:
   2.721 +	$(Q)if /usr/bin/env test ! $(srctree) -ef $(objtree); then \
   2.722 +	$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile              \
   2.723 +	    $(srctree) $(objtree) $(VERSION) $(PATCHLEVEL)         \
   2.724 +	    > $(objtree)/Makefile;                                 \
   2.725 +	fi
   2.726 +
   2.727 +# prepare1 is used to check if we are building in a separate output directory,
   2.728 +# and if so do:
   2.729 +# 1) Check that make has not been executed in the kernel src $(srctree)
   2.730 +# 2) Create the include2 directory, used for the second asm symlink
   2.731 +
   2.732 +prepare1: prepare2
   2.733 +ifneq ($(KBUILD_SRC),)
   2.734 +	@echo '  Using $(srctree) as source for kernel'
   2.735 +	$(Q)if [ -h $(srctree)/include/asm -o -f $(srctree)/.config ]; then \
   2.736 +		echo "  $(srctree) is not clean, please run 'make mrproper'";\
   2.737 +		echo "  in the '$(srctree)' directory.";\
   2.738 +		/bin/false; \
   2.739 +	fi;
   2.740 +	$(Q)if [ ! -d include2 ]; then mkdir -p include2; fi;
   2.741 +	$(Q)ln -fsn $(srctree)/include/asm-$(ARCH) include2/asm
   2.742 +endif
   2.743 +
   2.744 +prepare0: prepare1 include/linux/version.h include/asm include/config/MARKER
   2.745 +ifneq ($(KBUILD_MODULES),)
   2.746 +	$(Q)rm -rf $(MODVERDIR)
   2.747 +	$(Q)mkdir -p $(MODVERDIR)
   2.748 +endif
   2.749 +
   2.750 +# All the preparing..
   2.751 +prepare-all: prepare0 prepare
   2.752 +
   2.753 +#	Leave this as default for preprocessing vmlinux.lds.S, which is now
   2.754 +#	done in arch/$(ARCH)/kernel/Makefile
   2.755 +
   2.756 +export CPPFLAGS_vmlinux.lds += -P -C -U$(ARCH)
   2.757 +
   2.758 +# Single targets
   2.759 +# ---------------------------------------------------------------------------
   2.760 +
   2.761 +%.s: %.c scripts FORCE
   2.762 +	$(Q)$(MAKE) $(build)=$(@D) $@
   2.763 +%.i: %.c scripts FORCE
   2.764 +	$(Q)$(MAKE) $(build)=$(@D) $@
   2.765 +%.o: %.c scripts FORCE
   2.766 +	$(Q)$(MAKE) $(build)=$(@D) $@
   2.767 +%/:      scripts prepare FORCE
   2.768 +	$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) $(build)=$(@D)
   2.769 +%.lst: %.c scripts FORCE
   2.770 +	$(Q)$(MAKE) $(build)=$(@D) $@
   2.771 +%.s: %.S scripts FORCE
   2.772 +	$(Q)$(MAKE) $(build)=$(@D) $@
   2.773 +%.o: %.S scripts FORCE
   2.774 +	$(Q)$(MAKE) $(build)=$(@D) $@
   2.775 +
   2.776 +# 	FIXME: The asm symlink changes when $(ARCH) changes. That's
   2.777 +#	hard to detect, but I suppose "make mrproper" is a good idea
   2.778 +#	before switching between archs anyway.
   2.779 +
   2.780 +include/asm:
   2.781 +	@echo '  SYMLINK $@ -> include/asm-$(ARCH)'
   2.782 +	$(Q)if [ ! -d include ]; then mkdir -p include; fi;
   2.783 +	@ln -fsn asm-$(ARCH) $@
   2.784 +
   2.785 +# 	Split autoconf.h into include/linux/config/*
   2.786 +
   2.787 +include/config/MARKER: include/linux/autoconf.h
   2.788 +	@echo '  SPLIT   include/linux/autoconf.h -> include/config/*'
   2.789 +	@scripts/basic/split-include include/linux/autoconf.h include/config
   2.790 +	@touch $@
   2.791 +
   2.792 +# Generate some files
   2.793 +# ---------------------------------------------------------------------------
   2.794 +
   2.795 +# KERNELRELEASE can change from a few different places, meaning version.h
   2.796 +# needs to be updated, so this check is forced on all builds
   2.797 +
   2.798 +uts_len := 64
   2.799 +
   2.800 +define filechk_version.h
   2.801 +	if [ `echo -n "$(KERNELRELEASE)" | wc -c ` -gt $(uts_len) ]; then \
   2.802 +	  echo '"$(KERNELRELEASE)" exceeds $(uts_len) characters' >&2; \
   2.803 +	  exit 1; \
   2.804 +	fi; \
   2.805 +	(echo \#define UTS_RELEASE \"$(KERNELRELEASE)\"; \
   2.806 +	  echo \#define LINUX_VERSION_CODE `expr $(VERSION) \\* 65536 + $(PATCHLEVEL) \\* 256 + $(SUBLEVEL)`; \
   2.807 +	 echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))'; \
   2.808 +	)
   2.809 +endef
   2.810 +
   2.811 +include/linux/version.h: $(srctree)/Makefile FORCE
   2.812 +	$(call filechk,version.h)
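
For the 2.6.9 values at the top of this Makefile the generated header reads
(worked example; UTS_RELEASE also picks up any local version):

    #define UTS_RELEASE "2.6.9"
    #define LINUX_VERSION_CODE 132617    /* 2*65536 + 6*256 + 9 */

and KERNEL_VERSION(2,6,9) evaluates to the same 132617.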
   2.813 +
   2.814 +# ---------------------------------------------------------------------------
   2.815 +
   2.816 +.PHONY: depend dep
   2.817 +depend dep:
   2.818 +	@echo '*** Warning: make $@ is unnecessary now.'
   2.819 +
   2.820 +# ---------------------------------------------------------------------------
   2.821 +# Modules
   2.822 +
   2.823 +ifdef CONFIG_MODULES
   2.824 +
   2.825 +# 	By default, build modules as well
   2.826 +
   2.827 +all: modules
   2.828 +
   2.829 +#	Build modules
   2.830 +
   2.831 +.PHONY: modules
   2.832 +modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
   2.833 +	@echo '  Building modules, stage 2.';
   2.834 +	$(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modpost
   2.835 +
   2.836 +
   2.837 +# Target to prepare building external modules
   2.838 +.PHONY: modules_prepare
   2.839 +modules_prepare: prepare-all scripts
   2.840 +
   2.841 +# Target to install modules
   2.842 +.PHONY: modules_install
   2.843 +modules_install: _modinst_ _modinst_post
   2.844 +
   2.845 +.PHONY: _modinst_
   2.846 +_modinst_:
   2.847 +	@if [ -z "`$(DEPMOD) -V | grep module-init-tools`" ]; then \
   2.848 +		echo "Warning: you may need to install module-init-tools"; \
   2.849 +		echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt";\
   2.850 +		sleep 1; \
   2.851 +	fi
   2.852 +	@rm -rf $(MODLIB)/kernel
   2.853 +	@rm -f $(MODLIB)/source
   2.854 +	@mkdir -p $(MODLIB)/kernel
   2.855 +	@ln -s $(srctree) $(MODLIB)/source
   2.856 +	@if [ ! $(objtree) -ef  $(MODLIB)/build ]; then \
   2.857 +		rm -f $(MODLIB)/build ; \
   2.858 +		ln -s $(objtree) $(MODLIB)/build ; \
   2.859 +	fi
   2.860 +	$(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modinst
   2.861 +
   2.862 +# If System.map exists, run depmod.  This deliberately does not have a
   2.863 +# dependency on System.map since that would run the dependency tree on
   2.864 +# vmlinux.  This depmod is only for convenience to give the initial
   2.865 +# boot a modules.dep even before / is mounted read-write.  However the
   2.866 +# boot script depmod is the master version.
   2.867 +ifeq "$(strip $(INSTALL_MOD_PATH))" ""
   2.868 +depmod_opts	:=
   2.869 +else
   2.870 +depmod_opts	:= -b $(INSTALL_MOD_PATH) -r
   2.871 +endif
   2.872 +.PHONY: _modinst_post
   2.873 +_modinst_post: _modinst_
   2.874 +	if [ -r System.map ]; then $(DEPMOD) -ae -F System.map $(depmod_opts) $(KERNELRELEASE); fi
   2.875 +
   2.876 +else # CONFIG_MODULES
   2.877 +
   2.878 +# Modules not configured
   2.879 +# ---------------------------------------------------------------------------
   2.880 +
   2.881 +modules modules_install: FORCE
   2.882 +	@echo
   2.883 +	@echo "The present kernel configuration has modules disabled."
   2.884 +	@echo "Type 'make config' and enable loadable module support."
   2.885 +	@echo "Then build a kernel with module support enabled."
   2.886 +	@echo
   2.887 +	@exit 1
   2.888 +
   2.889 +endif # CONFIG_MODULES
   2.890 +
   2.891 +# Generate asm-offsets.h 
   2.892 +# ---------------------------------------------------------------------------
   2.893 +
   2.894 +define filechk_gen-asm-offsets
   2.895 +	(set -e; \
   2.896 +	 echo "#ifndef __ASM_OFFSETS_H__"; \
   2.897 +	 echo "#define __ASM_OFFSETS_H__"; \
   2.898 +	 echo "/*"; \
   2.899 +	 echo " * DO NOT MODIFY."; \
   2.900 +	 echo " *"; \
   2.901 +	 echo " * This file was generated by arch/$(ARCH)/Makefile"; \
   2.902 +	 echo " *"; \
   2.903 +	 echo " */"; \
   2.904 +	 echo ""; \
   2.905 +	 sed -ne "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"; \
   2.906 +	 echo ""; \
   2.907 +	 echo "#endif" )
   2.908 +endef
   2.909 +
   2.910 +
   2.911 +###
   2.912 +# Cleaning is done on three levels.
   2.913 +# make clean     Delete most generated files
   2.914 +#                Leave enough to build external modules
   2.915 +# make mrproper  Delete the current configuration, and all generated files
   2.916 +# make distclean Remove editor backup files, patch leftover files and the like
   2.917 +
   2.918 +# Directories & files removed with 'make clean'
   2.919 +CLEAN_DIRS  += $(MODVERDIR)
   2.920 +CLEAN_FILES +=	vmlinux System.map \
   2.921 +                .tmp_kallsyms* .tmp_version .tmp_vmlinux* .tmp_System.map
   2.922 +
   2.923 +# Directories & files removed with 'make mrproper'
   2.924 +MRPROPER_DIRS  += include/config include2
   2.925 +MRPROPER_FILES += .config .config.old include/asm .version \
   2.926 +                  include/linux/autoconf.h include/linux/version.h \
   2.927 +                  Module.symvers tags TAGS cscope* include/.asm-ignore
   2.928 +
   2.929 +# clean - Delete most, but leave enough to build external modules
   2.930 +#
   2.931 +clean: rm-dirs  := $(CLEAN_DIRS)
   2.932 +clean: rm-files := $(CLEAN_FILES)
   2.933 +clean-dirs      := $(addprefix _clean_,$(vmlinux-alldirs))
   2.934 +
   2.935 +.PHONY: $(clean-dirs) clean archclean
   2.936 +$(clean-dirs):
   2.937 +	$(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@)
   2.938 +
   2.939 +clean: archclean $(clean-dirs)
   2.940 +	$(call cmd,rmdirs)
   2.941 +	$(call cmd,rmfiles)
   2.942 +	@find . $(RCS_FIND_IGNORE) \
   2.943 +	 	\( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
   2.944 +		-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \) \
   2.945 +		-type f -print | xargs rm -f
   2.946 +
   2.947 +# mrproper - Delete all generated files, including .config
   2.948 +#
   2.949 +mrproper: rm-dirs  := $(wildcard $(MRPROPER_DIRS))
   2.950 +mrproper: rm-files := $(wildcard $(MRPROPER_FILES))
   2.951 +mrproper-dirs      := $(addprefix _mrproper_,Documentation/DocBook scripts)
   2.952 +
   2.953 +.PHONY: $(mrproper-dirs) mrproper archmrproper
   2.954 +$(mrproper-dirs):
   2.955 +	$(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@)
   2.956 +
   2.957 +mrproper: clean archmrproper $(mrproper-dirs)
   2.958 +	$(call cmd,rmdirs)
   2.959 +	$(call cmd,rmfiles)
   2.960 +
   2.961 +# distclean
   2.962 +#
   2.963 +.PHONY: distclean
   2.964 +
   2.965 +distclean: mrproper
   2.966 +	@find $(srctree) $(RCS_FIND_IGNORE) \
   2.967 +	 	\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
   2.968 +		-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
   2.969 +	 	-o -name '.*.rej' -o -size 0 \
   2.970 +		-o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
   2.971 +		-type f -print | xargs rm -f
   2.972 +
   2.973 +
   2.974 +# Packaging of the kernel to various formats
   2.975 +# ---------------------------------------------------------------------------
   2.976 +# rpm target kept for backward compatibility
   2.977 +package-dir	:= $(srctree)/scripts/package
   2.978 +
   2.979 +.PHONY: %-pkg rpm
   2.980 +
   2.981 +%pkg: FORCE
   2.982 +	$(Q)$(MAKE) -f $(package-dir)/Makefile $@
   2.983 +rpm: FORCE
   2.984 +	$(Q)$(MAKE) -f $(package-dir)/Makefile $@
   2.985 +
   2.986 +
   2.987 +# Brief documentation of the typical targets used
   2.988 +# ---------------------------------------------------------------------------
   2.989 +
   2.990 +boards := $(wildcard $(srctree)/arch/$(ARCH)/configs/*_defconfig)
   2.991 +boards := $(notdir $(boards))
   2.992 +
   2.993 +help:
   2.994 +	@echo  'Cleaning targets:'
   2.995 +	@echo  '  clean		  - remove most generated files but keep the config'
   2.996 +	@echo  '  mrproper	  - remove all generated files + config + various backup files'
   2.997 +	@echo  ''
   2.998 +	@echo  'Configuration targets:'
   2.999 +	@$(MAKE) -f $(srctree)/scripts/kconfig/Makefile help
  2.1000 +	@echo  ''
  2.1001 +	@echo  'Other generic targets:'
  2.1002 +	@echo  '  all		  - Build all targets marked with [*]'
  2.1003 +	@echo  '* vmlinux	  - Build the bare kernel'
  2.1004 +	@echo  '* modules	  - Build all modules'
  2.1005 +	@echo  '  modules_install - Install all modules'
  2.1006 +	@echo  '  dir/            - Build all files in dir and below'
  2.1007 +	@echo  '  dir/file.[ois]  - Build specified target only'
  2.1008 +	@echo  '  rpm		  - Build a kernel as an RPM package'
  2.1009 +	@echo  '  tags/TAGS	  - Generate tags file for editors'
  2.1010 +	@echo  '  cscope	  - Generate cscope index'
  2.1011 +	@echo  ''
   2.1012 +	@echo  'Static analysers:'
  2.1013 +	@echo  '  buildcheck      - List dangling references to vmlinux discarded sections'
  2.1014 +	@echo  '                    and init sections from non-init sections'
  2.1015 +	@echo  '  checkstack      - Generate a list of stack hogs'
  2.1016 +	@echo  '  namespacecheck  - Name space analysis on compiled kernel'
  2.1017 +	@echo  ''
  2.1018 +	@echo  'Kernel packaging:'
  2.1019 +	@$(MAKE) -f $(package-dir)/Makefile help
  2.1020 +	@echo  ''
  2.1021 +	@echo  'Documentation targets:'
  2.1022 +	@$(MAKE) -f $(srctree)/Documentation/DocBook/Makefile dochelp
  2.1023 +	@echo  ''
  2.1024 +	@echo  'Architecture specific targets ($(ARCH)):'
  2.1025 +	@$(if $(archhelp),$(archhelp),\
  2.1026 +		echo '  No architecture specific help defined for $(ARCH)')
  2.1027 +	@echo  ''
  2.1028 +	@$(if $(boards), \
  2.1029 +		$(foreach b, $(boards), \
  2.1030 +		printf "  %-24s - Build for %s\\n" $(b) $(subst _defconfig,,$(b));) \
  2.1031 +		echo '')
  2.1032 +
  2.1033 +	@echo  '  make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
  2.1034 +	@echo  '  make O=dir [targets] Locate all output files in "dir", including .config'
  2.1035 +	@echo  '  make C=1   [targets] Check all c source with $$CHECK (sparse)'
  2.1036 +	@echo  '  make C=2   [targets] Force check of all c source with $$CHECK (sparse)'
  2.1037 +	@echo  ''
  2.1038 +	@echo  'Execute "make" or "make all" to build all targets marked with [*] '
  2.1039 +	@echo  'For further info see the ./README file'
  2.1040 +
  2.1041 +
  2.1042 +# Documentation targets
  2.1043 +# ---------------------------------------------------------------------------
  2.1044 +%docs: scripts_basic FORCE
  2.1045 +	$(Q)$(MAKE) $(build)=Documentation/DocBook $@
  2.1046 +
  2.1047 +else # KBUILD_EXTMOD
  2.1048 +
  2.1049 +###
  2.1050 +# External module support.
   2.1051 +# When building external modules, the kernel used as a basis is considered
   2.1052 +# read-only: no consistency checks are made, and the make
   2.1053 +# system is not used on the basis kernel. If updates are required
   2.1054 +# in the basis kernel, ordinary make commands (without M=...) must
   2.1055 +# be used.
  2.1056 +#
  2.1057 +# The following are the only valid targets when building external
  2.1058 +# modules.
  2.1059 +# make M=dir clean     Delete all automatically generated files
  2.1060 +# make M=dir modules   Make all modules in specified dir
  2.1061 +# make M=dir	       Same as 'make M=dir modules'
  2.1062 +# make M=dir modules_install
   2.1063 +#                      Install the modules built in the module directory
  2.1064 +#                      Assumes install directory is already created
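
A minimal sketch of this flow (module name and kernel path are hypothetical):
a directory containing hello.c plus a one-line Makefile

    obj-m := hello.o

is built and installed against this tree with

    make -C /path/to/kernel/src M=$PWD modules
    make -C /path/to/kernel/src M=$PWD modules_install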
  2.1065 +
  2.1066 +# We are always building modules
  2.1067 +KBUILD_MODULES := 1
  2.1068 +.PHONY: crmodverdir
  2.1069 +crmodverdir:
  2.1070 +	$(Q)mkdir -p $(MODVERDIR)
  2.1071 +
  2.1072 +module-dirs := $(addprefix _module_,$(KBUILD_EXTMOD))
  2.1073 +.PHONY: $(module-dirs) modules
  2.1074 +$(module-dirs): crmodverdir
  2.1075 +	$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
  2.1076 +
  2.1077 +modules: $(module-dirs)
  2.1078 +	@echo '  Building modules, stage 2.';
  2.1079 +	$(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modpost
  2.1080 +
  2.1081 +.PHONY: modules_install
  2.1082 +modules_install:
  2.1083 +	$(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modinst
  2.1084 +
  2.1085 +clean-dirs := $(addprefix _clean_,$(KBUILD_EXTMOD))
  2.1086 +
  2.1087 +.PHONY: $(clean-dirs) clean
  2.1088 +$(clean-dirs):
  2.1089 +	$(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@)
  2.1090 +
  2.1091 +clean:	rm-dirs := $(MODVERDIR)
  2.1092 +clean: $(clean-dirs)
  2.1093 +	$(call cmd,rmdirs)
  2.1094 +	@find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
  2.1095 +	 	\( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
  2.1096 +		-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \) \
  2.1097 +		-type f -print | xargs rm -f
  2.1098 +
  2.1099 +help:
  2.1100 +	@echo  '  Building external modules.'
  2.1101 +	@echo  '  Syntax: make -C path/to/kernel/src M=$$PWD target'
  2.1102 +	@echo  ''
  2.1103 +	@echo  '  modules         - default target, build the module(s)'
  2.1104 +	@echo  '  modules_install - install the module'
  2.1105 +	@echo  '  clean           - remove generated files in module directory only'
  2.1106 +	@echo  ''
  2.1107 +endif # KBUILD_EXTMOD
  2.1108 +
  2.1109 +# Generate tags for editors
  2.1110 +# ---------------------------------------------------------------------------
  2.1111 +
  2.1112 +define all-sources
  2.1113 +	( find $(srctree) $(RCS_FIND_IGNORE) \
  2.1114 +	       \( -name include -o -name arch \) -prune -o \
  2.1115 +	       -name '*.[chS]' -print; \
  2.1116 +	  find $(srctree)/arch/$(ARCH) $(RCS_FIND_IGNORE) \
  2.1117 +	       -name '*.[chS]' -print; \
  2.1118 +	  find $(srctree)/security/selinux/include $(RCS_FIND_IGNORE) \
  2.1119 +	       -name '*.[chS]' -print; \
  2.1120 +	  find $(srctree)/include $(RCS_FIND_IGNORE) \
  2.1121 +	       \( -name config -o -name 'asm-*' \) -prune \
  2.1122 +	       -o -name '*.[chS]' -print; \
  2.1123 +	  find $(srctree)/include/asm-$(ARCH) $(RCS_FIND_IGNORE) \
  2.1124 +	       -name '*.[chS]' -print; \
  2.1125 +	  find $(srctree)/include/asm-generic $(RCS_FIND_IGNORE) \
  2.1126 +	       -name '*.[chS]' -print )
  2.1127 +endef
  2.1128 +
  2.1129 +quiet_cmd_cscope-file = FILELST cscope.files
  2.1130 +      cmd_cscope-file = $(all-sources) > cscope.files
  2.1131 +
  2.1132 +quiet_cmd_cscope = MAKE    cscope.out
  2.1133 +      cmd_cscope = cscope -k -b -q
  2.1134 +
  2.1135 +cscope: FORCE
  2.1136 +	$(call cmd,cscope-file)
  2.1137 +	$(call cmd,cscope)
  2.1138 +
  2.1139 +quiet_cmd_TAGS = MAKE   $@
  2.1140 +cmd_TAGS = $(all-sources) | etags -
  2.1141 +
  2.1142 +# 	Exuberant ctags works better with -I
  2.1143 +
  2.1144 +quiet_cmd_tags = MAKE   $@
  2.1145 +define cmd_tags
  2.1146 +	rm -f $@; \
  2.1147 +	CTAGSF=`ctags --version | grep -i exuberant >/dev/null && echo "-I __initdata,__exitdata,EXPORT_SYMBOL,EXPORT_SYMBOL_NOVERS"`; \
  2.1148 +	$(all-sources) | xargs ctags $$CTAGSF -a
  2.1149 +endef
  2.1150 +
  2.1151 +TAGS: FORCE
  2.1152 +	$(call cmd,TAGS)
  2.1153 +
  2.1154 +tags: FORCE
  2.1155 +	$(call cmd,tags)
  2.1156 +
  2.1157 +
  2.1158 +# Scripts to check various things for consistency
  2.1159 +# ---------------------------------------------------------------------------
  2.1160 +
  2.1161 +configcheck:
  2.1162 +	find * $(RCS_FIND_IGNORE) \
  2.1163 +		-name '*.[hcS]' -type f -print | sort \
  2.1164 +		| xargs $(PERL) -w scripts/checkconfig.pl
  2.1165 +
  2.1166 +includecheck:
  2.1167 +	find * $(RCS_FIND_IGNORE) \
  2.1168 +		-name '*.[hcS]' -type f -print | sort \
  2.1169 +		| xargs $(PERL) -w scripts/checkincludes.pl
  2.1170 +
  2.1171 +versioncheck:
  2.1172 +	find * $(RCS_FIND_IGNORE) \
  2.1173 +		-name '*.[hcS]' -type f -print | sort \
  2.1174 +		| xargs $(PERL) -w scripts/checkversion.pl
  2.1175 +
  2.1176 +buildcheck:
  2.1177 +	$(PERL) $(srctree)/scripts/reference_discarded.pl
  2.1178 +	$(PERL) $(srctree)/scripts/reference_init.pl
  2.1179 +
  2.1180 +namespacecheck:
  2.1181 +	$(PERL) $(srctree)/scripts/namespace.pl
  2.1182 +
  2.1183 +endif #ifeq ($(config-targets),1)
  2.1184 +endif #ifeq ($(mixed-targets),1)
  2.1185 +
  2.1186 +.PHONY: checkstack
  2.1187 +checkstack:
  2.1188 +	$(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \
  2.1189 +	$(PERL) $(src)/scripts/checkstack.pl $(ARCH)
  2.1190 +
  2.1191 +# FIXME Should go into a make.lib or something 
  2.1192 +# ===========================================================================
  2.1193 +
  2.1194 +quiet_cmd_rmdirs = $(if $(wildcard $(rm-dirs)),CLEAN   $(wildcard $(rm-dirs)))
  2.1195 +      cmd_rmdirs = rm -rf $(rm-dirs)
  2.1196 +
  2.1197 +quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN   $(wildcard $(rm-files)))
  2.1198 +      cmd_rmfiles = rm -f $(rm-files)
  2.1199 +
  2.1200 +
  2.1201 +a_flags = -Wp,-MD,$(depfile) $(AFLAGS) $(AFLAGS_KERNEL) \
  2.1202 +	  $(NOSTDINC_FLAGS) $(CPPFLAGS) \
  2.1203 +	  $(modkern_aflags) $(EXTRA_AFLAGS) $(AFLAGS_$(*F).o)
  2.1204 +
  2.1205 +quiet_cmd_as_o_S = AS      $@
  2.1206 +cmd_as_o_S       = $(CC) $(a_flags) -c -o $@ $<
  2.1207 +
  2.1208 +# read all saved command lines
  2.1209 +
  2.1210 +targets := $(wildcard $(sort $(targets)))
  2.1211 +cmd_files := $(wildcard .*.cmd $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd))
  2.1212 +
  2.1213 +ifneq ($(cmd_files),)
  2.1214 +  $(cmd_files): ;	# Do not try to update included dependency files
  2.1215 +  include $(cmd_files)
  2.1216 +endif
  2.1217 +
  2.1218 +# Execute command and generate cmd file
  2.1219 +if_changed = $(if $(strip $? \
  2.1220 +		          $(filter-out $(cmd_$(1)),$(cmd_$@))\
  2.1221 +			  $(filter-out $(cmd_$@),$(cmd_$(1)))),\
  2.1222 +	@set -e; \
  2.1223 +	$(if $($(quiet)cmd_$(1)),echo '  $(subst ','\'',$($(quiet)cmd_$(1)))';) \
  2.1224 +	$(cmd_$(1)); \
  2.1225 +	echo 'cmd_$@ := $(subst $$,$$$$,$(subst ','\'',$(cmd_$(1))))' > $(@D)/.$(@F).cmd)
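
A minimal usage sketch (rule and command are hypothetical; the .tmp_kallsyms
rules earlier in this file use the same pattern via if_changed_dep):

    quiet_cmd_copy = COPY    $@
          cmd_copy = cat $< > $@
    %.out: %.in FORCE
    	$(call if_changed,copy)

The command re-runs when a prerequisite is newer or when cmd_copy no longer
matches the command recorded in the saved .<target>.cmd file.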
  2.1226 +
  2.1227 +
   2.1228 +# Execute the command and also postprocess the generated .d
   2.1229 +# dependencies file
  2.1230 +if_changed_dep = $(if $(strip $? $(filter-out FORCE $(wildcard $^),$^)\
  2.1231 +		          $(filter-out $(cmd_$(1)),$(cmd_$@))\
  2.1232 +			  $(filter-out $(cmd_$@),$(cmd_$(1)))),\
  2.1233 +	$(Q)set -e; \
  2.1234 +	$(if $($(quiet)cmd_$(1)),echo '  $(subst ','\'',$($(quiet)cmd_$(1)))';) \
  2.1235 +	$(cmd_$(1)); \
  2.1236 +	scripts/basic/fixdep $(depfile) $@ '$(subst $$,$$$$,$(subst ','\'',$(cmd_$(1))))' > $(@D)/.$(@F).tmp; \
  2.1237 +	rm -f $(depfile); \
  2.1238 +	mv -f $(@D)/.$(@F).tmp $(@D)/.$(@F).cmd)
  2.1239 +
  2.1240 +# Usage: $(call if_changed_rule,foo)
   2.1241 +# will check if $(cmd_foo) changed, or any of the prerequisites changed,
  2.1242 +# and if so will execute $(rule_foo)
  2.1243 +
  2.1244 +if_changed_rule = $(if $(strip $? \
  2.1245 +		               $(filter-out $(cmd_$(1)),$(cmd_$(@F)))\
  2.1246 +			       $(filter-out $(cmd_$(@F)),$(cmd_$(1)))),\
  2.1247 +	               $(Q)$(rule_$(1)))
  2.1248 +
  2.1249 +# If quiet is set, only print short version of command
  2.1250 +
  2.1251 +cmd = @$(if $($(quiet)cmd_$(1)),echo '  $($(quiet)cmd_$(1))' &&) $(cmd_$(1))
  2.1252 +
  2.1253 +# filechk is used to check if the content of a generated file is updated.
  2.1254 +# Sample usage:
  2.1255 +# define filechk_sample
  2.1256 +#	echo $KERNELRELEASE
  2.1257 +# endef
  2.1258 +# version.h : Makefile
  2.1259 +#	$(call filechk,sample)
  2.1260 +# The rule defined shall write to stdout the content of the new file.
  2.1261 +# The existing file will be compared with the new one.
   2.1262 +# - If no file exists, it is created
   2.1263 +# - If the content differs, the new file is used
   2.1264 +# - If they are equal, no change is made and the timestamp is not updated
  2.1265 +
  2.1266 +define filechk
  2.1267 +	@set -e;				\
  2.1268 +	echo '  CHK     $@';			\
  2.1269 +	mkdir -p $(dir $@);			\
  2.1270 +	$(filechk_$(1)) < $< > $@.tmp;		\
  2.1271 +	if [ -r $@ ] && cmp -s $@ $@.tmp; then	\
  2.1272 +		rm -f $@.tmp;			\
  2.1273 +	else					\
  2.1274 +		echo '  UPD     $@';		\
  2.1275 +		mv -f $@.tmp $@;		\
  2.1276 +	fi
  2.1277 +endef
  2.1278 +
  2.1279 +# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.build obj=dir
  2.1280 +# Usage:
  2.1281 +# $(Q)$(MAKE) $(build)=dir
  2.1282 +build := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.build obj
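
For example, the scripts_basic rule near the top of this file expands as

    $(Q)$(MAKE) $(build)=scripts/basic
        -> @make -f scripts/Makefile.build obj=scripts/basic

(with a $(srctree)/ prefix on the Makefile path when KBUILD_SRC is set).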
  2.1283 +
  2.1284 +# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.clean obj=dir
  2.1285 +# Usage:
  2.1286 +# $(Q)$(MAKE) $(clean)=dir
  2.1287 +clean := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.clean obj
  2.1288 +
  2.1289 +#	$(call descend,<dir>,<target>)
  2.1290 +#	Recursively call a sub-make in <dir> with target <target>
  2.1291 +# Usage is deprecated, because make does not see this as an invocation of make.
  2.1292 +descend =$(Q)$(MAKE) -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.build obj=$(1) $(2)
  2.1293 +
  2.1294 +endif	# skip-makefile
  2.1295 +
  2.1296 +FORCE:
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/xen/arch/ia64/privop.c	Mon Nov 22 23:08:21 2004 +0000
     3.3 @@ -0,0 +1,863 @@
     3.4 +/*
     3.5 + * Privileged operation "API" handling functions.
     3.6 + * 
     3.7 + * Copyright (C) 2004 Hewlett-Packard Co.
     3.8 + *	Dan Magenheimer (dan.magenheimer@hp.com)
     3.9 + *
    3.10 + */
    3.11 +
    3.12 +#include <asm/privop.h>
    3.13 +#include <asm/privify.h>
    3.14 +#include <asm/vcpu.h>
    3.15 +#include <asm/processor.h>
    3.16 +#include <asm/delay.h>	// Debug only
    3.17 +//#include <debug.h>
    3.18 +
    3.19 +long priv_verbose=0;
    3.20 +
    3.21 +/**************************************************************************
    3.22 +Hypercall bundle creation
    3.23 +**************************************************************************/
    3.24 +
    3.25 +
    3.26 +void build_hypercall_bundle(UINT64 *imva, UINT64 breakimm, UINT64 hypnum, UINT64 ret)
    3.27 +{
    3.28 +	INST64_A5 slot0;
    3.29 +	INST64_I19 slot1;
    3.30 +	INST64_B4 slot2;
    3.31 +	IA64_BUNDLE bundle;
    3.32 +
     3.33 +	// slot0: mov r2 = hypnum (low 21 bits)
    3.34 +	slot0.inst = 0;
    3.35 +	slot0.qp = 0; slot0.r1 = 2; slot0.r3 = 0; slot0.major = 0x9;
    3.36 +	slot0.imm7b = hypnum; slot0.imm9d = hypnum >> 7;
    3.37 +	slot0.imm5c = hypnum >> 16; slot0.s = 0;
    3.38 +	// slot1: break breakimm
    3.39 +	slot1.inst = 0;
    3.40 +	slot1.qp = 0; slot1.x6 = 0; slot1.x3 = 0; slot1.major = 0x0;
    3.41 +	slot1.imm20 = breakimm; slot1.i = breakimm >> 20;
    3.42 +	// if ret slot2: br.ret.sptk.many rp
    3.43 +	// else slot2: br.cond.sptk.many rp
    3.44 +	slot2.inst = 0; slot2.qp = 0; slot2.p = 1; slot2.b2 = 0;
    3.45 +	slot2.wh = 0; slot2.d = 0; slot2.major = 0x0;
    3.46 +	if (ret) {
    3.47 +		slot2.btype = 4; slot2.x6 = 0x21;
    3.48 +	}
    3.49 +	else {
    3.50 +		slot2.btype = 0; slot2.x6 = 0x20;
    3.51 +	}
    3.52 +	
    3.53 +	bundle.i64[0] = 0; bundle.i64[1] = 0;
    3.54 +	bundle.template = 0x11;
    3.55 +	bundle.slot0 = slot0.inst; bundle.slot2 = slot2.inst;
    3.56 +	bundle.slot1a = slot1.inst; bundle.slot1b = slot1.inst >> 18;
    3.57 +	
    3.58 +	*imva++ = bundle.i64[0]; *imva = bundle.i64[1];
    3.59 +}
    3.60 +
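          +/* Illustrative sketch only -- this caller and its constants are
          + * hypothetical, not part of this changeset.  A bundle is two 64-bit
          + * words, so the target area must hold at least 16 bytes. */
          +#if 0
          +static void example_emit_hypercall_stub(UINT64 *stub_imva)
          +{
          +	/* emits: mov r2=0x123 ; break.i 0x1000 ; br.ret.sptk.many rp */
          +	build_hypercall_bundle(stub_imva, /*breakimm=*/0x1000,
          +			       /*hypnum=*/0x123, /*ret=*/1);
          +}
          +#endif
          +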
    3.61 +/**************************************************************************
    3.62 +Privileged operation emulation routines
    3.63 +**************************************************************************/
    3.64 +
    3.65 +IA64FAULT priv_rfi(VCPU *vcpu, INST64 inst)
    3.66 +{
    3.67 +	return vcpu_rfi(vcpu);
    3.68 +}
    3.69 +
    3.70 +IA64FAULT priv_bsw0(VCPU *vcpu, INST64 inst)
    3.71 +{
    3.72 +	return vcpu_bsw0(vcpu);
    3.73 +}
    3.74 +
    3.75 +IA64FAULT priv_bsw1(VCPU *vcpu, INST64 inst)
    3.76 +{
    3.77 +	return vcpu_bsw1(vcpu);
    3.78 +}
    3.79 +
    3.80 +IA64FAULT priv_cover(VCPU *vcpu, INST64 inst)
    3.81 +{
    3.82 +	return vcpu_cover(vcpu);
    3.83 +}
    3.84 +
    3.85 +IA64FAULT priv_ptc_l(VCPU *vcpu, INST64 inst)
    3.86 +{
    3.87 +	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
    3.88 +	UINT64 addr_range;
    3.89 +
    3.90 +	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
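          +	// e.g. a page-size field (r2 bits 7:2) of 14 purges a
          +	// 1 << 14 = 16KB range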
    3.91 +	return vcpu_ptc_l(vcpu,vadr,addr_range);
    3.92 +}
    3.93 +
    3.94 +IA64FAULT priv_ptc_e(VCPU *vcpu, INST64 inst)
    3.95 +{
    3.96 +	UINT src = inst.M28.r3;
    3.97 +
    3.98 +	// NOTE: ptc_e with source gr > 63 is emulated as a fc r(y-64)
    3.99 +	if (src > 63) return(vcpu_fc(vcpu,vcpu_get_gr(vcpu,src - 64)));
   3.100 +	return vcpu_ptc_e(vcpu,vcpu_get_gr(vcpu,src));
   3.101 +}
   3.102 +
   3.103 +IA64FAULT priv_ptc_g(VCPU *vcpu, INST64 inst)
   3.104 +{
   3.105 +	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
   3.106 +	UINT64 addr_range;
   3.107 +
   3.108 +	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
   3.109 +	return vcpu_ptc_g(vcpu,vadr,addr_range);
   3.110 +}
   3.111 +
   3.112 +IA64FAULT priv_ptc_ga(VCPU *vcpu, INST64 inst)
   3.113 +{
   3.114 +	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
   3.115 +	UINT64 addr_range;
   3.116 +
   3.117 +	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
   3.118 +	return vcpu_ptc_ga(vcpu,vadr,addr_range);
   3.119 +}
   3.120 +
   3.121 +IA64FAULT priv_ptr_d(VCPU *vcpu, INST64 inst)
   3.122 +{
   3.123 +	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
   3.124 +	UINT64 addr_range;
   3.125 +
   3.126 +	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
   3.127 +	return vcpu_ptr_d(vcpu,vadr,addr_range);
   3.128 +}
   3.129 +
   3.130 +IA64FAULT priv_ptr_i(VCPU *vcpu, INST64 inst)
   3.131 +{
   3.132 +	UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
   3.133 +	UINT64 addr_range;
   3.134 +
   3.135 +	addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
   3.136 +	return vcpu_ptr_i(vcpu,vadr,addr_range);
   3.137 +}
   3.138 +
   3.139 +IA64FAULT priv_tpa(VCPU *vcpu, INST64 inst)
   3.140 +{
   3.141 +	UINT64 padr;
   3.142 +	UINT fault;
   3.143 +	UINT src = inst.M46.r3;
   3.144 +
   3.145 +	// NOTE: tpa with source gr > 63 is emulated as a ttag rx=r(y-64)
   3.146 +	if (src > 63)
   3.147 +		fault = vcpu_ttag(vcpu,vcpu_get_gr(vcpu,src-64),&padr);
   3.148 +	else fault = vcpu_tpa(vcpu,vcpu_get_gr(vcpu,src),&padr);
   3.149 +	if (fault == IA64_NO_FAULT)
   3.150 +		return vcpu_set_gr(vcpu, inst.M46.r1, padr);
   3.151 +	else return fault;
   3.152 +}
   3.153 +
   3.154 +IA64FAULT priv_tak(VCPU *vcpu, INST64 inst)
   3.155 +{
   3.156 +	UINT64 key;
   3.157 +	UINT fault;
   3.158 +	UINT src = inst.M46.r3;
   3.159 +
   3.160 +	// NOTE: tak with source gr > 63 is emulated as a thash rx=r(y-64)
   3.161 +	if (src > 63)
   3.162 +		fault = vcpu_thash(vcpu,vcpu_get_gr(vcpu,src-64),&key);
   3.163 +	else fault = vcpu_tak(vcpu,vcpu_get_gr(vcpu,src),&key);
   3.164 +	if (fault == IA64_NO_FAULT)
   3.165 +		return vcpu_set_gr(vcpu, inst.M46.r1, key);
   3.166 +	else return fault;
   3.167 +}
   3.168 +
   3.169 +/************************************
   3.170 + * Insert translation register/cache
   3.171 +************************************/
   3.172 +
   3.173 +IA64FAULT priv_itr_d(VCPU *vcpu, INST64 inst)
   3.174 +{
   3.175 +	UINT64 fault, itir, ifa, pte, slot;
   3.176 +
   3.177 +	//if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
   3.178 +	if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
   3.179 +		return(IA64_ILLOP_FAULT);
   3.180 +	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
   3.181 +		return(IA64_ILLOP_FAULT);
   3.182 +	pte = vcpu_get_gr(vcpu,inst.M42.r2);
   3.183 +	slot = vcpu_get_gr(vcpu,inst.M42.r3);
   3.184 +
   3.185 +	return (vcpu_itr_d(vcpu,slot,pte,itir,ifa));
   3.186 +}
   3.187 +
   3.188 +IA64FAULT priv_itr_i(VCPU *vcpu, INST64 inst)
   3.189 +{
   3.190 +	UINT64 fault, itir, ifa, pte, slot;
   3.191 +
   3.192 +	//if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
   3.193 +	if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
   3.194 +		return(IA64_ILLOP_FAULT);
   3.195 +	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
   3.196 +		return(IA64_ILLOP_FAULT);
   3.197 +	pte = vcpu_get_gr(vcpu,inst.M42.r2);
   3.198 +	slot = vcpu_get_gr(vcpu,inst.M42.r3);
   3.199 +
   3.200 +	return (vcpu_itr_i(vcpu,slot,pte,itir,ifa));
   3.201 +}
   3.202 +
   3.203 +IA64FAULT priv_itc_d(VCPU *vcpu, INST64 inst)
   3.204 +{
   3.205 +	UINT64 fault, itir, ifa, pte;
   3.206 +
   3.207 +	//if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
   3.208 +	if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
   3.209 +		return(IA64_ILLOP_FAULT);
   3.210 +	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
   3.211 +		return(IA64_ILLOP_FAULT);
   3.212 +	pte = vcpu_get_gr(vcpu,inst.M41.r2);
   3.213 +
   3.214 +	return (vcpu_itc_d(vcpu,pte,itir,ifa));
   3.215 +}
   3.216 +
   3.217 +IA64FAULT priv_itc_i(VCPU *vcpu, INST64 inst)
   3.218 +{
   3.219 +	UINT64 fault, itir, ifa, pte;
   3.220 +
   3.221 +	//if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
   3.222 +	if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
   3.223 +		return(IA64_ILLOP_FAULT);
   3.224 +	if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
   3.225 +		return(IA64_ILLOP_FAULT);
   3.226 +	pte = vcpu_get_gr(vcpu,inst.M41.r2);
   3.227 +
   3.228 +	return (vcpu_itc_i(vcpu,pte,itir,ifa));
   3.229 +}
   3.230 +
   3.231 +/*************************************
   3.232 + * Moves to semi-privileged registers
   3.233 +*************************************/
   3.234 +
   3.235 +IA64FAULT priv_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
   3.236 +{
   3.237 +	// I27 and M30 are identical for these fields
   3.238 +	UINT64 ar3 = inst.M30.ar3;
   3.239 +	UINT64 imm = vcpu_get_gr(vcpu,inst.M30.imm);
   3.240 +	return (vcpu_set_ar(vcpu,ar3,imm));
   3.241 +}
   3.242 +
   3.243 +IA64FAULT priv_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
   3.244 +{
   3.245 +	// I26 and M29 are identical for these fields
   3.246 +	UINT64 ar3 = inst.M29.ar3;
   3.247 +
   3.248 +	if (inst.M29.r2 > 63 && inst.M29.ar3 < 8) { // privified mov from kr
   3.249 +		UINT64 val;
   3.250 +		if (vcpu_get_ar(vcpu,ar3,&val) != IA64_ILLOP_FAULT)
   3.251 +			return vcpu_set_gr(vcpu, inst.M29.r2-64, val);
   3.252 +		else return IA64_ILLOP_FAULT;
   3.253 +	}
   3.254 +	else {
   3.255 +		UINT64 r2 = vcpu_get_gr(vcpu,inst.M29.r2);
   3.256 +		return (vcpu_set_ar(vcpu,ar3,r2));
   3.257 +	}
   3.258 +}
   3.259 +
   3.260 +/********************************
   3.261 + * Moves to privileged registers
   3.262 +********************************/
   3.263 +
   3.264 +IA64FAULT priv_mov_to_pkr(VCPU *vcpu, INST64 inst)
   3.265 +{
   3.266 +	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   3.267 +	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   3.268 +	return (vcpu_set_pkr(vcpu,r3,r2));
   3.269 +}
   3.270 +
   3.271 +IA64FAULT priv_mov_to_rr(VCPU *vcpu, INST64 inst)
   3.272 +{
   3.273 +	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   3.274 +	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   3.275 +	return (vcpu_set_rr(vcpu,r3,r2));
   3.276 +}
   3.277 +
   3.278 +IA64FAULT priv_mov_to_dbr(VCPU *vcpu, INST64 inst)
   3.279 +{
   3.280 +	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   3.281 +	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   3.282 +	return (vcpu_set_dbr(vcpu,r3,r2));
   3.283 +}
   3.284 +
   3.285 +IA64FAULT priv_mov_to_ibr(VCPU *vcpu, INST64 inst)
   3.286 +{
   3.287 +	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   3.288 +	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   3.289 +	return (vcpu_set_ibr(vcpu,r3,r2));
   3.290 +}
   3.291 +
   3.292 +IA64FAULT priv_mov_to_pmc(VCPU *vcpu, INST64 inst)
   3.293 +{
   3.294 +	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   3.295 +	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   3.296 +	return (vcpu_set_pmc(vcpu,r3,r2));
   3.297 +}
   3.298 +
   3.299 +IA64FAULT priv_mov_to_pmd(VCPU *vcpu, INST64 inst)
   3.300 +{
   3.301 +	UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
   3.302 +	UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
   3.303 +	return (vcpu_set_pmd(vcpu,r3,r2));
   3.304 +}
   3.305 +
   3.306 +unsigned long to_cr_cnt[128] = { 0 };
   3.307 +
   3.308 +IA64FAULT priv_mov_to_cr(VCPU *vcpu, INST64 inst)
   3.309 +{
   3.310 +	UINT64 val = vcpu_get_gr(vcpu, inst.M32.r2);
   3.311 +	to_cr_cnt[inst.M32.cr3]++;
   3.312 +	switch (inst.M32.cr3) {
   3.313 +	    case 0: return vcpu_set_dcr(vcpu,val);
   3.314 +	    case 1: return vcpu_set_itm(vcpu,val);
   3.315 +	    case 2: return vcpu_set_iva(vcpu,val);
   3.316 +	    case 8: return vcpu_set_pta(vcpu,val);
   3.317 +	    case 16:return vcpu_set_ipsr(vcpu,val);
   3.318 +	    case 17:return vcpu_set_isr(vcpu,val);
   3.319 +	    case 19:return vcpu_set_iip(vcpu,val);
   3.320 +	    case 20:return vcpu_set_ifa(vcpu,val);
   3.321 +	    case 21:return vcpu_set_itir(vcpu,val);
   3.322 +	    case 22:return vcpu_set_iipa(vcpu,val);
   3.323 +	    case 23:return vcpu_set_ifs(vcpu,val);
   3.324 +	    case 24:return vcpu_set_iim(vcpu,val);
   3.325 +	    case 25:return vcpu_set_iha(vcpu,val);
   3.326 +	    case 64:return vcpu_set_lid(vcpu,val);
   3.327 +	    case 65:return IA64_ILLOP_FAULT;
   3.328 +	    case 66:return vcpu_set_tpr(vcpu,val);
   3.329 +	    case 67:return vcpu_set_eoi(vcpu,val);
   3.330 +	    case 68:return IA64_ILLOP_FAULT;
   3.331 +	    case 69:return IA64_ILLOP_FAULT;
   3.332 +	    case 70:return IA64_ILLOP_FAULT;
   3.333 +	    case 71:return IA64_ILLOP_FAULT;
   3.334 +	    case 72:return vcpu_set_itv(vcpu,val);
   3.335 +	    case 73:return vcpu_set_pmv(vcpu,val);
   3.336 +	    case 74:return vcpu_set_cmcv(vcpu,val);
   3.337 +	    case 80:return vcpu_set_lrr0(vcpu,val);
   3.338 +	    case 81:return vcpu_set_lrr1(vcpu,val);
   3.339 +	    default: return IA64_ILLOP_FAULT;
   3.340 +	}
   3.341 +}
   3.342 +
   3.343 +IA64FAULT priv_rsm(VCPU *vcpu, INST64 inst)
   3.344 +{
   3.345 +	UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
   3.346 +	return vcpu_reset_psr_sm(vcpu,imm24);
   3.347 +}
   3.348 +
   3.349 +IA64FAULT priv_ssm(VCPU *vcpu, INST64 inst)
   3.350 +{
   3.351 +	UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
   3.352 +	return vcpu_set_psr_sm(vcpu,imm24);
   3.353 +}
   3.354 +
   3.355 +/**
   3.356 + * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
   3.357 + */
   3.358 +IA64FAULT priv_mov_to_psr(VCPU *vcpu, INST64 inst)
   3.359 +{
   3.360 +	UINT64 val = vcpu_get_gr(vcpu, inst.M35.r2);
   3.361 +	return vcpu_set_psr_l(vcpu,val);
   3.362 +}
   3.363 +
   3.364 +/**********************************
   3.365 + * Moves from privileged registers
   3.366 + **********************************/
   3.367 +
   3.368 +IA64FAULT priv_mov_from_rr(VCPU *vcpu, INST64 inst)
   3.369 +{
   3.370 +	UINT64 val;
   3.371 +	IA64FAULT fault;
   3.372 +	
   3.373 +	if (inst.M43.r1 > 63) { // privified mov from cpuid
   3.374 +		fault = vcpu_get_cpuid(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   3.375 +		if (fault == IA64_NO_FAULT)
   3.376 +			return vcpu_set_gr(vcpu, inst.M43.r1-64, val);
   3.377 +	}
   3.378 +	else {
   3.379 +		fault = vcpu_get_rr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   3.380 +		if (fault == IA64_NO_FAULT)
   3.381 +			return vcpu_set_gr(vcpu, inst.M43.r1, val);
   3.382 +	}
   3.383 +	return fault;
   3.384 +}
   3.385 +
   3.386 +IA64FAULT priv_mov_from_pkr(VCPU *vcpu, INST64 inst)
   3.387 +{
   3.388 +	UINT64 val;
   3.389 +	IA64FAULT fault;
   3.390 +	
   3.391 +	fault = vcpu_get_pkr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   3.392 +	if (fault == IA64_NO_FAULT)
   3.393 +		return vcpu_set_gr(vcpu, inst.M43.r1, val);
   3.394 +	else return fault;
   3.395 +}
   3.396 +
   3.397 +IA64FAULT priv_mov_from_dbr(VCPU *vcpu, INST64 inst)
   3.398 +{
   3.399 +	UINT64 val;
   3.400 +	IA64FAULT fault;
   3.401 +	
   3.402 +	fault = vcpu_get_dbr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   3.403 +	if (fault == IA64_NO_FAULT)
   3.404 +		return vcpu_set_gr(vcpu, inst.M43.r1, val);
   3.405 +	else return fault;
   3.406 +}
   3.407 +
   3.408 +IA64FAULT priv_mov_from_ibr(VCPU *vcpu, INST64 inst)
   3.409 +{
   3.410 +	UINT64 val;
   3.411 +	IA64FAULT fault;
   3.412 +	
   3.413 +	fault = vcpu_get_ibr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   3.414 +	if (fault == IA64_NO_FAULT)
   3.415 +		return vcpu_set_gr(vcpu, inst.M43.r1, val);
   3.416 +	else return fault;
   3.417 +}
   3.418 +
   3.419 +IA64FAULT priv_mov_from_pmc(VCPU *vcpu, INST64 inst)
   3.420 +{
   3.421 +	UINT64 val;
   3.422 +	IA64FAULT fault;
   3.423 +	
   3.424 +	fault = vcpu_get_pmc(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
   3.425 +	if (fault == IA64_NO_FAULT)
   3.426 +		return vcpu_set_gr(vcpu, inst.M43.r1, val);
   3.427 +	else return fault;
   3.428 +}
   3.429 +
   3.430 +unsigned long from_cr_cnt[128] = { 0 };
   3.431 +
    3.432 +#define cr_get(cr) \
    3.433 +	(((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
    3.434 +		vcpu_set_gr(vcpu, tgt, val) : fault)
   3.435 +	
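          +// e.g. "return cr_get(dcr);" expands to:
          +//	return (((fault = vcpu_get_dcr(vcpu,&val)) == IA64_NO_FAULT) ?
          +//		vcpu_set_gr(vcpu, tgt, val) : fault);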
   3.436 +IA64FAULT priv_mov_from_cr(VCPU *vcpu, INST64 inst)
   3.437 +{
   3.438 +	UINT64 tgt = inst.M33.r1;
   3.439 +	UINT64 val;
   3.440 +	IA64FAULT fault;
   3.441 +
   3.442 +	from_cr_cnt[inst.M33.cr3]++;
   3.443 +	switch (inst.M33.cr3) {
   3.444 +	    case 0: return cr_get(dcr);
   3.445 +	    case 1: return cr_get(itm);
   3.446 +	    case 2: return cr_get(iva);
   3.447 +	    case 8: return cr_get(pta);
   3.448 +	    case 16:return cr_get(ipsr);
   3.449 +	    case 17:return cr_get(isr);
   3.450 +	    case 19:return cr_get(iip);
   3.451 +	    case 20:return cr_get(ifa);
   3.452 +	    case 21:return cr_get(itir);
   3.453 +	    case 22:return cr_get(iipa);
   3.454 +	    case 23:return cr_get(ifs);
   3.455 +	    case 24:return cr_get(iim);
   3.456 +	    case 25:return cr_get(iha);
   3.457 +	    case 64:return cr_get(lid);
   3.458 +	    case 65:return cr_get(ivr);
   3.459 +	    case 66:return cr_get(tpr);
   3.460 +	    case 67:return vcpu_set_gr(vcpu,tgt,0L);
   3.461 +	    case 68:return cr_get(irr0);
   3.462 +	    case 69:return cr_get(irr1);
   3.463 +	    case 70:return cr_get(irr2);
   3.464 +	    case 71:return cr_get(irr3);
   3.465 +	    case 72:return cr_get(itv);
   3.466 +	    case 73:return cr_get(pmv);
   3.467 +	    case 74:return cr_get(cmcv);
   3.468 +	    case 80:return cr_get(lrr0);
   3.469 +	    case 81:return cr_get(lrr1);
   3.470 +	    default: return IA64_ILLOP_FAULT;
   3.471 +	}
   3.472 +	return IA64_ILLOP_FAULT;
   3.473 +}
   3.474 +
   3.475 +IA64FAULT priv_mov_from_psr(VCPU *vcpu, INST64 inst)
   3.476 +{
   3.477 +	UINT64 tgt = inst.M33.r1;
   3.478 +	UINT64 val;
   3.479 +	IA64FAULT fault;
   3.480 +
   3.481 +	if ((fault = vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
   3.482 +		return vcpu_set_gr(vcpu, tgt, val);
   3.483 +	else return fault;
   3.484 +}
   3.485 +
   3.486 +/**************************************************************************
   3.487 +Privileged operation decode and dispatch routines
   3.488 +**************************************************************************/
   3.489 +
   3.490 +IA64_SLOT_TYPE slot_types[0x20][3] = {
   3.491 +	{M, I, I}, {M, I, I}, {M, I, I}, {M, I, I},
   3.492 +	{M, I, ILLEGAL}, {M, I, ILLEGAL},
   3.493 +	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
   3.494 +	{M, M, I}, {M, M, I}, {M, M, I}, {M, M, I},
   3.495 +	{M, F, I}, {M, F, I},
   3.496 +	{M, M, F}, {M, M, F},
   3.497 +	{M, I, B}, {M, I, B},
   3.498 +	{M, B, B}, {M, B, B},
   3.499 +	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
   3.500 +	{B, B, B}, {B, B, B},
   3.501 +	{M, M, B}, {M, M, B},
   3.502 +	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
   3.503 +	{M, F, B}, {M, F, B},
   3.504 +	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL}
   3.505 +};
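          +// e.g. slot_types[0x11] is {M, I, B}: the MIB template that
          +// build_hypercall_bundle() emits above.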
   3.506 +
   3.507 +// pointer to privileged emulation function
   3.508 +typedef IA64FAULT (*PPEFCN)(VCPU *vcpu, INST64 inst);
   3.509 +
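          +// Dispatch table indexed by the M-unit's 6-bit x6 opcode extension;
          +// e.g. x6 == 0x1e lands on priv_tpa (cf. Mpriv_str[] below).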
   3.510 +PPEFCN Mpriv_funcs[64] = {
   3.511 +  priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
   3.512 +  priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
   3.513 +  0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
   3.514 +  priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
   3.515 +  priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr, priv_mov_from_pkr,
   3.516 +  priv_mov_from_pmc, 0, 0, 0,
   3.517 +  0, 0, 0, 0,
   3.518 +  0, 0, priv_tpa, priv_tak,
   3.519 +  0, 0, 0, 0,
   3.520 +  priv_mov_from_cr, priv_mov_from_psr, 0, 0,
   3.521 +  0, 0, 0, 0,
   3.522 +  priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
   3.523 +  0, 0, 0, 0,
   3.524 +  priv_ptc_e, 0, 0, 0,
   3.525 +  0, 0, 0, 0, 0, 0, 0, 0
   3.526 +};
   3.527 +
   3.528 +struct {
   3.529 +	unsigned long mov_to_ar_imm;
   3.530 +	unsigned long mov_to_ar_reg;
   3.531 +	unsigned long ssm;
   3.532 +	unsigned long rsm;
   3.533 +	unsigned long rfi;
   3.534 +	unsigned long bsw0;
   3.535 +	unsigned long bsw1;
   3.536 +	unsigned long cover;
   3.537 +	unsigned long Mpriv_cnt[64];
   3.538 +} privcnt = { 0 };
   3.539 +
   3.540 +unsigned long privop_trace = 0;
   3.541 +
   3.542 +IA64FAULT
   3.543 +priv_handle_op(VCPU *vcpu, REGS *regs, int privlvl)
   3.544 +{
   3.545 +	IA64_BUNDLE bundle, __get_domain_bundle(UINT64);
   3.546 +	int slot;
   3.547 +	IA64_SLOT_TYPE slot_type;
   3.548 +	INST64 inst;
   3.549 +	PPEFCN pfunc;
   3.550 +	unsigned long ipsr = regs->cr_ipsr;
   3.551 +	UINT64 iip = regs->cr_iip;
   3.552 +	int x6;
   3.553 +	
   3.554 +	// make a local copy of the bundle containing the privop
   3.555 +#if 1
   3.556 +	bundle = __get_domain_bundle(iip);
   3.557 +	if (!bundle.i64[0] && !bundle.i64[1]) return IA64_RETRY;
   3.558 +#else
   3.559 +#ifdef AVOIDING_POSSIBLE_DOMAIN_TLB_MISS
   3.560 +	//TODO: this needs to check for faults and behave accordingly
   3.561 +	if (!vcpu_get_iip_bundle(&bundle)) return IA64_DTLB_FAULT;
   3.562 +#else
    3.563 +	if (iip < 0x10000) {
    3.564 +		printf("priv_handle_op: unlikely iip=%p,b0=%p\n",iip,regs->b0);
    3.565 +		dummy();
    3.566 +	}
    3.567 +	bundle = *(IA64_BUNDLE *)iip;
   3.568 +#endif
   3.569 +#endif
   3.570 +#if 0
   3.571 +	if (iip==0xa000000100001820) {
   3.572 +		static int firstpagefault = 1;
   3.573 +		if (firstpagefault) {
    3.574 +			printf("*** First domain page fault!\n");
          +			firstpagefault = 0;
   3.575 +		}
   3.576 +	}
   3.577 +#endif
   3.578 +	if (privop_trace) {
   3.579 +		static long i = 400;
   3.580 +		//if (i > 0) printf("privop @%p\n",iip);
   3.581 +		if (i > 0) printf("priv_handle_op: @%p, itc=%lx, itm=%lx\n",
   3.582 +			iip,ia64_get_itc(),ia64_get_itm());
   3.583 +		i--;
   3.584 +	}
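          +	// An IA-64 bundle is 128 bits: a 5-bit template plus three
          +	// 41-bit slots.  Slot 1 straddles the two 64-bit words (18 bits
          +	// in i64[0], 23 bits in i64[1]), hence the shift-and-or below.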
   3.585 +	slot = ((struct ia64_psr *)&ipsr)->ri;
   3.586 +	if (!slot) inst.inst = (bundle.i64[0]>>5) & MASK_41;
   3.587 +	else if (slot == 1)
   3.588 +		inst.inst = ((bundle.i64[0]>>46) | bundle.i64[1]<<18) & MASK_41;
   3.589 +	else if (slot == 2) inst.inst = (bundle.i64[1]>>23) & MASK_41; 
   3.590 +	else printf("priv_handle_op: illegal slot: %d\n", slot);
   3.591 +
   3.592 +	slot_type = slot_types[bundle.template][slot];
   3.593 +	if (priv_verbose) {
   3.594 +		printf("priv_handle_op: checking bundle at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
   3.595 +		 iip, (UINT64)inst.inst, slot, slot_type);
   3.596 +	}
   3.597 +	if (slot_type == B && inst.generic.major == 0 && inst.B8.x6 == 0x0) {
   3.598 +		// break instr for privified cover
   3.599 +	}
   3.600 +	else if (privlvl != 2) return (IA64_ILLOP_FAULT);
   3.601 +	switch (slot_type) {
   3.602 +	    case M:
   3.603 +		if (inst.generic.major == 0) {
   3.604 +#if 0
   3.605 +			if (inst.M29.x6 == 0 && inst.M29.x3 == 0) {
   3.606 +				privcnt.cover++;
   3.607 +				return priv_cover(vcpu,inst);
   3.608 +			}
   3.609 +#endif
   3.610 +			if (inst.M29.x3 != 0) break;
   3.611 +			if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
   3.612 +				privcnt.mov_to_ar_imm++;
   3.613 +				return priv_mov_to_ar_imm(vcpu,inst);
   3.614 +			}
   3.615 +			if (inst.M44.x4 == 6) {
   3.616 +				privcnt.ssm++;
   3.617 +				return priv_ssm(vcpu,inst);
   3.618 +			}
   3.619 +			if (inst.M44.x4 == 7) {
   3.620 +				privcnt.rsm++;
   3.621 +				return priv_rsm(vcpu,inst);
   3.622 +			}
   3.623 +			break;
   3.624 +		}
   3.625 +		else if (inst.generic.major != 1) break;
   3.626 +		x6 = inst.M29.x6;
   3.627 +		if (x6 == 0x2a) {
   3.628 +			privcnt.mov_to_ar_reg++;
   3.629 +			return priv_mov_to_ar_reg(vcpu,inst);
   3.630 +		}
   3.631 +		if (inst.M29.x3 != 0) break;
   3.632 +		if (!(pfunc = Mpriv_funcs[x6])) break;
   3.633 +		if (x6 == 0x1e || x6 == 0x1f)  { // tpa or tak are "special"
   3.634 +			if (inst.M46.r3 > 63) {
   3.635 +				if (x6 == 0x1e) x6 = 0x1b;
   3.636 +				else x6 = 0x1a;
   3.637 +			}
   3.638 +		}
   3.639 +		privcnt.Mpriv_cnt[x6]++;
   3.640 +		return (*pfunc)(vcpu,inst);
   3.641 +		break;
   3.642 +	    case B:
   3.643 +		if (inst.generic.major != 0) break;
   3.644 +		if (inst.B8.x6 == 0x08) {
   3.645 +			IA64FAULT fault;
   3.646 +			privcnt.rfi++;
   3.647 +			fault = priv_rfi(vcpu,inst);
   3.648 +			if (fault == IA64_NO_FAULT) fault = IA64_RFI_IN_PROGRESS;
   3.649 +			return fault;
   3.650 +		}
   3.651 +		if (inst.B8.x6 == 0x0c) {
   3.652 +			privcnt.bsw0++;
   3.653 +			return priv_bsw0(vcpu,inst);
   3.654 +		}
   3.655 +		if (inst.B8.x6 == 0x0d) {
   3.656 +			privcnt.bsw1++;
   3.657 +			return priv_bsw1(vcpu,inst);
   3.658 +		}
   3.659 +		if (inst.B8.x6 == 0x0) { // break instr for privified cover
   3.660 +			privcnt.cover++;
   3.661 +			return priv_cover(vcpu,inst);
   3.662 +		}
   3.663 +		break;
   3.664 +	    case I:
   3.665 +		if (inst.generic.major != 0) break;
   3.666 +#if 0
   3.667 +		if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
   3.668 +			privcnt.cover++;
   3.669 +			return priv_cover(vcpu,inst);
   3.670 +		}
   3.671 +#endif
   3.672 +		if (inst.I26.x3 != 0) break;  // I26.x3 == I27.x3
   3.673 +		if (inst.I26.x6 == 0x2a) {
   3.674 +			privcnt.mov_to_ar_reg++;
   3.675 +			return priv_mov_to_ar_reg(vcpu,inst);
   3.676 +		}
   3.677 +		if (inst.I27.x6 == 0x0a) {
   3.678 +			privcnt.mov_to_ar_imm++;
   3.679 +			return priv_mov_to_ar_imm(vcpu,inst);
   3.680 +		}
   3.681 +		break;
   3.682 +	    default:
   3.683 +		break;
   3.684 +	}
    3.685 +        //printf("We who are about to die salute you\n");
   3.686 +	printf("handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
   3.687 +		 iip, (UINT64)inst.inst, slot, slot_type);
   3.688 +        //printf("vtop(0x%lx)==0x%lx\r\n", iip, tr_vtop(iip));
   3.689 +        //thread_mozambique("privop fault\n");
   3.690 +	return (IA64_ILLOP_FAULT);
   3.691 +}
   3.692 +
   3.693 +/** Emulate a privileged operation.
   3.694 + *
   3.695 + * This should probably return 0 on success and the "trap number"
   3.696 + * (e.g. illegal operation for bad register, priv op for an
   3.697 + * instruction that isn't allowed, etc.) on "failure"
   3.698 + *
   3.699 + * @param vcpu virtual cpu
    3.700 + * @param regs saved register state
          + * @param isr interruption status register (the isr code is extracted from it)
   3.701 + * @return fault
   3.702 + */
   3.703 +IA64FAULT
   3.704 +priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr)
   3.705 +{
   3.706 +	IA64FAULT fault;
   3.707 +	UINT64 ipsr = regs->cr_ipsr;
   3.708 +	UINT64 isrcode = (isr >> 4) & 0xf;
   3.709 +	int privlvl;
   3.710 +
    3.711 +	// handle privops that appear as illegal operations (isrcode 0-2) and breaks (isrcode 6)
   3.712 +	if (isrcode != 1 && isrcode != 2 && isrcode != 0 && isrcode != 6) {
    3.713 +		printf("priv_emulate: isrcode != 0, 1, 2, or 6\n");
   3.714 +		printf("priv_emulate: returning ILLOP, not implemented!\n");
   3.715 +		while (1);
   3.716 +		return IA64_ILLOP_FAULT;
   3.717 +	}
   3.718 +	//if (isrcode != 1 && isrcode != 2) return 0;
   3.719 +	vcpu_set_regs(vcpu,regs);
   3.720 +	privlvl = (ipsr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT;
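          +	// (the privified guest kernel is expected to run at CPL 2 here,
          +	// which is what priv_handle_op checks for)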
    3.721 +	// it's OK for a privified cover to be executed in user-land
   3.722 +	fault = priv_handle_op(vcpu,regs,privlvl);
   3.723 +	if (fault == IA64_NO_FAULT) { // success!!
   3.724 +		// update iip/ipsr to point to the next instruction
   3.725 +		(void)vcpu_increment_iip(vcpu);
   3.726 +	}
   3.727 +	else if (fault == IA64_EXTINT_VECTOR) {
   3.728 +		// update iip/ipsr before delivering interrupt
   3.729 +		(void)vcpu_increment_iip(vcpu);
   3.730 +	}
    3.731 +	// success, but don't update to the next instruction
    3.732 +	else if (fault == IA64_RFI_IN_PROGRESS) return fault;
   3.733 +        else if (fault == IA64_RETRY) {
   3.734 +            //printf("Priv emulate gets IA64_RETRY\n");
   3.735 +	    //printf("priv_emulate: returning RETRY, not implemented!\n");
   3.736 +	    //while (1);
    3.737 +	    // don't update iip/ipsr; force a data TLB miss instead
    3.738 +
   3.739 +            vcpu_force_data_miss(vcpu,regs->cr_iip);
   3.740 +	    return IA64_RETRY;
   3.741 +        }
   3.742 +	else if (priv_verbose) printf("unhandled operation from handle_op\n");
   3.743 +//	if (fault == IA64_ILLOP_FAULT) {
   3.744 +//		printf("priv_emulate: returning ILLOP, not implemented!\n");
   3.745 +//		while (1);
   3.746 +//	}
   3.747 +	return fault;
   3.748 +}
   3.749 +
   3.750 +
   3.751 +/**************************************************************************
   3.752 +Privileged operation instrumentation routines
   3.753 +**************************************************************************/
   3.754 +
   3.755 +char *Mpriv_str[64] = {
   3.756 +  "mov_to_rr", "mov_to_dbr", "mov_to_ibr", "mov_to_pkr",
   3.757 +  "mov_to_pmc", "mov_to_pmd", "<0x06>", "<0x07>",
   3.758 +  "<0x08>", "ptc_l", "ptc_g", "ptc_ga",
   3.759 +  "ptr_d", "ptr_i", "itr_d", "itr_i",
   3.760 +  "mov_from_rr", "mov_from_dbr", "mov_from_ibr", "mov_from_pkr",
   3.761 +  "mov_from_pmc", "<0x15>", "<0x16>", "<0x17>",
   3.762 +  "<0x18>", "<0x19>", "privified-thash", "privified-ttag",
   3.763 +  "<0x1c>", "<0x1d>", "tpa", "tak",
   3.764 +  "<0x20>", "<0x21>", "<0x22>", "<0x23>",
   3.765 +  "mov_from_cr", "mov_from_psr", "<0x26>", "<0x27>",
   3.766 +  "<0x28>", "<0x29>", "<0x2a>", "<0x2b>",
   3.767 +  "mov_to_cr", "mov_to_psr", "itc_d", "itc_i",
   3.768 +  "<0x30>", "<0x31>", "<0x32>", "<0x33>",
   3.769 +  "ptc_e", "<0x35>", "<0x36>", "<0x37>",
   3.770 +  "<0x38>", "<0x39>", "<0x3a>", "<0x3b>",
   3.771 +  "<0x3c>", "<0x3d>", "<0x3e>", "<0x3f>"
   3.772 +};
   3.773 +
   3.774 +#define RS "Rsvd"
   3.775 +char *cr_str[128] = {
   3.776 +  "dcr","itm","iva",RS,RS,RS,RS,RS,
   3.777 +  "pta",RS,RS,RS,RS,RS,RS,RS,
   3.778 +  "ipsr","isr",RS,"iip","ifa","itir","iipa","ifs",
   3.779 +  "iim","iha",RS,RS,RS,RS,RS,RS,
   3.780 +  RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
   3.781 +  RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
   3.782 +  "lid","ivr","tpr","eoi","irr0","irr1","irr2","irr3",
   3.783 +  "itv","pmv","cmcv",RS,RS,RS,RS,RS,
   3.784 +  "lrr0","lrr1",RS,RS,RS,RS,RS,RS,
   3.785 +  RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
   3.786 +  RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
   3.787 +  RS,RS,RS,RS,RS,RS,RS,RS
   3.788 +};
   3.789 +
   3.790 +void dump_privop_counts(void)
   3.791 +{
   3.792 +	int i, j;
   3.793 +	UINT64 sum = 0;
   3.794 +
   3.795 +	// this is ugly and should probably produce sorted output
   3.796 +	// but it will have to do for now
   3.797 +	sum += privcnt.mov_to_ar_imm; sum += privcnt.mov_to_ar_reg;
   3.798 +	sum += privcnt.ssm; sum += privcnt.rsm;
   3.799 +	sum += privcnt.rfi; sum += privcnt.bsw0;
   3.800 +	sum += privcnt.bsw1; sum += privcnt.cover;
   3.801 +	for (i=0; i < 64; i++) sum += privcnt.Mpriv_cnt[i];
    3.802 +	printf("Privop statistics: (Total privops: %lu)\r\n",sum);
    3.803 +	if (privcnt.mov_to_ar_imm)
    3.804 +		printf("%10lu  %s [%lu%%]\r\n", privcnt.mov_to_ar_imm,
    3.805 +			"mov_to_ar_imm", (privcnt.mov_to_ar_imm*100L)/sum);
    3.806 +	if (privcnt.mov_to_ar_reg)
    3.807 +		printf("%10lu  %s [%lu%%]\r\n", privcnt.mov_to_ar_reg,
    3.808 +			"mov_to_ar_reg", (privcnt.mov_to_ar_reg*100L)/sum);
    3.809 +	if (privcnt.ssm)
    3.810 +		printf("%10lu  %s [%lu%%]\r\n", privcnt.ssm,
    3.811 +			"ssm", (privcnt.ssm*100L)/sum);
    3.812 +	if (privcnt.rsm)
    3.813 +		printf("%10lu  %s [%lu%%]\r\n", privcnt.rsm,
    3.814 +			"rsm", (privcnt.rsm*100L)/sum);
    3.815 +	if (privcnt.rfi)
    3.816 +		printf("%10lu  %s [%lu%%]\r\n", privcnt.rfi,
    3.817 +			"rfi", (privcnt.rfi*100L)/sum);
    3.818 +	if (privcnt.bsw0)
    3.819 +		printf("%10lu  %s [%lu%%]\r\n", privcnt.bsw0,
    3.820 +			"bsw0", (privcnt.bsw0*100L)/sum);
    3.821 +	if (privcnt.bsw1)
    3.822 +		printf("%10lu  %s [%lu%%]\r\n", privcnt.bsw1,
    3.823 +			"bsw1", (privcnt.bsw1*100L)/sum);
    3.824 +	if (privcnt.cover)
    3.825 +		printf("%10lu  %s [%lu%%]\r\n", privcnt.cover,
    3.826 +			"cover", (privcnt.cover*100L)/sum);
    3.827 +	for (i=0; i < 64; i++) if (privcnt.Mpriv_cnt[i]) {
    3.828 +		if (!Mpriv_str[i]) printf("PRIVSTRING NULL!!\r\n");
    3.829 +		else printf("%10lu  %s [%lu%%]\r\n", privcnt.Mpriv_cnt[i],
    3.830 +			Mpriv_str[i], (privcnt.Mpriv_cnt[i]*100L)/sum);
   3.831 +		if (i == 0x24) { // mov from CR
   3.832 +			printf("            [");
   3.833 +			for (j=0; j < 128; j++) if (from_cr_cnt[j]) {
    3.834 +				if (!cr_str[j])
    3.835 +					printf("PRIVSTRING NULL!!\r\n");
    3.836 +				else printf("%s(%lu),",cr_str[j],from_cr_cnt[j]);
   3.837 +			}
   3.838 +			printf("]\r\n");
   3.839 +		}
   3.840 +		else if (i == 0x2c) { // mov to CR
   3.841 +			printf("            [");
   3.842 +			for (j=0; j < 128; j++) if (to_cr_cnt[j]) {
    3.843 +				if (!cr_str[j])
    3.844 +					printf("PRIVSTRING NULL!!\r\n");
    3.845 +				else printf("%s(%lu),",cr_str[j],to_cr_cnt[j]);
   3.846 +			}
   3.847 +			printf("]\r\n");
   3.848 +		}
   3.849 +	}
   3.850 +}
   3.851 +
   3.852 +void zero_privop_counts(void)
   3.853 +{
   3.854 +	int i, j;
   3.855 +
   3.856 +	// this is ugly and should probably produce sorted output
   3.857 +	// but it will have to do for now
   3.858 +	printf("Zeroing privop statistics\r\n");
   3.859 +	privcnt.mov_to_ar_imm = 0; privcnt.mov_to_ar_reg = 0;
   3.860 +	privcnt.ssm = 0; privcnt.rsm = 0;
   3.861 +	privcnt.rfi = 0; privcnt.bsw0 = 0;
   3.862 +	privcnt.bsw1 = 0; privcnt.cover = 0;
   3.863 +	for (i=0; i < 64; i++) privcnt.Mpriv_cnt[i] = 0;
   3.864 +	for (j=0; j < 128; j++) from_cr_cnt[j] = 0;
   3.865 +	for (j=0; j < 128; j++) to_cr_cnt[j] = 0;
   3.866 +}
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/arch/ia64/process.c	Mon Nov 22 23:08:21 2004 +0000
     4.3 @@ -0,0 +1,836 @@
     4.4 +/*
     4.5 + * Miscellaneous process/domain related routines
     4.6 + * 
     4.7 + * Copyright (C) 2004 Hewlett-Packard Co.
     4.8 + *	Dan Magenheimer (dan.magenheimer@hp.com)
     4.9 + *
    4.10 + */
    4.11 +
    4.12 +#include <xen/config.h>
    4.13 +#include <xen/lib.h>
    4.14 +#include <xen/errno.h>
    4.15 +#include <xen/sched.h>
    4.16 +#include <xen/smp.h>
    4.17 +#include <asm/ptrace.h>
    4.18 +#include <xen/delay.h>
    4.19 +
    4.20 +#include <linux/efi.h>	/* FOR EFI_UNIMPLEMENTED */
    4.21 +#include <asm/sal.h>	/* FOR struct ia64_sal_retval */
    4.22 +
    4.23 +#include <asm/system.h>
    4.24 +#include <asm/io.h>
    4.25 +#include <asm/processor.h>
    4.26 +#include <asm/desc.h>
    4.27 +#include <asm/ldt.h>
    4.28 +#include <xen/irq.h>
    4.29 +#include <xen/event.h>
    4.30 +#include <asm/regionreg.h>
    4.31 +#include <asm/privop.h>
    4.32 +#include <asm/vcpu.h>
    4.33 +#include <asm/ia64_int.h>
    4.34 +#include <asm/hpsim_ssc.h>
    4.35 +#include <asm/dom_fw.h>
    4.36 +
    4.37 +extern struct ia64_sal_retval pal_emulator_static(UINT64);
    4.38 +extern struct ia64_sal_retval sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64);
    4.39 +
    4.40 +extern unsigned long dom0_start, dom0_size;
    4.41 +
    4.42 +#define IA64_PSR_CPL1	(__IA64_UL(1) << IA64_PSR_CPL1_BIT)
    4.43 +// note IA64_PSR_PK removed from following, why is this necessary?
    4.44 +#define	DELIVER_PSR_SET	(IA64_PSR_IC | IA64_PSR_I | \
    4.45 +			IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
    4.46 +			IA64_PSR_IT | IA64_PSR_BN)
    4.47 +
    4.48 +#define	DELIVER_PSR_CLR	(IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
    4.49 +			IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI |	\
    4.50 +			IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
    4.51 +			IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
    4.52 +			IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
    4.53 +			IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
    4.54 +
    4.55 +#define PSCB(x)	x->shared_info->arch
    4.56 +
    4.57 +extern unsigned long vcpu_verbose;
    4.58 +
    4.59 +long do_iopl(domid_t domain, unsigned int new_io_pl)
    4.60 +{
    4.61 +	dummy();
    4.62 +	return 0;
    4.63 +}
    4.64 +
    4.65 +void schedule_tail(struct domain *next)
    4.66 +{
    4.67 +	unsigned long rr7;
    4.68 +	printk("current=%lx,shared_info=%lx\n",current,current->shared_info);
    4.69 +	printk("next=%lx,shared_info=%lx\n",next,next->shared_info);
     4.70 +	if ((rr7 = load_region_regs(current))) {
    4.71 +		printk("schedule_tail: change to rr7 not yet implemented\n");
    4.72 +	}
    4.73 +}
    4.74 +
    4.75 +extern TR_ENTRY *match_tr(struct domain *d, unsigned long ifa);
    4.76 +
    4.77 +void tdpfoo(void) { }
    4.78 +
     4.79 +// given a domain virtual address, pte and pagesize, extract the metaphysical
     4.80 +// address, convert the pte to map the physical address at the (possibly
     4.81 +// different) Xen PAGE_SIZE, and return the modified pte.  (NOTE: TLB insert
     4.82 +// should use PAGE_SIZE!)
    4.83 +unsigned long translate_domain_pte(unsigned long pteval,
    4.84 +	unsigned long address, unsigned long itir)
    4.85 +{
    4.86 +	struct domain *d = (struct domain *) current;
    4.87 +	unsigned long mask, pteval2, mpaddr;
    4.88 +	unsigned long lookup_domain_mpa(struct domain *,unsigned long);
    4.89 +	extern struct domain *dom0;
    4.90 +	extern unsigned long dom0_start, dom0_size;
    4.91 +
    4.92 +	// FIXME address had better be pre-validated on insert
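          +	// itir bits 7:2 hold the page size; e.g. ps = 16 (a 64KB page)
          +	// gives mask = 0xffff: frame bits come from the pte, offset bits
          +	// from the faulting address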
    4.93 +	mask = (1L << ((itir >> 2) & 0x3f)) - 1;
    4.94 +	mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
    4.95 +	if (d == dom0) {
    4.96 +		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
    4.97 +			//printk("translate_domain_pte: out-of-bounds dom0 mpaddr %p! itc=%lx...\n",mpaddr,ia64_get_itc());
    4.98 +			tdpfoo();
    4.99 +		}
   4.100 +	}
   4.101 +	else if ((mpaddr >> PAGE_SHIFT) > d->max_pages) {
   4.102 +		printf("translate_domain_pte: bad mpa=%p (> %p),vadr=%p,pteval=%p,itir=%p\n",
   4.103 +			mpaddr,d->max_pages<<PAGE_SHIFT,address,pteval,itir);
   4.104 +		tdpfoo();
   4.105 +	}
   4.106 +	pteval2 = lookup_domain_mpa(d,mpaddr);
   4.107 +	pteval2 &= _PAGE_PPN_MASK; // ignore non-addr bits
   4.108 +	pteval2 |= _PAGE_PL_2; // force PL0->2 (PL3 is unaffected)
   4.109 +	pteval2 = (pteval & ~_PAGE_PPN_MASK) | pteval2;
   4.110 +	return pteval2;
   4.111 +}
   4.112 +
   4.113 +// given a current domain metaphysical address, return the physical address
   4.114 +unsigned long translate_domain_mpaddr(unsigned long mpaddr)
   4.115 +{
   4.116 +	extern unsigned long lookup_domain_mpa(struct domain *,unsigned long);
   4.117 +	unsigned long pteval;
   4.118 +
   4.119 +	if (current == dom0) {
   4.120 +		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
   4.121 +			printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr %p! continuing...\n",mpaddr);
   4.122 +			tdpfoo();
   4.123 +		}
   4.124 +	}
   4.125 +	pteval = lookup_domain_mpa(current,mpaddr);
   4.126 +	return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
   4.127 +}
   4.128 +
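          +// Deliver a fault/interruption to the guest: stash the interruption
          +// state in the shared-info arch block (PSCB) and point iip/ipsr at
          +// the guest's IVT entry for the (possibly rewritten) vector.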
   4.129 +void reflect_interruption(unsigned long ifa, unsigned long isr, unsigned long itiriim, struct pt_regs *regs, unsigned long vector)
   4.130 +{
   4.131 +	unsigned long vcpu_get_ipsr_int_state(struct domain *,unsigned long);
   4.132 +	unsigned long vcpu_get_rr_ve(struct domain *,unsigned long);
   4.133 +	unsigned long vcpu_get_itir_on_fault(struct domain *,unsigned long);
   4.134 +	struct domain *d = (struct domain *) current;
   4.135 +
   4.136 +	if (vector == IA64_EXTINT_VECTOR) {
   4.137 +		
   4.138 +		extern unsigned long vcpu_verbose, privop_trace;
    4.139 +		static int first_extint = 1;
   4.140 +		if (first_extint) {
   4.141 +			printf("Delivering first extint to domain: ifa=%p, isr=%p, itir=%p, iip=%p\n",ifa,isr,itiriim,regs->cr_iip);
   4.142 +			//privop_trace = 1; vcpu_verbose = 1;
   4.143 +			first_extint = 0;
   4.144 +		}
   4.145 +	}
   4.146 +	if (!PSCB(d).interrupt_collection_enabled) {
   4.147 +		if (!(PSCB(d).ipsr & IA64_PSR_DT)) {
   4.148 +			printf("psr.dt off, trying to deliver nested dtlb!\n");
   4.149 +			while(1);
   4.150 +		}
   4.151 +		vector &= ~0xf;
    4.152 +		if (vector != IA64_DATA_TLB_VECTOR &&
    4.153 +		    vector != IA64_INST_TLB_VECTOR) {
    4.154 +			printf("psr.ic off, delivering fault=%lx,iip=%p,isr=%p,PSCB.iip=%p\n",
    4.155 +				vector,regs->cr_iip,isr,PSCB(d).iip);
    4.156 +			while(1);
    4.157 +
    4.158 +		}
   4.159 +//printf("Delivering NESTED DATA TLB fault\n");
   4.160 +		vector = IA64_DATA_NESTED_TLB_VECTOR;
   4.161 +		regs->cr_iip = ((unsigned long) PSCB(d).iva + vector) & ~0xffUL;
   4.162 +		regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
   4.163 +// NOTE: nested trap must NOT pass PSCB address
   4.164 +		//regs->r31 = (unsigned long) &PSCB(d);
   4.165 +		return;
   4.166 +
   4.167 +	}
   4.168 +	if ((vector & 0xf) != IA64_FORCED_IFA) PSCB(d).ifa = ifa;
   4.169 +	else ifa = PSCB(d).ifa;
   4.170 +	vector &= ~0xf;
   4.171 +//	always deliver on ALT vector (for now?) because no VHPT
   4.172 +//	if (!vcpu_get_rr_ve(d,ifa)) {
   4.173 +		if (vector == IA64_DATA_TLB_VECTOR)
   4.174 +			vector = IA64_ALT_DATA_TLB_VECTOR;
   4.175 +		else if (vector == IA64_INST_TLB_VECTOR)
   4.176 +			vector = IA64_ALT_INST_TLB_VECTOR;
   4.177 +//	}
   4.178 +	PSCB(d).unat = regs->ar_unat;  // not sure if this is really needed?
   4.179 +	PSCB(d).precover_ifs = regs->cr_ifs;
   4.180 +	vcpu_bsw0(d);
   4.181 +	PSCB(d).ipsr = vcpu_get_ipsr_int_state(d,regs->cr_ipsr);
   4.182 +	if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
   4.183 +		PSCB(d).iim = itiriim;
   4.184 +	else PSCB(d).itir = vcpu_get_itir_on_fault(d,ifa);
   4.185 +	PSCB(d).isr = isr; // this is unnecessary except for interrupts!
   4.186 +	PSCB(d).iip = regs->cr_iip;
   4.187 +	PSCB(d).ifs = 0;
   4.188 +	PSCB(d).incomplete_regframe = 0;
   4.189 +
   4.190 +	regs->cr_iip = ((unsigned long) PSCB(d).iva + vector) & ~0xffUL;
   4.191 +	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
   4.192 +// FIXME: NEED TO PASS PSCB, BUT **NOT** IN R31 WHICH IS BEING USED FOR ar.pr
   4.193 +// IN ANY CASE, PASS PINNED ADDRESS, NOT THIS ONE
   4.194 +	//regs->r31 = (unsigned long) &PSCB(d);
   4.195 +
   4.196 +	PSCB(d).interrupt_delivery_enabled = 0;
   4.197 +	PSCB(d).interrupt_collection_enabled = 0;
   4.198 +}
   4.199 +
   4.200 +void foodpi(void) {}
   4.201 +
   4.202 +// ONLY gets called from ia64_leave_kernel
   4.203 +// ONLY call with interrupts disabled?? (else might miss one?)
   4.204 +// NEVER successful if already reflecting a trap/fault because psr.i==0
   4.205 +void deliver_pending_interrupt(struct pt_regs *regs)
   4.206 +{
   4.207 +	struct domain *d = (struct domain *) current;
   4.208 +	// FIXME: Will this work properly if doing an RFI???
   4.209 +	if (!is_idle_task(d) && user_mode(regs)) {
   4.210 +		vcpu_poke_timer(d);
   4.211 +		if (vcpu_deliverable_interrupts(d)) {
   4.212 +			unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
   4.213 +			foodpi();
   4.214 +			reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
   4.215 +		}
   4.216 +	}
   4.217 +}
   4.218 +
   4.219 +int handle_lazy_cover(struct domain *d, unsigned long isr, struct pt_regs *regs)
   4.220 +{
   4.221 +	if (!PSCB(d).interrupt_collection_enabled) {
   4.222 +		if (isr & IA64_ISR_IR) {
   4.223 +//			printf("Handling lazy cover\n");
   4.224 +			PSCB(d).ifs = regs->cr_ifs;
   4.225 +			PSCB(d).incomplete_regframe = 1;
   4.226 +			regs->cr_ifs = 0;
   4.227 +			return(1); // retry same instruction with cr.ifs off
   4.228 +		}
   4.229 +	}
   4.230 +	return(0);
   4.231 +}
   4.232 +
   4.233 +#define IS_XEN_ADDRESS(d,a) ((a >= d->xen_vastart) && (a <= d->xen_vaend))
   4.234 +
   4.235 +void xen_handle_domain_access(unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
   4.236 +{
   4.237 +	struct domain *d = (struct domain *) current;
   4.238 +	TR_ENTRY *trp;
   4.239 +	unsigned long psr = regs->cr_ipsr, mask, flags;
   4.240 +	unsigned long iip = regs->cr_iip;
   4.241 +	// FIXME should validate address here
   4.242 +	unsigned long pteval, mpaddr;
   4.243 +	unsigned long lookup_domain_mpa(struct domain *,unsigned long);
   4.244 +	IA64FAULT fault;
   4.245 +	extern void __get_domain_bundle(void);
   4.246 +
   4.247 +// NEED TO HANDLE THREE CASES:
   4.248 +// 1) domain is in metaphysical mode
   4.249 +// 2) domain address is in TR
   4.250 +// 3) domain address is not in TR (reflect data miss)
   4.251 +
    4.252 +	// got here trying to read a privop bundle
    4.253 +	//if (d->metaphysical_mode) {
    4.254 +	if (d->metaphysical_mode && !(address>>61)) {  //FIXME
   4.255 +		if (d == dom0) {
   4.256 +			if (address < dom0_start || address >= dom0_start + dom0_size) {
    4.257 +				printk("xen_handle_domain_access: out-of-bounds "
    4.258 +				   "dom0 mpaddr %p! continuing...\n",address);
   4.259 +				tdpfoo();
   4.260 +			}
   4.261 +		}
   4.262 +		pteval = lookup_domain_mpa(d,address);
   4.263 +		//FIXME: check return value?
   4.264 +		// would be nice to have a counter here
   4.265 +		vcpu_itc_no_srlz(d,2,address,pteval,PAGE_SHIFT);
   4.266 +		return;
   4.267 +	}
    4.268 +	if (address < 0x4000) printf("WARNING: page_fault @%p, iip=%p\n",address,iip);
   4.269 +	if (*(unsigned long *)__get_domain_bundle != iip) {
   4.270 +		printf("Bad user space access @%p ",address);
   4.271 +		printf("iip=%p, ipsr=%p, b0=%p\n",iip,psr,regs->b0);
   4.272 +		while(1);
   4.273 +	}
   4.274 +		
   4.275 +	fault = vcpu_tpa(d,address,&mpaddr);
   4.276 +	if (fault != IA64_NO_FAULT) {
   4.277 +		// this is hardcoded to handle __get_domain_bundle only
   4.278 +		regs->r8 = 0; regs->r9 = 0;
   4.279 +		regs->cr_iip += 0x20;
   4.280 +		//regs->cr_iip |= (2UL << IA64_PSR_RI_BIT);
   4.281 +		return;
   4.282 +	}
   4.283 +	if (d == dom0) {
   4.284 +		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
   4.285 +			printk("xen_handle_domain_access: vcpu_tpa returned out-of-bounds dom0 mpaddr %p! continuing...\n",mpaddr);
   4.286 +			tdpfoo();
   4.287 +		}
   4.288 +	}
   4.289 +	pteval = lookup_domain_mpa(d,mpaddr);
   4.290 +	// would be nice to have a counter here
   4.291 +	//printf("Handling privop data TLB miss\n");
   4.292 +	// FIXME, must be inlined or potential for nested fault here!
   4.293 +	vcpu_itc_no_srlz(d,2,address,pteval,PAGE_SHIFT);
   4.294 +}
   4.295 +
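          +// Page-fault path: metaphysical-mode addresses are filled straight
          +// from the domain's mpa table; addresses covered by a guest TR are
          +// translated and inserted; anything else is reflected to the guest
          +// as an (ALT) TLB-miss fault.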
   4.296 +void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
   4.297 +{
   4.298 +	struct domain *d = (struct domain *) current;
   4.299 +	TR_ENTRY *trp;
   4.300 +	unsigned long psr = regs->cr_ipsr, mask, flags;
   4.301 +	unsigned long iip = regs->cr_iip;
   4.302 +	// FIXME should validate address here
   4.303 +	unsigned long pteval, mpaddr;
   4.304 +	unsigned long lookup_domain_mpa(struct domain *,unsigned long);
   4.305 +	unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
   4.306 +	unsigned long vector;
   4.307 +	IA64FAULT fault;
   4.308 +
   4.309 +
   4.310 +	//The right way is put in VHPT and take another miss!
   4.311 +
    4.312 +	// weak attempt to avoid doing both I and D tlb inserts (which causes
    4.313 +	// problems for the privop bundle fetch); doesn't work, deal with later
   4.314 +	if (IS_XEN_ADDRESS(d,iip) && !IS_XEN_ADDRESS(d,address)) {
   4.315 +		xen_handle_domain_access(address, isr, regs, itir);
   4.316 +
   4.317 +		return;
   4.318 +	}
   4.319 +
   4.320 +	// FIXME: no need to pass itir in to this routine as we need to
   4.321 +	// compute the virtual itir anyway (based on domain's RR.ps)
   4.322 +	// AND ACTUALLY reflect_interruption doesn't use it anyway!
   4.323 +	itir = vcpu_get_itir_on_fault(d,address);
   4.324 +
   4.325 +	if (d->metaphysical_mode && (is_data || !(address>>61))) {  //FIXME
   4.326 +		// FIXME should validate mpaddr here
   4.327 +		if (d == dom0) {
   4.328 +			if (address < dom0_start || address >= dom0_start + dom0_size) {
   4.329 +				printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, iip=%p! continuing...\n",address,iip);
   4.330 +				printk("ia64_do_page_fault: out-of-bounds dom0 mpaddr %p, old iip=%p!\n",address,d->shared_info->arch.iip);
   4.331 +				tdpfoo();
   4.332 +			}
   4.333 +		}
   4.334 +		pteval = lookup_domain_mpa(d,address);
   4.335 +		// FIXME, must be inlined or potential for nested fault here!
   4.336 +		vcpu_itc_no_srlz(d,is_data?2:1,address,pteval,PAGE_SHIFT);
   4.337 +		return;
   4.338 +	}
    4.339 +	if ((trp = match_tr(d,address))) {
   4.340 +		// FIXME address had better be pre-validated on insert
   4.341 +		pteval = translate_domain_pte(trp->page_flags,address,trp->itir);
   4.342 +		vcpu_itc_no_srlz(d,is_data?2:1,address,pteval,(trp->itir>>2)&0x3f);
   4.343 +		return;
   4.344 +	}
   4.345 +	vector = is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
   4.346 +	if (handle_lazy_cover(d, isr, regs)) return;
    4.347 +	if (!(address>>61)) {
          +		printf("ia64_do_page_fault: @%p???, iip=%p, itc=%p (spinning...)\n",
          +			address,iip,ia64_get_itc());
          +		while(1);
          +	}
   4.348 +	if ((isr & IA64_ISR_SP)
   4.349 +	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
   4.350 +	{
   4.351 +		/*
   4.352 +		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
   4.353 +		 * bit in the psr to ensure forward progress.  (Target register will get a
   4.354 +		 * NaT for ld.s, lfetch will be canceled.)
   4.355 +		 */
   4.356 +		ia64_psr(regs)->ed = 1;
   4.357 +		return;
   4.358 +	}
   4.359 +	reflect_interruption(address, isr, itir, regs, vector);
   4.360 +}
   4.361 +
   4.362 +void
   4.363 +ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
   4.364 +	    unsigned long iim, unsigned long itir, unsigned long arg5,
   4.365 +	    unsigned long arg6, unsigned long arg7, unsigned long stack)
   4.366 +{
   4.367 +	struct pt_regs *regs = (struct pt_regs *) &stack;
   4.368 +	unsigned long code, error = isr;
   4.369 +	char buf[128];
    4.370 +	int result = 0, sig;
   4.371 +	static const char *reason[] = {
   4.372 +		"IA-64 Illegal Operation fault",
   4.373 +		"IA-64 Privileged Operation fault",
   4.374 +		"IA-64 Privileged Register fault",
   4.375 +		"IA-64 Reserved Register/Field fault",
   4.376 +		"Disabled Instruction Set Transition fault",
   4.377 +		"Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
   4.378 +		"Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
   4.379 +		"Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
   4.380 +	};
   4.381 +#if 0
    4.382 +	printf("ia64_fault, vector=0x%p, ifa=%p, iip=%p, ipsr=%p, isr=%p\n",
    4.383 +		vector, ifa, regs->cr_iip, regs->cr_ipsr, isr);
   4.384 +#endif
   4.385 +
   4.386 +	if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
   4.387 +		/*
   4.388 +		 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
   4.389 +		 * the lfetch.
   4.390 +		 */
   4.391 +		ia64_psr(regs)->ed = 1;
   4.392 +		printf("ia64_fault: handled lfetch.fault\n");
   4.393 +		return;
   4.394 +	}
   4.395 +
   4.396 +	switch (vector) {
   4.397 +	      case 24: /* General Exception */
   4.398 +		code = (isr >> 4) & 0xf;
   4.399 +		sprintf(buf, "General Exception: %s%s", reason[code],
   4.400 +			(code == 3) ? ((isr & (1UL << 37))
   4.401 +				       ? " (RSE access)" : " (data access)") : "");
   4.402 +		if (code == 8) {
   4.403 +# ifdef CONFIG_IA64_PRINT_HAZARDS
   4.404 +			printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
   4.405 +			       current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri,
   4.406 +			       regs->pr);
   4.407 +# endif
   4.408 +			printf("ia64_fault: returning on hazard\n");
   4.409 +			return;
   4.410 +		}
   4.411 +		break;
   4.412 +
   4.413 +	      case 25: /* Disabled FP-Register */
   4.414 +		if (isr & 2) {
   4.415 +			//disabled_fph_fault(regs);
   4.416 +			//return;
   4.417 +		}
   4.418 +		sprintf(buf, "Disabled FPL fault---not supposed to happen!");
   4.419 +		break;
   4.420 +
   4.421 +	      case 26: /* NaT Consumption */
   4.422 +		if (user_mode(regs)) {
   4.423 +			void *addr;
   4.424 +
   4.425 +			if (((isr >> 4) & 0xf) == 2) {
   4.426 +				/* NaT page consumption */
   4.427 +				//sig = SIGSEGV;
   4.428 +				//code = SEGV_ACCERR;
   4.429 +				addr = (void *) ifa;
   4.430 +			} else {
   4.431 +				/* register NaT consumption */
   4.432 +				//sig = SIGILL;
   4.433 +				//code = ILL_ILLOPN;
   4.434 +				addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
   4.435 +			}
   4.436 +			//siginfo.si_signo = sig;
   4.437 +			//siginfo.si_code = code;
   4.438 +			//siginfo.si_errno = 0;
   4.439 +			//siginfo.si_addr = addr;
   4.440 +			//siginfo.si_imm = vector;
   4.441 +			//siginfo.si_flags = __ISR_VALID;
   4.442 +			//siginfo.si_isr = isr;
   4.443 +			//force_sig_info(sig, &siginfo, current);
   4.444 +			//return;
   4.445 +		} //else if (ia64_done_with_exception(regs))
   4.446 +			//return;
   4.447 +		sprintf(buf, "NaT consumption");
   4.448 +		break;
   4.449 +
   4.450 +	      case 31: /* Unsupported Data Reference */
   4.451 +		if (user_mode(regs)) {
   4.452 +			//siginfo.si_signo = SIGILL;
   4.453 +			//siginfo.si_code = ILL_ILLOPN;
   4.454 +			//siginfo.si_errno = 0;
   4.455 +			//siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
   4.456 +			//siginfo.si_imm = vector;
   4.457 +			//siginfo.si_flags = __ISR_VALID;
   4.458 +			//siginfo.si_isr = isr;
   4.459 +			//force_sig_info(SIGILL, &siginfo, current);
   4.460 +			//return;
   4.461 +		}
   4.462 +		sprintf(buf, "Unsupported data reference");
   4.463 +		break;
   4.464 +
   4.465 +	      case 29: /* Debug */
   4.466 +	      case 35: /* Taken Branch Trap */
   4.467 +	      case 36: /* Single Step Trap */
   4.468 +		//if (fsys_mode(current, regs)) {}
   4.469 +		switch (vector) {
   4.470 +		      case 29:
   4.471 +			//siginfo.si_code = TRAP_HWBKPT;
   4.472 +#ifdef CONFIG_ITANIUM
   4.473 +			/*
   4.474 +			 * Erratum 10 (IFA may contain incorrect address) now has
   4.475 +			 * "NoFix" status.  There are no plans for fixing this.
   4.476 +			 */
   4.477 +			if (ia64_psr(regs)->is == 0)
   4.478 +			  ifa = regs->cr_iip;
   4.479 +#endif
   4.480 +			break;
   4.481 +		      case 35: ifa = 0; break;
   4.482 +		      case 36: ifa = 0; break;
   4.483 +		      //case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
   4.484 +		      //case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
   4.485 +		}
   4.486 +		//siginfo.si_signo = SIGTRAP;
   4.487 +		//siginfo.si_errno = 0;
   4.488 +		//siginfo.si_addr  = (void *) ifa;
   4.489 +		//siginfo.si_imm   = 0;
   4.490 +		//siginfo.si_flags = __ISR_VALID;
   4.491 +		//siginfo.si_isr   = isr;
   4.492 +		//force_sig_info(SIGTRAP, &siginfo, current);
   4.493 +		//return;
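          +		/* XXX: with the signal delivery commented out, control
          +		 * falls through into the fp fault/trap cases below */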
   4.494 +
   4.495 +	      case 32: /* fp fault */
   4.496 +	      case 33: /* fp trap */
   4.497 +		//result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, isr);
   4.498 +		if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
   4.499 +			//siginfo.si_signo = SIGFPE;
   4.500 +			//siginfo.si_errno = 0;
   4.501 +			//siginfo.si_code = FPE_FLTINV;
   4.502 +			//siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
   4.503 +			//siginfo.si_flags = __ISR_VALID;
   4.504 +			//siginfo.si_isr = isr;
   4.505 +			//siginfo.si_imm = 0;
   4.506 +			//force_sig_info(SIGFPE, &siginfo, current);
   4.507 +		}
   4.508 +		//return;
   4.509 +		sprintf(buf, "FP fault/trap");
   4.510 +		break;
   4.511 +
   4.512 +	      case 34:
   4.513 +		if (isr & 0x2) {
   4.514 +			/* Lower-Privilege Transfer Trap */
   4.515 +			/*
   4.516 +			 * Just clear PSR.lp and then return immediately: all the
    4.517 +			 * interesting work (e.g., signal delivery) is done in the
    4.518 +			 * kernel exit path.
   4.519 +			 */
   4.520 +			//ia64_psr(regs)->lp = 0;
   4.521 +			//return;
   4.522 +			sprintf(buf, "Lower-Privilege Transfer trap");
   4.523 +		} else {
   4.524 +			/* Unimplemented Instr. Address Trap */
   4.525 +			if (user_mode(regs)) {
   4.526 +				//siginfo.si_signo = SIGILL;
   4.527 +				//siginfo.si_code = ILL_BADIADDR;
   4.528 +				//siginfo.si_errno = 0;
   4.529 +				//siginfo.si_flags = 0;
   4.530 +				//siginfo.si_isr = 0;
   4.531 +				//siginfo.si_imm = 0;
   4.532 +				//siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
   4.533 +				//force_sig_info(SIGILL, &siginfo, current);
   4.534 +				//return;
   4.535 +			}
   4.536 +			sprintf(buf, "Unimplemented Instruction Address fault");
   4.537 +		}
   4.538 +		break;
   4.539 +
   4.540 +	      case 45:
   4.541 +		printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
   4.542 +		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
   4.543 +		       regs->cr_iip, ifa, isr);
   4.544 +		//force_sig(SIGSEGV, current);
   4.545 +		break;
   4.546 +
   4.547 +	      case 46:
   4.548 +		printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
   4.549 +		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
   4.550 +		       regs->cr_iip, ifa, isr, iim);
   4.551 +		//force_sig(SIGSEGV, current);
   4.552 +		return;
   4.553 +
   4.554 +	      case 47:
   4.555 +		sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16);
   4.556 +		break;
   4.557 +
   4.558 +	      default:
   4.559 +		sprintf(buf, "Fault %lu", vector);
   4.560 +		break;
   4.561 +	}
   4.562 +	//die_if_kernel(buf, regs, error);
    4.563 +	printk("ia64_fault: %s: reflecting\n",buf);
    4.564 +	reflect_interruption(ifa,isr,iim,regs,IA64_GENEX_VECTOR);
    4.565 +	//while(1);
   4.566 +	//force_sig(SIGILL, current);
   4.567 +}
   4.568 +
   4.569 +unsigned long running_on_sim = 0;
   4.570 +
   4.571 +void
   4.572 +do_ssc(unsigned long ssc, struct pt_regs *regs)
   4.573 +{
   4.574 +	extern unsigned long lookup_domain_mpa(struct domain *,unsigned long);
   4.575 +	unsigned long arg0, arg1, arg2, arg3, retval;
   4.576 +	char buf[2];
   4.577 +/**/	static int last_fd, last_count;	// FIXME FIXME FIXME
   4.578 +/**/					// BROKEN FOR MULTIPLE DOMAINS & SMP
   4.579 +/**/	struct ssc_disk_stat { int fd; unsigned count;} *stat, last_stat;
   4.580 +	extern unsigned long vcpu_verbose, privop_trace;
   4.581 +
   4.582 +	arg0 = vcpu_get_gr(current,32);
   4.583 +	switch(ssc) {
   4.584 +	    case SSC_PUTCHAR:
   4.585 +		buf[0] = arg0;
   4.586 +		buf[1] = '\0';
    4.587 +		printf("%s",buf);
   4.588 +		break;
   4.589 +	    case SSC_GETCHAR:
   4.590 +		retval = ia64_ssc(0,0,0,0,ssc);
   4.591 +		vcpu_set_gr(current,8,retval);
   4.592 +		break;
   4.593 +	    case SSC_WAIT_COMPLETION:
   4.594 +		if (arg0) {	// metaphysical address
   4.595 +
   4.596 +			arg0 = translate_domain_mpaddr(arg0);
   4.597 +/**/			stat = (struct ssc_disk_stat *)__va(arg0);
   4.598 +///**/			if (stat->fd == last_fd) stat->count = last_count;
   4.599 +/**/			stat->count = last_count;
   4.600 +//if (last_count >= PAGE_SIZE) printf("ssc_wait: stat->fd=%d,last_fd=%d,last_count=%d\n",stat->fd,last_fd,last_count);
   4.601 +///**/			retval = ia64_ssc(arg0,0,0,0,ssc);
   4.602 +/**/			retval = 0;
   4.603 +		}
   4.604 +		else retval = -1L;
   4.605 +		vcpu_set_gr(current,8,retval);
   4.606 +		break;
   4.607 +	    case SSC_OPEN:
   4.608 +		arg1 = vcpu_get_gr(current,33);	// access rights
    4.609 +		if (!running_on_sim) { printf("SSC_OPEN, not implemented on hardware.  (ignoring...)\n"); arg0 = 0; }
   4.610 +		if (arg0) {	// metaphysical address
   4.611 +			arg0 = translate_domain_mpaddr(arg0);
   4.612 +			retval = ia64_ssc(arg0,arg1,0,0,ssc);
   4.613 +		}
   4.614 +		else retval = -1L;
   4.615 +		vcpu_set_gr(current,8,retval);
   4.616 +		break;
   4.617 +	    case SSC_WRITE:
   4.618 +	    case SSC_READ:
   4.619 +//if (ssc == SSC_WRITE) printf("DOING AN SSC_WRITE\n");
   4.620 +		arg1 = vcpu_get_gr(current,33);
   4.621 +		arg2 = vcpu_get_gr(current,34);
   4.622 +		arg3 = vcpu_get_gr(current,35);
   4.623 +		if (arg2) {	// metaphysical address of descriptor
   4.624 +			struct ssc_disk_req *req;
   4.625 +			unsigned long mpaddr, paddr;
   4.626 +			long len;
   4.627 +
   4.628 +			arg2 = translate_domain_mpaddr(arg2);
    4.629 +			req = (struct ssc_disk_req *)__va(arg2);
   4.630 +			req->len &= 0xffffffffL;	// avoid strange bug
   4.631 +			len = req->len;
   4.632 +/**/			last_fd = arg1;
   4.633 +/**/			last_count = len;
   4.634 +			mpaddr = req->addr;
   4.635 +//if (last_count >= PAGE_SIZE) printf("do_ssc: read fd=%d, addr=%p, len=%lx ",last_fd,mpaddr,len);
   4.636 +			retval = 0;
   4.637 +			if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
   4.638 +				// do partial page first
   4.639 +				req->addr = translate_domain_mpaddr(mpaddr);
   4.640 +				req->len = PAGE_SIZE - (req->addr & ~PAGE_MASK);
   4.641 +				len -= req->len; mpaddr += req->len;
   4.642 +				retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
   4.643 +				arg3 += req->len; // file offset
   4.644 +/**/				last_stat.fd = last_fd;
   4.645 +/**/				(void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
   4.646 +//if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)[part]=%x ",req->addr,req->len,retval);
   4.647 +			}
   4.648 +			if (retval >= 0) while (len > 0) {
   4.649 +				req->addr = translate_domain_mpaddr(mpaddr);
   4.650 +				req->len = (len > PAGE_SIZE) ? PAGE_SIZE : len;
   4.651 +				len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
   4.652 +				retval = ia64_ssc(arg0,arg1,arg2,arg3,ssc);
   4.653 +				arg3 += req->len; // file offset
   4.654 +// TEMP REMOVED AGAIN				arg3 += req->len; // file offset
   4.655 +/**/				last_stat.fd = last_fd;
   4.656 +/**/				(void)ia64_ssc(__pa(&last_stat),0,0,0,SSC_WAIT_COMPLETION);
   4.657 +//if (last_count >= PAGE_SIZE) printf("ssc(%p,%lx)=%x ",req->addr,req->len,retval);
   4.658 +			}
   4.659 +			// set it back to the original value
   4.660 +			req->len = last_count;
   4.661 +		}
   4.662 +		else retval = -1L;
   4.663 +		vcpu_set_gr(current,8,retval);
   4.664 +//if (last_count >= PAGE_SIZE) printf("retval=%x\n",retval);
   4.665 +		break;
   4.666 +	    case SSC_CONNECT_INTERRUPT:
   4.667 +		arg1 = vcpu_get_gr(current,33);
   4.668 +		arg2 = vcpu_get_gr(current,34);
   4.669 +		arg3 = vcpu_get_gr(current,35);
   4.670 +		if (!running_on_sim) { printf("SSC_CONNECT_INTERRUPT, not implemented on hardware.  (ignoring...)\n"); break; }
   4.671 +		(void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
   4.672 +		break;
   4.673 +	    case SSC_NETDEV_PROBE:
   4.674 +		vcpu_set_gr(current,8,-1L);
   4.675 +		break;
   4.676 +	    default:
   4.677 +		printf("ia64_handle_break: bad ssc code %lx\n",ssc);
   4.678 +		break;
   4.679 +	}
   4.680 +	vcpu_increment_iip(current);
   4.681 +}
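/*
 * A minimal standalone sketch of the SSC_READ/SSC_WRITE splitting above:
 * consecutive metaphysical pages need not be physically contiguous, so a
 * request that crosses a page boundary is issued one page-sized chunk at a
 * time, partial page first.  translate_page() is an illustrative stand-in
 * for translate_domain_mpaddr(), and the 16KB page size is an assumption.
 */
#include <stdio.h>

#define PAGE_SIZE 16384L
#define PAGE_MASK (~(PAGE_SIZE-1))

static long translate_page(long mpaddr) { return mpaddr; }	/* stub */

static void split_request(long mpaddr, long len)
{
	if ((mpaddr & PAGE_MASK) != ((mpaddr+len-1) & PAGE_MASK)) {
		/* partial page first, up to the next page boundary */
		long chunk = PAGE_SIZE - (mpaddr & ~PAGE_MASK);
		printf("chunk: pa=%lx len=%lx\n", translate_page(mpaddr), chunk);
		len -= chunk; mpaddr += chunk;
	}
	while (len > 0) {
		long chunk = (len > PAGE_SIZE) ? PAGE_SIZE : len;
		printf("chunk: pa=%lx len=%lx\n", translate_page(mpaddr), chunk);
		len -= PAGE_SIZE; mpaddr += PAGE_SIZE;
	}
}

int main(void)
{
	split_request(0x3f00L, 0x5000L);	/* spans three pages */
	return 0;
}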
   4.682 +
   4.683 +void fooefi(void) {}
   4.684 +
   4.685 +void
   4.686 +ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
   4.687 +{
   4.688 +	static int first_time = 1;
   4.689 +	struct domain *d = (struct domain *) current;
   4.690 +	extern unsigned long running_on_sim;
   4.691 +
   4.692 +	if (first_time) {
   4.693 +		if (platform_is_hp_ski()) running_on_sim = 1;
   4.694 +		else running_on_sim = 0;
   4.695 +		first_time = 0;
   4.696 +	}
   4.697 +	if (iim == 0x80001 || iim == 0x80002) {	//FIXME: don't hardcode constant
    4.698 +		// currently handled identically on the simulator and on hardware
    4.699 +		do_ssc(vcpu_get_gr(current,36), regs);
   4.700 +	}
   4.701 +	else if (iim == d->breakimm) {
   4.702 +		struct ia64_sal_retval x;
   4.703 +		switch (regs->r2) {
   4.704 +		    case FW_HYPERCALL_PAL_CALL:
   4.705 +			//printf("*** PAL hypercall: index=%d\n",regs->r28);
   4.706 +			//FIXME: This should call a C routine
   4.707 +			x = pal_emulator_static(regs->r28);
   4.708 +			regs->r8 = x.status; regs->r9 = x.v0;
   4.709 +			regs->r10 = x.v1; regs->r11 = x.v2;
   4.710 +			break;
   4.711 +		    case FW_HYPERCALL_SAL_CALL:
   4.712 +			x = sal_emulator(vcpu_get_gr(d,32),vcpu_get_gr(d,33),
   4.713 +				vcpu_get_gr(d,34),vcpu_get_gr(d,35),
   4.714 +				vcpu_get_gr(d,36),vcpu_get_gr(d,37),
   4.715 +				vcpu_get_gr(d,38),vcpu_get_gr(d,39));
   4.716 +			regs->r8 = x.status; regs->r9 = x.v0;
   4.717 +			regs->r10 = x.v1; regs->r11 = x.v2;
   4.718 +			break;
   4.719 +		    case FW_HYPERCALL_EFI_RESET_SYSTEM:
   4.720 +			printf("efi.reset_system called ");
   4.721 +			if (current == dom0) {
   4.722 +				printf("(by dom0)\n ");
   4.723 +				(*efi.reset_system)(EFI_RESET_WARM,0,0,NULL);
   4.724 +			}
   4.725 +			printf("(not supported for non-0 domain)\n");
   4.726 +			regs->r8 = EFI_UNSUPPORTED;
   4.727 +			break;
   4.728 +		    case FW_HYPERCALL_EFI_GET_TIME:
   4.729 +			{
   4.730 +			unsigned long *tv, *tc;
   4.731 +			fooefi();
    4.732 +			tv = (unsigned long *) vcpu_get_gr(d,32);
    4.733 +			tc = (unsigned long *) vcpu_get_gr(d,33);
    4.734 +			//printf("efi_get_time(%p,%p) called...",tv,tc);
    4.735 +			tv = __va(translate_domain_mpaddr((unsigned long) tv));
    4.736 +			if (tc) tc = __va(translate_domain_mpaddr((unsigned long) tc));
   4.737 +			regs->r8 = (*efi.get_time)(tv,tc);
   4.738 +			//printf("and returns %lx\n",regs->r8);
   4.739 +			}
   4.740 +			break;
   4.741 +		    case FW_HYPERCALL_EFI_SET_TIME:
   4.742 +		    case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
   4.743 +		    case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
   4.744 +			// FIXME: need fixes in efi.h from 2.6.9
   4.745 +		    case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
   4.746 +			// FIXME: WARNING!! IF THIS EVER GETS IMPLEMENTED
   4.747 +			// SOME OF THE OTHER EFI EMULATIONS WILL CHANGE AS 
   4.748 +			// POINTER ARGUMENTS WILL BE VIRTUAL!!
   4.749 +		    case FW_HYPERCALL_EFI_GET_VARIABLE:
   4.750 +			// FIXME: need fixes in efi.h from 2.6.9
   4.751 +		    case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
   4.752 +		    case FW_HYPERCALL_EFI_SET_VARIABLE:
   4.753 +		    case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
   4.754 +			// FIXME: need fixes in efi.h from 2.6.9
   4.755 +			regs->r8 = EFI_UNSUPPORTED;
   4.756 +			break;
   4.757 +		}
   4.758 +		vcpu_increment_iip(current);
   4.759 +	}
   4.760 +	else reflect_interruption(ifa,isr,iim,regs,IA64_BREAK_VECTOR);
   4.761 +}
   4.762 +
   4.763 +void
   4.764 +ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir)
   4.765 +{
   4.766 +	IA64FAULT vector;
   4.767 +	struct domain *d = (struct domain *) current;
   4.768 +	// FIXME: no need to pass itir in to this routine as we need to
   4.769 +	// compute the virtual itir anyway (based on domain's RR.ps)
   4.770 +	// AND ACTUALLY reflect_interruption doesn't use it anyway!
   4.771 +	itir = vcpu_get_itir_on_fault(d,ifa);
   4.772 +	vector = priv_emulate((struct domain *)current,regs,isr);
   4.773 +	if (vector == IA64_RETRY) {
   4.774 +		reflect_interruption(ifa,isr,itir,regs,
   4.775 +			IA64_ALT_DATA_TLB_VECTOR | IA64_FORCED_IFA);
   4.776 +	}
   4.777 +	else if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
   4.778 +		reflect_interruption(ifa,isr,itir,regs,vector);
   4.779 +	}
   4.780 +}
   4.781 +
   4.782 +#define INTR_TYPE_MAX	10
   4.783 +UINT64 int_counts[INTR_TYPE_MAX];
   4.784 +
   4.785 +void
   4.786 +ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
   4.787 +{
   4.788 +	extern unsigned long vcpu_get_itir_on_fault(struct domain *vcpu, UINT64 ifa);
   4.789 +	struct domain *d = (struct domain *) current;
   4.790 +	unsigned long check_lazy_cover = 0;
   4.791 +	unsigned long psr = regs->cr_ipsr;
   4.792 +	unsigned long itir = vcpu_get_itir_on_fault(d,ifa);
   4.793 +
   4.794 +	if (!(psr & IA64_PSR_CPL)) {
   4.795 +		printf("ia64_handle_reflection: reflecting with priv=0!!\n");
   4.796 +		while(1);
   4.797 +	}
   4.802 +	switch(vector) {
   4.803 +	    case 8:
   4.804 +		vector = IA64_DIRTY_BIT_VECTOR; break;
   4.805 +	    case 9:
   4.806 +		vector = IA64_INST_ACCESS_BIT_VECTOR; break;
   4.807 +	    case 10:
   4.808 +		check_lazy_cover = 1;
   4.809 +		vector = IA64_DATA_ACCESS_BIT_VECTOR; break;
   4.810 +	    case 22:
   4.811 +		vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break;
   4.812 +	    case 23:
   4.813 +		check_lazy_cover = 1;
   4.814 +		vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
   4.815 +	    case 25:
   4.816 +		vector = IA64_DISABLED_FPREG_VECTOR; break;
   4.817 +	    case 26:
    4.818 +		printf("*** NaT fault... attempting to handle as privop\n");
   4.819 +		vector = priv_emulate(d,regs,isr);
   4.820 +		if (vector == IA64_NO_FAULT) {
    4.821 +			printf("*** Handled privop masquerading as NaT fault\n");
   4.822 +			return;
   4.823 +		}
   4.824 +		vector = IA64_NAT_CONSUMPTION_VECTOR; break;
   4.825 +	    case 27:
   4.826 +//printf("*** Handled speculation vector, itc=%lx!\n",ia64_get_itc());
   4.827 +		itir = iim;
   4.828 +		vector = IA64_SPECULATION_VECTOR; break;
   4.829 +	    case 30:
   4.830 +		// FIXME: Should we handle unaligned refs in Xen??
   4.831 +		vector = IA64_UNALIGNED_REF_VECTOR; break;
   4.832 +	    default:
   4.833 +		printf("ia64_handle_reflection: unhandled vector=0x%lx\n",vector);
   4.834 +		while(vector);
   4.835 +		return;
   4.836 +	}
   4.837 +	if (check_lazy_cover && handle_lazy_cover(d, isr, regs)) return;
   4.838 +	reflect_interruption(ifa,isr,itir,regs,vector);
   4.839 +}
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen/arch/ia64/regionreg.c	Mon Nov 22 23:08:21 2004 +0000
     5.3 @@ -0,0 +1,399 @@
     5.4 +/*
     5.5 + * Region register and region id management
     5.6 + *
     5.7 + * Copyright (C) 2001-2004 Hewlett-Packard Co.
      5.8 + *	Dan Magenheimer (dan.magenheimer@hp.com)
     5.9 + *	Bret Mckee (bret.mckee@hp.com)
    5.10 + *
    5.11 + */
    5.12 +
    5.13 +
    5.14 +#include <linux/config.h>
    5.15 +#include <linux/types.h>
    5.16 +#include <linux/sched.h>
    5.17 +#include <asm/page.h>
    5.18 +#include <asm/regionreg.h>
    5.19 +#include <asm/vhpt.h>
    5.20 +
    5.21 +
    5.22 +#define	IA64_MIN_IMPL_RID_BITS	(IA64_MIN_IMPL_RID_MSB+1)
    5.23 +#define	IA64_MAX_IMPL_RID_BITS	24
    5.24 +
    5.25 +#define MIN_RIDS	(1 << IA64_MIN_IMPL_RID_BITS)
    5.26 +#define	MIN_RID_MAX	(MIN_RIDS - 1)
    5.27 +#define	MIN_RID_MASK	(MIN_RIDS - 1)
    5.28 +#define	MAX_RIDS	(1 << (IA64_MAX_IMPL_RID_BITS))
    5.29 +#define	MAX_RID		(MAX_RIDS - 1)
    5.30 +#define	MAX_RID_BLOCKS	(1 << (IA64_MAX_IMPL_RID_BITS-IA64_MIN_IMPL_RID_BITS))
    5.31 +#define RIDS_PER_RIDBLOCK MIN_RIDS
    5.32 +
    5.33 +// This is the one global memory representation of the default Xen region reg
    5.34 +ia64_rr xen_rr;
    5.35 +
    5.36 +#if 0
    5.37 +// following already defined in include/asm-ia64/gcc_intrin.h
    5.38 +// it should probably be ifdef'd out from there to ensure all region
    5.39 +// register usage is encapsulated in this file
    5.40 +static inline unsigned long
    5.41 +ia64_get_rr (unsigned long rr)
    5.42 +{
    5.43 +	    unsigned long r;
    5.44 +	    __asm__ __volatile__ (";;mov %0=rr[%1];;":"=r"(r):"r"(rr):"memory");
    5.45 +	    return r;
    5.46 +}
    5.47 +
    5.48 +static inline void
    5.49 +ia64_set_rr (unsigned long rr, unsigned long rrv)
    5.50 +{
    5.51 +	    __asm__ __volatile__ (";;mov rr[%0]=%1;;"::"r"(rr),"r"(rrv):"memory");
    5.52 +}
    5.53 +#endif
    5.54 +
    5.55 +// use this to allocate a rid out of the "Xen reserved rid block"
    5.56 +unsigned long allocate_reserved_rid(void)
    5.57 +{
    5.58 +	static unsigned long currentrid = XEN_DEFAULT_RID;
    5.59 +	unsigned long t = currentrid;
    5.60 +
    5.61 +	unsigned long max = RIDS_PER_RIDBLOCK;
    5.62 +
    5.63 +	if (++currentrid >= max) return(-1UL);
    5.64 +	return t;
    5.65 +}
    5.66 +
    5.67 +
    5.68 +// returns -1 if none available
    5.69 +unsigned long allocate_metaphysical_rid(void)
    5.70 +{
     5.71 +	unsigned long rid = allocate_reserved_rid();
     5.71 +	return rid;
    5.72 +}
    5.73 +
    5.74 +int deallocate_metaphysical_rid(unsigned long rid)
    5.75 +{
    5.76 +	// fix this when the increment allocation mechanism is fixed.
    5.77 +	return 1;
    5.78 +}
    5.79 +
    5.80 +
    5.81 +void init_rr(void)
    5.82 +{
    5.83 +	xen_rr.rrval = 0;
    5.84 +	xen_rr.ve = 0;
    5.85 +	xen_rr.rid = allocate_reserved_rid();
    5.86 +	xen_rr.ps = PAGE_SHIFT;
    5.87 +
    5.88 +	printf("initialized xen_rr.rid=0x%lx\n", xen_rr.rid);
    5.89 +}
    5.90 +
    5.91 +/*************************************
    5.92 +  Region Block setup/management
    5.93 +*************************************/
    5.94 +
    5.95 +static int implemented_rid_bits = 0;
    5.96 +static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };
    5.97 +
    5.98 +void get_impl_rid_bits(void)
    5.99 +{
   5.100 +	// FIXME (call PAL)
   5.101 +//#ifdef CONFIG_MCKINLEY
   5.102 +	implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;
   5.103 +//#else
   5.104 +//#error "rid ranges won't work on Merced"
   5.105 +//#endif
   5.106 +	if (implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS ||
   5.107 +	    implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
   5.108 +		BUG();
   5.109 +}
   5.110 +
   5.111 +
   5.112 +/*
   5.113 + * Allocate a power-of-two-sized chunk of region id space -- one or more
   5.114 + *  "rid blocks"
   5.115 + */
   5.116 +int allocate_rid_range(struct domain *d, unsigned long ridbits)
   5.117 +{
   5.118 +	int i, j, n_rid_blocks;
   5.119 +
   5.120 +	if (implemented_rid_bits == 0) get_impl_rid_bits();
   5.121 +	
    5.122 +	if (ridbits >= IA64_MAX_IMPL_RID_BITS)
    5.123 +		ridbits = IA64_MAX_IMPL_RID_BITS - 1;
    5.124 +	
    5.125 +	if (ridbits < IA64_MIN_IMPL_RID_BITS)
    5.126 +		ridbits = IA64_MIN_IMPL_RID_BITS;
   5.127 +
   5.128 +	// convert to rid_blocks and find one
    5.129 +	n_rid_blocks = 1 << (ridbits - IA64_MIN_IMPL_RID_BITS);	// power-of-two chunk
   5.130 +	
   5.131 +	// skip over block 0, reserved for "meta-physical mappings (and Xen)"
   5.132 +	for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
   5.133 +		if (ridblock_owner[i] == NULL) {
   5.134 +			for (j = i; j < i + n_rid_blocks; ++j) {
   5.135 +				if (ridblock_owner[j]) break;
   5.136 +			}
    5.137 +			if (j == i + n_rid_blocks) break;	// whole run is free
   5.138 +		}
   5.139 +	}
   5.140 +	
   5.141 +	if (i >= MAX_RID_BLOCKS) return 0;
   5.142 +	
   5.143 +	// found an unused block:
   5.144 +	//   (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
   5.145 +	// mark this block as owned
   5.146 +	for (j = i; j < i + n_rid_blocks; ++j) ridblock_owner[j] = d;
   5.147 +	
   5.148 +	// setup domain struct
   5.149 +	d->rid_bits = ridbits;
   5.150 +	d->starting_rid = i << IA64_MIN_IMPL_RID_BITS;
   5.151 +	d->ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
   5.152 +	
   5.153 +	return 1;
   5.154 +}
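/*
 * A minimal standalone sketch of the block arithmetic above, assuming an
 * illustrative IA64_MIN_IMPL_RID_BITS of 18: a domain asking for 2^20 RIDs
 * gets four contiguous, naturally aligned blocks of 2^18 RIDs each.
 */
#include <stdio.h>
#define MIN_BITS 18
int main(void)
{
	unsigned long ridbits = 20;			/* request: 2^20 RIDs */
	unsigned long n = 1UL << (ridbits - MIN_BITS);	/* 4 rid blocks */
	unsigned long i = n;				/* first candidate block */
	printf("blocks=%lu, rids [%#lx,%#lx)\n", n, i << MIN_BITS, (i+n) << MIN_BITS);
	return 0;
}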
   5.155 +
   5.156 +
   5.157 +int deallocate_rid_range(struct domain *d)
   5.158 +{
   5.159 +	int i;
   5.160 +	int rid_block_end = d->ending_rid >> IA64_MIN_IMPL_RID_BITS;
   5.161 +	int rid_block_start = d->starting_rid >> IA64_MIN_IMPL_RID_BITS;
   5.162 +
   5.163 +	return 1;  // KLUDGE ALERT
   5.164 +	//
   5.165 +	// not all domains will have allocated RIDs (physical mode loaders for instance)
   5.166 +	//
   5.167 +	if (d->rid_bits == 0) return 1;
   5.168 +
   5.169 +#ifdef DEBUG
    5.170 +	for (i = rid_block_start; i < rid_block_end; ++i) {
    5.171 +		ASSERT(ridblock_owner[i] == d);
    5.172 +	}
   5.173 +#endif
   5.174 +	
    5.175 +	for (i = rid_block_start; i < rid_block_end; ++i)
    5.176 +		ridblock_owner[i] = NULL;
   5.177 +	
   5.178 +	d->rid_bits = 0;
   5.179 +	d->starting_rid = 0;
   5.180 +	d->ending_rid = 0;
   5.181 +	return 1;
   5.182 +}
   5.183 +
   5.184 +
   5.185 +// This function is purely for performance... apparently scrambling
   5.186 +//  bits in the region id makes for better hashing, which means better
   5.187 +//  use of the VHPT, which means better performance
   5.188 +// Note that the only time a RID should be mangled is when it is stored in
   5.189 +//  a region register; anytime it is "viewable" outside of this module,
   5.190 +//  it should be unmangled
   5.191 +
   5.192 +//This appears to work in Xen... turn it on later so no complications yet
   5.193 +//#define CONFIG_MANGLE_RIDS
   5.194 +#ifdef CONFIG_MANGLE_RIDS
   5.195 +static inline unsigned long
   5.196 +vmMangleRID(unsigned long RIDVal)
   5.197 +{
   5.198 +	union bits64 { unsigned char bytes[4]; unsigned long uint; };
   5.199 +
   5.200 +	union bits64 t;
   5.201 +	unsigned char tmp;
   5.202 +
   5.203 +	t.uint = RIDVal;
   5.204 +	tmp = t.bytes[1];
   5.205 +	t.bytes[1] = t.bytes[3];
   5.206 +	t.bytes[3] = tmp;
   5.207 +
   5.208 +	return t.uint;
   5.209 +}
   5.210 +
   5.211 +// since vmMangleRID is symmetric, use it for unmangling also
   5.212 +#define vmUnmangleRID(x)	vmMangleRID(x)
   5.213 +#else
   5.214 +// no mangling/unmangling
   5.215 +#define vmMangleRID(x)	(x)
   5.216 +#define vmUnmangleRID(x) (x)
   5.217 +#endif
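/*
 * A standalone check of the property assumed above: swapping bytes 1 and 3
 * is its own inverse, so vmUnmangleRID() can simply reuse vmMangleRID().
 */
#include <assert.h>
#include <stdio.h>

static unsigned long mangle(unsigned long rid)
{
	union { unsigned char bytes[sizeof(unsigned long)]; unsigned long uint; } t;
	unsigned char tmp;

	t.uint = rid;
	tmp = t.bytes[1]; t.bytes[1] = t.bytes[3]; t.bytes[3] = tmp;
	return t.uint;
}

int main(void)
{
	unsigned long rid = 0x123456;		/* illustrative rid */
	assert(mangle(mangle(rid)) == rid);	/* symmetric */
	printf("%#lx -> %#lx\n", rid, mangle(rid));
	return 0;
}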
   5.218 +
   5.219 +static inline void
   5.220 +set_rr_no_srlz(unsigned long rr, unsigned long rrval)
   5.221 +{
   5.222 +	ia64_set_rr(rr, vmMangleRID(rrval));
   5.223 +}
   5.224 +
   5.225 +void
   5.226 +set_rr(unsigned long rr, unsigned long rrval)
   5.227 +{
   5.228 +	ia64_set_rr(rr, vmMangleRID(rrval));
   5.229 +	ia64_srlz_d();
   5.230 +}
   5.231 +
   5.232 +unsigned long
   5.233 +get_rr(unsigned long rr)
   5.234 +{
   5.235 +	return vmUnmangleRID(ia64_get_rr(rr));
   5.236 +}
   5.237 +
   5.238 +static inline int validate_page_size(unsigned long ps)
   5.239 +{
   5.240 +	switch(ps) {
   5.241 +	    case 12: case 13: case 14: case 16: case 18:
   5.242 +	    case 20: case 22: case 24: case 26: case 28:
   5.243 +		return 1;
   5.244 +	    default:
   5.245 +		return 0;
   5.246 +	}
   5.247 +}
   5.248 +
   5.249 +// validates and changes a single region register
   5.250 +// in the currently executing domain
   5.251 +// Passing a value of -1 is a (successful) no-op
   5.252 +// NOTE: DOES NOT SET VCPU's rrs[x] value!!
   5.253 +int set_one_rr(unsigned long rr, unsigned long val)
   5.254 +{
   5.255 +	struct domain *d = current;
   5.256 +	unsigned long rreg = REGION_NUMBER(rr);
    5.257 +	ia64_rr rrv, newrrv;
   5.258 +	unsigned long newrid;
   5.259 +
   5.260 +	if (val == -1) return 1;
   5.261 +
   5.262 +	rrv.rrval = val;
   5.263 +	newrrv.rrval = 0;
   5.264 +	newrid = d->starting_rid + rrv.rid;
   5.265 +
   5.266 +	if (newrid > d->ending_rid) return 0;
   5.267 +
   5.269 +	if (rreg == 7) {
   5.270 +		newrrv.rid = newrid;
   5.271 +		newrrv.ve = VHPT_ENABLED_REGION_7;
   5.272 +		newrrv.ps = IA64_GRANULE_SHIFT;
   5.273 +		ia64_new_rr7(vmMangleRID(newrrv.rrval));
   5.274 +	}
   5.275 +	else {
   5.276 +		newrrv.rid = newrid;
   5.277 +		// FIXME? region 6 needs to be uncached for EFI to work
   5.278 +		if (rreg == 6) newrrv.ve = VHPT_ENABLED_REGION_7;
   5.279 +		else newrrv.ve = VHPT_ENABLED_REGION_0_TO_6;
   5.280 +		newrrv.ps = PAGE_SHIFT;
   5.281 +		set_rr(rr,newrrv.rrval);
   5.282 +	}
   5.283 +	return 1;
   5.284 +}
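/*
 * A minimal standalone sketch of the rid translation just above: the rid a
 * domain writes is an offset into its private block, so the physical rid is
 * starting_rid + rid, rejected if it runs past ending_rid.  All values here
 * are illustrative.
 */
#include <stdio.h>
int main(void)
{
	unsigned long starting_rid = 4UL << 18, ending_rid = 8UL << 18;
	unsigned long rid = 0x1234;			/* domain-virtual rid */
	unsigned long newrid = starting_rid + rid;
	printf("virtual %#lx -> physical %#lx (%s)\n", rid, newrid,
	       newrid > ending_rid ? "rejected" : "ok");
	return 0;
}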
   5.285 +
    5.286 +// set rr0 to the passed rid (for metaphysical mode, so don't use the domain offset)
   5.287 +int set_metaphysical_rr(unsigned long rr, unsigned long rid)
   5.288 +{
   5.289 +	ia64_rr rrv;
   5.290 +	
   5.291 +	rrv.rrval = 0;
   5.292 +	rrv.rid = rid;
   5.293 +	rrv.ps = PAGE_SHIFT;
   5.294 +//	rrv.ve = 1; 	FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
   5.295 +	rrv.ve = 0;
    5.296 +	set_rr(rr,rrv.rrval);
    5.296 +	return 1;
   5.297 +}
   5.298 +
   5.299 +// validates/changes region registers 0-6 in the currently executing domain
   5.300 +// Note that this is the one and only SP API (other than executing a privop)
   5.301 +// for a domain to use to change region registers
   5.302 +int set_all_rr( u64 rr0, u64 rr1, u64 rr2, u64 rr3,
   5.303 +		     u64 rr4, u64 rr5, u64 rr6, u64 rr7)
   5.304 +{
   5.305 +	if (!set_one_rr(0x0000000000000000L, rr0)) return 0;
   5.306 +	if (!set_one_rr(0x2000000000000000L, rr1)) return 0;
   5.307 +	if (!set_one_rr(0x4000000000000000L, rr2)) return 0;
   5.308 +	if (!set_one_rr(0x6000000000000000L, rr3)) return 0;
   5.309 +	if (!set_one_rr(0x8000000000000000L, rr4)) return 0;
   5.310 +	if (!set_one_rr(0xa000000000000000L, rr5)) return 0;
   5.311 +	if (!set_one_rr(0xc000000000000000L, rr6)) return 0;
   5.312 +	if (!set_one_rr(0xe000000000000000L, rr7)) return 0;
   5.313 +	return 1;
   5.314 +}
   5.315 +
   5.316 +void init_all_rr(struct domain *d)
   5.317 +{
   5.318 +	ia64_rr rrv;
   5.319 +
   5.320 +	rrv.rrval = 0;
   5.321 +	rrv.rid = d->metaphysical_rid;
   5.322 +	rrv.ps = PAGE_SHIFT;
   5.323 +	rrv.ve = 1;
   5.324 +	d->shared_info->arch.rrs[0] = -1;
   5.325 +	d->shared_info->arch.rrs[1] = rrv.rrval;
   5.326 +	d->shared_info->arch.rrs[2] = rrv.rrval;
   5.327 +	d->shared_info->arch.rrs[3] = rrv.rrval;
   5.328 +	d->shared_info->arch.rrs[4] = rrv.rrval;
   5.329 +	d->shared_info->arch.rrs[5] = rrv.rrval;
   5.330 +	d->shared_info->arch.rrs[6] = rrv.rrval;
   5.331 +//	d->shared_info->arch.rrs[7] = rrv.rrval;
   5.332 +}
   5.333 +
   5.334 +
   5.335 +/* XEN/ia64 INTERNAL ROUTINES */
   5.336 +
   5.337 +unsigned long physicalize_rid(struct domain *d, unsigned long rid)
   5.338 +{
   5.339 +	ia64_rr rrv;
   5.340 +	    
   5.341 +	rrv.rrval = rid;
   5.342 +	rrv.rid += d->starting_rid;
   5.343 +	return rrv.rrval;
   5.344 +}
   5.345 +
   5.346 +unsigned long
   5.347 +virtualize_rid(struct domain *d, unsigned long rid)
   5.348 +{
   5.349 +	ia64_rr rrv;
   5.350 +	    
   5.351 +	rrv.rrval = rid;
   5.352 +	rrv.rid -= d->starting_rid;
   5.353 +	return rrv.rrval;
   5.354 +}
   5.355 +
   5.356 +// loads a thread's region register (0-6) state into
   5.357 +// the real physical region registers.  Returns the
   5.358 +// (possibly mangled) bits to store into rr7
   5.359 +// iff it is different than what is currently in physical
    5.360 +// rr7 (because we have to go to assembly and physical mode
   5.361 +// to change rr7).  If no change to rr7 is required, returns 0.
   5.362 +//
   5.363 +unsigned long load_region_regs(struct domain *d)
   5.364 +{
   5.365 +	unsigned long rr0, rr1,rr2, rr3, rr4, rr5, rr6;
   5.366 +	unsigned long oldrr7, newrr7;
   5.367 +	// TODO: These probably should be validated
   5.368 +
   5.369 +	if (d->metaphysical_mode) {
   5.370 +		ia64_rr rrv;
   5.371 +
    5.372 +		rrv.rrval = 0;
    5.372 +		rrv.rid = d->metaphysical_rid;
   5.373 +		rrv.ps = PAGE_SHIFT;
   5.374 +		rrv.ve = 1;
   5.375 +		rr0 = rr1 = rr2 = rr3 = rr4 = rr5 = rr6 = newrr7 = rrv.rrval;
   5.376 +	}
   5.377 +	else {
   5.378 +		rr0 = physicalize_rid(d, d->shared_info->arch.rrs[0]);
   5.379 +		rr1 = physicalize_rid(d, d->shared_info->arch.rrs[1]);
   5.380 +		rr2 = physicalize_rid(d, d->shared_info->arch.rrs[2]);
   5.381 +		rr3 = physicalize_rid(d, d->shared_info->arch.rrs[3]);
   5.382 +		rr4 = physicalize_rid(d, d->shared_info->arch.rrs[4]);
   5.383 +		rr5 = physicalize_rid(d, d->shared_info->arch.rrs[5]);
   5.384 +		rr6 = physicalize_rid(d, d->shared_info->arch.rrs[6]);
   5.385 +		newrr7 = physicalize_rid(d, d->shared_info->arch.rrs[7]);
   5.386 +	}
   5.387 +
   5.388 +	set_rr_no_srlz(0x0000000000000000L, rr0);
   5.389 +	set_rr_no_srlz(0x2000000000000000L, rr1);
   5.390 +	set_rr_no_srlz(0x4000000000000000L, rr2);
   5.391 +	set_rr_no_srlz(0x6000000000000000L, rr3);
   5.392 +	set_rr_no_srlz(0x8000000000000000L, rr4);
   5.393 +	set_rr_no_srlz(0xa000000000000000L, rr5);
   5.394 +	set_rr_no_srlz(0xc000000000000000L, rr6);
   5.395 +	ia64_srlz_d();
   5.396 +	oldrr7 = get_rr(0xe000000000000000L);
   5.397 +	if (oldrr7 != newrr7) {
   5.398 +		newrr7 = (newrr7 & ~0xff) | (PAGE_SHIFT << 2) | 1;
   5.399 +		return vmMangleRID(newrr7);
   5.400 +	}
   5.401 +	else return 0;
   5.402 +}
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/arch/ia64/vcpu.c	Mon Nov 22 23:08:21 2004 +0000
     6.3 @@ -0,0 +1,1559 @@
     6.4 +/*
     6.5 + * Virtualized CPU functions
     6.6 + * 
     6.7 + * Copyright (C) 2004 Hewlett-Packard Co.
     6.8 + *	Dan Magenheimer (dan.magenheimer@hp.com)
     6.9 + *
    6.10 + */
    6.11 +
    6.12 +#include <linux/sched.h>
    6.13 +#include <asm/ia64_int.h>
    6.14 +#include <asm/vcpu.h>
    6.15 +#include <asm/regionreg.h>
    6.16 +#include <asm/tlb.h>
    6.17 +#include <asm/processor.h>
    6.18 +#include <asm/delay.h>
    6.19 +
    6.20 +typedef	union {
     6.21 +	struct ia64_psr;	// unnamed member (compiler extension) so PSR bits can be accessed directly
    6.22 +	unsigned long i64;
    6.23 +} PSR;
    6.24 +
    6.25 +//typedef	struct pt_regs	REGS;
    6.26 +//typedef struct domain VCPU;
    6.27 +
    6.28 +// this def for vcpu_regs won't work if kernel stack is present
    6.29 +#define	vcpu_regs(vcpu) ((struct pt_regs *) vcpu->regs)
    6.30 +#define	PSCB(x)	x->shared_info->arch
    6.31 +
    6.32 +#define	TRUE	1
    6.33 +#define	FALSE	0
    6.34 +#define	IA64_PTA_SZ_BIT		2
    6.35 +#define	IA64_PTA_VF_BIT		8
    6.36 +#define	IA64_PTA_BASE_BIT	15
    6.37 +#define	IA64_PTA_LFMT		(1UL << IA64_PTA_VF_BIT)
    6.38 +#define	IA64_PTA_SZ(x)	(x##UL << IA64_PTA_SZ_BIT)
    6.39 +
    6.40 +#define STATIC
    6.41 +
    6.42 +unsigned long vcpu_verbose = 0;
    6.43 +#define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
    6.44 +
    6.45 +/**************************************************************************
    6.46 + VCPU general register access routines
    6.47 +**************************************************************************/
    6.48 +
    6.49 +UINT64
    6.50 +vcpu_get_gr(VCPU *vcpu, unsigned reg)
    6.51 +{
    6.52 +	REGS *regs = vcpu_regs(vcpu);
    6.53 +	UINT64 val;
    6.54 +
    6.55 +	if (!reg) return 0;
    6.56 +	getreg(reg,&val,0,regs);	// FIXME: handle NATs later
    6.57 +	return val;
    6.58 +}
    6.59 +
    6.60 +// returns:
    6.61 +//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
    6.62 +//   IA64_NO_FAULT otherwise
    6.63 +IA64FAULT
    6.64 +vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value)
    6.65 +{
    6.66 +	REGS *regs = vcpu_regs(vcpu);
    6.67 +	long sof = (regs->cr_ifs) & 0x7f;
    6.68 +
    6.69 +	if (!reg) return IA64_ILLOP_FAULT;
    6.70 +	if (reg >= sof + 32) return IA64_ILLOP_FAULT;
    6.71 +	setreg(reg,value,0,regs);	// FIXME: handle NATs later
    6.72 +	return IA64_NO_FAULT;
    6.73 +}
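/*
 * A minimal standalone sketch of the frame check above: cr.ifs.sof (the low
 * 7 bits) is the size of the current register frame, so the last valid
 * general register is r(31 + sof).  The cr_ifs value is illustrative.
 */
#include <stdio.h>
int main(void)
{
	unsigned long cr_ifs = 0x16;	/* sof = 22 */
	long sof = cr_ifs & 0x7f;
	printf("writable GRs: r1..r%ld\n", 31 + sof);
	return 0;
}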
    6.74 +
    6.75 +/**************************************************************************
    6.76 + VCPU privileged application register access routines
    6.77 +**************************************************************************/
    6.78 +
    6.79 +IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val)
    6.80 +{
    6.81 +	if (reg == 44) return (vcpu_set_itc(vcpu,val));
    6.82 +	if (reg == 27) return (IA64_ILLOP_FAULT);
    6.83 +	if (reg > 7) return (IA64_ILLOP_FAULT);
    6.84 +	PSCB(vcpu).krs[reg] = val;
    6.85 +#if 0
    6.86 +// for now, privify kr read's so all kr accesses are privileged
    6.87 +	switch (reg) {
    6.88 +	      case 0: asm volatile ("mov ar.k0=%0" :: "r"(val)); break;
    6.89 +	      case 1: asm volatile ("mov ar.k1=%0" :: "r"(val)); break;
    6.90 +	      case 2: asm volatile ("mov ar.k2=%0" :: "r"(val)); break;
    6.91 +	      case 3: asm volatile ("mov ar.k3=%0" :: "r"(val)); break;
    6.92 +	      case 4: asm volatile ("mov ar.k4=%0" :: "r"(val)); break;
    6.93 +	      case 5: asm volatile ("mov ar.k5=%0" :: "r"(val)); break;
    6.94 +	      case 6: asm volatile ("mov ar.k6=%0" :: "r"(val)); break;
    6.95 +	      case 7: asm volatile ("mov ar.k7=%0" :: "r"(val)); break;
    6.96 +	      case 27: asm volatile ("mov ar.cflg=%0" :: "r"(val)); break;
    6.97 +	}
    6.98 +#endif
    6.99 +	return IA64_NO_FAULT;
   6.100 +}
   6.101 +
   6.102 +IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val)
   6.103 +{
   6.104 +	if (reg > 7) return (IA64_ILLOP_FAULT);
   6.105 +	*val = PSCB(vcpu).krs[reg];
   6.106 +	return IA64_NO_FAULT;
   6.107 +}
   6.108 +
   6.109 +/**************************************************************************
   6.110 + VCPU processor status register access routines
   6.111 +**************************************************************************/
   6.112 +
   6.113 +void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
   6.114 +{
   6.115 +	/* only do something if mode changes */
   6.116 +	if (!!newmode ^ !!vcpu->metaphysical_mode) {
   6.117 +		if (newmode) set_metaphysical_rr(0,vcpu->metaphysical_rid);
   6.118 +		else if (PSCB(vcpu).rrs[0] != -1)
   6.119 +			set_one_rr(0, PSCB(vcpu).rrs[0]);
   6.120 +		vcpu->metaphysical_mode = newmode;
   6.121 +	}
   6.122 +}
   6.123 +
   6.124 +IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
   6.125 +{
   6.126 +	struct ia64_psr psr, imm, *ipsr;
   6.127 +	REGS *regs = vcpu_regs(vcpu);
   6.128 +
   6.129 +	// TODO: All of these bits need to be virtualized
   6.130 +	// TODO: Only allowed for current vcpu
   6.131 +	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
   6.132 +	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
   6.133 +	imm = *(struct ia64_psr *)&imm24;
    6.134 +	// interrupt delivery flag
    6.135 +	if (imm.i) PSCB(vcpu).interrupt_delivery_enabled = 0;
    6.136 +	// interrupt collection flag
    6.137 +	if (imm.ic) PSCB(vcpu).interrupt_collection_enabled = 0;
    6.138 +	//if (imm.ic) PSCB(vcpu).interrupt_delivery_enabled = 0;
    6.139 +	// only the bits checked below are handled for now
   6.140 +	if (imm24 & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP
   6.141 +		| IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT
   6.142 +		| IA64_PSR_DFL | IA64_PSR_DFH))
   6.143 +			return (IA64_ILLOP_FAULT);
   6.144 +	if (imm.dfh) ipsr->dfh = 0;
   6.145 +	if (imm.dfl) ipsr->dfl = 0;
   6.146 +	if (imm.pp) { ipsr->pp = 0; psr.pp = 0; }
   6.147 +	if (imm.up) { ipsr->up = 0; psr.up = 0; }
   6.148 +	if (imm.sp) { ipsr->sp = 0; psr.sp = 0; }
   6.149 +	if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE);
   6.150 +	__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
   6.151 +	return IA64_NO_FAULT;
   6.152 +}
   6.153 +
   6.154 +extern UINT64 vcpu_check_pending_interrupts(VCPU *vcpu);
   6.155 +#define SPURIOUS_VECTOR 0xf
   6.156 +
   6.157 +IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
   6.158 +{
   6.159 +	struct ia64_psr psr, imm, *ipsr;
   6.160 +	REGS *regs = vcpu_regs(vcpu);
   6.161 +	UINT64 mask, enabling_interrupts = 0;
   6.162 +
   6.163 +	// TODO: All of these bits need to be virtualized
   6.164 +	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
   6.165 +	imm = *(struct ia64_psr *)&imm24;
   6.166 +	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
    6.167 +	// only the bits in 'mask' below are handled for now
   6.168 +	mask = IA64_PSR_PP|IA64_PSR_SP|IA64_PSR_I|IA64_PSR_IC|IA64_PSR_UM |
   6.169 +		IA64_PSR_DT|IA64_PSR_DFL|IA64_PSR_DFH;
   6.170 +	if (imm24 & ~mask) return (IA64_ILLOP_FAULT);
   6.171 +	if (imm.dfh) ipsr->dfh = 1;
   6.172 +	if (imm.dfl) ipsr->dfl = 1;
   6.173 +	if (imm.pp) { ipsr->pp = 1; psr.pp = 1; }
   6.174 +	if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
   6.175 +	if (imm.i) {
   6.176 +		if (!PSCB(vcpu).interrupt_delivery_enabled) {
   6.177 +//printf("vcpu_set_psr_sm: psr.ic 0->1 ");
   6.178 +			enabling_interrupts = 1;
   6.179 +		}
   6.180 +		PSCB(vcpu).interrupt_delivery_enabled = 1;
   6.181 +	}
   6.182 +	if (imm.ic)  PSCB(vcpu).interrupt_collection_enabled = 1;
   6.183 +	// TODO: do this faster
   6.184 +	if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
   6.185 +	if (imm.ac) { ipsr->ac = 1; psr.ac = 1; }
   6.186 +	if (imm.up) { ipsr->up = 1; psr.up = 1; }
   6.187 +	if (imm.be) {
   6.188 +		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
   6.189 +		return (IA64_ILLOP_FAULT);
   6.190 +	}
   6.191 +	if (imm.dt) vcpu_set_metaphysical_mode(vcpu,FALSE);
   6.192 +	__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
   6.193 +#if 0 // now done with deliver_pending_interrupts
   6.194 +	if (enabling_interrupts) {
   6.195 +		if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR) {
   6.196 +//printf("with interrupts pending\n");
   6.197 +			return IA64_EXTINT_VECTOR;
   6.198 +		}
   6.199 +//else printf("but nothing pending\n");
   6.200 +	}
   6.201 +#endif
   6.202 +	return IA64_NO_FAULT;
   6.203 +}
   6.204 +
   6.205 +IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
   6.206 +{
   6.207 +	struct ia64_psr psr, newpsr, *ipsr;
   6.208 +	REGS *regs = vcpu_regs(vcpu);
   6.209 +	UINT64 enabling_interrupts = 0;
   6.210 +
   6.211 +	// TODO: All of these bits need to be virtualized
   6.212 +	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
   6.213 +	newpsr = *(struct ia64_psr *)&val;
   6.214 +	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
   6.215 +	// just handle psr.up and psr.pp for now
   6.216 +	//if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP)) return (IA64_ILLOP_FAULT);
   6.217 +	// however trying to set other bits can't be an error as it is in ssm
   6.218 +	if (newpsr.dfh) ipsr->dfh = 1;
   6.219 +	if (newpsr.dfl) ipsr->dfl = 1;
   6.220 +	if (newpsr.pp) { ipsr->pp = 1; psr.pp = 1; }
   6.221 +	if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
   6.222 +	if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
   6.223 +	if (newpsr.i) {
   6.224 +		if (!PSCB(vcpu).interrupt_delivery_enabled)
   6.225 +			enabling_interrupts = 1;
   6.226 +		PSCB(vcpu).interrupt_delivery_enabled = 1;
   6.227 +	}
   6.228 +	if (newpsr.ic)  PSCB(vcpu).interrupt_collection_enabled = 1;
   6.229 +	if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
   6.230 +	if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; }
   6.232 +	if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE);
   6.233 +	else vcpu_set_metaphysical_mode(vcpu,TRUE);
   6.234 +	if (newpsr.be) {
   6.235 +		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
   6.236 +		return (IA64_ILLOP_FAULT);
   6.237 +	}
   6.238 +	//__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
   6.239 +#if 0 // now done with deliver_pending_interrupts
   6.240 +	if (enabling_interrupts) {
   6.241 +		if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
   6.242 +			return IA64_EXTINT_VECTOR;
   6.243 +	}
   6.244 +#endif
   6.245 +	return IA64_NO_FAULT;
   6.246 +}
   6.247 +
   6.248 +IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval)
   6.249 +{
   6.250 +	UINT64 psr;
   6.251 +	struct ia64_psr newpsr;
   6.252 +
   6.253 +	// TODO: This needs to return a "filtered" view of
   6.254 +	// the psr, not the actual psr.  Probably the psr needs
   6.255 +	// to be a field in regs (in addition to ipsr).
   6.256 +	__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
   6.257 +	newpsr = *(struct ia64_psr *)&psr;
   6.258 +	if (newpsr.cpl == 2) newpsr.cpl = 0;
   6.259 +	if (PSCB(vcpu).interrupt_delivery_enabled) newpsr.i = 1;
   6.260 +	else newpsr.i = 0;
   6.261 +	if (PSCB(vcpu).interrupt_collection_enabled) newpsr.ic = 1;
   6.262 +	else newpsr.ic = 0;
   6.263 +	*pval = *(unsigned long *)&newpsr;
   6.264 +	return IA64_NO_FAULT;
   6.265 +}
   6.266 +
   6.267 +BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
   6.268 +{
   6.269 +	return !!PSCB(vcpu).interrupt_collection_enabled;
   6.270 +}
   6.271 +
   6.272 +BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
   6.273 +{
   6.274 +	return !!PSCB(vcpu).interrupt_delivery_enabled;
   6.275 +}
   6.276 +
   6.277 +UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
   6.278 +{
   6.279 +	UINT64 dcr = PSCB(vcpu).dcr;
   6.280 +	PSR psr = {0};
   6.281 +	
   6.282 +	//printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr);
   6.283 +	psr.i64 = prevpsr;
   6.284 +	psr.be = 0; if (dcr & IA64_DCR_BE) psr.be = 1;
   6.285 +	psr.pp = 0; if (dcr & IA64_DCR_PP) psr.pp = 1;
   6.286 +	psr.ic = PSCB(vcpu).interrupt_collection_enabled;
   6.287 +	psr.i = PSCB(vcpu).interrupt_delivery_enabled;
   6.288 +	psr.bn = PSCB(vcpu).banknum;
   6.289 +	psr.dt = 1; psr.it = 1; psr.rt = 1;
   6.290 +	if (psr.cpl == 2) psr.cpl = 0; // !!!! fool domain
   6.291 +	// psr.pk = 1;
   6.292 +	//printf("returns 0x%016lx...",psr.i64);
   6.293 +	return psr.i64;
   6.294 +}
   6.295 +
   6.296 +/**************************************************************************
   6.297 + VCPU control register access routines
   6.298 +**************************************************************************/
   6.299 +
   6.300 +IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
   6.301 +{
   6.302 +extern unsigned long privop_trace;
   6.303 +//privop_trace=0;
   6.304 +//verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu).iip);
   6.305 +	// Reads of cr.dcr on Xen always have the sign bit set, so
   6.306 +	// a domain can differentiate whether it is running on SP or not
   6.307 +	*pval = PSCB(vcpu).dcr | 0x8000000000000000L;
   6.308 +	return (IA64_NO_FAULT);
   6.309 +}
   6.310 +
   6.311 +IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
   6.312 +{
   6.313 +	*pval = PSCB(vcpu).iva & ~0x7fffL;
   6.314 +	return (IA64_NO_FAULT);
   6.315 +}
   6.316 +
   6.317 +IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
   6.318 +{
   6.319 +	*pval = PSCB(vcpu).pta;
   6.320 +	return (IA64_NO_FAULT);
   6.321 +}
   6.322 +
   6.323 +IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
   6.324 +{
   6.325 +	//REGS *regs = vcpu_regs(vcpu);
   6.326 +	//*pval = regs->cr_ipsr;
   6.327 +	*pval = PSCB(vcpu).ipsr;
   6.328 +	return (IA64_NO_FAULT);
   6.329 +}
   6.330 +
   6.331 +IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
   6.332 +{
   6.333 +	*pval = PSCB(vcpu).isr;
   6.334 +	return (IA64_NO_FAULT);
   6.335 +}
   6.336 +
   6.337 +IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
   6.338 +{
   6.339 +	//REGS *regs = vcpu_regs(vcpu);
   6.340 +	//*pval = regs->cr_iip;
   6.341 +	*pval = PSCB(vcpu).iip;
   6.342 +	return (IA64_NO_FAULT);
   6.343 +}
   6.344 +
   6.345 +IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
   6.346 +{
   6.347 +	UINT64 val = PSCB(vcpu).ifa;
   6.348 +	*pval = val;
   6.349 +	return (IA64_NO_FAULT);
   6.350 +}
   6.351 +
   6.352 +
   6.353 +unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa)
   6.354 +{
   6.355 +	ia64_rr rr;
   6.356 +
   6.357 +	rr.rrval = 0;
   6.358 +	rr.ps = vcpu_get_rr_ps(vcpu,ifa);
   6.359 +	rr.rid = vcpu_get_rr_rid(vcpu,ifa);
   6.360 +	return (rr.rrval);
   6.361 +}
   6.362 +
   6.363 +
   6.364 +IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
   6.365 +{
   6.366 +	UINT64 val = PSCB(vcpu).itir;
   6.367 +	*pval = val;
   6.368 +	return (IA64_NO_FAULT);
   6.369 +}
   6.370 +
   6.371 +IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
   6.372 +{
   6.373 +	UINT64 val = PSCB(vcpu).iipa;
   6.374 +	// SP entry code does not save iipa yet nor does it get
   6.375 +	//  properly delivered in the pscb
   6.376 +	printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n");
   6.377 +	*pval = val;
   6.378 +	return (IA64_NO_FAULT);
   6.379 +}
   6.380 +
   6.381 +IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
   6.382 +{
   6.383 +	//PSCB(vcpu).ifs = PSCB(vcpu)->regs.cr_ifs;
   6.384 +	//*pval = PSCB(vcpu).regs.cr_ifs;
   6.385 +	*pval = PSCB(vcpu).ifs;
   6.386 +	PSCB(vcpu).incomplete_regframe = 0;
   6.387 +	return (IA64_NO_FAULT);
   6.388 +}
   6.389 +
   6.390 +IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
   6.391 +{
   6.392 +	UINT64 val = PSCB(vcpu).iim;
   6.393 +	*pval = val;
   6.394 +	return (IA64_NO_FAULT);
   6.395 +}
   6.396 +
   6.397 +IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
   6.398 +{
   6.399 +	return vcpu_thash(vcpu,PSCB(vcpu).ifa,pval);
   6.400 +}
   6.401 +
   6.402 +IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val)
   6.403 +{
   6.404 +extern unsigned long privop_trace;
   6.405 +//privop_trace=1;
   6.406 +	// Reads of cr.dcr on SP always have the sign bit set, so
   6.407 +	// a domain can differentiate whether it is running on SP or not
   6.408 +	// Thus, writes of DCR should ignore the sign bit
   6.409 +//verbose("vcpu_set_dcr: called\n");
   6.410 +	PSCB(vcpu).dcr = val & ~0x8000000000000000L;
   6.411 +	return (IA64_NO_FAULT);
   6.412 +}
   6.413 +
   6.414 +IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val)
   6.415 +{
   6.416 +	PSCB(vcpu).iva = val & ~0x7fffL;
   6.417 +	return (IA64_NO_FAULT);
   6.418 +}
   6.419 +
   6.420 +IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val)
   6.421 +{
   6.422 +	if (val & IA64_PTA_LFMT) {
   6.423 +		printf("*** No support for VHPT long format yet!!\n");
   6.424 +		return (IA64_ILLOP_FAULT);
   6.425 +	}
   6.426 +	if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT;
   6.427 +	if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT;
   6.428 +	PSCB(vcpu).pta = val;
   6.429 +	return IA64_NO_FAULT;
   6.430 +}
   6.431 +
   6.432 +IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val)
   6.433 +{
   6.434 +	PSCB(vcpu).ipsr = val;
   6.435 +	return IA64_NO_FAULT;
   6.436 +}
   6.437 +
   6.438 +IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val)
   6.439 +{
   6.440 +	PSCB(vcpu).isr = val;
   6.441 +	return IA64_NO_FAULT;
   6.442 +}
   6.443 +
   6.444 +IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val)
   6.445 +{
   6.446 +	PSCB(vcpu).iip = val;
   6.447 +	return IA64_NO_FAULT;
   6.448 +}
   6.449 +
   6.450 +IA64FAULT vcpu_increment_iip(VCPU *vcpu)
   6.451 +{
   6.452 +	REGS *regs = vcpu_regs(vcpu);
   6.453 +	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
   6.454 +	if (ipsr->ri == 2) { ipsr->ri=0; regs->cr_iip += 16; }
   6.455 +	else ipsr->ri++;
   6.456 +	return (IA64_NO_FAULT);
   6.457 +}
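/*
 * A standalone sketch of the slot advance above: IA-64 bundles are 16 bytes
 * and hold three instruction slots, with ipsr.ri selecting the slot within
 * the bundle at cr.iip.  Advancing past slot 2 moves to slot 0 of the next
 * bundle.  The bundle address is illustrative.
 */
#include <stdio.h>
int main(void)
{
	unsigned long iip = 0xa000000000001230UL;	/* illustrative bundle address */
	int ri = 2;					/* currently in the last slot */
	if (ri == 2) { ri = 0; iip += 16; }
	else ri++;
	printf("next: iip=%#lx ri=%d\n", iip, ri);
	return 0;
}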
   6.458 +
   6.459 +IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val)
   6.460 +{
   6.461 +	PSCB(vcpu).ifa = val;
   6.462 +	return IA64_NO_FAULT;
   6.463 +}
   6.464 +
   6.465 +IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val)
   6.466 +{
   6.467 +	PSCB(vcpu).itir = val;
   6.468 +	return IA64_NO_FAULT;
   6.469 +}
   6.470 +
   6.471 +IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val)
   6.472 +{
   6.473 +	// SP entry code does not save iipa yet nor does it get
   6.474 +	//  properly delivered in the pscb
   6.475 +	printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n");
   6.476 +	PSCB(vcpu).iipa = val;
   6.477 +	return IA64_NO_FAULT;
   6.478 +}
   6.479 +
   6.480 +IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val)
   6.481 +{
   6.482 +	//REGS *regs = vcpu_regs(vcpu);
   6.483 +	PSCB(vcpu).ifs = val;
   6.484 +	return IA64_NO_FAULT;
   6.485 +}
   6.486 +
   6.487 +IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val)
   6.488 +{
   6.489 +	PSCB(vcpu).iim = val;
   6.490 +	return IA64_NO_FAULT;
   6.491 +}
   6.492 +
   6.493 +IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val)
   6.494 +{
   6.495 +	PSCB(vcpu).iha = val;
   6.496 +	return IA64_NO_FAULT;
   6.497 +}
   6.498 +
   6.499 +/**************************************************************************
   6.500 + VCPU interrupt control register access routines
   6.501 +**************************************************************************/
   6.502 +
   6.503 +void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
   6.504 +{
   6.505 +	if (vector & ~0xff) {
   6.506 +		printf("vcpu_pend_interrupt: bad vector\n");
   6.507 +		return;
   6.508 +	}
   6.509 +	if (!test_bit(vector,PSCB(vcpu).delivery_mask)) return;
   6.510 +	if (test_bit(vector,PSCB(vcpu).irr)) {
   6.511 +//printf("vcpu_pend_interrupt: overrun\n");
   6.512 +	}
   6.513 +	set_bit(vector,PSCB(vcpu).irr);
   6.514 +}
   6.515 +
   6.516 +#define	IA64_TPR_MMI	0x10000
   6.517 +#define	IA64_TPR_MIC	0x000f0
   6.518 +
   6.519 +/* checks to see if a VCPU has any unmasked pending interrupts
   6.520 + * if so, returns the highest, else returns SPURIOUS_VECTOR */
   6.521 +/* NOTE: Since this gets called from vcpu_get_ivr() and the
   6.522 + * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit,
   6.523 + * this routine also ignores pscb.interrupt_delivery_enabled
    6.524 + * and this must be checked independently; see vcpu_deliverable_interrupts() */
   6.525 +UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
   6.526 +{
   6.527 +	UINT64 *p, *q, *r, bits, bitnum, mask, i, vector;
   6.528 +
   6.529 +	p = &PSCB(vcpu).irr[3];
   6.530 +	q = &PSCB(vcpu).delivery_mask[3];
   6.531 +	r = &PSCB(vcpu).insvc[3];
   6.532 +	for (i = 3; ; p--, q--, r--, i--) {
   6.533 +		bits = *p & *q;
   6.534 +		if (bits) break; // got a potential interrupt
   6.535 +		if (*r) {
   6.536 +			// nothing in this word which is pending+inservice
   6.537 +			// but there is one inservice which masks lower
   6.538 +			return SPURIOUS_VECTOR;
   6.539 +		}
   6.540 +		if (i == 0) {
   6.541 +		// checked all bits... nothing pending+inservice
   6.542 +			return SPURIOUS_VECTOR;
   6.543 +		}
   6.544 +	}
   6.545 +	// have a pending,deliverable interrupt... see if it is masked
   6.546 +	bitnum = ia64_fls(bits);
   6.547 +//printf("XXXXXXX vcpu_check_pending_interrupts: got bitnum=%p...",bitnum);
   6.548 +	vector = bitnum+(i*64);
   6.549 +	mask = 1L << bitnum;
   6.550 +//printf("XXXXXXX vcpu_check_pending_interrupts: got vector=%p...",vector);
   6.551 +	if (*r >= mask) {
   6.552 +		// masked by equal inservice
   6.553 +//printf("but masked by equal inservice\n");
   6.554 +		return SPURIOUS_VECTOR;
   6.555 +	}
   6.556 +	if (PSCB(vcpu).tpr & IA64_TPR_MMI) {
   6.557 +		// tpr.mmi is set
   6.558 +//printf("but masked by tpr.mmi\n");
   6.559 +		return SPURIOUS_VECTOR;
   6.560 +	}
   6.561 +	if (((PSCB(vcpu).tpr & IA64_TPR_MIC) + 15) >= vector) {
   6.562 +		//tpr.mic masks class
   6.563 +//printf("but masked by tpr.mic\n");
   6.564 +		return SPURIOUS_VECTOR;
   6.565 +	}
   6.566 +
   6.567 +//printf("returned to caller\n");
   6.568 +	return vector;
   6.569 +}
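/*
 * A standalone sketch of the tpr.mic test above: priority classes are 16
 * vectors wide, and (tpr & IA64_TPR_MIC) is mic << 4, so adding 15 yields
 * the highest vector of the highest masked class.  tpr.mic = 3 here is
 * illustrative: it masks vectors 0x00..0x3f and leaves 0x40+ deliverable.
 */
#include <stdio.h>
#define IA64_TPR_MIC 0x000f0
int main(void)
{
	unsigned long tpr = 3UL << 4;	/* tpr.mic = 3 */
	unsigned long vector;
	for (vector = 0; vector < 256; vector += 16)
		printf("vector %3lu: %s\n", vector,
		       ((tpr & IA64_TPR_MIC) + 15) >= vector ? "masked" : "deliverable");
	return 0;
}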
   6.570 +
   6.571 +UINT64 vcpu_deliverable_interrupts(VCPU *vcpu)
   6.572 +{
   6.573 +	return (vcpu_get_psr_i(vcpu) &&
   6.574 +		vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR);
   6.575 +}
   6.576 +
   6.577 +IA64FAULT vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
   6.578 +{
   6.579 +extern unsigned long privop_trace;
   6.580 +//privop_trace=1;
   6.581 +	//TODO: Implement this
   6.582 +	printf("vcpu_get_lid: WARNING: Getting cr.lid always returns zero\n");
   6.583 +	*pval = 0;
   6.584 +	return IA64_NO_FAULT;
   6.585 +}
   6.586 +
   6.587 +IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
   6.588 +{
   6.589 +	int i;
   6.590 +	UINT64 vector, mask;
   6.591 +#if 1
   6.592 +	static char firstivr = 1;
   6.593 +	static char firsttime[256];
   6.594 +	if (firstivr) {
   6.595 +		int i;
   6.596 +		for (i=0;i<256;i++) firsttime[i]=1;
   6.597 +		firstivr=0;
   6.598 +	}
   6.599 +#endif
   6.600 +
   6.601 +	vector = vcpu_check_pending_interrupts(vcpu);
   6.602 +	if (vector == SPURIOUS_VECTOR) {
   6.603 +		PSCB(vcpu).pending_interruption = 0;
   6.604 +		*pval = vector;
   6.605 +		return IA64_NO_FAULT;
   6.606 +	}
   6.607 +	// now have an unmasked, pending, deliverable vector!
   6.608 +	// getting ivr has "side effects"
   6.609 +#if 0
   6.610 +	if (firsttime[vector]) {
   6.611 +		printf("*** First get_ivr on vector=%d,itc=%lx\n",
   6.612 +			vector,ia64_get_itc());
   6.613 +		firsttime[vector]=0;
   6.614 +	}
   6.615 +#endif
   6.616 +	i = vector >> 6;
   6.617 +	mask = 1L << (vector & 0x3f);
   6.618 +//printf("ZZZZZZ vcpu_get_ivr: setting insvc mask for vector %ld\n",vector);
   6.619 +	PSCB(vcpu).insvc[i] |= mask;
   6.620 +	PSCB(vcpu).irr[i] &= ~mask;
   6.621 +	PSCB(vcpu).pending_interruption--;
   6.622 +	*pval = vector;
   6.623 +	return IA64_NO_FAULT;
   6.624 +}
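/*
 * A minimal standalone sketch of the irr/insvc indexing above: a vector
 * (0..255) selects one of four 64-bit words by its high two bits and a bit
 * within that word by its low six bits.  The vector value is illustrative.
 */
#include <stdio.h>
int main(void)
{
	unsigned long vector = 0xd1;			/* illustrative vector */
	int i = vector >> 6;				/* word 3 */
	unsigned long mask = 1UL << (vector & 0x3f);	/* bit 17 */
	printf("vector %#lx -> word %d, mask %#lx\n", vector, i, mask);
	return 0;
}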
   6.625 +
   6.626 +IA64FAULT vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
   6.627 +{
   6.628 +	*pval = PSCB(vcpu).tpr;
   6.629 +	return (IA64_NO_FAULT);
   6.630 +}
   6.631 +
   6.632 +IA64FAULT vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
   6.633 +{
   6.634 +	*pval = 0L;  // reads of eoi always return 0
   6.635 +	return (IA64_NO_FAULT);
   6.636 +}
   6.637 +
   6.638 +IA64FAULT vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
   6.639 +{
   6.640 +#ifndef IRR_USE_FIXED
   6.641 +	printk("vcpu_get_irr: called, not implemented yet\n");
   6.642 +	return IA64_ILLOP_FAULT;
   6.643 +#else
   6.644 +	*pval = vcpu->irr[0];
   6.645 +	return (IA64_NO_FAULT);
   6.646 +#endif
   6.647 +}
   6.648 +
   6.649 +IA64FAULT vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
   6.650 +{
   6.651 +#ifndef IRR_USE_FIXED
   6.652 +	printk("vcpu_get_irr: called, not implemented yet\n");
   6.653 +	return IA64_ILLOP_FAULT;
   6.654 +#else
   6.655 +	*pval = vcpu->irr[1];
   6.656 +	return (IA64_NO_FAULT);
   6.657 +#endif
   6.658 +}
   6.659 +
   6.660 +IA64FAULT vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
   6.661 +{
   6.662 +#ifndef IRR_USE_FIXED
   6.663 +	printk("vcpu_get_irr: called, not implemented yet\n");
   6.664 +	return IA64_ILLOP_FAULT;
   6.665 +#else
   6.666 +	*pval = vcpu->irr[2];
   6.667 +	return (IA64_NO_FAULT);
   6.668 +#endif
   6.669 +}
   6.670 +
   6.671 +IA64FAULT vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
   6.672 +{
   6.673 +#ifndef IRR_USE_FIXED
   6.674 +	printk("vcpu_get_irr: called, not implemented yet\n");
   6.675 +	return IA64_ILLOP_FAULT;
   6.676 +#else
   6.677 +	*pval = vcpu->irr[3];
   6.678 +	return (IA64_NO_FAULT);
   6.679 +#endif
   6.680 +}
   6.681 +
   6.682 +IA64FAULT vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
   6.683 +{
   6.684 +	*pval = PSCB(vcpu).itv;
   6.685 +	return (IA64_NO_FAULT);
   6.686 +}
   6.687 +
   6.688 +IA64FAULT vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
   6.689 +{
   6.690 +	*pval = PSCB(vcpu).pmv;
   6.691 +	return (IA64_NO_FAULT);
   6.692 +}
   6.693 +
   6.694 +IA64FAULT vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
   6.695 +{
   6.696 +	*pval = PSCB(vcpu).cmcv;
   6.697 +	return (IA64_NO_FAULT);
   6.698 +}
   6.699 +
   6.700 +IA64FAULT vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
   6.701 +{
   6.702 +	// fix this when setting values other than m-bit is supported
   6.703 +	printf("vcpu_get_lrr0: Unmasked interrupts unsupported\n");
   6.704 +	*pval = (1L << 16);
   6.705 +	return (IA64_NO_FAULT);
   6.706 +}
   6.707 +
   6.708 +IA64FAULT vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
   6.709 +{
   6.710 +	// fix this when setting values other than m-bit is supported
   6.711 +	printf("vcpu_get_lrr1: Unmasked interrupts unsupported\n");
   6.712 +	*pval = (1L << 16);
   6.713 +	return (IA64_NO_FAULT);
   6.714 +}
   6.715 +
   6.716 +IA64FAULT vcpu_set_lid(VCPU *vcpu, UINT64 val)
   6.717 +{
   6.718 +	printf("vcpu_set_lid: Setting cr.lid is unsupported\n");
   6.719 +	return (IA64_ILLOP_FAULT);
   6.720 +}
   6.721 +
   6.722 +IA64FAULT vcpu_set_tpr(VCPU *vcpu, UINT64 val)
   6.723 +{
   6.724 +	if (val & 0xff00) return IA64_RSVDREG_FAULT;
   6.725 +	PSCB(vcpu).tpr = val;
   6.726 +	return (IA64_NO_FAULT);
   6.727 +}
   6.728 +
   6.729 +IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val)
   6.730 +{
   6.731 +	UINT64 *p, bits, vec, bitnum;
   6.732 +	int i;
   6.733 +
   6.734 +	p = &PSCB(vcpu).insvc[3];
   6.735 +	for (i = 3; (i >= 0) && !(bits = *p); i--, p--);
   6.736 +	if (i < 0) {
   6.737 +		printf("Trying to EOI interrupt when none are in-service.\r\n");
    6.739 +		return IA64_NO_FAULT;	// nothing in-service; treat as a no-op
   6.739 +	}
   6.740 +	bitnum = ia64_fls(bits);
   6.741 +	vec = bitnum + (i*64);
   6.742 +	/* clear the correct bit */
   6.743 +	bits &= ~(1L << bitnum);
   6.744 +	*p = bits;
   6.745 +	/* clearing an eoi bit may unmask another pending interrupt... */
   6.746 +	if (PSCB(vcpu).interrupt_delivery_enabled) { // but only if enabled...
   6.747 +		// worry about this later... Linux only calls eoi
   6.748 +		// with interrupts disabled
   6.749 +		printf("Trying to EOI interrupt with interrupts enabled\r\n");
   6.750 +	}
   6.751 +//printf("YYYYY vcpu_set_eoi: Successful\n");
   6.752 +	return (IA64_NO_FAULT);
   6.753 +}
   6.754 +
   6.755 +IA64FAULT vcpu_set_lrr0(VCPU *vcpu, UINT64 val)
   6.756 +{
   6.757 +	if (!(val & (1L << 16))) {
   6.758 +		printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
   6.759 +		return (IA64_ILLOP_FAULT);
   6.760 +	}
   6.761 +	// no place to save this state but nothing to do anyway
   6.762 +	return (IA64_NO_FAULT);
   6.763 +}
   6.764 +
   6.765 +IA64FAULT vcpu_set_lrr1(VCPU *vcpu, UINT64 val)
   6.766 +{
   6.767 +	if (!(val & (1L << 16))) {
   6.768 +		printf("vcpu_set_lrr0: Unmasked interrupts unsupported\n");
   6.769 +		return (IA64_ILLOP_FAULT);
   6.770 +	}
   6.771 +	// no place to save this state but nothing to do anyway
   6.772 +	return (IA64_NO_FAULT);
   6.773 +}
   6.774 +
   6.775 +
   6.776 +IA64FAULT vcpu_set_itv(VCPU *vcpu, UINT64 val)
   6.777 +{
   6.778 +extern unsigned long privop_trace;
   6.779 +//privop_trace=1;
    6.780 +	if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
   6.781 +	PSCB(vcpu).itv = val;
   6.782 +	if (val & 0x10000) {
    6.783 +		printf("**** vcpu_set_itv(%lx): vitm=%lx, setting to 0\n",val,PSCB(vcpu).domain_itm);
   6.784 +		PSCB(vcpu).domain_itm = 0;
   6.785 +	}
   6.786 +	else vcpu_enable_timer(vcpu,1000000L);
   6.787 +	return (IA64_NO_FAULT);
   6.788 +}
   6.789 +
   6.790 +IA64FAULT vcpu_set_pmv(VCPU *vcpu, UINT64 val)
   6.791 +{
   6.792 +	if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
   6.793 +	PSCB(vcpu).pmv = val;
   6.794 +	return (IA64_NO_FAULT);
   6.795 +}
   6.796 +
   6.797 +IA64FAULT vcpu_set_cmcv(VCPU *vcpu, UINT64 val)
   6.798 +{
   6.799 +	if (val & 0xef00) /* reserved fields */ return IA64_RSVDREG_FAULT;
   6.800 +	PSCB(vcpu).cmcv = val;
   6.801 +	return (IA64_NO_FAULT);
   6.802 +}
   6.803 +
   6.804 +/**************************************************************************
   6.805 +Interval timer routines
   6.806 +**************************************************************************/
   6.807 +
   6.808 +BOOLEAN vcpu_timer_disabled(VCPU *vcpu)
   6.809 +{
   6.810 +	UINT64 itv = PSCB(vcpu).itv;
   6.811 +	return(!itv || !!(itv & 0x10000));
   6.812 +}
   6.813 +
   6.814 +BOOLEAN vcpu_timer_expired(VCPU *vcpu)
   6.815 +{
   6.816 +	unsigned long domain_itm = PSCB(vcpu).domain_itm;
   6.817 +	unsigned long now = ia64_get_itc();
   6.818 + 
   6.819 +	if (domain_itm && (now > domain_itm) &&
   6.820 +		!vcpu_timer_disabled(vcpu)) return TRUE;
   6.821 +	return FALSE;
   6.822 +}
   6.823 +
   6.824 +void vcpu_safe_set_itm(unsigned long val)
   6.825 +{
   6.826 +	unsigned long epsilon = 100;
   6.827 +	UINT64 now = ia64_get_itc();
   6.828 +
   6.829 +	local_irq_disable();
   6.830 +	while (1) {
   6.831 +//printf("*** vcpu_safe_set_itm: Setting itm to %lx, itc=%lx\n",val,now);
   6.832 +		ia64_set_itm(val);
   6.833 +		if (val > (now = ia64_get_itc())) break;
   6.834 +		val = now + epsilon;
   6.835 +		epsilon <<= 1;
   6.836 +	}
   6.837 +	local_irq_enable();
   6.838 +}
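          +
          +// Note on the retry loop above: if the ITC races past a candidate
          +// match value before ia64_set_itm() takes effect, the target is moved
          +// to now + epsilon with epsilon doubling on each try, so a strictly
          +// future match value is reached in a bounded number of iterations.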
   6.839 +
   6.840 +void vcpu_set_next_timer(VCPU *vcpu)
   6.841 +{
   6.842 +	UINT64 d = PSCB(vcpu).domain_itm;
   6.843 +	//UINT64 s = PSCB(vcpu).xen_itm;
   6.844 +	UINT64 s = local_cpu_data->itm_next;
   6.845 +	UINT64 now = ia64_get_itc();
   6.846 +	//UINT64 interval = PSCB(vcpu).xen_timer_interval;
   6.847 +
   6.848 +	/* gloss over the wraparound problem for now... we know it exists
   6.849 +	 * but it doesn't matter right now */
   6.850 +
   6.851 +#if 0
   6.852 +	/* ensure at least next SP tick is in the future */
   6.853 +	if (!interval) PSCB(vcpu).xen_itm = now +
   6.854 +#if 0
   6.855 +		(running_on_sim() ? SIM_DEFAULT_CLOCK_RATE :
   6.856 +		 			DEFAULT_CLOCK_RATE);
   6.857 +#else
   6.858 +	3000000;
   6.859 +//printf("vcpu_set_next_timer: HACK!\n");
   6.860 +#endif
   6.861 +#if 0
   6.862 +	if (PSCB(vcpu).xen_itm < now)
   6.863 +		while (PSCB(vcpu).xen_itm < now + (interval>>1))
   6.864 +			PSCB(vcpu).xen_itm += interval;
   6.865 +#endif
   6.866 +#endif
   6.867 +
   6.868 +	if (is_idle_task(vcpu)) {
   6.869 +		printf("****** vcpu_set_next_timer called during idle!!\n");
   6.870 +	}
   6.871 +	//s = PSCB(vcpu).xen_itm;
   6.872 +	if (d && (d > now) && (d < s)) {
   6.873 +		vcpu_safe_set_itm(d);
   6.874 +		//using_domain_as_itm++;
   6.875 +	}
   6.876 +	else {
   6.877 +		vcpu_safe_set_itm(s);
   6.878 +		//using_xen_as_itm++;
   6.879 +	}
   6.880 +}
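          +
          +// Policy of the code above: program the physical itm with whichever
          +// deadline fires first -- the domain's virtual itm (d) when it is
          +// pending (nonzero), still ahead of now, and before Xen's own next
          +// tick (s); in every other case, fall back to Xen's tick.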
   6.881 +
   6.882 +// parameter is a time interval specified in cycles
   6.883 +void vcpu_enable_timer(VCPU *vcpu,UINT64 cycles)
   6.884 +{
   6.885 +    PSCB(vcpu).xen_timer_interval = cycles;
   6.886 +    vcpu_set_next_timer(vcpu);
    6.887 +    printf("vcpu_enable_timer: interval set to %ld cycles\n",
    6.888 +             PSCB(vcpu).xen_timer_interval);
   6.889 +    __set_bit(PSCB(vcpu).itv, PSCB(vcpu).delivery_mask);
   6.890 +}
   6.891 +
   6.892 +IA64FAULT vcpu_set_itm(VCPU *vcpu, UINT64 val)
   6.893 +{
    6.894 +	UINT64 now = ia64_get_itc();
   6.895 +
   6.896 +	//if (val < now) val = now + 1000;
   6.897 +//printf("*** vcpu_set_itm: called with %lx\n",val);
   6.898 +	PSCB(vcpu).domain_itm = val;
   6.899 +	vcpu_set_next_timer(vcpu);
   6.900 +	return (IA64_NO_FAULT);
   6.901 +}
   6.902 +
   6.903 +IA64FAULT vcpu_set_itc(VCPU *vcpu, UINT64 val)
   6.904 +{
   6.905 +	
   6.906 +	UINT64 oldnow = ia64_get_itc();
   6.907 +	UINT64 olditm = PSCB(vcpu).domain_itm;
   6.908 +	unsigned long d = olditm - oldnow;
   6.909 +	unsigned long x = local_cpu_data->itm_next - oldnow;
   6.910 +	
   6.911 +	UINT64 newnow = val, min_delta;
   6.912 +
   6.913 +	local_irq_disable();
   6.914 +	if (olditm) {
    6.915 +		printf("**** vcpu_set_itc(%lx): vitm changed to %lx\n",val,newnow+d);
   6.916 +		PSCB(vcpu).domain_itm = newnow + d;
   6.917 +	}
   6.918 +	local_cpu_data->itm_next = newnow + x;
   6.919 +	d = PSCB(vcpu).domain_itm;
   6.920 +	x = local_cpu_data->itm_next;
   6.921 +	
   6.922 +	ia64_set_itc(newnow);
   6.923 +	if (d && (d > newnow) && (d < x)) {
   6.924 +		vcpu_safe_set_itm(d);
   6.925 +		//using_domain_as_itm++;
   6.926 +	}
   6.927 +	else {
   6.928 +		vcpu_safe_set_itm(x);
   6.929 +		//using_xen_as_itm++;
   6.930 +	}
   6.931 +	local_irq_enable();
   6.932 +	return (IA64_NO_FAULT);
   6.933 +}
   6.934 +
   6.935 +IA64FAULT vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
   6.936 +{
   6.937 +	//FIXME: Implement this
   6.938 +	printf("vcpu_get_itm: Getting cr.itm is unsupported... continuing\n");
   6.939 +	return (IA64_NO_FAULT);
   6.940 +	//return (IA64_ILLOP_FAULT);
   6.941 +}
   6.942 +
   6.943 +IA64FAULT vcpu_get_itc(VCPU *vcpu, UINT64 *pval)
   6.944 +{
   6.945 +	//TODO: Implement this
   6.946 +	printf("vcpu_get_itc: Getting ar.itc is unsupported\n");
   6.947 +	return (IA64_ILLOP_FAULT);
   6.948 +}
   6.949 +
   6.950 +void vcpu_pend_timer(VCPU *vcpu)
   6.951 +{
   6.952 +	UINT64 itv = PSCB(vcpu).itv & 0xff;
   6.953 +
   6.954 +	if (vcpu_timer_disabled(vcpu)) return;
   6.955 +	vcpu_pend_interrupt(vcpu, itv);
   6.956 +}
   6.957 +
   6.958 +//FIXME: This is a hack because everything dies if a timer tick is lost
   6.959 +void vcpu_poke_timer(VCPU *vcpu)
   6.960 +{
   6.961 +	UINT64 itv = PSCB(vcpu).itv & 0xff;
   6.962 +	UINT64 now = ia64_get_itc();
   6.963 +	UINT64 itm = PSCB(vcpu).domain_itm;
   6.964 +	UINT64 irr;
   6.965 +
   6.966 +	if (vcpu_timer_disabled(vcpu)) return;
   6.967 +	if (!itm) return;
   6.968 +	if (itv != 0xefL) {
   6.969 +		printf("vcpu_poke_timer: unimplemented itv=%lx!\n",itv);
   6.970 +		while(1);
   6.971 +	}
   6.972 +	// using 0xef instead of itv so can get real irr
   6.973 +	if (now > itm && !test_bit(0xefL, PSCB(vcpu).insvc)) {
   6.974 +		if (!test_bit(0xefL,PSCB(vcpu).irr)) {
   6.975 +			irr = ia64_getreg(_IA64_REG_CR_IRR3);
   6.976 +			if (irr & (1L<<(0xef-0xc0))) return;
    6.977 +		if (now-itm > 0x800000)
    6.978 +			printf("*** poking timer: now=%lx,vitm=%lx,xitm=%lx,itm=%lx\n",now,itm,local_cpu_data->itm_next,ia64_get_itm());
   6.979 +			vcpu_pend_interrupt(vcpu, 0xefL);
   6.980 +		}
   6.981 +	}
   6.982 +}
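          +
          +// Note: CR.IRR3 covers vectors 0xc0-0xff, so vector 0xef is bit
          +// (0xef - 0xc0) == 0x2f of that register; the hardcoded 0xef reflects
          +// the Linux timer vector, as enforced by the itv check at entry.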
   6.983 +
   6.984 +
   6.985 +/**************************************************************************
   6.986 +Privileged operation emulation routines
   6.987 +**************************************************************************/
   6.988 +
   6.989 +IA64FAULT vcpu_force_data_miss(VCPU *vcpu, UINT64 ifa)
   6.990 +{
   6.991 +	PSCB(vcpu).ifa = ifa;	// privop traps don't set ifa so do it here
   6.992 +	return (IA64_DATA_TLB_VECTOR | IA64_FORCED_IFA);
   6.993 +}
   6.994 +
   6.995 +
   6.996 +IA64FAULT vcpu_rfi(VCPU *vcpu)
   6.997 +{
   6.998 +	// TODO: Only allowed for current vcpu
   6.999 +	PSR psr;
  6.1000 +	UINT64 int_enable, regspsr = 0;
  6.1001 +	UINT64 ifs;
  6.1002 +	REGS *regs = vcpu_regs(vcpu);
  6.1003 +	extern void dorfirfi(void);
  6.1004 +
  6.1005 +	psr.i64 = PSCB(vcpu).ipsr;
  6.1006 +	if (psr.cpl < 3) psr.cpl = 2;
  6.1007 +	if (psr.i) PSCB(vcpu).interrupt_delivery_enabled = 1;
  6.1008 +	int_enable = psr.i;
  6.1009 +	if (psr.ic)  PSCB(vcpu).interrupt_collection_enabled = 1;
  6.1010 +	if (psr.dt && psr.rt && psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
  6.1011 +	else vcpu_set_metaphysical_mode(vcpu,TRUE);
  6.1012 +	psr.ic = 1; psr.i = 1;
  6.1013 +	psr.dt = 1; psr.rt = 1; psr.it = 1;
  6.1014 +	psr.bn = 1;
  6.1015 +	//psr.pk = 1;  // checking pkeys shouldn't be a problem but seems broken
  6.1016 +	if (psr.be) {
  6.1017 +		printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
  6.1018 +		return (IA64_ILLOP_FAULT);
  6.1019 +	}
  6.1020 +	PSCB(vcpu).incomplete_regframe = 0; // is this necessary?
  6.1021 +	ifs = PSCB(vcpu).ifs;
  6.1022 +	//if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
  6.1023 +	//if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
  6.1024 +	if (ifs & regs->cr_ifs & 0x8000000000000000L) {
  6.1025 +#define SI_OFS(x)	((char *)(&PSCB(vcpu).x) - (char *)(vcpu->shared_info))
   6.1026 +		if (SI_OFS(iip)!=0x150 || SI_OFS(ipsr)!=0x148 || SI_OFS(ifs)!=0x158) {
   6.1027 +			printf("SI_CR_IIP/IPSR/IFS_OFFSET CHANGED, SEE dorfirfi\n");
   6.1028 +			while(1);
   6.1029 +		}
  6.1030 +		// TODO: validate PSCB(vcpu).iip 
  6.1031 +		// TODO: PSCB(vcpu).ipsr = psr;
  6.1032 +		PSCB(vcpu).ipsr = psr.i64;
  6.1033 +		// now set up the trampoline
  6.1034 +		regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
  6.1035 +		__asm__ __volatile ("mov %0=psr;;":"=r"(regspsr)::"memory");
  6.1036 +		regs->cr_ipsr = regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
  6.1037 +	}
  6.1038 +	else {
  6.1039 +		regs->cr_ipsr = psr.i64;
  6.1040 +		regs->cr_iip = PSCB(vcpu).iip;
  6.1041 +	}
  6.1042 +	PSCB(vcpu).interrupt_collection_enabled = 1;
  6.1043 +	vcpu_bsw1(vcpu);
  6.1044 +	PSCB(vcpu).interrupt_delivery_enabled = int_enable;
  6.1045 +	return (IA64_NO_FAULT);
  6.1046 +}
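          +
          +// Trampoline note: when both the saved ifs and the live cr.ifs have
          +// their valid bits set, the guest's iip/ipsr/ifs cannot simply be
          +// copied into the trap frame, so control is bounced through dorfirfi
          +// (see xenasm.S), which reloads cr.iip/cr.ipsr/cr.ifs from shared_info
          +// at the offsets asserted above and then falls into an rfi.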
  6.1047 +
  6.1048 +IA64FAULT vcpu_cover(VCPU *vcpu)
  6.1049 +{
  6.1050 +	REGS *regs = vcpu_regs(vcpu);
  6.1051 +
  6.1052 +	if (!PSCB(vcpu).interrupt_collection_enabled) {
  6.1053 +		if (!PSCB(vcpu).incomplete_regframe)
  6.1054 +			PSCB(vcpu).ifs = regs->cr_ifs;
  6.1055 +		else PSCB(vcpu).incomplete_regframe = 0;
  6.1056 +	}
  6.1057 +	regs->cr_ifs = 0;
  6.1058 +	return (IA64_NO_FAULT);
  6.1059 +}
  6.1060 +
  6.1061 +IA64FAULT vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval)
  6.1062 +{
  6.1063 +	extern unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr);
  6.1064 +	UINT64 pta = PSCB(vcpu).pta;
  6.1065 +	UINT64 pta_sz = (pta & IA64_PTA_SZ(0x3f)) >> IA64_PTA_SZ_BIT;
  6.1066 +	UINT64 pta_base = pta & ~((1UL << IA64_PTA_BASE_BIT)-1);
  6.1067 +	UINT64 Mask = (1L << pta_sz) - 1;
  6.1068 +	UINT64 Mask_60_15 = (Mask >> 15) & 0x3fffffffffff;
  6.1069 +	UINT64 compMask_60_15 = ~Mask_60_15;
  6.1070 +	//UINT64 rr_ps = RR_TO_PS(get_rr(vadr));
  6.1071 +	UINT64 rr_ps = vcpu_get_rr_ps(vcpu,vadr);
  6.1072 +	UINT64 VHPT_offset = (vadr >> rr_ps) << 3;
  6.1073 +	UINT64 VHPT_addr1 = vadr & 0xe000000000000000L;
  6.1074 +	UINT64 VHPT_addr2a =
  6.1075 +		((pta_base >> 15) & 0x3fffffffffff) & compMask_60_15;
  6.1076 +	UINT64 VHPT_addr2b =
   6.1077 +		((VHPT_offset >> 15) & 0x3fffffffffff) & Mask_60_15;
  6.1078 +	UINT64 VHPT_addr3 = VHPT_offset & 0x3fff;
  6.1079 +	UINT64 VHPT_addr = VHPT_addr1 | ((VHPT_addr2a | VHPT_addr2b) << 15) |
  6.1080 +			VHPT_addr3;
  6.1081 +
  6.1082 +	if (VHPT_addr1 == 0xe000000000000000L) {
  6.1083 +	    printf("vcpu_thash: thash unsupported with rr7 @%lx\n",
  6.1084 +		PSCB(vcpu).iip);
  6.1085 +	    return (IA64_ILLOP_FAULT);
  6.1086 +	}
  6.1087 +//verbose("vcpu_thash: vadr=%p, VHPT_addr=%p\n",vadr,VHPT_addr);
  6.1088 +	*pval = VHPT_addr;
  6.1089 +	return (IA64_NO_FAULT);
  6.1090 +}
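          +
          +// Worked example (illustrative values): with pta_base = 0x8000000000,
          +// pta_sz = 20 (so Mask_60_15 = 0x1f), rr.ps = 14 and
          +// vadr = 0x4000000008020000:
          +//   VHPT_offset = (vadr >> 14) << 3                 = 0x8000000010040
          +//   VHPT_addr1  = vadr & 0xe000000000000000         = 0x4000000000000000
          +//   VHPT_addr2a = (pta_base >> 15) & ~Mask_60_15    = 0x1000000
          +//   VHPT_addr2b = (VHPT_offset >> 15) & Mask_60_15  = 0x2
          +//   VHPT_addr3  = VHPT_offset & 0x3fff              = 0x40
          +// giving *pval = 0x4000008000010040.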
  6.1091 +
  6.1092 +IA64FAULT vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
  6.1093 +{
  6.1094 +	printf("vcpu_ttag: ttag instruction unsupported\n");
  6.1095 +	return (IA64_ILLOP_FAULT);
  6.1096 +}
  6.1097 +
  6.1098 +IA64FAULT vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
  6.1099 +{
  6.1100 +	extern TR_ENTRY *match_tr(VCPU *,UINT64);
  6.1101 +	extern TR_ENTRY *match_dtlb(VCPU *,UINT64);
  6.1102 +	TR_ENTRY *trp;
  6.1103 +	UINT64 mask;
  6.1104 +
  6.1105 +extern unsigned long privop_trace;
  6.1106 +	if ((trp=match_tr(current,vadr)) || (trp=match_dtlb(current,vadr))) {
  6.1107 +		mask = (1L << trp->ps) - 1;
  6.1108 +		*padr = ((trp->ppn << 12) & ~mask) | (vadr & mask);
  6.1109 +		verbose("vcpu_tpa: addr=%p @%p, successful, padr=%p\n",vadr,PSCB(vcpu).iip,*padr);
  6.1110 +		return (IA64_NO_FAULT);
  6.1111 +	}
  6.1112 +	verbose("vcpu_tpa addr=%p, @%p, forcing data miss\n",vadr,PSCB(vcpu).iip);
  6.1113 +	return vcpu_force_data_miss(vcpu, vadr);
  6.1114 +}
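          +
          +// Worked example (illustrative values): for a matching entry with
          +// ps = 14 and ppn = 0x12345, mask = 0x3fff, so translating
          +// vadr = 0x2000000000001abc yields
          +// padr = ((0x12345 << 12) & ~0x3fff) | 0x1abc = 0x12345abc.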
  6.1115 +
  6.1116 +IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
  6.1117 +{
  6.1118 +	printf("vcpu_tak: tak instruction unsupported\n");
  6.1119 +	return (IA64_ILLOP_FAULT);
  6.1120 +	// HACK ALERT: tak does a thash for now
  6.1121 +	//return vcpu_thash(vcpu,vadr,key);
  6.1122 +}
  6.1123 +
  6.1124 +/**************************************************************************
  6.1125 + VCPU debug breakpoint register access routines
  6.1126 +**************************************************************************/
  6.1127 +
  6.1128 +IA64FAULT vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
  6.1129 +{
  6.1130 +	// TODO: unimplemented DBRs return a reserved register fault
  6.1131 +	// TODO: Should set Logical CPU state, not just physical
  6.1132 +	ia64_set_dbr(reg,val);
  6.1133 +	return (IA64_NO_FAULT);
  6.1134 +}
  6.1135 +
  6.1136 +IA64FAULT vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
  6.1137 +{
  6.1138 +	// TODO: unimplemented IBRs return a reserved register fault
  6.1139 +	// TODO: Should set Logical CPU state, not just physical
  6.1140 +	ia64_set_ibr(reg,val);
  6.1141 +	return (IA64_NO_FAULT);
  6.1142 +}
  6.1143 +
  6.1144 +IA64FAULT vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  6.1145 +{
  6.1146 +	// TODO: unimplemented DBRs return a reserved register fault
  6.1147 +	UINT64 val = ia64_get_dbr(reg);
  6.1148 +	*pval = val;
  6.1149 +	return (IA64_NO_FAULT);
  6.1150 +}
  6.1151 +
  6.1152 +IA64FAULT vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  6.1153 +{
  6.1154 +	// TODO: unimplemented IBRs return a reserved register fault
  6.1155 +	UINT64 val = ia64_get_ibr(reg);
  6.1156 +	*pval = val;
  6.1157 +	return (IA64_NO_FAULT);
  6.1158 +}
  6.1159 +
  6.1160 +/**************************************************************************
  6.1161 + VCPU performance monitor register access routines
  6.1162 +**************************************************************************/
  6.1163 +
  6.1164 +IA64FAULT vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
  6.1165 +{
  6.1166 +	// TODO: Should set Logical CPU state, not just physical
  6.1167 +	// NOTE: Writes to unimplemented PMC registers are discarded
  6.1168 +	ia64_set_pmc(reg,val);
  6.1169 +	return (IA64_NO_FAULT);
  6.1170 +}
  6.1171 +
  6.1172 +IA64FAULT vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
  6.1173 +{
  6.1174 +	// TODO: Should set Logical CPU state, not just physical
  6.1175 +	// NOTE: Writes to unimplemented PMD registers are discarded
  6.1176 +	ia64_set_pmd(reg,val);
  6.1177 +	return (IA64_NO_FAULT);
  6.1178 +}
  6.1179 +
  6.1180 +IA64FAULT vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  6.1181 +{
  6.1182 +	// NOTE: Reads from unimplemented PMC registers return zero
  6.1183 +	UINT64 val = (UINT64)ia64_get_pmc(reg);
  6.1184 +	*pval = val;
  6.1185 +	return (IA64_NO_FAULT);
  6.1186 +}
  6.1187 +
  6.1188 +IA64FAULT vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  6.1189 +{
  6.1190 +	// NOTE: Reads from unimplemented PMD registers return zero
  6.1191 +	UINT64 val = (UINT64)ia64_get_pmd(reg);
  6.1192 +	*pval = val;
  6.1193 +	return (IA64_NO_FAULT);
  6.1194 +}
  6.1195 +
  6.1196 +/**************************************************************************
  6.1197 + VCPU banked general register access routines
  6.1198 +**************************************************************************/
  6.1199 +
  6.1200 +IA64FAULT vcpu_bsw0(VCPU *vcpu)
  6.1201 +{
  6.1202 +	REGS *regs = vcpu_regs(vcpu);
  6.1203 +	unsigned long *r = &regs->r16;
  6.1204 +	unsigned long *b0 = &PSCB(vcpu).bank0_regs[0];
  6.1205 +	unsigned long *b1 = &PSCB(vcpu).bank1_regs[0];
  6.1206 +	int i;
  6.1207 +
  6.1208 +	if (PSCB(vcpu).banknum) {
  6.1209 +		for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
  6.1210 +		PSCB(vcpu).banknum = 0;
  6.1211 +	}
  6.1212 +	return (IA64_NO_FAULT);
  6.1213 +}
  6.1214 +
  6.1215 +IA64FAULT vcpu_bsw1(VCPU *vcpu)
  6.1216 +{
  6.1217 +	REGS *regs = vcpu_regs(vcpu);
  6.1218 +	unsigned long *r = &regs->r16;
  6.1219 +	unsigned long *b0 = &PSCB(vcpu).bank0_regs[0];
  6.1220 +	unsigned long *b1 = &PSCB(vcpu).bank1_regs[0];
  6.1221 +	int i;
  6.1222 +
  6.1223 +	if (!PSCB(vcpu).banknum) {
  6.1224 +		for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
  6.1225 +		PSCB(vcpu).banknum = 1;
  6.1226 +	}
  6.1227 +	return (IA64_NO_FAULT);
  6.1228 +}
  6.1229 +
  6.1230 +/**************************************************************************
  6.1231 + VCPU cpuid access routines
  6.1232 +**************************************************************************/
  6.1233 +
  6.1234 +
  6.1235 +IA64FAULT vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  6.1236 +{
  6.1237 +	// FIXME: This could get called as a result of a rsvd-reg fault
  6.1238 +	// if reg > 3
  6.1239 +	switch(reg) {
  6.1240 +	    case 0:
  6.1241 +	    case 1:
  6.1242 +		memcpy(pval,"Xen/ia64",8);
  6.1243 +		break;
  6.1244 +	    case 2:
  6.1245 +		*pval = 0;
  6.1246 +		break;
  6.1247 +	    case 3:
  6.1248 +		*pval = 0;  //FIXME: See vol1, 3.1.11
  6.1249 +		break;
  6.1250 +	    case 4:
  6.1251 +		*pval = 1;  //FIXME: See vol1, 3.1.11
  6.1252 +		break;
  6.1253 +	    default:
  6.1254 +		*pval = 0;  //FIXME: See vol1, 3.1.11
  6.1255 +		break;
  6.1256 +	}
  6.1257 +	return (IA64_NO_FAULT);
  6.1258 +}
  6.1259 +
  6.1260 +/**************************************************************************
  6.1261 + VCPU region register access routines
  6.1262 +**************************************************************************/
  6.1263 +
  6.1264 +unsigned long vcpu_get_rr_ve(VCPU *vcpu,UINT64 vadr)
  6.1265 +{
  6.1266 +	
  6.1267 +	ia64_rr rr;
  6.1268 +
  6.1269 +	rr.rrval = PSCB(vcpu).rrs[vadr>>61];
  6.1270 +	return(rr.ve);
  6.1271 +}
  6.1272 +
  6.1273 +
  6.1274 +unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr)
  6.1275 +{
  6.1276 +	
  6.1277 +	ia64_rr rr;
  6.1278 +
  6.1279 +	rr.rrval = PSCB(vcpu).rrs[vadr>>61];
  6.1280 +	return(rr.ps);
  6.1281 +}
  6.1282 +
  6.1283 +
  6.1284 +unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr)
  6.1285 +{
  6.1286 +	
  6.1287 +	ia64_rr rr;
  6.1288 +
  6.1289 +	rr.rrval = PSCB(vcpu).rrs[vadr>>61];
  6.1290 +	return(rr.rid);
  6.1291 +}
  6.1292 +
  6.1293 +
  6.1294 +IA64FAULT vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
  6.1295 +{
  6.1296 +	extern void set_one_rr(UINT64, UINT64);
  6.1297 +	PSCB(vcpu).rrs[reg>>61] = val;
  6.1298 +	// warning: set_one_rr() does it "live"
  6.1299 +	set_one_rr(reg,val);
  6.1300 +	return (IA64_NO_FAULT);
  6.1301 +}
  6.1302 +
  6.1303 +IA64FAULT vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  6.1304 +{
   6.1305 +	UINT64 val = PSCB(vcpu).rrs[reg>>61];
  6.1306 +	*pval = val;
  6.1307 +	return (IA64_NO_FAULT);
  6.1308 +}
  6.1309 +
  6.1310 +/**************************************************************************
  6.1311 + VCPU protection key register access routines
  6.1312 +**************************************************************************/
  6.1313 +
  6.1314 +IA64FAULT vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
  6.1315 +{
  6.1316 +#ifndef PKR_USE_FIXED
  6.1317 +	printk("vcpu_get_pkr: called, not implemented yet\n");
  6.1318 +	return IA64_ILLOP_FAULT;
  6.1319 +#else
  6.1320 +	UINT64 val = (UINT64)ia64_get_pkr(reg);
  6.1321 +	*pval = val;
  6.1322 +	return (IA64_NO_FAULT);
  6.1323 +#endif
  6.1324 +}
  6.1325 +
  6.1326 +IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
  6.1327 +{
  6.1328 +#ifndef PKR_USE_FIXED
  6.1329 +	printk("vcpu_set_pkr: called, not implemented yet\n");
  6.1330 +	return IA64_ILLOP_FAULT;
  6.1331 +#else
  6.1332 +//	if (reg >= NPKRS) return (IA64_ILLOP_FAULT);
  6.1333 +	vcpu->pkrs[reg] = val;
  6.1334 +	ia64_set_pkr(reg,val);
  6.1335 +	return (IA64_NO_FAULT);
  6.1336 +#endif
  6.1337 +}
  6.1338 +
  6.1339 +/**************************************************************************
  6.1340 + VCPU translation register access routines
  6.1341 +**************************************************************************/
  6.1342 +
  6.1343 +static void vcpu_purge_tr_entry(TR_ENTRY *trp)
  6.1344 +{
  6.1345 +	trp->p = 0;
  6.1346 +}
  6.1347 +
  6.1348 +static void vcpu_set_tr_entry(TR_ENTRY *trp, UINT64 pte, UINT64 itir, UINT64 ifa)
  6.1349 +{
  6.1350 +	UINT64 ps;
  6.1351 +
  6.1352 +	trp->itir = itir;
  6.1353 +	trp->rid = virtualize_rid(current, get_rr(ifa) & RR_RID_MASK);
  6.1354 +	trp->p = 1;
  6.1355 +	ps = trp->ps;
  6.1356 +	trp->page_flags = pte;
  6.1357 +	if (trp->pl < 2) trp->pl = 2;
  6.1358 +	trp->vadr = ifa & ~0xfff;
  6.1359 +	if (ps > 12) { // "ignore" relevant low-order bits
  6.1360 +		trp->ppn &= ~((1UL<<(ps-12))-1);
  6.1361 +		trp->vadr &= ~((1UL<<ps)-1);
  6.1362 +	}
  6.1363 +}
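          +
          +// Example of the alignment above (illustrative): for a 64KB entry
          +// (ps == 16) the low 4 bits of ppn and the low 16 bits of vadr are
          +// cleared, so the entry always describes a naturally aligned region
          +// of the stated page size.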
  6.1364 +
  6.1365 +TR_ENTRY *vcpu_match_tr_entry(VCPU *vcpu, TR_ENTRY *trp, UINT64 ifa, int count)
  6.1366 +{
  6.1367 +	unsigned long rid = (get_rr(ifa) & RR_RID_MASK);
  6.1368 +	int i;
  6.1369 +
  6.1370 +	for (i = 0; i < count; i++, trp++) {
  6.1371 +		if (!trp->p) continue;
  6.1372 +		if (physicalize_rid(vcpu,trp->rid) != rid) continue;
  6.1373 +        	if (ifa < trp->vadr) continue;
   6.1374 +        	if (ifa > (trp->vadr + (1L << trp->ps)) - 1) continue;
  6.1375 +		//if (trp->key && !match_pkr(vcpu,trp->key)) continue;
  6.1376 +		return trp;
  6.1377 +	}
  6.1378 +	return 0;
  6.1379 +}
  6.1380 +
  6.1381 +TR_ENTRY *match_tr(VCPU *vcpu, unsigned long ifa)
  6.1382 +{
  6.1383 +	TR_ENTRY *trp;
  6.1384 +
  6.1385 +	trp = vcpu_match_tr_entry(vcpu,vcpu->shared_info->arch.dtrs,ifa,NDTRS);
  6.1386 +	if (trp) return trp;
  6.1387 +	trp = vcpu_match_tr_entry(vcpu,vcpu->shared_info->arch.itrs,ifa,NITRS);
  6.1388 +	if (trp) return trp;
  6.1389 +	return 0;
  6.1390 +}
  6.1391 +
  6.1392 +IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte,
  6.1393 +		UINT64 itir, UINT64 ifa)
  6.1394 +{
  6.1395 +	TR_ENTRY *trp;
  6.1396 +
  6.1397 +	if (slot >= NDTRS) return IA64_RSVDREG_FAULT;
  6.1398 +	trp = &PSCB(vcpu).dtrs[slot];
  6.1399 +	vcpu_set_tr_entry(trp,pte,itir,ifa);
  6.1400 +	return IA64_NO_FAULT;
  6.1401 +}
  6.1402 +
  6.1403 +IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte,
  6.1404 +		UINT64 itir, UINT64 ifa)
  6.1405 +{
  6.1406 +	TR_ENTRY *trp;
  6.1407 +
  6.1408 +	if (slot >= NITRS) return IA64_RSVDREG_FAULT;
  6.1409 +	trp = &PSCB(vcpu).itrs[slot];
  6.1410 +	vcpu_set_tr_entry(trp,pte,itir,ifa);
  6.1411 +	return IA64_NO_FAULT;
  6.1412 +}
  6.1413 +
  6.1414 +/**************************************************************************
  6.1415 + VCPU translation cache access routines
  6.1416 +**************************************************************************/
  6.1417 +
  6.1418 +void foobar(void) { /*vcpu_verbose = 1;*/ }
  6.1419 +
  6.1420 +extern VCPU *dom0;
  6.1421 +
  6.1422 +void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64 logps)
  6.1423 +{
  6.1424 +	unsigned long psr;
  6.1425 +	unsigned long ps = (vcpu==dom0) ? logps : PAGE_SHIFT;
  6.1426 +
  6.1427 +	// FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
  6.1428 +	// FIXME, must be inlined or potential for nested fault here!
  6.1429 +	psr = ia64_clear_ic();
  6.1430 +	ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
  6.1431 +	ia64_set_psr(psr);
  6.1432 +	// ia64_srlz_i(); // no srls req'd, will rfi later
  6.1433 +	if (IorD & 0x1) vcpu_set_tr_entry(&PSCB(vcpu).itlb,pte,logps<<2,vaddr);
  6.1434 +	if (IorD & 0x2) vcpu_set_tr_entry(&PSCB(vcpu).dtlb,pte,logps<<2,vaddr);
  6.1435 +}
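          +
          +// Note: IorD is a bit mask (0x1 = instruction, 0x2 = data); the
          +// one-entry itlb/dtlb shadows in the PSCB mirror whatever was just
          +// inserted, so vcpu_tpa() can later translate it via match_dtlb()
          +// without touching the hardware TLB.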
  6.1436 +
  6.1437 +TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa)
  6.1438 +{
  6.1439 +	return vcpu_match_tr_entry(vcpu,&vcpu->shared_info->arch.dtlb,ifa,1);
  6.1440 +}
  6.1441 +
  6.1442 +IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
  6.1443 +{
  6.1444 +	unsigned long pteval, logps = (itir >> 2) & 0x3f;
  6.1445 +	unsigned long translate_domain_pte(UINT64,UINT64,UINT64);
  6.1446 +
   6.1447 +	if (logps < PAGE_SHIFT) {
  6.1448 +		printf("vcpu_itc_d: domain trying to use smaller page size!\n");
  6.1449 +		//FIXME: kill domain here
  6.1450 +		while(1);
  6.1451 +	}
  6.1452 +	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
  6.1453 +	pteval = translate_domain_pte(pte,ifa,itir);
  6.1454 +	if (!pteval) return IA64_ILLOP_FAULT;
  6.1455 +	vcpu_itc_no_srlz(vcpu,2,ifa,pteval,logps);
  6.1456 +	return IA64_NO_FAULT;
  6.1457 +}
  6.1458 +
  6.1459 +IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
  6.1460 +{
  6.1461 +	unsigned long pteval, logps = (itir >> 2) & 0x3f;
  6.1462 +	unsigned long translate_domain_pte(UINT64,UINT64,UINT64);
  6.1463 +
  6.1464 +	// FIXME: validate ifa here (not in Xen space), COULD MACHINE CHECK!
   6.1465 +	if (logps < PAGE_SHIFT) {
  6.1466 +		printf("vcpu_itc_i: domain trying to use smaller page size!\n");
  6.1467 +		//FIXME: kill domain here
  6.1468 +		while(1);
  6.1469 +	}
  6.1470 +	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
  6.1471 +	pteval = translate_domain_pte(pte,ifa,itir);
  6.1472 +	// FIXME: what to do if bad physical address? (machine check?)
  6.1473 +	if (!pteval) return IA64_ILLOP_FAULT;
  6.1474 +	vcpu_itc_no_srlz(vcpu, 1,ifa,pteval,logps);
  6.1475 +	return IA64_NO_FAULT;
  6.1476 +}
  6.1477 +
  6.1478 +IA64FAULT vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
  6.1479 +{
  6.1480 +	printk("vcpu_ptc_l: called, not implemented yet\n");
  6.1481 +	return IA64_ILLOP_FAULT;
  6.1482 +}
  6.1483 +
  6.1484 +IA64FAULT vcpu_fc(VCPU *vcpu, UINT64 vadr)
  6.1485 +{
  6.1486 +	UINT64 mpaddr;
  6.1487 +	IA64FAULT fault;
  6.1488 +	unsigned long lookup_domain_mpa(VCPU *,unsigned long);
  6.1489 +	unsigned long pteval, dom_imva;
  6.1490 +
  6.1491 +	fault = vcpu_tpa(vcpu, vadr, &mpaddr);
  6.1492 +	if (fault == IA64_NO_FAULT) {
   6.1493 +		// uses the file-level dom0 declared above
   6.1494 +		extern unsigned long dom0_start, dom0_size;
  6.1495 +		if (vcpu == dom0) {
  6.1496 +			if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
  6.1497 +				printk("vcpu_fc: bad dom0 mpaddr %p!\n",mpaddr);
  6.1498 +			}
  6.1499 +		}
  6.1500 +		pteval = lookup_domain_mpa(vcpu,mpaddr);
  6.1501 +		if (pteval) {
  6.1502 +			dom_imva = __va(pteval & _PFN_MASK);
  6.1503 +			ia64_fc(dom_imva);
  6.1504 +		}
  6.1505 +		else {
  6.1506 +			REGS *regs = vcpu_regs(vcpu);
  6.1507 +			printk("vcpu_fc: can't flush vadr=%p, iip=%p\n",
  6.1508 +					vadr,regs->cr_iip);
  6.1509 +		}
  6.1510 +	}
  6.1511 +	return fault;
  6.1512 +}
  6.1513 +
  6.1514 +IA64FAULT vcpu_ptc_e(VCPU *vcpu, UINT64 vadr)
  6.1515 +{
  6.1516 +
  6.1517 +	// Note that this only needs to be called once, i.e. the
  6.1518 +	// architected loop to purge the entire TLB, should use
  6.1519 +	//  base = stride1 = stride2 = 0, count0 = count 1 = 1
  6.1520 +
  6.1521 +	// FIXME: When VHPT is in place, flush that too!
  6.1522 +	local_flush_tlb_all();
  6.1523 +	// just invalidate the "whole" tlb
  6.1524 +	vcpu_purge_tr_entry(&PSCB(vcpu).dtlb);
  6.1525 +	vcpu_purge_tr_entry(&PSCB(vcpu).itlb);
  6.1526 +	return IA64_NO_FAULT;
  6.1527 +}
  6.1528 +
  6.1529 +IA64FAULT vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 addr_range)
  6.1530 +{
  6.1531 +	printk("vcpu_ptc_g: called, not implemented yet\n");
  6.1532 +	return IA64_ILLOP_FAULT;
  6.1533 +}
  6.1534 +
  6.1535 +IA64FAULT vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
  6.1536 +{
   6.1537 +	extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
  6.1538 +	// FIXME: validate not flushing Xen addresses
  6.1539 +	// if (Xen address) return(IA64_ILLOP_FAULT);
  6.1540 +	// FIXME: ??breaks if domain PAGE_SIZE < Xen PAGE_SIZE
  6.1541 +	ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
  6.1542 +	vcpu_purge_tr_entry(&PSCB(vcpu).dtlb);
  6.1543 +	vcpu_purge_tr_entry(&PSCB(vcpu).itlb);
  6.1544 +	return IA64_NO_FAULT;
  6.1545 +}
  6.1546 +
  6.1547 +IA64FAULT vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
  6.1548 +{
  6.1549 +	printf("vcpu_ptr_d: Purging TLB is unsupported\n");
  6.1550 +	return (IA64_ILLOP_FAULT);
  6.1551 +}
  6.1552 +
  6.1553 +IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 addr_range)
  6.1554 +{
  6.1555 +	printf("vcpu_ptr_i: Purging TLB is unsupported\n");
  6.1556 +	return (IA64_ILLOP_FAULT);
  6.1557 +}
  6.1558 +
  6.1559 +void vcpu_set_regs(VCPU *vcpu, REGS *regs)
  6.1560 +{
  6.1561 +	vcpu->regs = regs;
  6.1562 +}
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/xen/arch/ia64/xenasm.S	Mon Nov 22 23:08:21 2004 +0000
     7.3 @@ -0,0 +1,461 @@
     7.4 +/*
     7.5 + * Assembly support routines for Xen/ia64
     7.6 + *
     7.7 + * Copyright (C) 2004 Hewlett-Packard Co
     7.8 + *	Dan Magenheimer <dan.magenheimer@hp.com>
     7.9 + */
    7.10 +
    7.11 +#include <linux/config.h>
    7.12 +#include <asm/asmmacro.h>
    7.13 +#include <asm/processor.h>
    7.14 +#include <asm/pgtable.h>
    7.15 +#include <asm/vhpt.h>
    7.16 +
    7.17 +#define RunningOnHpSki(rx,ry,pn) 			\
    7.18 +	addl rx = 2, r0; 				\
    7.19 +	addl ry = 3, r0; 				\
    7.20 +	;; 						\
    7.21 +	mov rx = cpuid[rx]; 				\
    7.22 +	mov ry = cpuid[ry]; 				\
    7.23 +	;; 						\
    7.24 +	cmp.eq pn,p0 = 0, rx; 				\
    7.25 +	;; 						\
    7.26 +	(pn) movl rx = 0x7000004 ; 			\
    7.27 +	;; 						\
    7.28 +	(pn) cmp.eq pn,p0 = ry, rx; 			\
    7.29 +	;;
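          +
          +// The macro above reads cpuid[2] and cpuid[3]: the HP Ski simulator
          +// reports cpuid[2] == 0 and cpuid[3] == 0x7000004, so pn ends up true
          +// only when both comparisons succeed.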
    7.30 +
    7.31 +//int platform_is_hp_ski(void)
    7.32 +GLOBAL_ENTRY(platform_is_hp_ski)
    7.33 +	mov r8 = 0
    7.34 +	RunningOnHpSki(r3,r9,p8)
    7.35 +(p8)	mov r8 = 1
    7.36 +	br.ret.sptk.many b0
    7.37 +END(platform_is_hp_ski)
    7.38 +
    7.39 +// Change rr7 to the passed value while ensuring
    7.40 +// Xen is mapped into the new region
    7.41 +#define PSR_BITS_TO_CLEAR						\
    7.42 +	(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT |		\
    7.43 +	 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |	\
    7.44 +	 IA64_PSR_DFL | IA64_PSR_DFH)
    7.45 +// FIXME? Note that this turns off the DB bit (debug)
    7.46 +#define PSR_BITS_TO_SET	IA64_PSR_BN
    7.47 +
    7.48 +GLOBAL_ENTRY(ia64_new_rr7)
    7.49 +	// not sure this unwind statement is correct...
    7.50 +	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
    7.51 +	alloc loc1 = ar.pfs, 1, 7, 0, 0
    7.52 +1:	{
    7.53 +	  mov r28  = in0		// copy procedure index
    7.54 +	  mov r8   = ip			// save ip to compute branch
    7.55 +	  mov loc0 = rp			// save rp
    7.56 +	};;
    7.57 +	.body
    7.58 +	movl loc2=PERCPU_ADDR
    7.59 +	;;
    7.60 +	tpa loc2=loc2			// grab this BEFORE changing rr7
    7.61 +	;;
    7.62 +#if VHPT_ENABLED
    7.63 +	movl loc6=VHPT_ADDR
    7.64 +	;;
    7.65 +	tpa loc6=loc6			// grab this BEFORE changing rr7
    7.66 +	;;
    7.67 +#endif
    7.68 +	movl loc5=SHAREDINFO_ADDR
    7.69 +	;;
    7.70 +	tpa loc5=loc5			// grab this BEFORE changing rr7
    7.71 +	;;
    7.72 +	mov loc3 = psr			// save psr
    7.73 +	adds r8  = 1f-1b,r8		// calculate return address for call
    7.74 +	;;
    7.75 +	tpa r8=r8			// convert rp to physical
    7.76 +	;;
    7.77 +	mov loc4=ar.rsc			// save RSE configuration
    7.78 +	;;
    7.79 +	mov ar.rsc=0			// put RSE in enforced lazy, LE mode
    7.80 +	movl r16=PSR_BITS_TO_CLEAR
    7.81 +	movl r17=PSR_BITS_TO_SET
    7.82 +	;;
    7.83 +	or loc3=loc3,r17		// add in psr the bits to set
    7.84 +	;;
    7.85 +	andcm r16=loc3,r16		// removes bits to clear from psr
    7.86 +	br.call.sptk.many rp=ia64_switch_mode_phys
    7.87 +1:
    7.88 +	// now in physical mode with psr.i/ic off so do rr7 switch
    7.89 +	dep	r16=-1,r0,61,3
    7.90 +	;;
    7.91 +	mov	rr[r16]=in0
    7.92 +	srlz.d
    7.93 +	;;
    7.94 +
    7.95 +	// re-pin mappings for kernel text and data
    7.96 +	mov r18=KERNEL_TR_PAGE_SHIFT<<2
    7.97 +	movl r17=KERNEL_START
    7.98 +	;;
    7.99 +	rsm psr.i | psr.ic
   7.100 +	;;
   7.101 +	srlz.i
   7.102 +	;;
   7.103 +	ptr.i	r17,r18
   7.104 +	ptr.d	r17,r18
   7.105 +	;;
   7.106 +	mov cr.itir=r18
   7.107 +	mov cr.ifa=r17
   7.108 +	mov r16=IA64_TR_KERNEL
   7.109 +	//mov r3=ip
   7.110 +	movl r18=PAGE_KERNEL
   7.111 +	;;
   7.112 +	dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
   7.113 +	;;
   7.114 +	or r18=r2,r18
   7.115 +	;;
   7.116 +	srlz.i
   7.117 +	;;
   7.118 +	itr.i itr[r16]=r18
   7.119 +	;;
   7.120 +	itr.d dtr[r16]=r18
   7.121 +	;;
   7.122 +
   7.123 +	// re-pin mappings for stack (current), per-cpu, vhpt, and shared info
   7.124 +
   7.125 +	// unless overlaps with KERNEL_TR
   7.126 +	dep r18=0,r13,0,KERNEL_TR_PAGE_SHIFT
   7.127 +	;;
   7.128 +	cmp.eq p7,p0=r17,r18
   7.129 +(p7)	br.cond.sptk	.stack_overlaps
   7.130 +	;;
   7.131 +	movl r25=PAGE_KERNEL
   7.132 +	dep r20=0,r13,50,14		// physical address of "current"
   7.133 +	;;
   7.134 +	or r23=r25,r20			// construct PA | page properties
   7.135 +	mov r25=IA64_GRANULE_SHIFT<<2
   7.136 +	;;
   7.137 +	ptr.d	r13,r25
   7.138 +	;;
   7.139 +	mov cr.itir=r25
   7.140 +	mov cr.ifa=r13			// VA of next task...
   7.141 +	;;
   7.142 +	mov r25=IA64_TR_CURRENT_STACK
   7.143 +	;;
   7.144 +	itr.d dtr[r25]=r23		// wire in new mapping...
   7.145 +	;;
   7.146 +.stack_overlaps:
   7.147 +
   7.148 +	movl r22=PERCPU_ADDR
   7.149 +	;;
   7.150 +	movl r25=PAGE_KERNEL
   7.151 +	;;
   7.152 +	mov r20=loc2			// saved percpu physical address
   7.153 +	;;
   7.154 +	or r23=r25,r20			// construct PA | page properties
   7.155 +	mov r24=PERCPU_PAGE_SHIFT<<2
   7.156 +	;;
   7.157 +	ptr.d	r22,r24
   7.158 +	;;
   7.159 +	mov cr.itir=r24
   7.160 +	mov cr.ifa=r22
   7.161 +	;;
   7.162 +	mov r25=IA64_TR_PERCPU_DATA
   7.163 +	;;
   7.164 +	itr.d dtr[r25]=r23		// wire in new mapping...
   7.165 +	;;
   7.166 +
   7.167 +#if VHPT_ENABLED
   7.168 +	movl r22=VHPT_ADDR
   7.169 +	;;
   7.170 +	movl r25=PAGE_KERNEL
   7.171 +	;;
   7.172 +	mov r20=loc6			// saved vhpt physical address
   7.173 +	;;
   7.174 +	or r23=r25,r20			// construct PA | page properties
   7.175 +	mov r24=VHPT_PAGE_SHIFT<<2
   7.176 +	;;
   7.177 +	ptr.d	r22,r24
   7.178 +	;;
   7.179 +	mov cr.itir=r24
   7.180 +	mov cr.ifa=r22
   7.181 +	;;
   7.182 +	mov r25=IA64_TR_VHPT
   7.183 +	;;
   7.184 +	itr.d dtr[r25]=r23		// wire in new mapping...
   7.185 +	;;
   7.186 +#endif
   7.187 +
   7.188 +	movl r22=SHAREDINFO_ADDR
   7.189 +	;;
   7.190 +	movl r25=PAGE_KERNEL
   7.191 +	;;
   7.192 +	mov r20=loc5			// saved sharedinfo physical address
   7.193 +	;;
   7.194 +	or r23=r25,r20			// construct PA | page properties
   7.195 +	mov r24=PAGE_SHIFT<<2
   7.196 +	;;
   7.197 +	ptr.d	r22,r24
   7.198 +	;;
   7.199 +	mov cr.itir=r24
   7.200 +	mov cr.ifa=r22
   7.201 +	;;
   7.202 +	mov r25=IA64_TR_SHARED_INFO
   7.203 +	;;
   7.204 +	itr.d dtr[r25]=r23		// wire in new mapping...
   7.205 +	;;
   7.206 +
   7.207 +	// done, switch back to virtual and return
   7.208 +	mov r16=loc3			// r16= original psr
   7.209 +	br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
   7.210 +	mov psr.l = loc3		// restore init PSR
   7.211 +
   7.212 +	mov ar.pfs = loc1
   7.213 +	mov rp = loc0
   7.214 +	;;
   7.215 +	mov ar.rsc=loc4			// restore RSE configuration
    7.216 +	srlz.d				// serialize restoration of psr.l
   7.217 +	br.ret.sptk.many rp
   7.218 +END(ia64_new_rr7)
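          +
          +// Overall sequence of ia64_new_rr7: capture the physical addresses of
          +// the per-cpu, VHPT and shared-info regions while translation still
          +// works, drop to physical mode via ia64_switch_mode_phys, write the
          +// new rr7, re-pin the translation registers for kernel text/data, the
          +// current stack, per-cpu data, the VHPT and shared info, then return
          +// to virtual mode with the original psr and RSE configuration restored.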
   7.219 +
   7.220 +#include "minstate.h"
   7.221 +
   7.222 +GLOBAL_ENTRY(ia64_prepare_handle_privop)
   7.223 +	.prologue
   7.224 +	/*
   7.225 +	 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
   7.226 +	 */
   7.227 +	mov r16=r0
   7.228 +	DO_SAVE_SWITCH_STACK
   7.229 +	br.call.sptk.many rp=ia64_handle_privop		// stack frame setup in ivt
   7.230 +.ret22:	.body
   7.231 +	DO_LOAD_SWITCH_STACK
   7.232 +	br.cond.sptk.many rp				// goes to ia64_leave_kernel
   7.233 +END(ia64_prepare_handle_privop)
   7.234 +
   7.235 +GLOBAL_ENTRY(ia64_prepare_handle_break)
   7.236 +	.prologue
   7.237 +	/*
   7.238 +	 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
   7.239 +	 */
   7.240 +	mov r16=r0
   7.241 +	DO_SAVE_SWITCH_STACK
   7.242 +	br.call.sptk.many rp=ia64_handle_break	// stack frame setup in ivt
   7.243 +.ret23:	.body
   7.244 +	DO_LOAD_SWITCH_STACK
   7.245 +	br.cond.sptk.many rp			// goes to ia64_leave_kernel
   7.246 +END(ia64_prepare_handle_break)
   7.247 +
   7.248 +GLOBAL_ENTRY(ia64_prepare_handle_reflection)
   7.249 +	.prologue
   7.250 +	/*
   7.251 +	 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
   7.252 +	 */
   7.253 +	mov r16=r0
   7.254 +	DO_SAVE_SWITCH_STACK
   7.255 +	br.call.sptk.many rp=ia64_handle_reflection	// stack frame setup in ivt
   7.256 +.ret24:	.body
   7.257 +	DO_LOAD_SWITCH_STACK
   7.258 +	br.cond.sptk.many rp			// goes to ia64_leave_kernel
   7.259 +END(ia64_prepare_handle_reflection)
   7.260 +
   7.261 +// NOTE: instruction spacing must be explicit for recovery on miss
   7.262 +GLOBAL_ENTRY(__get_domain_bundle)
   7.263 +	ld8 r8=[r32],8
   7.264 +	nop 0
   7.265 +	nop 0
   7.266 +	;;
   7.267 +	ld8 r9=[r32]
   7.268 +	nop 0
   7.269 +	nop 0
   7.270 +	;;
   7.271 +	br.ret.sptk.many rp
   7.272 +	nop 0
   7.273 +	nop 0
   7.274 +	;;
   7.275 +END(__get_domain_bundle)
   7.276 +
   7.277 +GLOBAL_ENTRY(dorfirfi)
   7.278 +#define SI_CR_IIP_OFFSET 0x150
   7.279 +#define SI_CR_IPSR_OFFSET 0x148
   7.280 +#define SI_CR_IFS_OFFSET 0x158
   7.281 +        movl r16 = SHAREDINFO_ADDR+SI_CR_IIP_OFFSET
   7.282 +        movl r17 = SHAREDINFO_ADDR+SI_CR_IPSR_OFFSET
   7.283 +        movl r18 = SHAREDINFO_ADDR+SI_CR_IFS_OFFSET
   7.284 +	;;
   7.285 +	ld8 r16 = [r16]
   7.286 +	ld8 r17 = [r17]
   7.287 +	ld8 r18 = [r18]
   7.288 +	;;
   7.289 +        mov cr.iip=r16
   7.290 +        mov cr.ipsr=r17
   7.291 +        mov cr.ifs=r18
   7.292 +	;;
   7.293 +        // fall through
   7.294 +END(dorfirfi)
   7.295 +
   7.296 +GLOBAL_ENTRY(dorfi)
   7.297 +        rfi
   7.298 +	;;
    7.299 +END(dorfi)
   7.300 +
   7.301 +//
   7.302 +// Long's Peak UART Offsets
   7.303 +//
   7.304 +#define COM_TOP 0xff5e0000
   7.305 +#define COM_BOT 0xff5e2000
   7.306 +
   7.307 +// UART offsets	
   7.308 +#define UART_TX		0	/* Out: Transmit buffer (DLAB=0) */
   7.309 +#define UART_INT_ENB	1	/* interrupt enable (DLAB=0) */	
   7.310 +#define UART_INT_ID	2	/* Interrupt ID register */
   7.311 +#define UART_LINE_CTL	3	/* Line control register */
   7.312 +#define UART_MODEM_CTL	4	/* Modem Control Register */
   7.313 +#define UART_LSR	5	/* In:  Line Status Register */
   7.314 +#define UART_MSR	6	/* Modem status register */	
   7.315 +#define UART_DLATCH_LOW UART_TX
   7.316 +#define UART_DLATCH_HIGH UART_INT_ENB
   7.317 +#define COM1   0x3f8
   7.318 +#define COM2   0x2F8
   7.319 +#define COM3   0x3E8
   7.320 +
   7.321 +/* interrupt enable bits (offset 1) */
   7.322 +#define DATA_AVAIL_INT 1
   7.323 +#define XMIT_HOLD_EMPTY_INT 2
   7.324 +#define LINE_STAT_INT 4
   7.325 +#define MODEM_STAT_INT 8
   7.326 +
   7.327 +/* line status bits (offset 5) */
   7.328 +#define REC_DATA_READY 1
   7.329 +#define OVERRUN 2
   7.330 +#define PARITY_ERROR 4
   7.331 +#define FRAMING_ERROR 8
   7.332 +#define BREAK_INTERRUPT 0x10
   7.333 +#define XMIT_HOLD_EMPTY 0x20
   7.334 +#define XMIT_SHIFT_EMPTY 0x40
   7.335 +
   7.336 +// Write a single character
   7.337 +// input: r32 = character to be written
   7.338 +// output: none
   7.339 +GLOBAL_ENTRY(longs_peak_putc)	
   7.340 +	rsm psr.dt
   7.341 +        movl r16 = 0x8000000000000000 + COM_TOP + UART_LSR
   7.342 +	;;
   7.343 +	srlz.i
   7.344 +	;;
   7.345 +
   7.346 +.Chk_THRE_p:
   7.347 +        ld1.acq r18=[r16]
   7.348 +        ;;
   7.349 +	
   7.350 +	and r18 = XMIT_HOLD_EMPTY, r18
   7.351 +	;;
   7.352 +	cmp4.eq p6,p0=0,r18
   7.353 +	;;
   7.354 +	
   7.355 +(p6)    br .Chk_THRE_p
   7.356 +	;;
   7.357 +        movl r16 = 0x8000000000000000 + COM_TOP + UART_TX
   7.358 +	;;
   7.359 +	st1.rel [r16]=r32
   7.360 +	;;
   7.361 +	ssm psr.dt
   7.362 +	;;
   7.363 +	srlz.i
   7.364 +	;;
   7.365 +	br.ret.sptk.many b0
   7.366 +END(longs_peak_putc)	
   7.367 +
   7.368 +/* derived from linux/arch/ia64/hp/sim/boot/boot_head.S */
   7.369 +GLOBAL_ENTRY(pal_emulator_static)
   7.370 +	mov r8=-1
   7.371 +	mov r9=256
   7.372 +	;;
   7.373 +	cmp.gtu p7,p8=r9,r32		/* r32 <= 255? */
   7.374 +(p7)	br.cond.sptk.few static
   7.375 +	;;
   7.376 +	mov r9=512
   7.377 +	;;
   7.378 +	cmp.gtu p7,p8=r9,r32
   7.379 +(p7)	br.cond.sptk.few stacked
   7.380 +	;;
   7.381 +static:	cmp.eq p7,p8=6,r32		/* PAL_PTCE_INFO */
   7.382 +(p8)	br.cond.sptk.few 1f
   7.383 +	;;
   7.384 +	mov r8=0			/* status = 0 */
   7.385 +	movl r9=0x100000000		/* tc.base */
   7.386 +	movl r10=0x0000000200000003	/* count[0], count[1] */
   7.387 +	movl r11=0x1000000000002000	/* stride[0], stride[1] */
   7.388 +	br.ret.sptk.few rp
   7.389 +1:	cmp.eq p7,p8=14,r32		/* PAL_FREQ_RATIOS */
   7.390 +(p8)	br.cond.sptk.few 1f
   7.391 +	mov r8=0			/* status = 0 */
   7.392 +	movl r9 =0x900000002		/* proc_ratio (1/100) */
   7.393 +	movl r10=0x100000100		/* bus_ratio<<32 (1/256) */
   7.394 +	movl r11=0x900000002		/* itc_ratio<<32 (1/100) */
   7.395 +	;;
   7.396 +1:	cmp.eq p7,p8=19,r32		/* PAL_RSE_INFO */
   7.397 +(p8)	br.cond.sptk.few 1f
   7.398 +	mov r8=0			/* status = 0 */
   7.399 +	mov r9=96			/* num phys stacked */
   7.400 +	mov r10=0			/* hints */
   7.401 +	mov r11=0
   7.402 +	br.ret.sptk.few rp
   7.403 +1:	cmp.eq p7,p8=1,r32		/* PAL_CACHE_FLUSH */
   7.404 +(p8)	br.cond.sptk.few 1f
   7.405 +#if 0
   7.406 +	mov r9=ar.lc
    7.407 +	movl r8=524288			/* flush 512k cache lines (16MB) */
   7.408 +	;;
   7.409 +	mov ar.lc=r8
   7.410 +	movl r8=0xe000000000000000
   7.411 +	;;
   7.412 +.loop:	fc r8
   7.413 +	add r8=32,r8
   7.414 +	br.cloop.sptk.few .loop
   7.415 +	sync.i
   7.416 +	;;
   7.417 +	srlz.i
   7.418 +	;;
   7.419 +	mov ar.lc=r9
   7.420 +	mov r8=r0
   7.421 +	;;
   7.422 +1:	cmp.eq p7,p8=15,r32		/* PAL_PERF_MON_INFO */
   7.423 +(p8)	br.cond.sptk.few 1f
   7.424 +	mov r8=0			/* status = 0 */
   7.425 +	movl r9 =0x08122f04		/* generic=4 width=47 retired=8 cycles=18 */
   7.426 +	mov r10=0			/* reserved */
   7.427 +	mov r11=0			/* reserved */
   7.428 +	mov r16=0xffff			/* implemented PMC */
   7.429 +	mov r17=0x3ffff			/* implemented PMD */
   7.430 +	add r18=8,r29			/* second index */
   7.431 +	;;
   7.432 +	st8 [r29]=r16,16		/* store implemented PMC */
   7.433 +	st8 [r18]=r0,16			/* clear remaining bits  */
   7.434 +	;;
   7.435 +	st8 [r29]=r0,16			/* clear remaining bits  */
   7.436 +	st8 [r18]=r0,16			/* clear remaining bits  */
   7.437 +	;;
   7.438 +	st8 [r29]=r17,16		/* store implemented PMD */
   7.439 +	st8 [r18]=r0,16			/* clear remaining bits  */
   7.440 +	mov r16=0xf0			/* cycles count capable PMC */
   7.441 +	;;
   7.442 +	st8 [r29]=r0,16			/* clear remaining bits  */
   7.443 +	st8 [r18]=r0,16			/* clear remaining bits  */
   7.444 +	mov r17=0xf0			/* retired bundles capable PMC */
   7.445 +	;;
   7.446 +	st8 [r29]=r16,16		/* store cycles capable */
   7.447 +	st8 [r18]=r0,16			/* clear remaining bits  */
   7.448 +	;;
   7.449 +	st8 [r29]=r0,16			/* clear remaining bits  */
   7.450 +	st8 [r18]=r0,16			/* clear remaining bits  */
   7.451 +	;;
   7.452 +	st8 [r29]=r17,16		/* store retired bundle capable */
   7.453 +	st8 [r18]=r0,16			/* clear remaining bits  */
   7.454 +	;;
   7.455 +	st8 [r29]=r0,16			/* clear remaining bits  */
   7.456 +	st8 [r18]=r0,16			/* clear remaining bits  */
   7.457 +	;;
   7.458 +1:	br.cond.sptk.few rp
   7.459 +#else
   7.460 +1:
   7.461 +#endif
   7.462 +stacked:
   7.463 +	br.ret.sptk.few rp
   7.464 +END(pal_emulator_static)