ia64/xen-unstable

changeset 9599:ece9b5710b29

Merged.
author emellor@leeni.uk.xensource.com
date Thu Apr 06 00:59:18 2006 +0100 (2006-04-06)
parents 09967f2d6e3b 8f7aad20b4a5
children a151e82c4ffd edeeddb1bbf9
files xen/Post.mk xen/include/asm-x86/mach-bigsmp/mach_apic.h xen/include/asm-x86/mach-default/mach_apic.h xen/include/asm-x86/mach-es7000/mach_apic.h xen/include/asm-x86/mach-summit/mach_apic.h xen/include/asm-x86/mach_ipi.h
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Thu Apr 06 00:59:06 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Thu Apr 06 00:59:18 2006 +0100
     1.3 @@ -329,9 +329,9 @@ static void net_rx_action(unsigned long 
     1.4  		irq = netif->irq;
     1.5  		id = RING_GET_REQUEST(&netif->rx, netif->rx.rsp_prod_pvt)->id;
     1.6  		flags = 0;
     1.7 -		if (skb->ip_summed == CHECKSUM_HW)
     1.8 -			flags |= NETRXF_csum_blank;
     1.9 -		if (skb->proto_data_valid)
    1.10 +		if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
    1.11 +			flags |= NETRXF_csum_blank | NETRXF_data_validated;
    1.12 +		else if (skb->proto_data_valid) /* remote but checksummed? */
    1.13  			flags |= NETRXF_data_validated;
    1.14  		if (make_rx_response(netif, id, status,
    1.15  				     (unsigned long)skb->data & ~PAGE_MASK,
    1.16 @@ -658,7 +658,11 @@ static void net_tx_action(unsigned long 
    1.17  		skb->dev      = netif->dev;
    1.18  		skb->protocol = eth_type_trans(skb, skb->dev);
    1.19  
    1.20 -		if (txreq.flags & NETTXF_data_validated) {
    1.21 +		/*
    1.22 +		 * Old frontends do not assert data_validated but we
    1.23 +		 * can infer it from csum_blank so test both flags.
    1.24 +		 */
    1.25 +		if (txreq.flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
    1.26  			skb->ip_summed = CHECKSUM_UNNECESSARY;
    1.27  			skb->proto_data_valid = 1;
    1.28  		} else {
     2.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Thu Apr 06 00:59:06 2006 +0100
     2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Thu Apr 06 00:59:18 2006 +0100
     2.3 @@ -698,9 +698,9 @@ static int network_start_xmit(struct sk_
     2.4  	tx->size = skb->len;
     2.5  
     2.6  	tx->flags = 0;
     2.7 -	if (skb->ip_summed == CHECKSUM_HW)
     2.8 -		tx->flags |= NETTXF_csum_blank;
     2.9 -	if (skb->proto_data_valid)
    2.10 +	if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
    2.11 +		tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
    2.12 +	if (skb->proto_data_valid) /* remote but checksummed? */
    2.13  		tx->flags |= NETTXF_data_validated;
    2.14  
    2.15  	np->tx.req_prod_pvt = i + 1;
    2.16 @@ -816,7 +816,11 @@ static int netif_poll(struct net_device 
    2.17  		skb->len  = rx->status;
    2.18  		skb->tail = skb->data + skb->len;
    2.19  
    2.20 -		if (rx->flags & NETRXF_data_validated) {
    2.21 +		/*
    2.22 +		 * Old backends do not assert data_validated but we
    2.23 +		 * can infer it from csum_blank so test both flags.
    2.24 +		 */
    2.25 +		if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank)) {
    2.26  			skb->ip_summed = CHECKSUM_UNNECESSARY;
    2.27  			skb->proto_data_valid = 1;
    2.28  		} else {
    2.29 @@ -1017,8 +1021,11 @@ static void network_connect(struct net_d
    2.30  		tx->gref = np->grant_tx_ref[i];
    2.31  		tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
    2.32  		tx->size = skb->len;
    2.33 -		tx->flags = (skb->ip_summed == CHECKSUM_HW) ?
    2.34 -			NETTXF_csum_blank : 0;
    2.35 +		tx->flags = 0;
    2.36 +		if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
    2.37 +			tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
    2.38 +		if (skb->proto_data_valid) /* remote but checksummed? */
    2.39 +			tx->flags |= NETTXF_data_validated;
    2.40  
    2.41  		np->stats.tx_bytes += skb->len;
    2.42  		np->stats.tx_packets++;
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/pciback/conf_space.c	Thu Apr 06 00:59:06 2006 +0100
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/pciback/conf_space.c	Thu Apr 06 00:59:18 2006 +0100
     3.3 @@ -106,7 +106,7 @@ static inline int valid_request(int offs
     3.4  }
     3.5  
     3.6  static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask,
     3.7 -			      u32 offset)
     3.8 +			      int offset)
     3.9  {
    3.10  	if (offset >= 0) {
    3.11  		new_val_mask <<= (offset * 8);
    3.12 @@ -180,7 +180,8 @@ int pciback_config_read(struct pci_dev *
    3.13  
    3.14  		if ((req_start >= field_start && req_start < field_end)
    3.15  		    || (req_end > field_start && req_end <= field_end)) {
    3.16 -			err = conf_space_read(dev, cfg_entry, offset, &tmp_val);
    3.17 +			err = conf_space_read(dev, cfg_entry, field_start,
    3.18 +					      &tmp_val);
    3.19  			if (err)
    3.20  				goto out;
    3.21  
    3.22 @@ -228,14 +229,16 @@ int pciback_config_write(struct pci_dev 
    3.23  		    || (req_end > field_start && req_end <= field_end)) {
    3.24  			tmp_val = 0;
    3.25  
    3.26 -			err = pciback_config_read(dev, offset, size, &tmp_val);
    3.27 +			err = pciback_config_read(dev, field_start,
    3.28 +						  field->size, &tmp_val);
    3.29  			if (err)
    3.30  				break;
    3.31  
    3.32  			tmp_val = merge_value(tmp_val, value, get_mask(size),
    3.33 -					      field_start - req_start);
    3.34 +					      req_start - field_start);
    3.35  
    3.36 -			err = conf_space_write(dev, cfg_entry, offset, tmp_val);
    3.37 +			err = conf_space_write(dev, cfg_entry, field_start,
    3.38 +					       tmp_val);
    3.39  			handled = 1;
    3.40  		}
    3.41  	}
     4.1 --- a/tools/Rules.mk	Thu Apr 06 00:59:06 2006 +0100
     4.2 +++ b/tools/Rules.mk	Thu Apr 06 00:59:18 2006 +0100
     4.3 @@ -12,6 +12,8 @@ XEN_LIBXENSTAT     = $(XEN_ROOT)/tools/x
     4.4  
     4.5  X11_LDPATH = -L/usr/X11R6/$(LIBDIR)
     4.6  
     4.7 +CFLAGS += -D__XEN_INTERFACE_VERSION__=0x00030101
     4.8 +
     4.9  %.opic: %.c
    4.10  	$(CC) $(CPPFLAGS) -DPIC $(CFLAGS) -fPIC -c -o $@ $<
    4.11  
     5.1 --- a/tools/debugger/gdb/README	Thu Apr 06 00:59:06 2006 +0100
     5.2 +++ b/tools/debugger/gdb/README	Thu Apr 06 00:59:18 2006 +0100
     5.3 @@ -1,16 +1,17 @@
     5.4  
     5.5 -DomU GDB server for 32-bit (PAE and non-PAE) systems
     5.6 +DomU & HVM GDB server for 32-bit (PAE and non-PAE) and x86_64 systems
     5.7  ----------------------------------------------------
     5.8  
     5.9  Lines marked below with [*] are optional, if you want full
    5.10  source-level debugging of your kernel image.
    5.11  
    5.12  To build the GDB server:
     5.13 + 0. Build the rest of Xen first, from the base directory.
    5.14   1. Run ./gdbbuild from within this directory.
    5.15   2. Copy ./gdb-6.2.1-linux-i386-xen/gdb/gdbserver/gdbserver-xen
    5.16      to your test machine.
    5.17  
    5.18 -To build a debuggable guest kernel image:
    5.19 +To build a debuggable guest domU kernel image:
    5.20   1. cd linux-2.6.xx-xenU
    5.21   2. make menuconfig
    5.22   3. From within the configurator, enable the following options:
    5.23 @@ -28,7 +29,7 @@ To debug a running guest:
    5.24      # bt
    5.25      # disass
    5.26  
    5.27 -To debug a crashed guest:
    5.28 +To debug a crashed domU guest:
    5.29   1. Add '(enable-dump yes)' to /etc/xen/xend-config.sxp before
    5.30      starting xend.
    5.31   2. When the domain crashes, a core file is written to
     6.1 --- a/xen/Makefile	Thu Apr 06 00:59:06 2006 +0100
     6.2 +++ b/xen/Makefile	Thu Apr 06 00:59:18 2006 +0100
     6.3 @@ -1,27 +1,20 @@
     6.4 -INSTALL			= install
     6.5 -INSTALL_DATA		= $(INSTALL) -m0644
     6.6 -INSTALL_DIR		= $(INSTALL) -d -m0755
     6.7 -
     6.8  # This is the correct place to edit the build version.
     6.9  # All other places this is stored (eg. compile.h) should be autogenerated.
    6.10 -export XEN_VERSION       = 3
    6.11 -export XEN_SUBVERSION    = 0
    6.12 -export XEN_EXTRAVERSION  = -unstable
    6.13 -export XEN_FULLVERSION   = $(XEN_VERSION).$(XEN_SUBVERSION)$(XEN_EXTRAVERSION)
    6.14 +export XEN_VERSION      := 3
    6.15 +export XEN_SUBVERSION   := 0
    6.16 +export XEN_EXTRAVERSION := -unstable
    6.17 +export XEN_FULLVERSION  := $(XEN_VERSION).$(XEN_SUBVERSION)$(XEN_EXTRAVERSION)
    6.18  
    6.19 -export BASEDIR          := $(CURDIR)
    6.20 -
    6.21 -include Rules.mk
    6.22 +export BASEDIR := $(CURDIR)
    6.23  
    6.24  default: build
    6.25 -$(TARGET).gz: $(TARGET)
    6.26 -	gzip -f -9 < $< > $@.new
    6.27 -	mv $@.new $@
    6.28 +
    6.29 +ifeq ($(XEN_ROOT),)
    6.30  
    6.31 -debug:	
    6.32 -	objdump -D -S $(TARGET)-syms > $(TARGET).s
    6.33 +build install clean:
    6.34 +	make -f Rules.mk $@
    6.35  
    6.36 -dist: install
    6.37 +else
    6.38  
    6.39  build: $(TARGET).gz
    6.40  
    6.41 @@ -38,24 +31,35 @@ install: $(TARGET).gz
    6.42  	$(INSTALL_DATA) include/public/io/*.h $(DESTDIR)/usr/include/xen/io
    6.43  	$(INSTALL_DATA) include/public/COPYING $(DESTDIR)/usr/include/xen
    6.44  
    6.45 -clean: delete-unfresh-files
    6.46 +clean:: delete-unfresh-files
    6.47  	$(MAKE) -C tools clean
    6.48 -	$(MAKE) -C common clean
    6.49 -	$(MAKE) -C drivers clean
    6.50 -	$(MAKE) -C acm clean
    6.51 -	$(MAKE) -C arch/$(TARGET_ARCH) clean
    6.52 +	$(MAKE) -f $(BASEDIR)/Rules.mk -C common clean
    6.53 +	$(MAKE) -f $(BASEDIR)/Rules.mk -C drivers clean
    6.54 +	$(MAKE) -f $(BASEDIR)/Rules.mk -C acm clean
    6.55 +	$(MAKE) -f $(BASEDIR)/Rules.mk -C arch/$(TARGET_ARCH) clean
    6.56  	rm -f include/asm *.o $(TARGET)* *~ core
    6.57  	rm -f include/asm-*/asm-offsets.h
    6.58  	rm -f include/xen/acm_policy.h
    6.59  
    6.60 +endif
    6.61 +
    6.62 +dist: install
    6.63 +
    6.64 +debug: FORCE
    6.65 +	objdump -D -S $(TARGET)-syms > $(TARGET).s
    6.66 +
    6.67 +$(TARGET).gz: $(TARGET)
    6.68 +	gzip -f -9 < $< > $@.new
    6.69 +	mv $@.new $@
    6.70 +
    6.71  $(TARGET): delete-unfresh-files
    6.72  	$(MAKE) -C tools
    6.73 -	$(MAKE) include/xen/compile.h
    6.74 -	$(MAKE) include/xen/acm_policy.h
    6.75 +	$(MAKE) -f $(BASEDIR)/Rules.mk include/xen/compile.h
    6.76 +	$(MAKE) -f $(BASEDIR)/Rules.mk include/xen/acm_policy.h
    6.77  	[ -e include/asm ] || ln -sf asm-$(TARGET_ARCH) include/asm
    6.78 -	$(MAKE) -C arch/$(TARGET_ARCH) asm-offsets.s
    6.79 -	$(MAKE) include/asm-$(TARGET_ARCH)/asm-offsets.h
    6.80 -	$(MAKE) -C arch/$(TARGET_ARCH) $(TARGET)
    6.81 +	$(MAKE) -f $(BASEDIR)/Rules.mk -C arch/$(TARGET_ARCH) asm-offsets.s
    6.82 +	$(MAKE) -f $(BASEDIR)/Rules.mk include/asm-$(TARGET_ARCH)/asm-offsets.h
    6.83 +	$(MAKE) -f $(BASEDIR)/Rules.mk -C arch/$(TARGET_ARCH) $(TARGET)
    6.84  
    6.85  # drivers/char/console.o contains static banner/compile info. Blow it away.
    6.86  # Don't refresh these files during e.g., 'sudo make install'
    6.87 @@ -115,7 +119,7 @@ include/asm-$(TARGET_ARCH)/asm-offsets.h
    6.88  	  echo ""; \
    6.89  	  echo "#endif") <$< >$@
    6.90  
    6.91 -.PHONY: default debug install dist clean delete-unfresh-files TAGS tags
    6.92 +.PHONY: default debug build install dist clean delete-unfresh-files TAGS tags
    6.93  
    6.94  SUBDIRS = acm arch/$(TARGET_ARCH) common drivers 
    6.95  define all_sources
     7.1 --- a/xen/Post.mk	Thu Apr 06 00:59:06 2006 +0100
     7.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.3 @@ -1,27 +0,0 @@
     7.4 -# Ensure each subdirectory has exactly one trailing slash.
     7.5 -subdir-n := $(patsubst %,%/,$(patsubst %/,%,$(subdir-n)))
     7.6 -subdir-y := $(patsubst %,%/,$(patsubst %/,%,$(subdir-y)))
     7.7 -
     7.8 -# Add explicitly declared subdirectories to the object list.
     7.9 -obj-y += $(patsubst %,%/built_in.o,$(subdir-y))
    7.10 -
    7.11 -# Add implicitly declared subdirectories (in the object list) to the
    7.12 -# subdirectory list, and rewrite the object-list entry.
    7.13 -subdir-y += $(filter %/,$(obj-y))
    7.14 -obj-y    := $(patsubst %/,%/built-in.o,$(obj-y))
    7.15 -
    7.16 -subdir-all := $(subdir-y) $(subdir-n)
    7.17 -
    7.18 -built_in.o: $(obj-y)
    7.19 -	$(LD) $(LDFLAGS) -r -o $@ $^
    7.20 -
    7.21 -.PHONY: FORCE
    7.22 -FORCE:
    7.23 -
    7.24 -%/built_in.o: FORCE
    7.25 -	$(MAKE) -C $*
    7.26 -
    7.27 -clean:: $(addprefix _clean_, $(subdir-all)) FORCE
    7.28 -	rm -f *.o *~ core
    7.29 -_clean_%/: FORCE
    7.30 -	$(MAKE) -C $* clean
     8.1 --- a/xen/Rules.mk	Thu Apr 06 00:59:06 2006 +0100
     8.2 +++ b/xen/Rules.mk	Thu Apr 06 00:59:18 2006 +0100
     8.3 @@ -26,18 +26,24 @@ override TARGET_SUBARCH  := $(XEN_TARGET
     8.4  override COMPILE_ARCH    := $(patsubst x86%,x86,$(XEN_COMPILE_ARCH))
     8.5  override TARGET_ARCH     := $(patsubst x86%,x86,$(XEN_TARGET_ARCH))
     8.6  
     8.7 -TARGET  := $(BASEDIR)/xen
     8.8 -HDRS    := $(wildcard $(BASEDIR)/include/xen/*.h)
     8.9 -HDRS    += $(wildcard $(BASEDIR)/include/public/*.h)
    8.10 -HDRS    += $(wildcard $(BASEDIR)/include/asm-$(TARGET_ARCH)/*.h)
    8.11 -HDRS    += $(wildcard $(BASEDIR)/include/asm-$(TARGET_ARCH)/$(TARGET_SUBARCH)/*.h)
    8.12 -# Do not depend on auto-generated header files.
    8.13 -HDRS    := $(subst $(BASEDIR)/include/asm-$(TARGET_ARCH)/asm-offsets.h,,$(HDRS))
    8.14 -HDRS    := $(subst $(BASEDIR)/include/xen/banner.h,,$(HDRS))
    8.15 -HDRS    := $(subst $(BASEDIR)/include/xen/compile.h,,$(HDRS))
    8.16 +TARGET := $(BASEDIR)/xen
    8.17 +
    8.18 +HDRS := $(wildcard $(BASEDIR)/include/xen/*.h)
    8.19 +HDRS += $(wildcard $(BASEDIR)/include/public/*.h)
    8.20 +HDRS += $(wildcard $(BASEDIR)/include/asm-$(TARGET_ARCH)/*.h)
    8.21 +HDRS += $(wildcard $(BASEDIR)/include/asm-$(TARGET_ARCH)/$(TARGET_SUBARCH)/*.h)
    8.22 +
    8.23 +INSTALL      := install
    8.24 +INSTALL_DATA := $(INSTALL) -m0644
    8.25 +INSTALL_DIR  := $(INSTALL) -d -m0755
    8.26  
    8.27  include $(BASEDIR)/arch/$(TARGET_ARCH)/Rules.mk
    8.28  
    8.29 +# Do not depend on auto-generated header files.
    8.30 +HDRS := $(subst $(BASEDIR)/include/asm-$(TARGET_ARCH)/asm-offsets.h,,$(HDRS))
    8.31 +HDRS := $(subst $(BASEDIR)/include/xen/banner.h,,$(HDRS))
    8.32 +HDRS := $(subst $(BASEDIR)/include/xen/compile.h,,$(HDRS))
    8.33 +
    8.34  # Note that link order matters!
    8.35  ALL_OBJS-y               += $(BASEDIR)/common/built_in.o
    8.36  ALL_OBJS-y               += $(BASEDIR)/drivers/built_in.o
    8.37 @@ -61,6 +67,36 @@ ALL_OBJS := $(ALL_OBJS-y)
    8.38  CFLAGS   := $(strip $(CFLAGS) $(CFLAGS-y))
    8.39  AFLAGS   := $(strip $(AFLAGS) $(AFLAGS-y))
    8.40  
    8.41 +include Makefile
    8.42 +
    8.43 +# Ensure each subdirectory has exactly one trailing slash.
    8.44 +subdir-n := $(patsubst %,%/,$(patsubst %/,%,$(subdir-n)))
    8.45 +subdir-y := $(patsubst %,%/,$(patsubst %/,%,$(subdir-y)))
    8.46 +
    8.47 +# Add explicitly declared subdirectories to the object list.
    8.48 +obj-y += $(patsubst %,%/built_in.o,$(subdir-y))
    8.49 +
    8.50 +# Add implicitly declared subdirectories (in the object list) to the
    8.51 +# subdirectory list, and rewrite the object-list entry.
    8.52 +subdir-y += $(filter %/,$(obj-y))
    8.53 +obj-y    := $(patsubst %/,%/built-in.o,$(obj-y))
    8.54 +
    8.55 +subdir-all := $(subdir-y) $(subdir-n)
    8.56 +
    8.57 +built_in.o: $(obj-y)
    8.58 +	$(LD) $(LDFLAGS) -r -o $@ $^
    8.59 +
    8.60 +.PHONY: FORCE
    8.61 +FORCE:
    8.62 +
    8.63 +%/built_in.o: FORCE
    8.64 +	$(MAKE) -f $(BASEDIR)/Rules.mk -C $* built_in.o
    8.65 +
    8.66 +clean:: $(addprefix _clean_, $(subdir-all)) FORCE
    8.67 +	rm -f *.o *~ core
    8.68 +_clean_%/: FORCE
    8.69 +	$(MAKE) -f $(BASEDIR)/Rules.mk -C $* clean
    8.70 +
    8.71  %.o: %.c $(HDRS) Makefile
    8.72  	$(CC) $(CFLAGS) -c $< -o $@
    8.73  
     9.1 --- a/xen/acm/Makefile	Thu Apr 06 00:59:06 2006 +0100
     9.2 +++ b/xen/acm/Makefile	Thu Apr 06 00:59:18 2006 +0100
     9.3 @@ -1,9 +1,5 @@
     9.4 -include $(BASEDIR)/Rules.mk
     9.5 -
     9.6  obj-y += acm_core.o 
     9.7  obj-y += acm_policy.o
     9.8  obj-y += acm_simple_type_enforcement_hooks.o
     9.9  obj-y += acm_chinesewall_hooks.o
    9.10  obj-y += acm_null_hooks.o
    9.11 -
    9.12 -include $(BASEDIR)/Post.mk
    10.1 --- a/xen/arch/ia64/Makefile	Thu Apr 06 00:59:06 2006 +0100
    10.2 +++ b/xen/arch/ia64/Makefile	Thu Apr 06 00:59:18 2006 +0100
    10.3 @@ -1,21 +1,17 @@
    10.4 -include $(BASEDIR)/Rules.mk
    10.5 -
    10.6  subdir-y += xen
    10.7  subdir-y += vmx
    10.8  subdir-y += linux
    10.9  subdir-y += linux-xen
   10.10  
   10.11 -include $(BASEDIR)/Post.mk
   10.12 -
   10.13  $(TARGET)-syms: linux-xen/head.o $(ALL_OBJS) xen.lds.s
   10.14  	$(LD) $(LDFLAGS) -T xen.lds.s -N \
   10.15  		-Map map.out linux-xen/head.o $(ALL_OBJS) -o $@
   10.16  	$(NM) -n $@ | $(BASEDIR)/tools/symbols > $(BASEDIR)/xen-syms.S
   10.17 -	$(MAKE) $(BASEDIR)/xen-syms.o
   10.18 +	$(MAKE) -f $(BASEDIR)/Rules.mk $(BASEDIR)/xen-syms.o
   10.19  	$(LD) $(LDFLAGS) -T xen.lds.s -N \
   10.20  		-Map map.out linux-xen/head.o $(ALL_OBJS) $(BASEDIR)/xen-syms.o -o $@
   10.21  	$(NM) -n $@ | $(BASEDIR)/tools/symbols >$(BASEDIR)/xen-syms.S
   10.22 -	$(MAKE) $(BASEDIR)/xen-syms.o
   10.23 +	$(MAKE) -f $(BASEDIR)/Rules.mk $(BASEDIR)/xen-syms.o
   10.24  	$(LD) $(LDFLAGS) -T xen.lds.s -N \
   10.25  		-Map map.out linux-xen/head.o $(ALL_OBJS) $(BASEDIR)/xen-syms.o -o $@
   10.26  	rm -f $(BASEDIR)/xen-syms.S $(BASEDIR)/xen-syms.o
    11.1 --- a/xen/arch/ia64/linux-xen/Makefile	Thu Apr 06 00:59:06 2006 +0100
    11.2 +++ b/xen/arch/ia64/linux-xen/Makefile	Thu Apr 06 00:59:18 2006 +0100
    11.3 @@ -1,5 +1,3 @@
    11.4 -include $(BASEDIR)/Rules.mk
    11.5 -
    11.6  obj-y += efi.o
    11.7  obj-y += entry.o
    11.8  obj-y += irq_ia64.o
    11.9 @@ -15,5 +13,3 @@ obj-y += time.o
   11.10  obj-y += tlb.o
   11.11  obj-y += unaligned.o
   11.12  obj-y += unwind.o
   11.13 -
   11.14 -include $(BASEDIR)/Post.mk
    12.1 --- a/xen/arch/ia64/linux/Makefile	Thu Apr 06 00:59:06 2006 +0100
    12.2 +++ b/xen/arch/ia64/linux/Makefile	Thu Apr 06 00:59:18 2006 +0100
    12.3 @@ -1,6 +1,3 @@
    12.4 -include $(BASEDIR)/Rules.mk
    12.5 -
    12.6 -
    12.7  obj-y += bitop.o
    12.8  obj-y += clear_page.o
    12.9  obj-y += cmdline.o
   12.10 @@ -26,8 +23,6 @@ obj-y += __udivdi3.o
   12.11  obj-y += __moddi3.o
   12.12  obj-y += __umoddi3.o
   12.13  
   12.14 -include $(BASEDIR)/Post.mk
   12.15 -
   12.16  ## variants of divide/modulo
   12.17  ## see files in xen/arch/ia64/linux/lib (linux/arch/ia64/lib)
   12.18  __divdi3.o: idiv64.S
    13.1 --- a/xen/arch/ia64/vmx/Makefile	Thu Apr 06 00:59:06 2006 +0100
    13.2 +++ b/xen/arch/ia64/vmx/Makefile	Thu Apr 06 00:59:18 2006 +0100
    13.3 @@ -1,5 +1,3 @@
    13.4 -include $(BASEDIR)/Rules.mk
    13.5 -
    13.6  obj-y += hvm_vioapic.o
    13.7  obj-y += mm.o
    13.8  obj-y += mmio.o
    13.9 @@ -20,5 +18,3 @@ obj-y += vmx_vcpu.o
   13.10  obj-y += vmx_virt.o
   13.11  obj-y += vmx_vsa.o
   13.12  obj-y += vtlb.o
   13.13 -
   13.14 -include $(BASEDIR)/Post.mk
    14.1 --- a/xen/arch/ia64/xen/Makefile	Thu Apr 06 00:59:06 2006 +0100
    14.2 +++ b/xen/arch/ia64/xen/Makefile	Thu Apr 06 00:59:18 2006 +0100
    14.3 @@ -1,5 +1,3 @@
    14.4 -include $(BASEDIR)/Rules.mk
    14.5 -
    14.6  obj-y += acpi.o
    14.7  obj-y += dom0_ops.o
    14.8  obj-y += domain.o
    14.9 @@ -26,5 +24,3 @@ obj-y += xensetup.o
   14.10  obj-y += xentime.o
   14.11  
   14.12  obj-$(crash_debug) += gdbstub.o
   14.13 -
   14.14 -include $(BASEDIR)/Post.mk
    15.1 --- a/xen/arch/ia64/xen/xentime.c	Thu Apr 06 00:59:06 2006 +0100
    15.2 +++ b/xen/arch/ia64/xen/xentime.c	Thu Apr 06 00:59:18 2006 +0100
    15.3 @@ -84,7 +84,13 @@ s_time_t get_s_time(void)
    15.4      return now; 
    15.5  }
    15.6  
    15.7 -void update_dom_time(struct vcpu *v)
    15.8 +void update_vcpu_system_time(struct vcpu *v)
    15.9 +{
    15.10 +    /* No-op here; let dom0 manage system time directly */
   15.11 +    return;
   15.12 +}
   15.13 +
   15.14 +void update_domain_wallclock_time(struct domain *d)
   15.15  {
   15.16      /* N-op here, and let dom0 to manage system time directly */
   15.17      return;
   15.18 @@ -268,6 +274,6 @@ int reprogram_timer(s_time_t timeout)
   15.19  
   15.20  void send_timer_event(struct vcpu *v)
   15.21  {
   15.22 -	send_guest_virq(v, VIRQ_TIMER);
   15.23 +	send_guest_vcpu_virq(v, VIRQ_TIMER);
   15.24  }
   15.25  
    16.1 --- a/xen/arch/x86/Makefile	Thu Apr 06 00:59:06 2006 +0100
    16.2 +++ b/xen/arch/x86/Makefile	Thu Apr 06 00:59:18 2006 +0100
    16.3 @@ -1,5 +1,3 @@
    16.4 -include $(BASEDIR)/Rules.mk
    16.5 -
    16.6  subdir-y += acpi
    16.7  subdir-y += cpu
    16.8  subdir-y += genapic
    16.9 @@ -30,6 +28,7 @@ obj-y += nmi.o
   16.10  obj-y += physdev.o
   16.11  obj-y += rwlock.o
   16.12  obj-y += setup.o
   16.13 +obj-y += shutdown.o
   16.14  obj-y += smp.o
   16.15  obj-y += smpboot.o
   16.16  obj-y += string.o
   16.17 @@ -49,8 +48,6 @@ obj-$(x86_64) += shadow.o shadow_public.
   16.18  
   16.19  obj-$(crash_debug) += gdbstub.o
   16.20  
   16.21 -include $(BASEDIR)/Post.mk
   16.22 -
   16.23  $(TARGET): $(TARGET)-syms boot/mkelf32
   16.24  	./boot/mkelf32 $(TARGET)-syms $(TARGET) 0x100000 \
   16.25  	`$(NM) $(TARGET)-syms | sort | tail -n 1 | sed -e 's/^\([^ ]*\).*/0x\1/'`
   16.26 @@ -59,11 +56,11 @@ include $(BASEDIR)/Post.mk
   16.27  	$(LD) $(LDFLAGS) -T xen.lds -N \
   16.28  	    boot/$(TARGET_SUBARCH).o $(ALL_OBJS) -o $@
   16.29  	$(NM) -n $@ | $(BASEDIR)/tools/symbols >$(BASEDIR)/xen-syms.S
   16.30 -	$(MAKE) $(BASEDIR)/xen-syms.o
   16.31 +	$(MAKE) -f $(BASEDIR)/Rules.mk $(BASEDIR)/xen-syms.o
   16.32  	$(LD) $(LDFLAGS) -T xen.lds -N \
   16.33  	    boot/$(TARGET_SUBARCH).o $(ALL_OBJS) $(BASEDIR)/xen-syms.o -o $@
   16.34  	$(NM) -n $@ | $(BASEDIR)/tools/symbols >$(BASEDIR)/xen-syms.S
   16.35 -	$(MAKE) $(BASEDIR)/xen-syms.o
   16.36 +	$(MAKE) -f $(BASEDIR)/Rules.mk $(BASEDIR)/xen-syms.o
   16.37  	$(LD) $(LDFLAGS) -T xen.lds -N \
   16.38  	    boot/$(TARGET_SUBARCH).o $(ALL_OBJS) $(BASEDIR)/xen-syms.o -o $@
   16.39  	rm -f $(BASEDIR)/xen-syms.S $(BASEDIR)/xen-syms.o
    17.1 --- a/xen/arch/x86/Rules.mk	Thu Apr 06 00:59:06 2006 +0100
    17.2 +++ b/xen/arch/x86/Rules.mk	Thu Apr 06 00:59:18 2006 +0100
    17.3 @@ -46,6 +46,10 @@ x86_32 := n
    17.4  x86_64 := y
    17.5  endif
    17.6  
    17.7 +HDRS += $(wildcard $(BASEDIR)/include/asm-x86/hvm/*.h)
    17.8 +HDRS += $(wildcard $(BASEDIR)/include/asm-x86/hvm/svm/*.h)
    17.9 +HDRS += $(wildcard $(BASEDIR)/include/asm-x86/hvm/vmx/*.h)
   17.10 +
   17.11  # Test for at least GCC v3.2.x.
   17.12  gcc-ver = $(shell $(CC) -dumpversion | sed -e 's/^\(.\)\.\(.\)\.\(.\)/\$(1)/')
   17.13  ifeq ($(call gcc-ver,1),1)
    18.1 --- a/xen/arch/x86/acpi/Makefile	Thu Apr 06 00:59:06 2006 +0100
    18.2 +++ b/xen/arch/x86/acpi/Makefile	Thu Apr 06 00:59:18 2006 +0100
    18.3 @@ -1,5 +1,1 @@
    18.4 -include $(BASEDIR)/Rules.mk
    18.5 -
    18.6  obj-y += boot.o
    18.7 -
    18.8 -include $(BASEDIR)/Post.mk
    19.1 --- a/xen/arch/x86/apic.c	Thu Apr 06 00:59:06 2006 +0100
    19.2 +++ b/xen/arch/x86/apic.c	Thu Apr 06 00:59:18 2006 +0100
    19.3 @@ -657,9 +657,10 @@ void __init init_apic_mappings(void)
    19.4       * zeroes page to simulate the local APIC and another
    19.5       * one for the IO-APIC.
    19.6       */
    19.7 -    if (!smp_found_config && detect_init_APIC())
    19.8 +    if (!smp_found_config && detect_init_APIC()) {
    19.9          apic_phys = __pa(alloc_xenheap_page());
   19.10 -    else
   19.11 +        memset(__va(apic_phys), 0, PAGE_SIZE);
   19.12 +    } else
   19.13          apic_phys = mp_lapic_addr;
   19.14  
   19.15      set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
   19.16 @@ -693,6 +694,7 @@ void __init init_apic_mappings(void)
   19.17              } else {
   19.18  fake_ioapic_page:
   19.19                  ioapic_phys = __pa(alloc_xenheap_page());
   19.20 +                memset(__va(ioapic_phys), 0, PAGE_SIZE);
   19.21              }
   19.22              set_fixmap_nocache(idx, ioapic_phys);
   19.23              apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
    20.1 --- a/xen/arch/x86/cpu/Makefile	Thu Apr 06 00:59:06 2006 +0100
    20.2 +++ b/xen/arch/x86/cpu/Makefile	Thu Apr 06 00:59:18 2006 +0100
    20.3 @@ -1,5 +1,3 @@
    20.4 -include $(BASEDIR)/Rules.mk
    20.5 -
    20.6  subdir-y += mcheck
    20.7  subdir-y += mtrr
    20.8  
    20.9 @@ -12,5 +10,3 @@ obj-$(x86_32) += centaur.o
   20.10  obj-$(x86_32) += cyrix.o
   20.11  obj-$(x86_32) += rise.o
   20.12  obj-$(x86_32) += transmeta.o
   20.13 -
   20.14 -include $(BASEDIR)/Post.mk
    21.1 --- a/xen/arch/x86/cpu/common.c	Thu Apr 06 00:59:06 2006 +0100
    21.2 +++ b/xen/arch/x86/cpu/common.c	Thu Apr 06 00:59:18 2006 +0100
    21.3 @@ -427,6 +427,17 @@ void __devinit identify_cpu(struct cpuin
    21.4  }
    21.5  
    21.6  #ifdef CONFIG_X86_HT
    21.7 +/* cpuid returns the value latched in the HW at reset, not the APIC ID
    21.8 + * register's value.  For any box whose BIOS changes APIC IDs, like
    21.9 + * clustered APIC systems, we must use hard_smp_processor_id.
   21.10 + *
   21.11 + * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
   21.12 + */
   21.13 +static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
   21.14 +{
   21.15 +	return hard_smp_processor_id() >> index_msb;
   21.16 +}
   21.17 +
   21.18  void __devinit detect_ht(struct cpuinfo_x86 *c)
   21.19  {
   21.20  	u32 	eax, ebx, ecx, edx;
    22.1 --- a/xen/arch/x86/cpu/mcheck/Makefile	Thu Apr 06 00:59:06 2006 +0100
    22.2 +++ b/xen/arch/x86/cpu/mcheck/Makefile	Thu Apr 06 00:59:18 2006 +0100
    22.3 @@ -1,5 +1,3 @@
    22.4 -include $(BASEDIR)/Rules.mk
    22.5 -
    22.6  obj-y += k7.o
    22.7  obj-y += mce.o
    22.8  obj-y += non-fatal.o
    22.9 @@ -7,5 +5,3 @@ obj-y += p4.o
   22.10  obj-y += p5.o
   22.11  obj-y += p6.o
   22.12  obj-y += winchip.o
   22.13 -
   22.14 -include $(BASEDIR)/Post.mk
    23.1 --- a/xen/arch/x86/cpu/mtrr/Makefile	Thu Apr 06 00:59:06 2006 +0100
    23.2 +++ b/xen/arch/x86/cpu/mtrr/Makefile	Thu Apr 06 00:59:18 2006 +0100
    23.3 @@ -1,10 +1,6 @@
    23.4 -include $(BASEDIR)/Rules.mk
    23.5 -
    23.6  obj-y += amd.o
    23.7  obj-y += centaur.o
    23.8  obj-y += cyrix.o
    23.9  obj-y += generic.o
   23.10  obj-y += main.o
   23.11  obj-y += state.o
   23.12 -
   23.13 -include $(BASEDIR)/Post.mk
    24.1 --- a/xen/arch/x86/domain.c	Thu Apr 06 00:59:06 2006 +0100
    24.2 +++ b/xen/arch/x86/domain.c	Thu Apr 06 00:59:18 2006 +0100
    24.3 @@ -41,10 +41,6 @@
    24.4  #include <xen/kernel.h>
    24.5  #include <xen/multicall.h>
    24.6  
    24.7 -/* opt_noreboot: If true, machine will need manual reset on error. */
    24.8 -static int opt_noreboot = 0;
    24.9 -boolean_param("noreboot", opt_noreboot);
   24.10 -
   24.11  struct percpu_ctxt {
   24.12      struct vcpu *curr_vcpu;
   24.13      unsigned int dirty_segment_mask;
   24.14 @@ -99,84 +95,6 @@ void startup_cpu_idle_loop(void)
   24.15      reset_stack_and_jump(idle_loop);
   24.16  }
   24.17  
   24.18 -static long no_idt[2];
   24.19 -static int reboot_mode;
   24.20 -
   24.21 -static inline void kb_wait(void)
   24.22 -{
   24.23 -    int i;
   24.24 -
   24.25 -    for ( i = 0; i < 0x10000; i++ )
   24.26 -        if ( (inb_p(0x64) & 0x02) == 0 )
   24.27 -            break;
   24.28 -}
   24.29 -
   24.30 -void __attribute__((noreturn)) __machine_halt(void *unused)
   24.31 -{
   24.32 -    for ( ; ; )
   24.33 -        safe_halt();
   24.34 -}
   24.35 -
   24.36 -void machine_halt(void)
   24.37 -{
   24.38 -    watchdog_disable();
   24.39 -    console_start_sync();
   24.40 -    smp_call_function(__machine_halt, NULL, 1, 0);
   24.41 -    __machine_halt(NULL);
   24.42 -}
   24.43 -
   24.44 -void machine_restart(char * __unused)
   24.45 -{
   24.46 -    int i;
   24.47 -
   24.48 -    if ( opt_noreboot )
   24.49 -    {
   24.50 -        printk("Reboot disabled on cmdline: require manual reset\n");
   24.51 -        machine_halt();
   24.52 -    }
   24.53 -
   24.54 -    watchdog_disable();
   24.55 -    console_start_sync();
   24.56 -
   24.57 -    local_irq_enable();
   24.58 -
   24.59 -    /* Ensure we are the boot CPU. */
   24.60 -    if ( GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid )
   24.61 -    {
   24.62 -        smp_call_function((void *)machine_restart, NULL, 1, 0);
   24.63 -        for ( ; ; )
   24.64 -            safe_halt();
   24.65 -    }
   24.66 -
   24.67 -    /*
   24.68 -     * Stop all CPUs and turn off local APICs and the IO-APIC, so
   24.69 -     * other OSs see a clean IRQ state.
   24.70 -     */
   24.71 -    smp_send_stop();
   24.72 -    disable_IO_APIC();
   24.73 -    hvm_disable();
   24.74 -
   24.75 -    /* Rebooting needs to touch the page at absolute address 0. */
   24.76 -    *((unsigned short *)__va(0x472)) = reboot_mode;
   24.77 -
   24.78 -    for ( ; ; )
   24.79 -    {
   24.80 -        /* Pulse the keyboard reset line. */
   24.81 -        for ( i = 0; i < 100; i++ )
   24.82 -        {
   24.83 -            kb_wait();
   24.84 -            udelay(50);
   24.85 -            outb(0xfe,0x64); /* pulse reset low */
   24.86 -            udelay(50);
   24.87 -        }
   24.88 -
   24.89 -        /* That didn't work - force a triple fault.. */
   24.90 -        __asm__ __volatile__("lidt %0": "=m" (no_idt));
   24.91 -        __asm__ __volatile__("int3");
   24.92 -    }
   24.93 -}
   24.94 -
   24.95 -
   24.96  void dump_pageframe_info(struct domain *d)
   24.97  {
   24.98      struct page_info *page;
   24.99 @@ -445,7 +363,7 @@ int arch_set_info_guest(
  24.100      update_pagetables(v);
  24.101  
  24.102      if ( v->vcpu_id == 0 )
  24.103 -        init_domain_time(d);
  24.104 +        update_domain_wallclock_time(d);
  24.105  
  24.106      /* Don't redo final setup */
  24.107      set_bit(_VCPUF_initialised, &v->vcpu_flags);
    25.1 --- a/xen/arch/x86/domain_build.c	Thu Apr 06 00:59:06 2006 +0100
    25.2 +++ b/xen/arch/x86/domain_build.c	Thu Apr 06 00:59:18 2006 +0100
    25.3 @@ -773,7 +773,7 @@ int construct_dom0(struct domain *d,
    25.4      zap_low_mappings(idle_pg_table_l2);
    25.5  #endif
    25.6  
    25.7 -    init_domain_time(d);
    25.8 +    update_domain_wallclock_time(d);
    25.9  
   25.10      set_bit(_VCPUF_initialised, &v->vcpu_flags);
   25.11  
    26.1 --- a/xen/arch/x86/genapic/Makefile	Thu Apr 06 00:59:06 2006 +0100
    26.2 +++ b/xen/arch/x86/genapic/Makefile	Thu Apr 06 00:59:18 2006 +0100
    26.3 @@ -1,10 +1,7 @@
    26.4 -include $(BASEDIR)/Rules.mk
    26.5 -
    26.6  obj-y += bigsmp.o
    26.7  obj-y += default.o
    26.8 +obj-y += delivery.o
    26.9  obj-y += es7000.o
   26.10  obj-y += es7000plat.o
   26.11  obj-y += probe.o
   26.12  obj-y += summit.o
   26.13 -
   26.14 -include $(BASEDIR)/Post.mk
    27.1 --- a/xen/arch/x86/genapic/bigsmp.c	Thu Apr 06 00:59:06 2006 +0100
    27.2 +++ b/xen/arch/x86/genapic/bigsmp.c	Thu Apr 06 00:59:18 2006 +0100
    27.3 @@ -1,7 +1,3 @@
    27.4 -/* 
    27.5 - * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs.
    27.6 - * Drives the local APIC in "clustered mode".
    27.7 - */
    27.8  #include <xen/config.h>
    27.9  #include <xen/cpumask.h>
   27.10  #include <asm/current.h>
   27.11 @@ -13,8 +9,6 @@
   27.12  #include <xen/smp.h>
   27.13  #include <xen/init.h>
   27.14  #include <xen/dmi.h>
   27.15 -#include <asm/mach_ipi.h>
   27.16 -#include <asm/mach-bigsmp/mach_apic.h>
   27.17  #include <asm/mach-default/mach_mpparse.h>
   27.18  
   27.19  static int dmi_bigsmp; /* can be set by dmi scanners */
   27.20 @@ -52,5 +46,5 @@ static __init int probe_bigsmp(void)
   27.21  
   27.22  struct genapic apic_bigsmp = {
   27.23  	APIC_INIT("bigsmp", probe_bigsmp),
   27.24 -	.send_ipi_mask = send_IPI_mask_sequence
   27.25 +	GENAPIC_PHYS
   27.26  };
    28.1 --- a/xen/arch/x86/genapic/default.c	Thu Apr 06 00:59:06 2006 +0100
    28.2 +++ b/xen/arch/x86/genapic/default.c	Thu Apr 06 00:59:18 2006 +0100
    28.3 @@ -12,8 +12,6 @@
    28.4  #include <xen/string.h>
    28.5  #include <xen/smp.h>
    28.6  #include <xen/init.h>
    28.7 -#include <asm/mach_ipi.h>
    28.8 -#include <asm/mach-default/mach_apic.h>
    28.9  #include <asm/mach-default/mach_mpparse.h>
   28.10  
   28.11  /* should be called last. */
   28.12 @@ -24,5 +22,5 @@ static __init int probe_default(void)
   28.13  
   28.14  struct genapic apic_default = {
   28.15  	APIC_INIT("default", probe_default),
   28.16 -	.send_ipi_mask = send_IPI_mask_bitmask
   28.17 +	GENAPIC_FLAT
   28.18  };
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/xen/arch/x86/genapic/delivery.c	Thu Apr 06 00:59:18 2006 +0100
    29.3 @@ -0,0 +1,68 @@
    29.4 +#include <xen/config.h>
    29.5 +#include <xen/irq.h>
    29.6 +#include <xen/sched.h>
    29.7 +#include <asm/current.h>
    29.8 +#include <asm/smp.h>
    29.9 +#include <asm/hardirq.h>
   29.10 +#include <mach_apic.h>
   29.11 +
   29.12 +
   29.13 +/*
   29.14 + * LOGICAL FLAT DELIVERY MODE (multicast via bitmask to <= 8 logical APIC IDs).
   29.15 + */
   29.16 +
   29.17 +void init_apic_ldr_flat(void)
   29.18 +{
   29.19 +	unsigned long val;
   29.20 +
   29.21 +	apic_write_around(APIC_DFR, APIC_DFR_FLAT);
   29.22 +	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
   29.23 +	val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
   29.24 +	apic_write_around(APIC_LDR, val);
   29.25 +}
   29.26 +
   29.27 +void clustered_apic_check_flat(void)
   29.28 +{
   29.29 +	printk("Enabling APIC mode:  Flat.  Using %d I/O APICs\n", nr_ioapics);
   29.30 +}
   29.31 +
   29.32 +cpumask_t target_cpus_flat(void)
   29.33 +{
   29.34 +	return cpu_online_map;
   29.35 +} 
   29.36 +
   29.37 +unsigned int cpu_mask_to_apicid_flat(cpumask_t cpumask)
   29.38 +{
   29.39 +	return cpus_addr(cpumask)[0];
   29.40 +}
   29.41 +
   29.42 +
   29.43 +/*
   29.44 + * PHYSICAL DELIVERY MODE (unicast to physical APIC IDs).
   29.45 + */
   29.46 +
   29.47 +void init_apic_ldr_phys(void)
   29.48 +{
   29.49 +	unsigned long val;
   29.50 +	apic_write_around(APIC_DFR, APIC_DFR_FLAT);
   29.51 +	/* A dummy logical ID should be fine. We only deliver in phys mode. */
   29.52 +	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
   29.53 +	apic_write_around(APIC_LDR, val);
   29.54 +}
   29.55 +
   29.56 +void clustered_apic_check_phys(void)
   29.57 +{
   29.58 +	printk("Enabling APIC mode:  Phys.  Using %d I/O APICs\n", nr_ioapics);
   29.59 +}
   29.60 +
   29.61 +cpumask_t target_cpus_phys(void)
   29.62 +{
   29.63 +	/* IRQs will get bound more accurately later. */
   29.64 +	return cpumask_of_cpu(0);
   29.65 +}
   29.66 +
   29.67 +unsigned int cpu_mask_to_apicid_phys(cpumask_t cpumask)
   29.68 +{
    29.69 +	/* As we are using a single CPU as the destination, pick only one CPU here */
   29.70 +	return cpu_physical_id(first_cpu(cpumask));
   29.71 +}
    30.1 --- a/xen/arch/x86/genapic/es7000.c	Thu Apr 06 00:59:06 2006 +0100
    30.2 +++ b/xen/arch/x86/genapic/es7000.c	Thu Apr 06 00:59:18 2006 +0100
    30.3 @@ -13,8 +13,6 @@
    30.4  #include <xen/string.h>
    30.5  #include <xen/smp.h>
    30.6  #include <xen/init.h>
    30.7 -#include <asm/mach_ipi.h>
    30.8 -#include <asm/mach-es7000/mach_apic.h>
    30.9  #include <asm/mach-es7000/mach_mpparse.h>
   30.10  
   30.11  static __init int probe_es7000(void)
   30.12 @@ -25,5 +23,5 @@ static __init int probe_es7000(void)
   30.13  
   30.14  struct genapic apic_es7000 = {
   30.15  	APIC_INIT("es7000", probe_es7000),
   30.16 -	.send_ipi_mask = send_IPI_mask_sequence
   30.17 +	GENAPIC_PHYS
   30.18  };
    31.1 --- a/xen/arch/x86/genapic/summit.c	Thu Apr 06 00:59:06 2006 +0100
    31.2 +++ b/xen/arch/x86/genapic/summit.c	Thu Apr 06 00:59:18 2006 +0100
    31.3 @@ -12,8 +12,6 @@
    31.4  #include <xen/string.h>
    31.5  #include <xen/smp.h>
    31.6  #include <xen/init.h>
    31.7 -#include <asm/mach_ipi.h>
    31.8 -#include <asm/mach-summit/mach_apic.h>
    31.9  #include <asm/mach-summit/mach_mpparse.h>
   31.10  
   31.11  static __init int probe_summit(void)
   31.12 @@ -24,5 +22,5 @@ static __init int probe_summit(void)
   31.13  
   31.14  struct genapic apic_summit = {
   31.15  	APIC_INIT("summit", probe_summit),
   31.16 -	.send_ipi_mask = send_IPI_mask_sequence
   31.17 +	GENAPIC_PHYS
   31.18  };
    32.1 --- a/xen/arch/x86/hvm/Makefile	Thu Apr 06 00:59:06 2006 +0100
    32.2 +++ b/xen/arch/x86/hvm/Makefile	Thu Apr 06 00:59:18 2006 +0100
    32.3 @@ -1,5 +1,3 @@
    32.4 -include $(BASEDIR)/Rules.mk
    32.5 -
    32.6  subdir-y += svm
    32.7  subdir-y += vmx
    32.8  
    32.9 @@ -10,5 +8,3 @@ obj-y += io.o
   32.10  obj-y += platform.o
   32.11  obj-y += vioapic.o
   32.12  obj-y += vlapic.o
   32.13 -
   32.14 -include $(BASEDIR)/Post.mk
    33.1 --- a/xen/arch/x86/hvm/intercept.c	Thu Apr 06 00:59:06 2006 +0100
    33.2 +++ b/xen/arch/x86/hvm/intercept.c	Thu Apr 06 00:59:18 2006 +0100
    33.3 @@ -123,6 +123,16 @@ static inline void hvm_mmio_access(struc
    33.4          req->u.data = tmp1;
    33.5          break;
    33.6  
    33.7 +    case IOREQ_TYPE_XCHG:
    33.8 +        /* 
    33.9 +         * Note that we don't need to be atomic here since VCPU is accessing
   33.10 +         * its own local APIC.
   33.11 +         */
   33.12 +        tmp1 = read_handler(v, req->addr, req->size);
   33.13 +        write_handler(v, req->addr, req->size, (unsigned long) req->u.data);
   33.14 +        req->u.data = tmp1;
   33.15 +        break;
   33.16 +
   33.17      default:
   33.18          printk("error ioreq type for local APIC %x\n", req->type);
   33.19          domain_crash_synchronous();
   33.20 @@ -143,7 +153,7 @@ int hvm_mmio_intercept(ioreq_t *p)
   33.21          if ( hvm_mmio_handlers[i]->check_handler(v, p->addr) ) {
   33.22              hvm_mmio_access(v, p,
   33.23                              hvm_mmio_handlers[i]->read_handler,
   33.24 -	                    hvm_mmio_handlers[i]->write_handler);
   33.25 +                            hvm_mmio_handlers[i]->write_handler);
   33.26              return 1;
   33.27          }
   33.28      }
    34.1 --- a/xen/arch/x86/hvm/io.c	Thu Apr 06 00:59:06 2006 +0100
    34.2 +++ b/xen/arch/x86/hvm/io.c	Thu Apr 06 00:59:18 2006 +0100
    34.3 @@ -365,45 +365,47 @@ static void hvm_pio_assist(struct cpu_us
    34.4      unsigned long old_eax;
    34.5      int sign = p->df ? -1 : 1;
    34.6  
    34.7 -    if (p->dir == IOREQ_WRITE) {
    34.8 -        if (p->pdata_valid) {
    34.9 -            regs->esi += sign * p->count * p->size;
   34.10 -            if (mmio_opp->flags & REPZ)
   34.11 -                regs->ecx -= p->count;
   34.12 -        }
   34.13 -    } else {
   34.14 -        if (mmio_opp->flags & OVERLAP) {
   34.15 -            unsigned long addr;
   34.16 -
   34.17 +    if ( p->pdata_valid || (mmio_opp->flags & OVERLAP) )
   34.18 +    {
   34.19 +        if ( mmio_opp->flags & REPZ )
   34.20 +            regs->ecx -= p->count;
   34.21 +        if ( p->dir == IOREQ_READ )
   34.22 +        {
   34.23              regs->edi += sign * p->count * p->size;
   34.24 -            if (mmio_opp->flags & REPZ)
   34.25 -                regs->ecx -= p->count;
   34.26 -
   34.27 -            addr = regs->edi;
   34.28 -            if (sign > 0)
   34.29 -                addr -= p->size;
   34.30 -            hvm_copy(&p->u.data, addr, p->size, HVM_COPY_OUT);
   34.31 -        } else if (p->pdata_valid) {
   34.32 -            regs->edi += sign * p->count * p->size;
   34.33 -            if (mmio_opp->flags & REPZ)
   34.34 -                regs->ecx -= p->count;
   34.35 -        } else {
   34.36 -            old_eax = regs->eax;
   34.37 -            switch (p->size) {
   34.38 -            case 1:
   34.39 -                regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
   34.40 -                break;
   34.41 -            case 2:
   34.42 -                regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
   34.43 -                break;
   34.44 -            case 4:
   34.45 -                regs->eax = (p->u.data & 0xffffffff);
   34.46 -                break;
   34.47 -            default:
   34.48 -                printk("Error: %s unknown port size\n", __FUNCTION__);
   34.49 -                domain_crash_synchronous();
   34.50 +            if ( mmio_opp->flags & OVERLAP )
   34.51 +            {
   34.52 +                unsigned long addr = regs->edi;
   34.53 +                if (hvm_realmode(current))
   34.54 +                    addr += regs->es << 4;
   34.55 +                if (sign > 0)
   34.56 +                    addr -= p->size;
   34.57 +                hvm_copy(&p->u.data, addr, p->size, HVM_COPY_OUT);
   34.58              }
   34.59          }
   34.60 +        else /* p->dir == IOREQ_WRITE */
   34.61 +        {
   34.62 +            ASSERT(p->dir == IOREQ_WRITE);
   34.63 +            regs->esi += sign * p->count * p->size;
   34.64 +        }
   34.65 +    }
   34.66 +    else if ( p->dir == IOREQ_READ )
   34.67 +    {
   34.68 +        old_eax = regs->eax;
   34.69 +        switch ( p->size )
   34.70 +        {
   34.71 +        case 1:
   34.72 +            regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
   34.73 +            break;
   34.74 +        case 2:
   34.75 +            regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
   34.76 +            break;
   34.77 +        case 4:
   34.78 +            regs->eax = (p->u.data & 0xffffffff);
   34.79 +            break;
   34.80 +        default:
   34.81 +            printk("Error: %s unknown port size\n", __FUNCTION__);
   34.82 +            domain_crash_synchronous();
   34.83 +        }
   34.84      }
   34.85  }
   34.86  
    35.1 --- a/xen/arch/x86/hvm/platform.c	Thu Apr 06 00:59:06 2006 +0100
    35.2 +++ b/xen/arch/x86/hvm/platform.c	Thu Apr 06 00:59:18 2006 +0100
    35.3 @@ -439,6 +439,14 @@ static int hvm_decode(int realmode, unsi
    35.4          GET_OP_SIZE_FOR_BYTE(size_reg);
    35.5          return mem_reg(size_reg, opcode, instr, rex);
    35.6  
    35.7 +    case 0x87:  /* xchg {r/m16|r/m32}, {m/r16|m/r32} */
    35.8 +        instr->instr = INSTR_XCHG;
    35.9 +        GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
   35.10 +        if (((*(opcode+1)) & 0xc7) == 5)
   35.11 +            return reg_mem(instr->op_size, opcode, instr, rex);
   35.12 +        else
   35.13 +            return mem_reg(instr->op_size, opcode, instr, rex);
   35.14 +
   35.15      case 0x88: /* mov r8, m8 */
   35.16          instr->instr = INSTR_MOV;
   35.17          instr->op_size = BYTE;
   35.18 @@ -936,6 +944,17 @@ void handle_mmio(unsigned long va, unsig
   35.19              break;
   35.20          }
   35.21  
   35.22 +    case INSTR_XCHG:
   35.23 +        mmio_opp->flags = mmio_inst.flags;
   35.24 +        mmio_opp->instr = mmio_inst.instr;
   35.25 +        mmio_opp->operand[0] = mmio_inst.operand[0]; /* source */
   35.26 +        mmio_opp->operand[1] = mmio_inst.operand[1]; /* destination */
   35.27 +
   35.28 +        /* send the request and wait for the value */
   35.29 +        send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1,
   35.30 +                      mmio_inst.op_size, 0, IOREQ_WRITE, 0);
   35.31 +        break;
   35.32 +
   35.33      default:
   35.34          printf("Unhandled MMIO instruction\n");
   35.35          domain_crash_synchronous();
    36.1 --- a/xen/arch/x86/hvm/svm/Makefile	Thu Apr 06 00:59:06 2006 +0100
    36.2 +++ b/xen/arch/x86/hvm/svm/Makefile	Thu Apr 06 00:59:18 2006 +0100
    36.3 @@ -1,5 +1,3 @@
    36.4 -include $(BASEDIR)/Rules.mk
    36.5 -
    36.6  subdir-$(x86_32) += x86_32
    36.7  subdir-$(x86_64) += x86_64
    36.8  
    36.9 @@ -8,5 +6,3 @@ obj-y += instrlen.o
   36.10  obj-y += intr.o
   36.11  obj-y += svm.o
   36.12  obj-y += vmcb.o
   36.13 -
   36.14 -include $(BASEDIR)/Post.mk
    37.1 --- a/xen/arch/x86/hvm/svm/x86_32/Makefile	Thu Apr 06 00:59:06 2006 +0100
    37.2 +++ b/xen/arch/x86/hvm/svm/x86_32/Makefile	Thu Apr 06 00:59:18 2006 +0100
    37.3 @@ -1,5 +1,1 @@
    37.4 -include $(BASEDIR)/Rules.mk
    37.5 -
    37.6  obj-y += exits.o
    37.7 -
    37.8 -include $(BASEDIR)/Post.mk
    38.1 --- a/xen/arch/x86/hvm/svm/x86_64/Makefile	Thu Apr 06 00:59:06 2006 +0100
    38.2 +++ b/xen/arch/x86/hvm/svm/x86_64/Makefile	Thu Apr 06 00:59:18 2006 +0100
    38.3 @@ -1,5 +1,1 @@
    38.4 -include $(BASEDIR)/Rules.mk
    38.5 -
    38.6  obj-y += exits.o
    38.7 -
    38.8 -include $(BASEDIR)/Post.mk
    39.1 --- a/xen/arch/x86/hvm/vmx/Makefile	Thu Apr 06 00:59:06 2006 +0100
    39.2 +++ b/xen/arch/x86/hvm/vmx/Makefile	Thu Apr 06 00:59:18 2006 +0100
    39.3 @@ -1,10 +1,6 @@
    39.4 -include $(BASEDIR)/Rules.mk
    39.5 -
    39.6  subdir-$(x86_32) += x86_32
    39.7  subdir-$(x86_64) += x86_64
    39.8  
    39.9  obj-y += io.o
   39.10  obj-y += vmcs.o
   39.11  obj-y += vmx.o
   39.12 -
   39.13 -include $(BASEDIR)/Post.mk
    40.1 --- a/xen/arch/x86/hvm/vmx/x86_32/Makefile	Thu Apr 06 00:59:06 2006 +0100
    40.2 +++ b/xen/arch/x86/hvm/vmx/x86_32/Makefile	Thu Apr 06 00:59:18 2006 +0100
    40.3 @@ -1,5 +1,1 @@
    40.4 -include $(BASEDIR)/Rules.mk
    40.5 -
    40.6  obj-y += exits.o
    40.7 -
    40.8 -include $(BASEDIR)/Post.mk
    41.1 --- a/xen/arch/x86/hvm/vmx/x86_64/Makefile	Thu Apr 06 00:59:06 2006 +0100
    41.2 +++ b/xen/arch/x86/hvm/vmx/x86_64/Makefile	Thu Apr 06 00:59:18 2006 +0100
    41.3 @@ -1,5 +1,1 @@
    41.4 -include $(BASEDIR)/Rules.mk
    41.5 -
    41.6  obj-y += exits.o
    41.7 -
    41.8 -include $(BASEDIR)/Post.mk
    42.1 --- a/xen/arch/x86/io_apic.c	Thu Apr 06 00:59:06 2006 +0100
    42.2 +++ b/xen/arch/x86/io_apic.c	Thu Apr 06 00:59:18 2006 +0100
    42.3 @@ -1736,8 +1736,10 @@ int __init io_apic_get_unique_id (int io
    42.4          spin_unlock_irqrestore(&ioapic_lock, flags);
    42.5  
    42.6          /* Sanity check */
    42.7 -        if (reg_00.bits.ID != apic_id)
    42.8 -            panic("IOAPIC[%d]: Unable change apic_id!\n", ioapic);
    42.9 +        if (reg_00.bits.ID != apic_id) {
   42.10 +            printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
   42.11 +            return -1;
   42.12 +        }
   42.13      }
   42.14  
   42.15      apic_printk(APIC_VERBOSE, KERN_INFO
    43.1 --- a/xen/arch/x86/mpparse.c	Thu Apr 06 00:59:06 2006 +0100
    43.2 +++ b/xen/arch/x86/mpparse.c	Thu Apr 06 00:59:18 2006 +0100
    43.3 @@ -35,7 +35,7 @@
    43.4  
    43.5  /* Have we found an MP table */
    43.6  int smp_found_config;
    43.7 -unsigned int __initdata maxcpus = NR_CPUS;
    43.8 +unsigned int __devinitdata maxcpus = NR_CPUS;
    43.9  
   43.10  #ifdef CONFIG_HOTPLUG_CPU
   43.11  #define CPU_HOTPLUG_ENABLED	(1)
   43.12 @@ -226,16 +226,11 @@ static void __devinit MP_processor_info 
   43.13  	num_processors++;
   43.14  
   43.15  	if (CPU_HOTPLUG_ENABLED || (num_processors > 8)) {
   43.16 -		switch (boot_cpu_data.x86_vendor) {
   43.17 -		case X86_VENDOR_INTEL:
   43.18 -			if (!APIC_XAPIC(ver)) {
   43.19 -				def_to_bigsmp = 0;
   43.20 -				break;
   43.21 -			}
   43.22 -			/* If P4 and above fall through */
   43.23 -		case X86_VENDOR_AMD:
   43.24 -			def_to_bigsmp = 1;
   43.25 -		}
   43.26 +		/*
   43.27 +		 * No need for processor or APIC checks: physical delivery
   43.28 +		 * (bigsmp) mode should always work.
   43.29 +		 */
   43.30 +		def_to_bigsmp = 1;
   43.31  	}
   43.32  	bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
   43.33  }
   43.34 @@ -916,6 +911,7 @@ void __init mp_register_ioapic (
   43.35  	u32			gsi_base)
   43.36  {
   43.37  	int			idx = 0;
   43.38 +	int			tmpid;
   43.39  
   43.40  	if (nr_ioapics >= MAX_IO_APICS) {
   43.41  		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
   43.42 @@ -936,9 +932,14 @@ void __init mp_register_ioapic (
   43.43  
   43.44  	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
   43.45  	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15))
   43.46 -		mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
   43.47 +		tmpid = io_apic_get_unique_id(idx, id);
   43.48  	else
   43.49 -		mp_ioapics[idx].mpc_apicid = id;
   43.50 +		tmpid = id;
   43.51 +	if (tmpid == -1) {
   43.52 +		nr_ioapics--;
   43.53 +		return;
   43.54 +	}
   43.55 +	mp_ioapics[idx].mpc_apicid = tmpid;
   43.56  	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
   43.57  	
   43.58  	/* 
    44.1 --- a/xen/arch/x86/nmi.c	Thu Apr 06 00:59:06 2006 +0100
    44.2 +++ b/xen/arch/x86/nmi.c	Thu Apr 06 00:59:18 2006 +0100
    44.3 @@ -431,14 +431,14 @@ void nmi_watchdog_tick(struct cpu_user_r
    44.4   */
    44.5  static void do_nmi_trigger(unsigned char key)
    44.6  {
    44.7 -    u32 id = apic_read(APIC_ID);
    44.8 +    u32 id = GET_APIC_ID(apic_read(APIC_ID));
    44.9  
   44.10      printk("Triggering NMI on APIC ID %x\n", id);
   44.11  
   44.12      local_irq_disable();
   44.13      apic_wait_icr_idle();
   44.14      apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(id));
   44.15 -    apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_INT_ASSERT);
   44.16 +    apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_PHYSICAL);
   44.17      local_irq_enable();
   44.18  }
   44.19  
    45.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    45.2 +++ b/xen/arch/x86/shutdown.c	Thu Apr 06 00:59:18 2006 +0100
    45.3 @@ -0,0 +1,342 @@
    45.4 +/******************************************************************************
    45.5 + * arch/x86/shutdown.c
    45.6 + *
    45.7 + * x86-specific shutdown handling.
    45.8 + */
    45.9 +
   45.10 +#include <xen/config.h>
   45.11 +#include <xen/init.h>
   45.12 +#include <xen/lib.h>
   45.13 +#include <xen/sched.h>
   45.14 +#include <xen/smp.h>
   45.15 +#include <xen/delay.h>
   45.16 +#include <xen/dmi.h>
   45.17 +#include <asm/regs.h>
   45.18 +#include <asm/mc146818rtc.h>
   45.19 +#include <asm/system.h>
   45.20 +#include <asm/io.h>
   45.21 +#include <asm/processor.h>
   45.22 +#include <asm/mpspec.h>
   45.23 +#include <xen/irq.h>
   45.24 +#include <xen/console.h>
   45.25 +#include <asm/msr.h>
   45.26 +
   45.27 +/* opt_noreboot: If true, machine will need manual reset on error. */
   45.28 +static int opt_noreboot = 0;
   45.29 +boolean_param("noreboot", opt_noreboot);
   45.30 +
   45.31 +/* reboot_str: comma-separated list of reboot options. */
   45.32 +static char __initdata reboot_str[10] = "";
   45.33 +string_param("reboot", reboot_str);
   45.34 +
   45.35 +static long no_idt[2];
   45.36 +static int reboot_mode;
   45.37 +
   45.38 +static inline void kb_wait(void)
   45.39 +{
   45.40 +    int i;
   45.41 +
   45.42 +    for ( i = 0; i < 0x10000; i++ )
   45.43 +        if ( (inb_p(0x64) & 0x02) == 0 )
   45.44 +            break;
   45.45 +}
   45.46 +
   45.47 +void __attribute__((noreturn)) __machine_halt(void *unused)
   45.48 +{
   45.49 +    for ( ; ; )
   45.50 +        safe_halt();
   45.51 +}
   45.52 +
   45.53 +void machine_halt(void)
   45.54 +{
   45.55 +    watchdog_disable();
   45.56 +    console_start_sync();
   45.57 +    smp_call_function(__machine_halt, NULL, 1, 0);
   45.58 +    __machine_halt(NULL);
   45.59 +}
   45.60 +
   45.61 +#ifdef __i386__
   45.62 +
   45.63 +static int reboot_thru_bios;
   45.64 +
   45.65 +/* The following code and data reboots the machine by switching to real
   45.66 +   mode and jumping to the BIOS reset entry point, as if the CPU has
   45.67 +   really been reset.  The previous version asked the keyboard
   45.68 +   controller to pulse the CPU reset line, which is more thorough, but
   45.69 +   doesn't work with at least one type of 486 motherboard.  It is easy
   45.70 +   to stop this code working; hence the copious comments. */
   45.71 +
   45.72 +static unsigned long long
   45.73 +real_mode_gdt_entries [3] =
   45.74 +{
   45.75 +    0x0000000000000000ULL,      /* Null descriptor */
   45.76 +    0x00009a000000ffffULL,      /* 16-bit real-mode 64k code at 0x00000000 */
   45.77 +    0x000092000100ffffULL       /* 16-bit real-mode 64k data at 0x00000100 */
   45.78 +};
   45.79 +
   45.80 +static const struct
   45.81 +{
   45.82 +    unsigned short       size __attribute__ ((packed));
   45.83 +    unsigned long long * base __attribute__ ((packed));
   45.84 +}
   45.85 +real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, real_mode_gdt_entries },
   45.86 +real_mode_idt = { 0x3ff, NULL };
   45.87 +
   45.88 +
   45.89 +/* This is 16-bit protected mode code to disable paging and the cache,
   45.90 +   switch to real mode and jump to the BIOS reset code.
   45.91 +
   45.92 +   The instruction that switches to real mode by writing to CR0 must be
   45.93 +   followed immediately by a far jump instruction, which set CS to a
   45.94 +   valid value for real mode, and flushes the prefetch queue to avoid
   45.95 +   running instructions that have already been decoded in protected
   45.96 +   mode.
   45.97 +
   45.98 +   Clears all the flags except ET, especially PG (paging), PE
   45.99 +   (protected-mode enable) and TS (task switch for coprocessor state
  45.100 +   save).  Flushes the TLB after paging has been disabled.  Sets CD and
  45.101 +   NW, to disable the cache on a 486, and invalidates the cache.  This
  45.102 +   is more like the state of a 486 after reset.  I don't know if
  45.103 +   something else should be done for other chips.
  45.104 +
  45.105 +   More could be done here to set up the registers as if a CPU reset had
  45.106 +   occurred; hopefully real BIOSs don't assume much. */
  45.107 +
  45.108 +static const unsigned char real_mode_switch [] =
  45.109 +{
  45.110 +    0x0f, 0x20, 0xc0,                           /*    movl  %cr0,%eax        */
  45.111 +    0x66, 0x83, 0xe0, 0x11,                     /*    andl  $0x00000011,%eax */
  45.112 +    0x66, 0x0d, 0x00, 0x00, 0x00, 0x60,         /*    orl   $0x60000000,%eax */
  45.113 +    0x0f, 0x22, 0xc0,                           /*    movl  %eax,%cr0        */
  45.114 +    0x0f, 0x22, 0xd8,                           /*    movl  %eax,%cr3        */
  45.115 +    0x0f, 0x20, 0xc2,                           /*    movl  %cr0,%edx        */
  45.116 +    0x66, 0x81, 0xe2, 0x00, 0x00, 0x00, 0x60,   /*    andl  $0x60000000,%edx */
  45.117 +    0x74, 0x02,                                 /*    jz    f                */
  45.118 +    0x0f, 0x09,                                 /*    wbinvd                 */
  45.119 +    0x24, 0x10,                                 /* f: andb  $0x10,al         */
  45.120 +    0x0f, 0x22, 0xc0                            /*    movl  %eax,%cr0        */
  45.121 +};
  45.122 +#define MAX_LENGTH 0x40
  45.123 +static const unsigned char jump_to_bios [] =
  45.124 +{
  45.125 +    0xea, 0xf0, 0xff, 0x00, 0xf0                /*    ljmp  $0xf000,$0xfff0  */
  45.126 +};
  45.127 +
  45.128 +/*
  45.129 + * Switch to real mode and then execute the code
  45.130 + * specified by the code and length parameters.
   45.131 + * We assume that length will always be less than MAX_LENGTH!
  45.132 + */
  45.133 +void machine_real_restart(const unsigned char *code, unsigned length)
  45.134 +{
  45.135 +    local_irq_disable();
  45.136 +
  45.137 +    /* Write zero to CMOS register number 0x0f, which the BIOS POST
  45.138 +       routine will recognize as telling it to do a proper reboot.  (Well
  45.139 +       that's what this book in front of me says -- it may only apply to
  45.140 +       the Phoenix BIOS though, it's not clear).  At the same time,
  45.141 +       disable NMIs by setting the top bit in the CMOS address register,
  45.142 +       as we're about to do peculiar things to the CPU. */
  45.143 +
  45.144 +    spin_lock(&rtc_lock);
  45.145 +    CMOS_WRITE(0x00, 0x8f);
  45.146 +    spin_unlock(&rtc_lock);
  45.147 +
  45.148 +    /* Identity-map virtual address zero. */
  45.149 +
  45.150 +    map_pages_to_xen(0, 0, 1, __PAGE_HYPERVISOR|MAP_SMALL_PAGES);
  45.151 +    set_current(idle_vcpu[0]);
  45.152 +    write_ptbase(idle_vcpu[0]);
  45.153 +
  45.154 +    /* For the switch to real mode, copy some code to low memory.  It has
  45.155 +       to be in the first 64k because it is running in 16-bit mode, and it
  45.156 +       has to have the same physical and virtual address, because it turns
  45.157 +       off paging.  Copy it near the end of the first page, out of the way
  45.158 +       of BIOS variables. */
  45.159 +
  45.160 +    memcpy((void *)(PAGE_SIZE - sizeof(real_mode_switch) - MAX_LENGTH),
  45.161 +           real_mode_switch, sizeof(real_mode_switch));
  45.162 +    memcpy((void *)(PAGE_SIZE - MAX_LENGTH), code, length);
  45.163 +
  45.164 +    /* Set up the IDT for real mode. */
  45.165 +
  45.166 +    __asm__ __volatile__("lidt %0": : "m" (real_mode_idt));
  45.167 +
  45.168 +    /* Set up a GDT from which we can load segment descriptors for real
  45.169 +       mode.  The GDT is not used in real mode; it is just needed here to
  45.170 +       prepare the descriptors. */
  45.171 +
  45.172 +    __asm__ __volatile__("lgdt %0": : "m" (real_mode_gdt));
  45.173 +
  45.174 +    /* Load the data segment registers, and thus the descriptors ready for
  45.175 +       real mode.  The base address of each segment is 0x100, 16 times the
  45.176 +       selector value being loaded here.  This is so that the segment
  45.177 +       registers don't have to be reloaded after switching to real mode:
  45.178 +       the values are consistent for real mode operation already. */
  45.179 +
  45.180 +    __asm__ __volatile__ ("\tmov %0,%%ds\n"
  45.181 +                          "\tmov %0,%%es\n"
  45.182 +                          "\tmov %0,%%fs\n"
  45.183 +                          "\tmov %0,%%gs\n"
  45.184 +                          "\tmov %0,%%ss"
  45.185 +                          :
  45.186 +                          : "r" (0x0010));
  45.187 +
  45.188 +    /* Jump to the 16-bit code that we copied earlier.  It disables paging
  45.189 +       and the cache, switches to real mode, and jumps to the BIOS reset
  45.190 +       entry point. */
  45.191 +
  45.192 +    __asm__ __volatile__ ("ljmp $0x0008,%0"
  45.193 +                          :
  45.194 +                          : "i" ((void *)(PAGE_SIZE -
  45.195 +                                          sizeof(real_mode_switch) -
  45.196 +                                          MAX_LENGTH)));
  45.197 +}
  45.198 +
  45.199 +#else /* __x86_64__ */
  45.200 +
  45.201 +#define machine_real_restart(x, y)
  45.202 +#define reboot_thru_bios 0
  45.203 +
  45.204 +#endif
  45.205 +
  45.206 +void machine_restart(char * __unused)
  45.207 +{
  45.208 +    int i;
  45.209 +
  45.210 +    if ( opt_noreboot )
  45.211 +    {
  45.212 +        printk("Reboot disabled on cmdline: require manual reset\n");
  45.213 +        machine_halt();
  45.214 +    }
  45.215 +
  45.216 +    watchdog_disable();
  45.217 +    console_start_sync();
  45.218 +
  45.219 +    local_irq_enable();
  45.220 +
  45.221 +    /* Ensure we are the boot CPU. */
  45.222 +    if ( GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid )
  45.223 +    {
  45.224 +        smp_call_function((void *)machine_restart, NULL, 1, 0);
  45.225 +        for ( ; ; )
  45.226 +            safe_halt();
  45.227 +    }
  45.228 +
  45.229 +    /*
  45.230 +     * Stop all CPUs and turn off local APICs and the IO-APIC, so
  45.231 +     * other OSs see a clean IRQ state.
  45.232 +     */
  45.233 +    smp_send_stop();
  45.234 +    disable_IO_APIC();
  45.235 +    hvm_disable();
  45.236 +
  45.237 +    /* Rebooting needs to touch the page at absolute address 0. */
  45.238 +    *((unsigned short *)__va(0x472)) = reboot_mode;
  45.239 +
  45.240 +    if (reboot_thru_bios <= 0)
  45.241 +    {
  45.242 +        for ( ; ; )
  45.243 +        {
  45.244 +            /* Pulse the keyboard reset line. */
  45.245 +            for ( i = 0; i < 100; i++ )
  45.246 +            {
  45.247 +                kb_wait();
  45.248 +                udelay(50);
  45.249 +                outb(0xfe,0x64); /* pulse reset low */
  45.250 +                udelay(50);
  45.251 +            }
  45.252 +
  45.253 +            /* That didn't work - force a triple fault.. */
  45.254 +            __asm__ __volatile__("lidt %0": "=m" (no_idt));
  45.255 +            __asm__ __volatile__("int3");
  45.256 +        }
  45.257 +    }
  45.258 +    machine_real_restart(jump_to_bios, sizeof(jump_to_bios));
  45.259 +}
  45.260 +
  45.261 +#ifndef reboot_thru_bios
  45.262 +static int __init set_bios_reboot(struct dmi_system_id *d)
  45.263 +{
  45.264 +    if ( !reboot_thru_bios )
  45.265 +    {
  45.266 +        reboot_thru_bios = 1;
  45.267 +        printk("%s series board detected. "
  45.268 +               "Selecting BIOS-method for reboots.\n", d->ident);
  45.269 +    }
  45.270 +    return 0;
  45.271 +}
  45.272 +
  45.273 +static struct dmi_system_id __initdata reboot_dmi_table[] = {
  45.274 +    {    /* Handle problems with rebooting on Dell 1300's */
  45.275 +        .callback = set_bios_reboot,
  45.276 +        .ident = "Dell PowerEdge 1300",
  45.277 +        .matches = {
  45.278 +            DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
  45.279 +            DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
  45.280 +        },
  45.281 +    },
  45.282 +    {    /* Handle problems with rebooting on Dell 300's */
  45.283 +        .callback = set_bios_reboot,
  45.284 +        .ident = "Dell PowerEdge 300",
  45.285 +        .matches = {
  45.286 +            DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
  45.287 +            DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
  45.288 +        },
  45.289 +    },
  45.290 +    {    /* Handle problems with rebooting on Dell 2400's */
  45.291 +        .callback = set_bios_reboot,
  45.292 +        .ident = "Dell PowerEdge 2400",
  45.293 +        .matches = {
  45.294 +            DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
  45.295 +            DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
  45.296 +        },
  45.297 +    },
  45.298 +    {    /* Handle problems with rebooting on HP laptops */
  45.299 +        .callback = set_bios_reboot,
  45.300 +        .ident = "HP Compaq Laptop",
  45.301 +        .matches = {
  45.302 +            DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
  45.303 +            DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
  45.304 +        },
  45.305 +    },
  45.306 +    { }
  45.307 +};
  45.308 +#endif
  45.309 +
  45.310 +static int __init reboot_init(void)
  45.311 +{
  45.312 +    const char *str;
  45.313 +
  45.314 +    for ( str = reboot_str; *str != '\0'; str++ )
  45.315 +    {
  45.316 +        switch ( *str )
  45.317 +        {
  45.318 +        case 'n': /* no reboot */
  45.319 +            opt_noreboot = 1;
  45.320 +            break;
  45.321 +        case 'w': /* "warm" reboot (no memory testing etc) */
  45.322 +            reboot_mode = 0x1234;
  45.323 +            break;
  45.324 +        case 'c': /* "cold" reboot (with memory testing etc) */
  45.325 +            reboot_mode = 0x0;
  45.326 +            break;
  45.327 +#ifndef reboot_thru_bios
  45.328 +        case 'b': /* "bios" reboot by jumping through the BIOS */
  45.329 +            reboot_thru_bios = 1;
  45.330 +            break;
  45.331 +        case 'h': /* "hard" reboot by toggling RESET and/or crashing the CPU */
  45.332 +            reboot_thru_bios = -1;
  45.333 +            break;
  45.334 +#endif
  45.335 +        }
  45.336 +        if ( (str = strchr(str, ',')) == NULL )
  45.337 +            break;
  45.338 +    }
  45.339 +
  45.340 +#ifndef reboot_thru_bios
  45.341 +    dmi_check_system(reboot_dmi_table);
  45.342 +#endif
  45.343 +    return 0;
  45.344 +}
  45.345 +__initcall(reboot_init);
    46.1 --- a/xen/arch/x86/smp.c	Thu Apr 06 00:59:06 2006 +0100
    46.2 +++ b/xen/arch/x86/smp.c	Thu Apr 06 00:59:18 2006 +0100
    46.3 @@ -20,7 +20,7 @@
    46.4  #include <asm/flushtlb.h>
    46.5  #include <asm/smpboot.h>
    46.6  #include <asm/hardirq.h>
    46.7 -#include <asm/mach_ipi.h>
    46.8 +#include <asm/ipi.h>
    46.9  #include <mach_apic.h>
   46.10  
   46.11  /*
   46.12 @@ -67,7 +67,7 @@
   46.13  
   46.14  static inline int __prepare_ICR (unsigned int shortcut, int vector)
   46.15  {
   46.16 -    return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL;
   46.17 +    return APIC_DM_FIXED | shortcut | vector;
   46.18  }
   46.19  
   46.20  static inline int __prepare_ICR2 (unsigned int mask)
   46.21 @@ -85,7 +85,7 @@ static inline void check_IPI_mask(cpumas
   46.22      ASSERT(!cpus_empty(cpumask));
   46.23  }
   46.24  
   46.25 -void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
   46.26 +void send_IPI_mask_flat(cpumask_t cpumask, int vector)
   46.27  {
   46.28      unsigned long mask = cpus_addr(cpumask)[0];
   46.29      unsigned long cfg;
   46.30 @@ -99,18 +99,18 @@ void send_IPI_mask_bitmask(cpumask_t cpu
   46.31       * Wait for idle.
   46.32       */
   46.33      apic_wait_icr_idle();
   46.34 -		
   46.35 +
   46.36      /*
   46.37       * prepare target chip field
   46.38       */
   46.39      cfg = __prepare_ICR2(mask);
   46.40      apic_write_around(APIC_ICR2, cfg);
   46.41 -		
   46.42 +
   46.43      /*
   46.44       * program the ICR
   46.45       */
   46.46 -    cfg = __prepare_ICR(0, vector);
   46.47 -			
   46.48 +    cfg = __prepare_ICR(0, vector) | APIC_DEST_LOGICAL;
   46.49 +
   46.50      /*
   46.51       * Send the IPI. The write to APIC_ICR fires this off.
   46.52       */
   46.53 @@ -119,7 +119,7 @@ void send_IPI_mask_bitmask(cpumask_t cpu
   46.54      local_irq_restore(flags);
   46.55  }
   46.56  
   46.57 -void send_IPI_mask_sequence(cpumask_t mask, int vector)
   46.58 +void send_IPI_mask_phys(cpumask_t mask, int vector)
   46.59  {
   46.60      unsigned long cfg, flags;
   46.61      unsigned int query_cpu;
   46.62 @@ -140,18 +140,18 @@ void send_IPI_mask_sequence(cpumask_t ma
   46.63           * Wait for idle.
   46.64           */
   46.65          apic_wait_icr_idle();
   46.66 -		
   46.67 +
   46.68          /*
   46.69           * prepare target chip field
   46.70           */
   46.71 -        cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu));
   46.72 +        cfg = __prepare_ICR2(cpu_physical_id(query_cpu));
   46.73          apic_write_around(APIC_ICR2, cfg);
   46.74 -		
   46.75 +
   46.76          /*
   46.77           * program the ICR
   46.78           */
   46.79 -        cfg = __prepare_ICR(0, vector);
   46.80 -			
   46.81 +        cfg = __prepare_ICR(0, vector) | APIC_DEST_PHYSICAL;
   46.82 +
   46.83          /*
   46.84           * Send the IPI. The write to APIC_ICR fires this off.
   46.85           */
    47.1 --- a/xen/arch/x86/smpboot.c	Thu Apr 06 00:59:06 2006 +0100
    47.2 +++ b/xen/arch/x86/smpboot.c	Thu Apr 06 00:59:18 2006 +0100
    47.3 @@ -1094,7 +1094,7 @@ static void __init smp_boot_cpus(unsigne
    47.4  		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
    47.5  			continue;
    47.6  
    47.7 -		if (!check_apicid_present(bit))
    47.8 +		if (!check_apicid_present(apicid))
    47.9  			continue;
   47.10  		if (max_cpus <= cpucount+1)
   47.11  			continue;
    48.1 --- a/xen/arch/x86/time.c	Thu Apr 06 00:59:06 2006 +0100
    48.2 +++ b/xen/arch/x86/time.c	Thu Apr 06 00:59:18 2006 +0100
    48.3 @@ -670,7 +670,7 @@ static inline void version_update_end(u3
    48.4      (*version)++;
    48.5  }
    48.6  
    48.7 -static inline void __update_dom_time(struct vcpu *v)
    48.8 +static inline void __update_vcpu_system_time(struct vcpu *v)
    48.9  {
   48.10      struct cpu_time       *t;
   48.11      struct vcpu_time_info *u;
   48.12 @@ -688,11 +688,21 @@ static inline void __update_dom_time(str
   48.13      version_update_end(&u->version);
   48.14  }
   48.15  
   48.16 -void update_dom_time(struct vcpu *v)
   48.17 +void update_vcpu_system_time(struct vcpu *v)
   48.18  {
   48.19      if ( v->domain->shared_info->vcpu_info[v->vcpu_id].time.tsc_timestamp != 
   48.20           cpu_time[smp_processor_id()].local_tsc_stamp )
   48.21 -        __update_dom_time(v);
   48.22 +        __update_vcpu_system_time(v);
   48.23 +}
   48.24 +
   48.25 +void update_domain_wallclock_time(struct domain *d)
   48.26 +{
   48.27 +    spin_lock(&wc_lock);
   48.28 +    version_update_begin(&d->shared_info->wc_version);
   48.29 +    d->shared_info->wc_sec  = wc_sec;
   48.30 +    d->shared_info->wc_nsec = wc_nsec;
   48.31 +    version_update_end(&d->shared_info->wc_version);
   48.32 +    spin_unlock(&wc_lock);
   48.33  }
   48.34  
   48.35  /* Set clock to <secs,usecs> after 00:00:00 UTC, 1 January, 1970. */
   48.36 @@ -701,40 +711,21 @@ void do_settime(unsigned long secs, unsi
   48.37      u64 x;
   48.38      u32 y, _wc_sec, _wc_nsec;
   48.39      struct domain *d;
   48.40 -    shared_info_t *s;
   48.41  
   48.42      x = (secs * 1000000000ULL) + (u64)nsecs - system_time_base;
   48.43      y = do_div(x, 1000000000);
   48.44  
   48.45 +    spin_lock(&wc_lock);
   48.46      wc_sec  = _wc_sec  = (u32)x;
   48.47      wc_nsec = _wc_nsec = (u32)y;
   48.48 +    spin_unlock(&wc_lock);
   48.49  
   48.50      read_lock(&domlist_lock);
   48.51 -    spin_lock(&wc_lock);
   48.52 -
   48.53      for_each_domain ( d )
   48.54 -    {
   48.55 -        s = d->shared_info;
   48.56 -        version_update_begin(&s->wc_version);
   48.57 -        s->wc_sec  = _wc_sec;
   48.58 -        s->wc_nsec = _wc_nsec;
   48.59 -        version_update_end(&s->wc_version);
   48.60 -    }
   48.61 -
   48.62 -    spin_unlock(&wc_lock);
   48.63 +        update_domain_wallclock_time(d);
   48.64      read_unlock(&domlist_lock);
   48.65  }
   48.66  
   48.67 -void init_domain_time(struct domain *d)
   48.68 -{
   48.69 -    spin_lock(&wc_lock);
   48.70 -    version_update_begin(&d->shared_info->wc_version);
   48.71 -    d->shared_info->wc_sec  = wc_sec;
   48.72 -    d->shared_info->wc_nsec = wc_nsec;
   48.73 -    version_update_end(&d->shared_info->wc_version);
   48.74 -    spin_unlock(&wc_lock);
   48.75 -}
   48.76 -
   48.77  static void local_time_calibration(void *unused)
   48.78  {
   48.79      unsigned int cpu = smp_processor_id();
   48.80 @@ -925,7 +916,7 @@ void __init early_time_init(void)
   48.81  
   48.82  void send_timer_event(struct vcpu *v)
   48.83  {
   48.84 -    send_guest_virq(v, VIRQ_TIMER);
   48.85 +    send_guest_vcpu_virq(v, VIRQ_TIMER);
   48.86  }
   48.87  
   48.88  /*
    49.1 --- a/xen/arch/x86/traps.c	Thu Apr 06 00:59:06 2006 +0100
    49.2 +++ b/xen/arch/x86/traps.c	Thu Apr 06 00:59:18 2006 +0100
    49.3 @@ -138,13 +138,13 @@ static void show_guest_stack(struct cpu_
    49.4      if ( vm86_mode(regs) )
    49.5      {
    49.6          stack = (unsigned long *)((regs->ss << 4) + (regs->esp & 0xffff));
    49.7 -        printk("Guest stack trace from ss:sp = %04x:%04x (VM86)\n   ",
    49.8 +        printk("Guest stack trace from ss:sp = %04x:%04x (VM86)\n  ",
    49.9                 regs->ss, (uint16_t)(regs->esp & 0xffff));
   49.10      }
   49.11      else
   49.12      {
   49.13          stack = (unsigned long *)regs->esp;
   49.14 -        printk("Guest stack trace from "__OP"sp=%p:\n   ", stack);
   49.15 +        printk("Guest stack trace from "__OP"sp=%p:\n  ", stack);
   49.16      }
   49.17  
   49.18      for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ )
   49.19 @@ -160,8 +160,8 @@ static void show_guest_stack(struct cpu_
   49.20              break;
   49.21          }
   49.22          if ( (i != 0) && ((i % stack_words_per_line) == 0) )
   49.23 -            printk("\n   ");
   49.24 -        printk("%p ", _p(addr));
   49.25 +            printk("\n  ");
   49.26 +        printk(" %p", _p(addr));
   49.27          stack++;
   49.28      }
   49.29      if ( i == 0 )
   49.30 @@ -257,16 +257,16 @@ void show_stack(struct cpu_user_regs *re
   49.31      if ( guest_mode(regs) )
   49.32          return show_guest_stack(regs);
   49.33  
   49.34 -    printk("Xen stack trace from "__OP"sp=%p:\n   ", stack);
   49.35 +    printk("Xen stack trace from "__OP"sp=%p:\n  ", stack);
   49.36  
   49.37      for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ )
   49.38      {
   49.39          if ( ((long)stack & (STACK_SIZE-BYTES_PER_LONG)) == 0 )
   49.40              break;
   49.41          if ( (i != 0) && ((i % stack_words_per_line) == 0) )
   49.42 -            printk("\n   ");
   49.43 +            printk("\n  ");
   49.44          addr = *stack++;
   49.45 -        printk("%p ", _p(addr));
   49.46 +        printk(" %p", _p(addr));
   49.47      }
   49.48      if ( i == 0 )
   49.49          printk("Stack empty.");
    50.1 --- a/xen/arch/x86/x86_32/Makefile	Thu Apr 06 00:59:06 2006 +0100
    50.2 +++ b/xen/arch/x86/x86_32/Makefile	Thu Apr 06 00:59:18 2006 +0100
    50.3 @@ -1,5 +1,3 @@
    50.4 -include $(BASEDIR)/Rules.mk
    50.5 -
    50.6  obj-y += domain_page.o
    50.7  obj-y += entry.o
    50.8  obj-y += mm.o
    50.9 @@ -7,5 +5,3 @@ obj-y += seg_fixup.o
   50.10  obj-y += traps.o
   50.11  
   50.12  obj-$(supervisor_mode_kernel) += supervisor_mode_kernel.o
   50.13 -
   50.14 -include $(BASEDIR)/Post.mk
    51.1 --- a/xen/arch/x86/x86_32/entry.S	Thu Apr 06 00:59:06 2006 +0100
    51.2 +++ b/xen/arch/x86/x86_32/entry.S	Thu Apr 06 00:59:18 2006 +0100
    51.3 @@ -561,7 +561,7 @@ 1:      movl  %ss:APIC_ICR(%eax),%ebx
    51.4          testl $APIC_ICR_BUSY,%ebx
    51.5          jnz   1b
    51.6          # __send_IPI_shortcut(APIC_DEST_SELF, TRAP_deferred_nmi)
    51.7 -        movl  $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_LOGICAL | \
    51.8 +        movl  $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_PHYSICAL | \
    51.9                  TRAP_deferred_nmi),%ss:APIC_ICR(%eax)
   51.10          jmp   restore_all_xen
   51.11  #endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */
    52.1 --- a/xen/arch/x86/x86_64/Makefile	Thu Apr 06 00:59:06 2006 +0100
    52.2 +++ b/xen/arch/x86/x86_64/Makefile	Thu Apr 06 00:59:18 2006 +0100
    52.3 @@ -1,7 +1,3 @@
    52.4 -include $(BASEDIR)/Rules.mk
    52.5 -
    52.6  obj-y += entry.o
    52.7  obj-y += mm.o
    52.8  obj-y += traps.o
    52.9 -
   52.10 -include $(BASEDIR)/Post.mk
    53.1 --- a/xen/common/Makefile	Thu Apr 06 00:59:06 2006 +0100
    53.2 +++ b/xen/common/Makefile	Thu Apr 06 00:59:18 2006 +0100
    53.3 @@ -1,5 +1,3 @@
    53.4 -include $(BASEDIR)/Rules.mk
    53.5 -
    53.6  obj-y += acm_ops.o
    53.7  obj-y += bitmap.o
    53.8  obj-y += dom0_ops.o
    53.9 @@ -28,7 +26,5 @@ obj-y += xmalloc.o
   53.10  obj-$(perfc)       += perfc.o
   53.11  obj-$(crash_debug) += gdbstub.o
   53.12  
   53.13 -include $(BASEDIR)/Post.mk
   53.14 -
   53.15  # Object file contains changeset and compiler information.
   53.16  kernel.o: $(BASEDIR)/include/xen/compile.h
    54.1 --- a/xen/common/domain.c	Thu Apr 06 00:59:06 2006 +0100
    54.2 +++ b/xen/common/domain.c	Thu Apr 06 00:59:18 2006 +0100
    54.3 @@ -137,7 +137,7 @@ void domain_kill(struct domain *d)
    54.4          domain_relinquish_resources(d);
    54.5          put_domain(d);
    54.6  
    54.7 -        send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
    54.8 +        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
    54.9      }
   54.10  }
   54.11  
   54.12 @@ -192,7 +192,7 @@ static void domain_shutdown_finalise(voi
   54.13  
   54.14      /* Don't set DOMF_shutdown until execution contexts are sync'ed. */
   54.15      if ( !test_and_set_bit(_DOMF_shutdown, &d->domain_flags) )
   54.16 -        send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
   54.17 +        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
   54.18  
   54.19      UNLOCK_BIGLOCK(d);
   54.20  
   54.21 @@ -267,7 +267,7 @@ void domain_pause_for_debugger(void)
   54.22      for_each_vcpu ( d, v )
   54.23          vcpu_sleep_nosync(v);
   54.24  
   54.25 -    send_guest_virq(dom0->vcpu[0], VIRQ_DEBUGGER);
   54.26 +    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
   54.27  }
   54.28  
   54.29  
   54.30 @@ -307,7 +307,7 @@ void domain_destroy(struct domain *d)
   54.31  
   54.32      free_domain(d);
   54.33  
   54.34 -    send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
   54.35 +    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
   54.36  }
   54.37  
   54.38  void vcpu_pause(struct vcpu *v)
    55.1 --- a/xen/common/event_channel.c	Thu Apr 06 00:59:06 2006 +0100
    55.2 +++ b/xen/common/event_channel.c	Thu Apr 06 00:59:18 2006 +0100
    55.3 @@ -3,7 +3,7 @@
    55.4   * 
    55.5   * Event notifications from VIRQs, PIRQs, and other domains.
    55.6   * 
    55.7 - * Copyright (c) 2003-2005, K A Fraser.
    55.8 + * Copyright (c) 2003-2006, K A Fraser.
    55.9   * 
   55.10   * This program is distributed in the hope that it will be useful,
   55.11   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   55.12 @@ -46,6 +46,28 @@
   55.13          goto out;                                                   \
   55.14      } while ( 0 )
   55.15  
   55.16 +
   55.17 +static int virq_is_global(int virq)
   55.18 +{
   55.19 +    int rc;
   55.20 +
   55.21 +    ASSERT((virq >= 0) && (virq < NR_VIRQS));
   55.22 +
   55.23 +    switch ( virq )
   55.24 +    {
   55.25 +    case VIRQ_TIMER:
   55.26 +    case VIRQ_DEBUG:
   55.27 +        rc = 0;
   55.28 +        break;
   55.29 +    default:
   55.30 +        rc = 1;
   55.31 +        break;
   55.32 +    }
   55.33 +
   55.34 +    return rc;
   55.35 +}
   55.36 +
   55.37 +
   55.38  static int get_free_port(struct domain *d)
   55.39  {
   55.40      struct evtchn *chn;
   55.41 @@ -181,6 +203,9 @@ static long evtchn_bind_virq(evtchn_bind
   55.42      if ( virq >= ARRAY_SIZE(v->virq_to_evtchn) )
   55.43          return -EINVAL;
   55.44  
   55.45 +    if ( virq_is_global(virq) && (vcpu != 0) )
   55.46 +        return -EINVAL;
   55.47 +
   55.48      if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || ((v = d->vcpu[vcpu]) == NULL) )
   55.49          return -ENOENT;
   55.50  
   55.51 @@ -360,7 +385,7 @@ static long __evtchn_close(struct domain
   55.52              rc = -EINVAL;
   55.53              goto out;
   55.54          }
   55.55 -    
   55.56 +
   55.57          port2 = chn1->u.interdomain.remote_port;
   55.58          BUG_ON(!port_is_valid(d2, port2));
   55.59  
   55.60 @@ -438,6 +463,7 @@ long evtchn_send(unsigned int lport)
   55.61      return ret;
   55.62  }
   55.63  
   55.64 +
   55.65  void evtchn_set_pending(struct vcpu *v, int port)
   55.66  {
   55.67      struct domain *d = v->domain;
   55.68 @@ -471,21 +497,48 @@ void evtchn_set_pending(struct vcpu *v, 
   55.69      }
   55.70  }
   55.71  
   55.72 -void send_guest_virq(struct vcpu *v, int virq)
   55.73 +
   55.74 +void send_guest_vcpu_virq(struct vcpu *v, int virq)
   55.75  {
   55.76 -    int port = v->virq_to_evtchn[virq];
   55.77 +    int port;
   55.78 +
   55.79 +    ASSERT(!virq_is_global(virq));
   55.80 +
   55.81 +    port = v->virq_to_evtchn[virq];
   55.82 +    if ( unlikely(port == 0) )
   55.83 +        return;
   55.84 +
   55.85 +    evtchn_set_pending(v, port);
   55.86 +}
   55.87  
   55.88 -    if ( likely(port != 0) )
   55.89 -        evtchn_set_pending(v, port);
   55.90 +void send_guest_global_virq(struct domain *d, int virq)
   55.91 +{
   55.92 +    int port;
   55.93 +    struct evtchn *chn;
   55.94 +
   55.95 +    ASSERT(virq_is_global(virq));
   55.96 +
   55.97 +    port = d->vcpu[0]->virq_to_evtchn[virq];
   55.98 +    if ( unlikely(port == 0) )
   55.99 +        return;
  55.100 +
  55.101 +    chn = evtchn_from_port(d, port);
  55.102 +    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
  55.103  }
  55.104  
  55.105 +
  55.106  void send_guest_pirq(struct domain *d, int pirq)
  55.107  {
  55.108      int port = d->pirq_to_evtchn[pirq];
  55.109 -    struct evtchn *chn = evtchn_from_port(d, port);
  55.110 +    struct evtchn *chn;
  55.111 +
  55.112 +    ASSERT(port != 0);
  55.113 +
  55.114 +    chn = evtchn_from_port(d, port);
  55.115      evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
  55.116  }
  55.117  
  55.118 +
  55.119  static long evtchn_status(evtchn_status_t *status)
  55.120  {
  55.121      struct domain   *d;
  55.122 @@ -550,6 +603,7 @@ static long evtchn_status(evtchn_status_
  55.123      return rc;
  55.124  }
  55.125  
  55.126 +
  55.127  long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
  55.128  {
  55.129      struct domain *d = current->domain;
  55.130 @@ -570,6 +624,12 @@ long evtchn_bind_vcpu(unsigned int port,
  55.131      chn = evtchn_from_port(d, port);
  55.132      switch ( chn->state )
  55.133      {
  55.134 +    case ECS_VIRQ:
  55.135 +        if ( virq_is_global(chn->u.virq) )
  55.136 +            chn->notify_vcpu_id = vcpu_id;
  55.137 +        else
  55.138 +            rc = -EINVAL;
  55.139 +        break;
  55.140      case ECS_UNBOUND:
  55.141      case ECS_INTERDOMAIN:
  55.142      case ECS_PIRQ:
  55.143 @@ -585,6 +645,7 @@ long evtchn_bind_vcpu(unsigned int port,
  55.144      return rc;
  55.145  }
  55.146  
  55.147 +
  55.148  static long evtchn_unmask(evtchn_unmask_t *unmask)
  55.149  {
  55.150      struct domain *d = current->domain;
  55.151 @@ -620,6 +681,7 @@ static long evtchn_unmask(evtchn_unmask_
  55.152      return 0;
  55.153  }
  55.154  
  55.155 +
  55.156  long do_event_channel_op(GUEST_HANDLE(evtchn_op_t) uop)
  55.157  {
  55.158      long rc;
  55.159 @@ -694,6 +756,13 @@ long do_event_channel_op(GUEST_HANDLE(ev
  55.160  }
  55.161  
  55.162  
  55.163 +void evtchn_notify_reserved_port(struct domain *d, int port)
  55.164 +{
  55.165 +    struct evtchn *chn = evtchn_from_port(d, port);
  55.166 +    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
  55.167 +}
  55.168 +
  55.169 +
  55.170  int evtchn_init(struct domain *d)
  55.171  {
  55.172      spin_lock_init(&d->evtchn_lock);
    56.1 --- a/xen/common/grant_table.c	Thu Apr 06 00:59:06 2006 +0100
    56.2 +++ b/xen/common/grant_table.c	Thu Apr 06 00:59:18 2006 +0100
    56.3 @@ -618,8 +618,10 @@ gnttab_transfer(
    56.4              return -EFAULT;
    56.5          }
    56.6  
    56.7 +        mfn = gmfn_to_mfn(d, gop.mfn);
    56.8 +
    56.9          /* Check the passed page frame for basic validity. */
   56.10 -        if ( unlikely(!mfn_valid(gop.mfn)) )
   56.11 +        if ( unlikely(!mfn_valid(mfn)) )
   56.12          { 
   56.13              DPRINTK("gnttab_transfer: out-of-range %lx\n",
   56.14                      (unsigned long)gop.mfn);
   56.15 @@ -627,7 +629,6 @@ gnttab_transfer(
   56.16              goto copyback;
   56.17          }
   56.18  
   56.19 -        mfn = gmfn_to_mfn(d, gop.mfn);
   56.20          page = mfn_to_page(mfn);
   56.21          if ( unlikely(IS_XEN_HEAP_FRAME(page)) )
   56.22          { 
    57.1 --- a/xen/common/keyhandler.c	Thu Apr 06 00:59:06 2006 +0100
    57.2 +++ b/xen/common/keyhandler.c	Thu Apr 06 00:59:18 2006 +0100
    57.3 @@ -162,7 +162,7 @@ static void dump_domains(unsigned char k
    57.4                              &d->shared_info->evtchn_mask[0]),
    57.5                     test_bit(v->virq_to_evtchn[VIRQ_DEBUG]/BITS_PER_LONG, 
    57.6                              &v->vcpu_info->evtchn_pending_sel));
    57.7 -            send_guest_virq(v, VIRQ_DEBUG);
    57.8 +            send_guest_vcpu_virq(v, VIRQ_DEBUG);
    57.9          }
   57.10      }
   57.11  
    58.1 --- a/xen/common/page_alloc.c	Thu Apr 06 00:59:06 2006 +0100
    58.2 +++ b/xen/common/page_alloc.c	Thu Apr 06 00:59:18 2006 +0100
    58.3 @@ -170,7 +170,7 @@ paddr_t init_boot_allocator(paddr_t bitm
    58.4  
    58.5  void init_boot_pages(paddr_t ps, paddr_t pe)
    58.6  {
    58.7 -    unsigned long bad_pfn;
    58.8 +    unsigned long bad_spfn, bad_epfn, i;
    58.9      char *p;
   58.10  
   58.11      ps = round_pgup(ps);
   58.12 @@ -184,18 +184,31 @@ void init_boot_pages(paddr_t ps, paddr_t
   58.13      p = opt_badpage;
   58.14      while ( *p != '\0' )
   58.15      {
   58.16 -        bad_pfn = simple_strtoul(p, &p, 0);
   58.17 +        bad_spfn = simple_strtoul(p, &p, 0);
   58.18 +        bad_epfn = bad_spfn;
   58.19 +
   58.20 +        if ( *p == '-' )
   58.21 +        {
   58.22 +            p++;
   58.23 +            bad_epfn = simple_strtoul(p, &p, 0);
   58.24 +            if ( bad_epfn < bad_spfn )
   58.25 +                bad_epfn = bad_spfn;
   58.26 +        }
   58.27  
   58.28          if ( *p == ',' )
   58.29              p++;
   58.30          else if ( *p != '\0' )
   58.31              break;
   58.32  
   58.33 -        if ( (bad_pfn < max_page) && !allocated_in_map(bad_pfn) )
   58.34 -        {
   58.35 -            printk("Marking page %lx as bad\n", bad_pfn);
   58.36 -            map_alloc(bad_pfn, 1);
   58.37 -        }
   58.38 +        if ( bad_epfn == bad_spfn )
   58.39 +            printk("Marking page %lx as bad\n", bad_spfn);
   58.40 +        else
   58.41 +            printk("Marking pages %lx through %lx as bad\n",
   58.42 +                   bad_spfn, bad_epfn);
   58.43 +
   58.44 +        for ( i = bad_spfn; i <= bad_epfn; i++ )
   58.45 +            if ( (i < max_page) && !allocated_in_map(i) )
   58.46 +                map_alloc(i, 1);
   58.47      }
   58.48  }
   58.49  
    59.1 --- a/xen/common/schedule.c	Thu Apr 06 00:59:06 2006 +0100
    59.2 +++ b/xen/common/schedule.c	Thu Apr 06 00:59:18 2006 +0100
    59.3 @@ -572,7 +572,7 @@ static void __enter_scheduler(void)
    59.4      /* Ensure that the domain has an up-to-date time base. */
    59.5      if ( !is_idle_vcpu(next) )
    59.6      {
    59.7 -        update_dom_time(next);
    59.8 +        update_vcpu_system_time(next);
    59.9          if ( next->sleep_tick != schedule_data[cpu].tick )
   59.10              send_timer_event(next);
   59.11      }
   59.12 @@ -609,7 +609,7 @@ static void t_timer_fn(void *unused)
   59.13  
   59.14      if ( !is_idle_vcpu(v) )
   59.15      {
   59.16 -        update_dom_time(v);
   59.17 +        update_vcpu_system_time(v);
   59.18          send_timer_event(v);
   59.19      }
   59.20  
   59.21 @@ -623,7 +623,7 @@ static void dom_timer_fn(void *data)
   59.22  {
   59.23      struct vcpu *v = data;
   59.24  
   59.25 -    update_dom_time(v);
   59.26 +    update_vcpu_system_time(v);
   59.27      send_timer_event(v);
   59.28  }
   59.29  
    60.1 --- a/xen/drivers/Makefile	Thu Apr 06 00:59:06 2006 +0100
    60.2 +++ b/xen/drivers/Makefile	Thu Apr 06 00:59:18 2006 +0100
    60.3 @@ -1,6 +1,2 @@
    60.4 -include $(BASEDIR)/Rules.mk
    60.5 -
    60.6  subdir-y += char
    60.7  subdir-$(HAS_ACPI) += acpi
    60.8 -
    60.9 -include $(BASEDIR)/Post.mk
    61.1 --- a/xen/drivers/acpi/Makefile	Thu Apr 06 00:59:06 2006 +0100
    61.2 +++ b/xen/drivers/acpi/Makefile	Thu Apr 06 00:59:18 2006 +0100
    61.3 @@ -1,5 +1,1 @@
    61.4 -include $(BASEDIR)/Rules.mk
    61.5 -
    61.6  obj-y += tables.o
    61.7 -
    61.8 -include $(BASEDIR)/Post.mk
    62.1 --- a/xen/drivers/char/Makefile	Thu Apr 06 00:59:06 2006 +0100
    62.2 +++ b/xen/drivers/char/Makefile	Thu Apr 06 00:59:18 2006 +0100
    62.3 @@ -1,10 +1,6 @@
    62.4 -include $(BASEDIR)/Rules.mk
    62.5 -
    62.6  obj-y += console.o
    62.7  obj-y += ns16550.o
    62.8  obj-y += serial.o
    62.9  
   62.10 -include $(BASEDIR)/Post.mk
   62.11 -
   62.12  # Object file contains changeset and compiler information.
   62.13  console.o: $(BASEDIR)/include/xen/compile.h
    63.1 --- a/xen/drivers/char/console.c	Thu Apr 06 00:59:06 2006 +0100
    63.2 +++ b/xen/drivers/char/console.c	Thu Apr 06 00:59:18 2006 +0100
    63.3 @@ -200,10 +200,11 @@ static void putchar_console(int c)
    63.4      }
    63.5      else
    63.6      {
    63.7 +        if ( xpos >= COLUMNS )
    63.8 +            put_newline();
    63.9          video[(xpos + ypos * COLUMNS) * 2]     = c & 0xFF;
   63.10          video[(xpos + ypos * COLUMNS) * 2 + 1] = ATTRIBUTE;
   63.11 -        if ( ++xpos >= COLUMNS )
   63.12 -            put_newline();
   63.13 +        ++xpos;
   63.14      }
   63.15  }
   63.16  
   63.17 @@ -293,7 +294,7 @@ static void __serial_rx(char c, struct c
   63.18      if ( (serial_rx_prod-serial_rx_cons) != SERIAL_RX_SIZE )
   63.19          serial_rx_ring[SERIAL_RX_MASK(serial_rx_prod++)] = c;
   63.20      /* Always notify the guest: prevents receive path from getting stuck. */
   63.21 -    send_guest_virq(dom0->vcpu[0], VIRQ_CONSOLE);
   63.22 +    send_guest_global_virq(dom0, VIRQ_CONSOLE);
   63.23  }
   63.24  
   63.25  static void serial_rx(char c, struct cpu_user_regs *regs)
    64.1 --- a/xen/include/asm-x86/apicdef.h	Thu Apr 06 00:59:06 2006 +0100
    64.2 +++ b/xen/include/asm-x86/apicdef.h	Thu Apr 06 00:59:18 2006 +0100
    64.3 @@ -62,6 +62,7 @@
    64.4  #define			APIC_INT_ASSERT		0x04000
    64.5  #define			APIC_ICR_BUSY		0x01000
    64.6  #define			APIC_DEST_LOGICAL	0x00800
    64.7 +#define			APIC_DEST_PHYSICAL	0x00000
    64.8  #define			APIC_DM_FIXED		0x00000
    64.9  #define			APIC_DM_LOWEST		0x00100
   64.10  #define			APIC_DM_SMI		0x00200
    65.1 --- a/xen/include/asm-x86/genapic.h	Thu Apr 06 00:59:06 2006 +0100
    65.2 +++ b/xen/include/asm-x86/genapic.h	Thu Apr 06 00:59:18 2006 +0100
    65.3 @@ -21,27 +21,6 @@ struct genapic {
    65.4  	char *name; 
    65.5  	int (*probe)(void); 
    65.6  
    65.7 -	int (*apic_id_registered)(void);
    65.8 -	cpumask_t (*target_cpus)(void);
    65.9 -	int int_delivery_mode;
   65.10 -	int int_dest_mode; 
   65.11 -	int ESR_DISABLE;
   65.12 -	int apic_destination_logical;
   65.13 -	unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
   65.14 -	unsigned long (*check_apicid_present)(int apicid); 
   65.15 -	int no_balance_irq;
   65.16 -	void (*init_apic_ldr)(void);
   65.17 -	physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
   65.18 -
   65.19 -	void (*clustered_apic_check)(void);
   65.20 -	int (*apicid_to_node)(int logical_apicid); 
   65.21 -	int (*cpu_to_logical_apicid)(int cpu);
   65.22 -	int (*cpu_present_to_apicid)(int mps_cpu);
   65.23 -	physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
   65.24 -	int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
   65.25 -	void (*enable_apic_mode)(void);
   65.26 -	u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);
   65.27 -
   65.28  	/* When one of the next two hooks returns 1 the genapic
   65.29  	   is switched to this. Essentially they are additional probe 
   65.30  	   functions. */
   65.31 @@ -49,10 +28,14 @@ struct genapic {
   65.32  			      char *productid);
   65.33  	int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
   65.34  
   65.35 +	/* Interrupt delivery parameters ('physical' vs. 'logical flat'). */
   65.36 +	int int_delivery_mode;
   65.37 +	int int_dest_mode; 
   65.38 +	void (*init_apic_ldr)(void);
   65.39 +	void (*clustered_apic_check)(void);
   65.40 +	cpumask_t (*target_cpus)(void);
   65.41  	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
   65.42 -	
   65.43 -	/* ipi */
   65.44 -	void (*send_ipi_mask)(cpumask_t mask, int vector);
   65.45 +	void (*send_IPI_mask)(cpumask_t mask, int vector);
   65.46  }; 
   65.47  
   65.48  #define APICFUNC(x) .x = x
   65.49 @@ -60,29 +43,37 @@ struct genapic {
   65.50  #define APIC_INIT(aname, aprobe) \
   65.51  	.name = aname, \
   65.52  	.probe = aprobe, \
   65.53 -	.int_delivery_mode = INT_DELIVERY_MODE, \
   65.54 -	.int_dest_mode = INT_DEST_MODE, \
   65.55 -	.no_balance_irq = NO_BALANCE_IRQ, \
   65.56 -	.ESR_DISABLE = esr_disable, \
   65.57 -	.apic_destination_logical = APIC_DEST_LOGICAL, \
   65.58 -	APICFUNC(apic_id_registered), \
   65.59 -	APICFUNC(target_cpus), \
   65.60 -	APICFUNC(check_apicid_used), \
   65.61 -	APICFUNC(check_apicid_present), \
   65.62 -	APICFUNC(init_apic_ldr), \
   65.63 -	APICFUNC(ioapic_phys_id_map), \
   65.64 -	APICFUNC(clustered_apic_check), \
   65.65 -	APICFUNC(apicid_to_node), \
   65.66 -	APICFUNC(cpu_to_logical_apicid), \
   65.67 -	APICFUNC(cpu_present_to_apicid), \
   65.68 -	APICFUNC(apicid_to_cpu_present), \
   65.69 -	APICFUNC(check_phys_apicid_present), \
   65.70  	APICFUNC(mps_oem_check), \
   65.71 -	APICFUNC(cpu_mask_to_apicid), \
   65.72 -	APICFUNC(acpi_madt_oem_check), \
   65.73 -	APICFUNC(enable_apic_mode), \
   65.74 -	APICFUNC(phys_pkg_id)
   65.75 +	APICFUNC(acpi_madt_oem_check)
   65.76  
   65.77  extern struct genapic *genapic;
   65.78  
   65.79 +void init_apic_ldr_flat(void);
   65.80 +void clustered_apic_check_flat(void);
   65.81 +cpumask_t target_cpus_flat(void);
   65.82 +unsigned int cpu_mask_to_apicid_flat(cpumask_t cpumask);
   65.83 +void send_IPI_mask_flat(cpumask_t mask, int vector);
   65.84 +#define GENAPIC_FLAT \
   65.85 +	.int_delivery_mode = dest_LowestPrio, \
   65.86 +	.int_dest_mode = 1 /* logical delivery */, \
   65.87 +	.init_apic_ldr = init_apic_ldr_flat, \
   65.88 +	.clustered_apic_check = clustered_apic_check_flat, \
   65.89 +	.target_cpus = target_cpus_flat, \
   65.90 +	.cpu_mask_to_apicid = cpu_mask_to_apicid_flat, \
   65.91 +	.send_IPI_mask = send_IPI_mask_flat
   65.92 +
   65.93 +void init_apic_ldr_phys(void);
   65.94 +void clustered_apic_check_phys(void);
   65.95 +cpumask_t target_cpus_phys(void);
   65.96 +unsigned int cpu_mask_to_apicid_phys(cpumask_t cpumask);
   65.97 +void send_IPI_mask_phys(cpumask_t mask, int vector);
   65.98 +#define GENAPIC_PHYS \
   65.99 +	.int_delivery_mode = dest_Fixed, \
  65.100 +	.int_dest_mode = 0 /* physical delivery */, \
  65.101 +	.init_apic_ldr = init_apic_ldr_phys, \
  65.102 +	.clustered_apic_check = clustered_apic_check_phys, \
  65.103 +	.target_cpus = target_cpus_phys, \
  65.104 +	.cpu_mask_to_apicid = cpu_mask_to_apicid_phys, \
  65.105 +	.send_IPI_mask = send_IPI_mask_phys
  65.106 +
  65.107  #endif
    66.1 --- a/xen/include/asm-x86/hvm/io.h	Thu Apr 06 00:59:06 2006 +0100
    66.2 +++ b/xen/include/asm-x86/hvm/io.h	Thu Apr 06 00:59:18 2006 +0100
    66.3 @@ -66,6 +66,7 @@
    66.4  #define INSTR_STOS  10
    66.5  #define INSTR_TEST  11
    66.6  #define INSTR_BT    12
    66.7 +#define INSTR_XCHG  13
    66.8  
    66.9  struct instruction {
   66.10      __s8    instr;        /* instruction type */
    67.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    67.2 +++ b/xen/include/asm-x86/ipi.h	Thu Apr 06 00:59:18 2006 +0100
    67.3 @@ -0,0 +1,8 @@
    67.4 +#ifndef __ASM_IPI_H
    67.5 +#define __ASM_IPI_H
    67.6 +
    67.7 +#include <asm/genapic.h>
    67.8 +
    67.9 +#define send_IPI_mask (genapic->send_IPI_mask)
   67.10 +
   67.11 +#endif /* __ASM_IPI_H */
    68.1 --- a/xen/include/asm-x86/mach-bigsmp/mach_apic.h	Thu Apr 06 00:59:06 2006 +0100
    68.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    68.3 @@ -1,138 +0,0 @@
    68.4 -#ifndef __ASM_MACH_APIC_H
    68.5 -#define __ASM_MACH_APIC_H
    68.6 -
    68.7 -
    68.8 -extern u8 bios_cpu_apicid[];
    68.9 -
   68.10 -#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu])
   68.11 -#define esr_disable (1)
   68.12 -
   68.13 -static inline int apic_id_registered(void)
   68.14 -{
   68.15 -	return (1);
   68.16 -}
   68.17 -
   68.18 -/* Round robin the irqs amoung the online cpus */
   68.19 -static inline cpumask_t target_cpus(void)
   68.20 -{ 
   68.21 -	static unsigned long cpu = NR_CPUS;
   68.22 -	do {
   68.23 -		if (cpu >= NR_CPUS)
   68.24 -			cpu = first_cpu(cpu_online_map);
   68.25 -		else
   68.26 -			cpu = next_cpu(cpu, cpu_online_map);
   68.27 -	} while (cpu >= NR_CPUS);
   68.28 -	return cpumask_of_cpu(cpu);
   68.29 -}
   68.30 -
   68.31 -#undef APIC_DEST_LOGICAL
   68.32 -#define APIC_DEST_LOGICAL 	0
   68.33 -#define TARGET_CPUS		(target_cpus())
   68.34 -#define APIC_DFR_VALUE		(APIC_DFR_FLAT)
   68.35 -#define INT_DELIVERY_MODE	(dest_Fixed)
   68.36 -#define INT_DEST_MODE		(0)    /* phys delivery to target proc */
   68.37 -#define NO_BALANCE_IRQ		(0)
   68.38 -#define WAKE_SECONDARY_VIA_INIT
   68.39 -
   68.40 -
   68.41 -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
   68.42 -{
   68.43 -	return (0);
   68.44 -}
   68.45 -
   68.46 -static inline unsigned long check_apicid_present(int bit)
   68.47 -{
   68.48 -	return (1);
   68.49 -}
   68.50 -
   68.51 -static inline unsigned long calculate_ldr(int cpu)
   68.52 -{
   68.53 -	unsigned long val, id;
   68.54 -	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
   68.55 -	id = xapic_phys_to_log_apicid(cpu);
   68.56 -	val |= SET_APIC_LOGICAL_ID(id);
   68.57 -	return val;
   68.58 -}
   68.59 -
   68.60 -/*
   68.61 - * Set up the logical destination ID.
   68.62 - *
   68.63 - * Intel recommends to set DFR, LDR and TPR before enabling
   68.64 - * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
   68.65 - * document number 292116).  So here it goes...
   68.66 - */
   68.67 -static inline void init_apic_ldr(void)
   68.68 -{
   68.69 -	unsigned long val;
   68.70 -	int cpu = smp_processor_id();
   68.71 -
   68.72 -	apic_write_around(APIC_DFR, APIC_DFR_VALUE);
   68.73 -	val = calculate_ldr(cpu);
   68.74 -	apic_write_around(APIC_LDR, val);
   68.75 -}
   68.76 -
   68.77 -static inline void clustered_apic_check(void)
   68.78 -{
   68.79 -	printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
   68.80 -		"Physflat", nr_ioapics);
   68.81 -}
   68.82 -
   68.83 -static inline int apicid_to_node(int logical_apicid)
   68.84 -{
   68.85 -	return (0);
   68.86 -}
   68.87 -
   68.88 -static inline int cpu_present_to_apicid(int mps_cpu)
   68.89 -{
   68.90 -	if (mps_cpu < NR_CPUS)
   68.91 -		return (int) bios_cpu_apicid[mps_cpu];
   68.92 -
   68.93 -	return BAD_APICID;
   68.94 -}
   68.95 -
   68.96 -static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
   68.97 -{
   68.98 -	return physid_mask_of_physid(phys_apicid);
   68.99 -}
  68.100 -
  68.101 -extern u8 cpu_2_logical_apicid[];
  68.102 -/* Mapping from cpu number to logical apicid */
  68.103 -static inline int cpu_to_logical_apicid(int cpu)
  68.104 -{
  68.105 -	if (cpu >= NR_CPUS)
  68.106 -		return BAD_APICID;
  68.107 -	return cpu_physical_id(cpu);
  68.108 -}
  68.109 -
  68.110 -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
  68.111 -{
  68.112 -	/* For clustered we don't have a good way to do this yet - hack */
  68.113 -	return physids_promote(0xFFL);
  68.114 -}
  68.115 -
  68.116 -static inline void enable_apic_mode(void)
  68.117 -{
  68.118 -}
  68.119 -
  68.120 -static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
  68.121 -{
  68.122 -	return (1);
  68.123 -}
  68.124 -
  68.125 -/* As we are using single CPU as destination, pick only one CPU here */
  68.126 -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
  68.127 -{
  68.128 -	int cpu;
  68.129 -	int apicid;	
  68.130 -
  68.131 -	cpu = first_cpu(cpumask);
  68.132 -	apicid = cpu_to_logical_apicid(cpu);
  68.133 -	return apicid;
  68.134 -}
  68.135 -
  68.136 -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
  68.137 -{
  68.138 -	return cpuid_apic >> index_msb;
  68.139 -}
  68.140 -
  68.141 -#endif /* __ASM_MACH_APIC_H */
    69.1 --- a/xen/include/asm-x86/mach-default/mach_apic.h	Thu Apr 06 00:59:06 2006 +0100
    69.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    69.3 @@ -1,110 +0,0 @@
    69.4 -#ifndef __ASM_MACH_APIC_H
    69.5 -#define __ASM_MACH_APIC_H
    69.6 -
    69.7 -#include <asm/smp.h>
    69.8 -
    69.9 -#define APIC_DFR_VALUE	(APIC_DFR_FLAT)
   69.10 -
   69.11 -static inline cpumask_t target_cpus(void)
   69.12 -{ 
   69.13 -#ifdef CONFIG_SMP
   69.14 -	return cpu_online_map;
   69.15 -#else
   69.16 -	return cpumask_of_cpu(0);
   69.17 -#endif
   69.18 -} 
   69.19 -#define TARGET_CPUS (target_cpus())
   69.20 -
   69.21 -#define NO_BALANCE_IRQ (0)
   69.22 -#define esr_disable (0)
   69.23 -
   69.24 -#define INT_DELIVERY_MODE dest_LowestPrio
   69.25 -#define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
   69.26 -
   69.27 -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
   69.28 -{
   69.29 -	return physid_isset(apicid, bitmap);
   69.30 -}
   69.31 -
   69.32 -static inline unsigned long check_apicid_present(int bit)
   69.33 -{
   69.34 -	return physid_isset(bit, phys_cpu_present_map);
   69.35 -}
   69.36 -
   69.37 -/*
   69.38 - * Set up the logical destination ID.
   69.39 - *
   69.40 - * Intel recommends to set DFR, LDR and TPR before enabling
   69.41 - * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
   69.42 - * document number 292116).  So here it goes...
   69.43 - */
   69.44 -static inline void init_apic_ldr(void)
   69.45 -{
   69.46 -	unsigned long val;
   69.47 -
   69.48 -	apic_write_around(APIC_DFR, APIC_DFR_VALUE);
   69.49 -	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
   69.50 -	val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
   69.51 -	apic_write_around(APIC_LDR, val);
   69.52 -}
   69.53 -
   69.54 -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
   69.55 -{
   69.56 -	return phys_map;
   69.57 -}
   69.58 -
   69.59 -static inline void clustered_apic_check(void)
   69.60 -{
   69.61 -	printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
   69.62 -					"Flat", nr_ioapics);
   69.63 -}
   69.64 -
   69.65 -static inline int apicid_to_node(int logical_apicid)
   69.66 -{
   69.67 -	return 0;
   69.68 -}
   69.69 -
   69.70 -/* Mapping from cpu number to logical apicid */
   69.71 -static inline int cpu_to_logical_apicid(int cpu)
   69.72 -{
   69.73 -	return 1 << cpu;
   69.74 -}
   69.75 -
   69.76 -static inline int cpu_present_to_apicid(int mps_cpu)
   69.77 -{
   69.78 -	if (mps_cpu < get_physical_broadcast())
   69.79 -		return  mps_cpu;
   69.80 -	else
   69.81 -		return BAD_APICID;
   69.82 -}
   69.83 -
   69.84 -static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
   69.85 -{
   69.86 -	return physid_mask_of_physid(phys_apicid);
   69.87 -}
   69.88 -
   69.89 -static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
   69.90 -{
   69.91 -	return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
   69.92 -}
   69.93 -
   69.94 -static inline int apic_id_registered(void)
   69.95 -{
   69.96 -	return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
   69.97 -}
   69.98 -
   69.99 -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
  69.100 -{
  69.101 -	return cpus_addr(cpumask)[0];
  69.102 -}
  69.103 -
  69.104 -static inline void enable_apic_mode(void)
  69.105 -{
  69.106 -}
  69.107 -
  69.108 -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
  69.109 -{
  69.110 -	return cpuid_apic >> index_msb;
  69.111 -}
  69.112 -
  69.113 -#endif /* __ASM_MACH_APIC_H */
    70.1 --- a/xen/include/asm-x86/mach-es7000/mach_apic.h	Thu Apr 06 00:59:06 2006 +0100
    70.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    70.3 @@ -1,185 +0,0 @@
    70.4 -#ifndef __ASM_MACH_APIC_H
    70.5 -#define __ASM_MACH_APIC_H
    70.6 -
    70.7 -extern u8 bios_cpu_apicid[];
    70.8 -
    70.9 -#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu])
   70.10 -#define esr_disable (1)
   70.11 -
   70.12 -static inline int apic_id_registered(void)
   70.13 -{
   70.14 -	        return (1);
   70.15 -}
   70.16 -
   70.17 -static inline cpumask_t target_cpus(void)
   70.18 -{ 
   70.19 -#if defined CONFIG_ES7000_CLUSTERED_APIC
   70.20 -	return CPU_MASK_ALL;
   70.21 -#else
   70.22 -	return cpumask_of_cpu(smp_processor_id());
   70.23 -#endif
   70.24 -}
   70.25 -#define TARGET_CPUS	(target_cpus())
   70.26 -
   70.27 -#if defined CONFIG_ES7000_CLUSTERED_APIC
   70.28 -#define APIC_DFR_VALUE		(APIC_DFR_CLUSTER)
   70.29 -#define INT_DELIVERY_MODE	(dest_LowestPrio)
   70.30 -#define INT_DEST_MODE		(1)    /* logical delivery broadcast to all procs */
   70.31 -#define NO_BALANCE_IRQ 		(1)
   70.32 -#undef  WAKE_SECONDARY_VIA_INIT
   70.33 -#define WAKE_SECONDARY_VIA_MIP
   70.34 -#else
   70.35 -#define APIC_DFR_VALUE		(APIC_DFR_FLAT)
   70.36 -#define INT_DELIVERY_MODE	(dest_Fixed)
   70.37 -#define INT_DEST_MODE		(0)    /* phys delivery to target procs */
   70.38 -#define NO_BALANCE_IRQ 		(0)
   70.39 -#undef  APIC_DEST_LOGICAL
   70.40 -#define APIC_DEST_LOGICAL	0x0
   70.41 -#define WAKE_SECONDARY_VIA_INIT
   70.42 -#endif
   70.43 -
   70.44 -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
   70.45 -{ 
   70.46 -	return 0;
   70.47 -} 
   70.48 -static inline unsigned long check_apicid_present(int bit) 
   70.49 -{
   70.50 -	return physid_isset(bit, phys_cpu_present_map);
   70.51 -}
   70.52 -
   70.53 -#define apicid_cluster(apicid) (apicid & 0xF0)
   70.54 -
   70.55 -static inline unsigned long calculate_ldr(int cpu)
   70.56 -{
   70.57 -	unsigned long id;
   70.58 -	id = xapic_phys_to_log_apicid(cpu);
   70.59 -	return (SET_APIC_LOGICAL_ID(id));
   70.60 -}
   70.61 -
   70.62 -/*
   70.63 - * Set up the logical destination ID.
   70.64 - *
   70.65 - * Intel recommends to set DFR, LdR and TPR before enabling
   70.66 - * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
   70.67 - * document number 292116).  So here it goes...
   70.68 - */
   70.69 -static inline void init_apic_ldr(void)
   70.70 -{
   70.71 -	unsigned long val;
   70.72 -	int cpu = smp_processor_id();
   70.73 -
   70.74 -	apic_write_around(APIC_DFR, APIC_DFR_VALUE);
   70.75 -	val = calculate_ldr(cpu);
   70.76 -	apic_write_around(APIC_LDR, val);
   70.77 -}
   70.78 -
   70.79 -extern void es7000_sw_apic(void);
   70.80 -static inline void enable_apic_mode(void)
   70.81 -{
   70.82 -	es7000_sw_apic();
   70.83 -	return;
   70.84 -}
   70.85 -
   70.86 -extern int apic_version [MAX_APICS];
   70.87 -static inline void clustered_apic_check(void)
   70.88 -{
   70.89 -	int apic = bios_cpu_apicid[smp_processor_id()];
   70.90 -	printk("Enabling APIC mode:  %s.  Using %d I/O APICs, target cpus %lx\n",
   70.91 -		(apic_version[apic] == 0x14) ? 
   70.92 -		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]);
   70.93 -}
   70.94 -
   70.95 -static inline int apicid_to_node(int logical_apicid)
   70.96 -{
   70.97 -	return 0;
   70.98 -}
   70.99 -
  70.100 -
  70.101 -static inline int cpu_present_to_apicid(int mps_cpu)
  70.102 -{
  70.103 -	if (!mps_cpu)
  70.104 -		return boot_cpu_physical_apicid;
  70.105 -	else if (mps_cpu < NR_CPUS)
  70.106 -		return (int) bios_cpu_apicid[mps_cpu];
  70.107 -	else
  70.108 -		return BAD_APICID;
  70.109 -}
  70.110 -
  70.111 -static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
  70.112 -{
  70.113 -	static int id = 0;
  70.114 -	physid_mask_t mask;
  70.115 -	mask = physid_mask_of_physid(id);
  70.116 -	++id;
  70.117 -	return mask;
  70.118 -}
  70.119 -
  70.120 -extern u8 cpu_2_logical_apicid[];
  70.121 -/* Mapping from cpu number to logical apicid */
  70.122 -static inline int cpu_to_logical_apicid(int cpu)
  70.123 -{
  70.124 -       if (cpu >= NR_CPUS)
  70.125 -	       return BAD_APICID;
  70.126 -       return (int)cpu_2_logical_apicid[cpu];
  70.127 -}
  70.128 -
  70.129 -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
  70.130 -{
  70.131 -	/* For clustered we don't have a good way to do this yet - hack */
  70.132 -	return physids_promote(0xff);
  70.133 -}
  70.134 -
  70.135 -extern unsigned int boot_cpu_physical_apicid;
  70.136 -static inline int check_phys_apicid_present(int cpu_physical_apicid)
  70.137 -{
  70.138 -	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
  70.139 -	return (1);
  70.140 -}
  70.141 -
  70.142 -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
  70.143 -{
  70.144 -	int num_bits_set;
  70.145 -	int cpus_found = 0;
  70.146 -	int cpu;
  70.147 -	int apicid;	
  70.148 -
  70.149 -	num_bits_set = cpus_weight(cpumask);
  70.150 -	/* Return id to all */
  70.151 -	if (num_bits_set == NR_CPUS)
  70.152 -#if defined CONFIG_ES7000_CLUSTERED_APIC
  70.153 -		return 0xFF;
  70.154 -#else
  70.155 -		return cpu_to_logical_apicid(0);
  70.156 -#endif
  70.157 -	/* 
  70.158 -	 * The cpus in the mask must all be on the apic cluster.  If are not 
  70.159 -	 * on the same apicid cluster return default value of TARGET_CPUS. 
  70.160 -	 */
  70.161 -	cpu = first_cpu(cpumask);
  70.162 -	apicid = cpu_to_logical_apicid(cpu);
  70.163 -	while (cpus_found < num_bits_set) {
  70.164 -		if (cpu_isset(cpu, cpumask)) {
  70.165 -			int new_apicid = cpu_to_logical_apicid(cpu);
  70.166 -			if (apicid_cluster(apicid) != 
  70.167 -					apicid_cluster(new_apicid)){
  70.168 -				printk ("%s: Not a valid mask!\n",__FUNCTION__);
  70.169 -#if defined CONFIG_ES7000_CLUSTERED_APIC
  70.170 -				return 0xFF;
  70.171 -#else
  70.172 -				return cpu_to_logical_apicid(0);
  70.173 -#endif
  70.174 -			}
  70.175 -			apicid = new_apicid;
  70.176 -			cpus_found++;
  70.177 -		}
  70.178 -		cpu++;
  70.179 -	}
  70.180 -	return apicid;
  70.181 -}
  70.182 -
  70.183 -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
  70.184 -{
  70.185 -	return cpuid_apic >> index_msb;
  70.186 -}
  70.187 -
  70.188 -#endif /* __ASM_MACH_APIC_H */
    71.1 --- a/xen/include/asm-x86/mach-generic/mach_apic.h	Thu Apr 06 00:59:06 2006 +0100
    71.2 +++ b/xen/include/asm-x86/mach-generic/mach_apic.h	Thu Apr 06 00:59:18 2006 +0100
    71.3 @@ -2,28 +2,40 @@
    71.4  #define __ASM_MACH_APIC_H
    71.5  
    71.6  #include <asm/genapic.h>
    71.7 +#include <asm/smp.h>
    71.8  
    71.9 -#define esr_disable (genapic->ESR_DISABLE)
   71.10 -#define NO_BALANCE_IRQ (genapic->no_balance_irq)
   71.11 +/* ESR was originally disabled in Linux for NUMA-Q. Do we really need to? */
   71.12 +#define esr_disable (0)
   71.13 +
   71.14 +/* The following are dependent on APIC delivery mode (logical vs. physical). */
   71.15  #define INT_DELIVERY_MODE (genapic->int_delivery_mode)
   71.16  #define INT_DEST_MODE (genapic->int_dest_mode)
   71.17 -#undef APIC_DEST_LOGICAL
   71.18 -#define APIC_DEST_LOGICAL (genapic->apic_destination_logical)
   71.19  #define TARGET_CPUS	  (genapic->target_cpus())
   71.20 -#define apic_id_registered (genapic->apic_id_registered)
   71.21  #define init_apic_ldr (genapic->init_apic_ldr)
   71.22 -#define ioapic_phys_id_map (genapic->ioapic_phys_id_map)
   71.23  #define clustered_apic_check (genapic->clustered_apic_check) 
   71.24 -#define apicid_to_node (genapic->apicid_to_node)
   71.25 -#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) 
   71.26 -#define cpu_present_to_apicid (genapic->cpu_present_to_apicid)
   71.27 -#define apicid_to_cpu_present (genapic->apicid_to_cpu_present)
   71.28 -#define check_apicid_present (genapic->check_apicid_present)
   71.29 -#define check_phys_apicid_present (genapic->check_phys_apicid_present)
   71.30 -#define check_apicid_used (genapic->check_apicid_used)
   71.31  #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
   71.32 -#define enable_apic_mode (genapic->enable_apic_mode)
   71.33 -#define phys_pkg_id (genapic->phys_pkg_id)
   71.34 +
   71.35 +extern void es7000_sw_apic(void);
   71.36 +static inline void enable_apic_mode(void)
   71.37 +{
   71.38 +	es7000_sw_apic();
   71.39 +	return;
   71.40 +}
   71.41 +
   71.42 +/* No sane NUMA support right now. We should parse ACPI SRAT. */
   71.43 +static inline int apicid_to_node(int logical_apicid)
   71.44 +{
   71.45 +	return 0;
   71.46 +}
   71.47 +
   71.48 +extern u8 bios_cpu_apicid[];
   71.49 +static inline int cpu_present_to_apicid(int mps_cpu)
   71.50 +{
   71.51 +	if (mps_cpu < NR_CPUS)
   71.52 +		return (int)bios_cpu_apicid[mps_cpu];
   71.53 +	else
   71.54 +		return BAD_APICID;
   71.55 +}
   71.56  
   71.57  static inline int mpc_apic_id(struct mpc_config_processor *m, 
   71.58  			struct mpc_config_translation *translation_record)
   71.59 @@ -47,4 +59,41 @@ static inline int multi_timer_check(int 
   71.60  
   71.61  extern void generic_bigsmp_probe(void);
   71.62  
   71.63 +/*
   71.64 + * The following functions based around phys_cpu_present_map are disabled in
   71.65 + * some i386 Linux subarchitectures, and in x86_64 'cluster' genapic mode. I'm
   71.66 + * really not sure why, since all local APICs should have distinct physical
   71.67 + * IDs, and we need to know what they are.
   71.68 + */
   71.69 +static inline int apic_id_registered(void)
   71.70 +{
   71.71 +	return physid_isset(GET_APIC_ID(apic_read(APIC_ID)),
   71.72 +			    phys_cpu_present_map);
   71.73 +}
   71.74 +
   71.75 +static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
   71.76 +{
   71.77 +	return phys_map;
   71.78 +}
   71.79 +
   71.80 +static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
   71.81 +{
   71.82 +	return physid_isset(apicid, bitmap);
   71.83 +}
   71.84 +
   71.85 +static inline unsigned long check_apicid_present(int apicid)
   71.86 +{
   71.87 +	return physid_isset(apicid, phys_cpu_present_map);
   71.88 +}
   71.89 +
   71.90 +static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
   71.91 +{
   71.92 +	return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
   71.93 +}
   71.94 +
   71.95 +static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
   71.96 +{
   71.97 +	return physid_mask_of_physid(phys_apicid);
   71.98 +}
   71.99 +
  71.100  #endif /* __ASM_MACH_APIC_H */
    72.1 --- a/xen/include/asm-x86/mach-summit/mach_apic.h	Thu Apr 06 00:59:06 2006 +0100
    72.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    72.3 @@ -1,167 +0,0 @@
    72.4 -#ifndef __ASM_MACH_APIC_H
    72.5 -#define __ASM_MACH_APIC_H
    72.6 -
    72.7 -#include <xen/config.h>
    72.8 -#include <asm/smp.h>
    72.9 -
   72.10 -#define esr_disable (1)
   72.11 -#define NO_BALANCE_IRQ (0)
   72.12 -
   72.13 -/* In clustered mode, the high nibble of APIC ID is a cluster number.
   72.14 - * The low nibble is a 4-bit bitmap. */
   72.15 -#define XAPIC_DEST_CPUS_SHIFT	4
   72.16 -#define XAPIC_DEST_CPUS_MASK	((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
   72.17 -#define XAPIC_DEST_CLUSTER_MASK	(XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
   72.18 -
   72.19 -#define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
   72.20 -
   72.21 -static inline cpumask_t target_cpus(void)
   72.22 -{
   72.23 -	/* CPU_MASK_ALL (0xff) has undefined behaviour with
   72.24 -	 * dest_LowestPrio mode logical clustered apic interrupt routing
   72.25 -	 * Just start on cpu 0.  IRQ balancing will spread load
   72.26 -	 */
   72.27 -	return cpumask_of_cpu(0);
   72.28 -} 
   72.29 -#define TARGET_CPUS	(target_cpus())
   72.30 -
   72.31 -#define INT_DELIVERY_MODE (dest_LowestPrio)
   72.32 -#define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
   72.33 -
   72.34 -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
   72.35 -{
   72.36 -	return 0;
   72.37 -} 
   72.38 -
   72.39 -/* we don't use the phys_cpu_present_map to indicate apicid presence */
   72.40 -static inline unsigned long check_apicid_present(int bit) 
   72.41 -{
   72.42 -	return 1;
   72.43 -}
   72.44 -
   72.45 -#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
   72.46 -
   72.47 -extern u8 bios_cpu_apicid[];
   72.48 -extern u8 cpu_2_logical_apicid[];
   72.49 -
   72.50 -static inline void init_apic_ldr(void)
   72.51 -{
   72.52 -	unsigned long val, id;
   72.53 -	int i, count;
   72.54 -	u8 lid;
   72.55 -	u8 my_id = (u8)hard_smp_processor_id();
   72.56 -	u8 my_cluster = (u8)apicid_cluster(my_id);
   72.57 -
   72.58 -	/* Create logical APIC IDs by counting CPUs already in cluster. */
   72.59 -	for (count = 0, i = NR_CPUS; --i >= 0; ) {
   72.60 -		lid = cpu_2_logical_apicid[i];
   72.61 -		if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
   72.62 -			++count;
   72.63 -	}
   72.64 -	/* We only have a 4 wide bitmap in cluster mode.  If a deranged
   72.65 -	 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
   72.66 -	BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
   72.67 -	id = my_cluster | (1UL << count);
   72.68 -	apic_write_around(APIC_DFR, APIC_DFR_VALUE);
   72.69 -	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
   72.70 -	val |= SET_APIC_LOGICAL_ID(id);
   72.71 -	apic_write_around(APIC_LDR, val);
   72.72 -}
   72.73 -
   72.74 -static inline int apic_id_registered(void)
   72.75 -{
   72.76 -	return 1;
   72.77 -}
   72.78 -
   72.79 -static inline void clustered_apic_check(void)
   72.80 -{
   72.81 -	printk("Enabling APIC mode:  Summit.  Using %d I/O APICs\n",
   72.82 -						nr_ioapics);
   72.83 -}
   72.84 -
   72.85 -static inline int apicid_to_node(int logical_apicid)
   72.86 -{
   72.87 -	return logical_apicid >> 5;          /* 2 clusterids per CEC */
   72.88 -}
   72.89 -
   72.90 -/* Mapping from cpu number to logical apicid */
   72.91 -static inline int cpu_to_logical_apicid(int cpu)
   72.92 -{
   72.93 -       if (cpu >= NR_CPUS)
   72.94 -	       return BAD_APICID;
   72.95 -	return (int)cpu_2_logical_apicid[cpu];
   72.96 -}
   72.97 -
   72.98 -static inline int cpu_present_to_apicid(int mps_cpu)
   72.99 -{
  72.100 -	if (mps_cpu < NR_CPUS)
  72.101 -		return (int)bios_cpu_apicid[mps_cpu];
  72.102 -	else
  72.103 -		return BAD_APICID;
  72.104 -}
  72.105 -
  72.106 -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
  72.107 -{
  72.108 -	/* For clustered we don't have a good way to do this yet - hack */
  72.109 -	return physids_promote(0x0F);
  72.110 -}
  72.111 -
  72.112 -static inline physid_mask_t apicid_to_cpu_present(int apicid)
  72.113 -{
  72.114 -	return physid_mask_of_physid(0);
  72.115 -}
  72.116 -
  72.117 -static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
  72.118 -{
  72.119 -	return 1;
  72.120 -}
  72.121 -
  72.122 -static inline void enable_apic_mode(void)
  72.123 -{
  72.124 -}
  72.125 -
  72.126 -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
  72.127 -{
  72.128 -	int num_bits_set;
  72.129 -	int cpus_found = 0;
  72.130 -	int cpu;
  72.131 -	int apicid;	
  72.132 -
  72.133 -	num_bits_set = cpus_weight(cpumask);
  72.134 -	/* Return id to all */
  72.135 -	if (num_bits_set == NR_CPUS)
  72.136 -		return (int) 0xFF;
  72.137 -	/* 
  72.138 -	 * The cpus in the mask must all be on the apic cluster.  If are not 
  72.139 -	 * on the same apicid cluster return default value of TARGET_CPUS. 
  72.140 -	 */
  72.141 -	cpu = first_cpu(cpumask);
  72.142 -	apicid = cpu_to_logical_apicid(cpu);
  72.143 -	while (cpus_found < num_bits_set) {
  72.144 -		if (cpu_isset(cpu, cpumask)) {
  72.145 -			int new_apicid = cpu_to_logical_apicid(cpu);
  72.146 -			if (apicid_cluster(apicid) != 
  72.147 -					apicid_cluster(new_apicid)){
  72.148 -				printk ("%s: Not a valid mask!\n",__FUNCTION__);
  72.149 -				return 0xFF;
  72.150 -			}
  72.151 -			apicid = apicid | new_apicid;
  72.152 -			cpus_found++;
  72.153 -		}
  72.154 -		cpu++;
  72.155 -	}
  72.156 -	return apicid;
  72.157 -}
  72.158 -
  72.159 -/* cpuid returns the value latched in the HW at reset, not the APIC ID
  72.160 - * register's value.  For any box whose BIOS changes APIC IDs, like
  72.161 - * clustered APIC systems, we must use hard_smp_processor_id.
  72.162 - *
  72.163 - * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
  72.164 - */
  72.165 -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
  72.166 -{
  72.167 -	return hard_smp_processor_id() >> index_msb;
  72.168 -}
  72.169 -
  72.170 -#endif /* __ASM_MACH_APIC_H */
    73.1 --- a/xen/include/asm-x86/mach-summit/mach_mpparse.h	Thu Apr 06 00:59:06 2006 +0100
    73.2 +++ b/xen/include/asm-x86/mach-summit/mach_mpparse.h	Thu Apr 06 00:59:18 2006 +0100
    73.3 @@ -1,8 +1,6 @@
    73.4  #ifndef __ASM_MACH_MPPARSE_H
    73.5  #define __ASM_MACH_MPPARSE_H
    73.6  
    73.7 -#include <mach_apic.h>
    73.8 -
    73.9  extern int use_cyclone;
   73.10  
   73.11  #ifdef CONFIG_X86_SUMMIT_NUMA
    74.1 --- a/xen/include/asm-x86/mach_ipi.h	Thu Apr 06 00:59:06 2006 +0100
    74.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    74.3 @@ -1,11 +0,0 @@
    74.4 -#ifndef __ASM_MACH_IPI_H
    74.5 -#define __ASM_MACH_IPI_H
    74.6 -
    74.7 -#include <asm/genapic.h>
    74.8 -
    74.9 -void send_IPI_mask_bitmask(cpumask_t mask, int vector);
   74.10 -void send_IPI_mask_sequence(cpumask_t mask, int vector);
   74.11 -
   74.12 -#define send_IPI_mask (genapic->send_ipi_mask)
   74.13 -
   74.14 -#endif /* __ASM_MACH_IPI_H */
    75.1 --- a/xen/include/asm-x86/time.h	Thu Apr 06 00:59:06 2006 +0100
    75.2 +++ b/xen/include/asm-x86/time.h	Thu Apr 06 00:59:18 2006 +0100
    75.3 @@ -7,9 +7,6 @@
    75.4  extern void calibrate_tsc_bp(void);
    75.5  extern void calibrate_tsc_ap(void);
    75.6  
    75.7 -struct domain;
    75.8 -extern void init_domain_time(struct domain *d);
    75.9 -
   75.10  typedef u64 cycles_t;
   75.11  
   75.12  static inline cycles_t get_cycles(void)
    76.1 --- a/xen/include/public/event_channel.h	Thu Apr 06 00:59:06 2006 +0100
    76.2 +++ b/xen/include/public/event_channel.h	Thu Apr 06 00:59:18 2006 +0100
    76.3 @@ -50,9 +50,13 @@ typedef struct evtchn_bind_interdomain {
    76.4   * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
    76.5   * vcpu.
    76.6   * NOTES:
    76.7 - *  1. A virtual IRQ may be bound to at most one event channel per vcpu.
    76.8 - *  2. The allocated event channel is bound to the specified vcpu. The binding
    76.9 - *     may not be changed.
   76.10 + *  1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list
   76.11 + *     in xen.h for the classification of each VIRQ.
   76.12 + *  2. Global VIRQs must be allocated on VCPU0 but can subsequently be
   76.13 + *     re-bound via EVTCHNOP_bind_vcpu.
   76.14 + *  3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu.
   76.15 + *     The allocated event channel is bound to the specified vcpu and the
   76.16 + *     binding cannot be changed.
   76.17   */
   76.18  #define EVTCHNOP_bind_virq        1
   76.19  typedef struct evtchn_bind_virq {
   76.20 @@ -152,9 +156,11 @@ typedef struct evtchn_status {
   76.21   * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
   76.22   * event is pending.
   76.23   * NOTES:
   76.24 - *  1. IPI- and VIRQ-bound channels always notify the vcpu that initialised
   76.25 - *     the binding. This binding cannot be changed.
   76.26 - *  2. All other channels notify vcpu0 by default. This default is set when
   76.27 + *  1. IPI-bound channels always notify the vcpu specified at bind time.
   76.28 + *     This binding cannot be changed.
   76.29 + *  2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time.
   76.30 + *     This binding cannot be changed.
   76.31 + *  3. All other channels notify vcpu0 by default. This default is set when
   76.32   *     the channel is allocated (a port that is freed and subsequently reused
   76.33   *     has its binding reset to vcpu0).
   76.34   */
    77.1 --- a/xen/include/public/hvm/ioreq.h	Thu Apr 06 00:59:06 2006 +0100
    77.2 +++ b/xen/include/public/hvm/ioreq.h	Thu Apr 06 00:59:18 2006 +0100
    77.3 @@ -34,6 +34,7 @@
    77.4  #define IOREQ_TYPE_AND          2
    77.5  #define IOREQ_TYPE_OR           3
    77.6  #define IOREQ_TYPE_XOR          4
    77.7 +#define IOREQ_TYPE_XCHG         5
    77.8  
    77.9  /*
   77.10   * VMExit dispatcher should cooperate with instruction decoder to
    78.1 --- a/xen/include/public/xen.h	Thu Apr 06 00:59:06 2006 +0100
    78.2 +++ b/xen/include/public/xen.h	Thu Apr 06 00:59:18 2006 +0100
    78.3 @@ -65,12 +65,17 @@
    78.4   * VIRTUAL INTERRUPTS
    78.5   * 
    78.6   * Virtual interrupts that a guest OS may receive from Xen.
    78.7 + * 
    78.8 + * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
    78.9 + * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
   78.10 + * The latter can be allocated only once per guest: they must initially be
   78.11 + * allocated to VCPU0 but can subsequently be re-bound.
   78.12   */
   78.13 -#define VIRQ_TIMER      0  /* Timebase update, and/or requested timeout.  */
   78.14 -#define VIRQ_DEBUG      1  /* Request guest to dump debug info.           */
   78.15 -#define VIRQ_CONSOLE    2  /* (DOM0) Bytes received on emergency console. */
   78.16 -#define VIRQ_DOM_EXC    3  /* (DOM0) Exceptional event for some domain.   */
   78.17 -#define VIRQ_DEBUGGER   6  /* (DOM0) A domain has paused for debugging.   */
   78.18 +#define VIRQ_TIMER      0  /* V. Timebase update, and/or requested timeout.  */
   78.19 +#define VIRQ_DEBUG      1  /* V. Request guest to dump debug info.           */
   78.20 +#define VIRQ_CONSOLE    2  /* G. (DOM0) Bytes received on emergency console. */
   78.21 +#define VIRQ_DOM_EXC    3  /* G. (DOM0) Exceptional event for some domain.   */
   78.22 +#define VIRQ_DEBUGGER   6  /* G. (DOM0) A domain has paused for debugging.   */
   78.23  #define NR_VIRQS        8
   78.24  
   78.25  /*
    79.1 --- a/xen/include/xen/event.h	Thu Apr 06 00:59:06 2006 +0100
    79.2 +++ b/xen/include/xen/event.h	Thu Apr 06 00:59:18 2006 +0100
    79.3 @@ -3,7 +3,7 @@
    79.4   * 
    79.5   * A nice interface for passing asynchronous events to guest OSes.
    79.6   * 
    79.7 - * Copyright (c) 2002-2005, K A Fraser
    79.8 + * Copyright (c) 2002-2006, K A Fraser
    79.9   */
   79.10  
   79.11  #ifndef __XEN_EVENT_H__
   79.12 @@ -18,11 +18,18 @@
   79.13  extern void evtchn_set_pending(struct vcpu *v, int port);
   79.14  
   79.15  /*
   79.16 - * send_guest_virq:
   79.17 + * send_guest_vcpu_virq: Notify guest via a per-VCPU VIRQ.
   79.18   *  @v:        VCPU to which virtual IRQ should be sent
   79.19   *  @virq:     Virtual IRQ number (VIRQ_*)
   79.20   */
   79.21 -extern void send_guest_virq(struct vcpu *v, int virq);
   79.22 +extern void send_guest_vcpu_virq(struct vcpu *v, int virq);
   79.23 +
   79.24 +/*
   79.25 + * send_guest_global_virq: Notify guest via a global VIRQ.
   79.26 + *  @d:        Domain to which virtual IRQ should be sent
   79.27 + *  @virq:     Virtual IRQ number (VIRQ_*)
   79.28 + */
   79.29 +extern void send_guest_global_virq(struct domain *d, int virq);
   79.30  
   79.31  /*
   79.32   * send_guest_pirq:
    80.1 --- a/xen/include/xen/time.h	Thu Apr 06 00:59:06 2006 +0100
    80.2 +++ b/xen/include/xen/time.h	Thu Apr 06 00:59:18 2006 +0100
    80.3 @@ -55,7 +55,9 @@ s_time_t get_s_time(void);
    80.4  #define MILLISECS(_ms)  ((s_time_t)((_ms) * 1000000ULL))
    80.5  #define MICROSECS(_us)  ((s_time_t)((_us) * 1000ULL))
    80.6  
    80.7 -extern void update_dom_time(struct vcpu *v);
    80.8 +extern void update_vcpu_system_time(struct vcpu *v);
    80.9 +extern void update_domain_wallclock_time(struct domain *d);
   80.10 +
   80.11  extern void do_settime(
   80.12      unsigned long secs, unsigned long nsecs, u64 system_time_base);
   80.13