ia64/xen-unstable

changeset 9658:c668b024b2c7

Merged.
author emellor@leeni.uk.xensource.com
date Mon Apr 10 16:36:03 2006 +0100 (2006-04-10)
parents b7802a60b09f 066a233ee365
children b454a09cddb1 bb97f5120d03
     1.1 --- a/Config.mk	Mon Apr 10 16:28:52 2006 +0100
     1.2 +++ b/Config.mk	Mon Apr 10 16:36:03 2006 +0100
     1.3 @@ -24,6 +24,7 @@ OBJCOPY    = $(CROSS_COMPILE)objcopy
     1.4  OBJDUMP    = $(CROSS_COMPILE)objdump
     1.5  
     1.6  DISTDIR     ?= $(XEN_ROOT)/dist
     1.7 +DESTDIR     ?= /
     1.8  
     1.9  INSTALL      = install
    1.10  INSTALL_DIR  = $(INSTALL) -d -m0755
     2.1 --- a/Makefile	Mon Apr 10 16:28:52 2006 +0100
     2.2 +++ b/Makefile	Mon Apr 10 16:36:03 2006 +0100
     2.3 @@ -115,18 +115,6 @@ distclean: clean
     2.4  # Linux name for GNU distclean
     2.5  mrproper: distclean
     2.6  
     2.7 -install-logging: LOGGING=logging-0.4.9.2
     2.8 -install-logging:
     2.9 -	[ -f $(LOGGING).tar.gz ] || wget http://www.red-dove.com/$(LOGGING).tar.gz
    2.10 -	tar -zxf $(LOGGING).tar.gz
    2.11 -	cd $(LOGGING) && python setup.py install
    2.12 -
    2.13 -# handy target to upgrade iptables (use rpm or apt-get in preference)
    2.14 -install-iptables:
    2.15 -	wget http://www.netfilter.org/files/iptables-1.2.11.tar.bz2
    2.16 -	tar -jxf iptables-1.2.11.tar.bz2
    2.17 -	$(MAKE) -C iptables-1.2.11 PREFIX= KERNEL_DIR=../linux-$(LINUX_VER)-xen0 install
    2.18 -
    2.19  help:
    2.20  	@echo 'Installation targets:'
    2.21  	@echo '  install          - build and install everything'
    2.22 @@ -147,23 +135,25 @@ help:
    2.23  	@echo '  dev-docs         - build developer-only documentation'
    2.24  	@echo ''
    2.25  	@echo 'Cleaning targets:'
    2.26 -	@echo '  clean            - clean the Xen, tools and docs (but not'
    2.27 -	@echo '                     guest kernel) trees'
    2.28 -	@echo '  distclean        - clean plus delete kernel tarballs and kernel'
    2.29 -	@echo '                     build trees'
    2.30 +	@echo '  clean            - clean the Xen, tools and docs (but not guest kernel trees)'
    2.31 +	@echo '  distclean        - clean plus delete kernel build trees and'
    2.32 +	@echo '                     local downloaded files'
    2.33  	@echo '  kdelete          - delete guest kernel build trees'
    2.34  	@echo '  kclean           - clean guest kernel build trees'
    2.35  	@echo ''
    2.36 -	@echo 'Dependency installation targets:'
    2.37 -	@echo '  install-logging  - install the Python Logging package'
    2.38 -	@echo '  install-iptables - install iptables tools'
    2.39 -	@echo ''
    2.40  	@echo 'Miscellaneous targets:'
    2.41  	@echo '  prep-kernels     - prepares kernel directories, does not build'
    2.42  	@echo '  mkpatches        - make patches against vanilla kernels from'
    2.43  	@echo '                     sparse trees'
    2.44 -	@echo '  uninstall        - attempt to remove installed Xen tools (use'
    2.45 -	@echo '                     with extreme care!)'
    2.46 +	@echo '  uninstall        - attempt to remove installed Xen tools'
    2.47 +	@echo '                     (use with extreme care!)'
    2.48 +	@echo
    2.49 +	@echo 'Environment:'
    2.50 +	@echo '  XEN_PYTHON_NATIVE_INSTALL=y'
    2.51 +	@echo '                   - native python install or dist'
    2.52 +	@echo '                     install into prefix/lib/python<VERSION>'
    2.53 +	@echo '                     instead of <PREFIX>/lib/python'
    2.54 +	@echo '                     true if set to non-empty value, false otherwise'
    2.55  
    2.56  # Use this target with extreme care!
    2.57  uninstall: D=$(DESTDIR)
     3.1 --- a/buildconfigs/Rules.mk	Mon Apr 10 16:28:52 2006 +0100
     3.2 +++ b/buildconfigs/Rules.mk	Mon Apr 10 16:36:03 2006 +0100
     3.3 @@ -40,29 +40,6 @@ patch-%.bz2:
     3.4  	@echo "Cannot find $(@F) in path $(LINUX_SRC_PATH)"
     3.5  	wget $(KERNEL_REPO)/pub/linux/kernel/v$(_LINUX_VDIR)/$(_LINUX_XDIR)/$(@F) -O./$@
     3.6  
     3.7 -# Expand NetBSD release to NetBSD version
     3.8 -NETBSD_RELEASE  ?= 2.0
     3.9 -NETBSD_VER      ?= $(patsubst netbsd-%-xen-sparse,%,$(wildcard netbsd-$(NETBSD_RELEASE)*-xen-sparse))
    3.10 -NETBSD_CVSSNAP  ?= 20050309
    3.11 -
    3.12 -# Setup NetBSD search path
    3.13 -NETBSD_SRC_PATH	?= .:..
    3.14 -vpath netbsd-%.tar.bz2 $(NETBSD_SRC_PATH)
    3.15 -
    3.16 -# download a pristine NetBSD tarball if there isn't one in NETBSD_SRC_PATH
    3.17 -netbsd-%-xen-kernel-$(NETBSD_CVSSNAP).tar.bz2:
    3.18 -	@echo "Cannot find $@ in path $(NETBSD_SRC_PATH)"
    3.19 -	wget http://www.cl.cam.ac.uk/Research/SRG/netos/xen/downloads/$@ -O./$@
    3.20 -
    3.21 -netbsd-%.tar.bz2: netbsd-%-xen-kernel-$(NETBSD_CVSSNAP).tar.bz2
    3.22 -	ln -fs $< $@
    3.23 -
    3.24 -ifeq ($(OS),linux)
    3.25 -OS_VER = $(LINUX_VER)
    3.26 -else
    3.27 -OS_VER = $(NETBSD_VER)
    3.28 -endif
    3.29 -
    3.30  pristine-%: pristine-%/.valid-pristine
    3.31  	@true
    3.32  
    3.33 @@ -124,27 +101,20 @@ linux-2.6-xen.patch: ref-linux-$(LINUX_V
    3.34  	rm -rf tmp-$@
    3.35  	cp -al $(<D) tmp-$@
    3.36  	( cd linux-2.6-xen-sparse && ./mkbuildtree ../tmp-$@ )	
    3.37 -	diff -Nurp $(<D) tmp-$@ > $@ || true
    3.38 +	diff -Nurp $(patsubst ref%,pristine%,$(<D)) tmp-$@ > $@ || true
    3.39  	rm -rf tmp-$@
    3.40  
    3.41  %-xen.patch: ref-%/.valid-ref
    3.42  	rm -rf tmp-$@
    3.43  	cp -al $(<D) tmp-$@
    3.44  	( cd $*-xen-sparse && ./mkbuildtree ../tmp-$@ )	
    3.45 -	diff -Nurp $(<D) tmp-$@ > $@ || true
    3.46 +	diff -Nurp $(patsubst ref%,pristine%,$(<D)) tmp-$@ > $@ || true
    3.47  	rm -rf tmp-$@
    3.48  
    3.49 -%-mrproper: %-mrproper-extra
    3.50 +%-mrproper:
    3.51  	rm -rf pristine-$(*)* ref-$(*)* $*.tar.bz2
    3.52  	rm -rf $*-xen.patch
    3.53  
    3.54 -netbsd-%-mrproper-extra:
    3.55 -	rm -rf netbsd-$*-tools netbsd-$*-tools.tar.bz2
    3.56 -	rm -f netbsd-$*-xen-kernel-$(NETBSD_CVSSNAP).tar.bz2
    3.57 -
    3.58 -%-mrproper-extra:
    3.59 -	@: # do nothing
    3.60 -
    3.61  config-update-pae:
    3.62  ifeq ($(XEN_TARGET_X86_PAE),y)
    3.63  	sed -e 's!^CONFIG_HIGHMEM4G=y$$!\# CONFIG_HIGHMEM4G is not set!;s!^\# CONFIG_HIGHMEM64G is not set$$!CONFIG_HIGHMEM64G=y!' $(CONFIG_FILE) > $(CONFIG_FILE)- && mv $(CONFIG_FILE)- $(CONFIG_FILE)
     4.1 --- a/buildconfigs/linux-defconfig_xen0_x86_32	Mon Apr 10 16:28:52 2006 +0100
     4.2 +++ b/buildconfigs/linux-defconfig_xen0_x86_32	Mon Apr 10 16:36:03 2006 +0100
     4.3 @@ -1,7 +1,7 @@
     4.4  #
     4.5  # Automatically generated make config: don't edit
     4.6 -# Linux kernel version: 2.6.16-rc3-xen0
     4.7 -# Thu Feb 16 22:52:42 2006
     4.8 +# Linux kernel version: 2.6.16-xen0
     4.9 +# Sat Apr  8 11:34:07 2006
    4.10  #
    4.11  CONFIG_X86_32=y
    4.12  CONFIG_SEMAPHORE_SLEEPERS=y
    4.13 @@ -208,7 +208,6 @@ CONFIG_ACPI_BLACKLIST_YEAR=0
    4.14  CONFIG_ACPI_EC=y
    4.15  CONFIG_ACPI_POWER=y
    4.16  CONFIG_ACPI_SYSTEM=y
    4.17 -# CONFIG_X86_PM_TIMER is not set
    4.18  # CONFIG_ACPI_CONTAINER is not set
    4.19  
    4.20  #
    4.21 @@ -392,7 +391,13 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
    4.22  #
    4.23  # Plug and Play support
    4.24  #
    4.25 -# CONFIG_PNP is not set
    4.26 +CONFIG_PNP=y
    4.27 +CONFIG_PNP_DEBUG=y
    4.28 +
    4.29 +#
    4.30 +# Protocols
    4.31 +#
    4.32 +CONFIG_PNPACPI=y
    4.33  
    4.34  #
    4.35  # Block devices
    4.36 @@ -440,6 +445,7 @@ CONFIG_BLK_DEV_IDECD=y
    4.37  #
    4.38  CONFIG_IDE_GENERIC=y
    4.39  # CONFIG_BLK_DEV_CMD640 is not set
    4.40 +# CONFIG_BLK_DEV_IDEPNP is not set
    4.41  CONFIG_BLK_DEV_IDEPCI=y
    4.42  # CONFIG_IDEPCI_SHARE_IRQ is not set
    4.43  # CONFIG_BLK_DEV_OFFBOARD is not set
    4.44 @@ -623,6 +629,7 @@ CONFIG_NETDEVICES=y
    4.45  # CONFIG_BONDING is not set
    4.46  # CONFIG_EQUALIZER is not set
    4.47  CONFIG_TUN=y
    4.48 +# CONFIG_NET_SB1000 is not set
    4.49  
    4.50  #
    4.51  # ARCnet devices
    4.52 @@ -1064,11 +1071,7 @@ CONFIG_USB_MON=y
    4.53  # CONFIG_INFINIBAND is not set
    4.54  
    4.55  #
    4.56 -# SN Devices
    4.57 -#
    4.58 -
    4.59 -#
    4.60 -# EDAC - error detection and reporting (RAS)
    4.61 +# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
    4.62  #
    4.63  # CONFIG_EDAC is not set
    4.64  
    4.65 @@ -1306,7 +1309,7 @@ CONFIG_CRYPTO_CRC32C=m
    4.66  #
    4.67  # CONFIG_CRYPTO_DEV_PADLOCK is not set
    4.68  CONFIG_XEN=y
    4.69 -CONFIG_NO_IDLE_HZ=y
    4.70 +CONFIG_XEN_INTERFACE_VERSION=0x00030101
    4.71  
    4.72  #
    4.73  # XEN
    4.74 @@ -1332,6 +1335,7 @@ CONFIG_XEN_DISABLE_SERIAL=y
    4.75  CONFIG_XEN_SYSFS=y
    4.76  CONFIG_HAVE_ARCH_ALLOC_SKB=y
    4.77  CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
    4.78 +CONFIG_NO_IDLE_HZ=y
    4.79  
    4.80  #
    4.81  # Library routines
    4.82 @@ -1344,4 +1348,6 @@ CONFIG_ZLIB_INFLATE=y
    4.83  CONFIG_GENERIC_HARDIRQS=y
    4.84  CONFIG_GENERIC_IRQ_PROBE=y
    4.85  CONFIG_X86_BIOS_REBOOT=y
    4.86 +CONFIG_X86_NO_TSS=y
    4.87 +CONFIG_X86_NO_IDT=y
    4.88  CONFIG_KTIME_SCALAR=y
     5.1 --- a/buildconfigs/linux-defconfig_xen0_x86_64	Mon Apr 10 16:28:52 2006 +0100
     5.2 +++ b/buildconfigs/linux-defconfig_xen0_x86_64	Mon Apr 10 16:36:03 2006 +0100
     5.3 @@ -327,7 +327,13 @@ CONFIG_STANDALONE=y
     5.4  #
     5.5  # Plug and Play support
     5.6  #
     5.7 -# CONFIG_PNP is not set
     5.8 +CONFIG_PNP=y
     5.9 +CONFIG_PNP_DEBUG=y
    5.10 +
    5.11 +#
    5.12 +# Protocols
    5.13 +#
    5.14 +CONFIG_PNPACPI=y
    5.15  
    5.16  #
    5.17  # Block devices
    5.18 @@ -375,6 +381,7 @@ CONFIG_BLK_DEV_IDECD=y
    5.19  #
    5.20  CONFIG_IDE_GENERIC=y
    5.21  # CONFIG_BLK_DEV_CMD640 is not set
    5.22 +# CONFIG_BLK_DEV_IDEPNP is not set
    5.23  CONFIG_BLK_DEV_IDEPCI=y
    5.24  # CONFIG_IDEPCI_SHARE_IRQ is not set
    5.25  # CONFIG_BLK_DEV_OFFBOARD is not set
    5.26 @@ -559,6 +566,7 @@ CONFIG_NETDEVICES=y
    5.27  # CONFIG_BONDING is not set
    5.28  # CONFIG_EQUALIZER is not set
    5.29  CONFIG_TUN=y
    5.30 +# CONFIG_NET_SB1000 is not set
    5.31  
    5.32  #
    5.33  # ARCnet devices
     6.1 --- a/buildconfigs/linux-defconfig_xen_x86_32	Mon Apr 10 16:28:52 2006 +0100
     6.2 +++ b/buildconfigs/linux-defconfig_xen_x86_32	Mon Apr 10 16:36:03 2006 +0100
     6.3 @@ -912,12 +912,12 @@ CONFIG_PARPORT_1284=y
     6.4  # Plug and Play support
     6.5  #
     6.6  CONFIG_PNP=y
     6.7 -# CONFIG_PNP_DEBUG is not set
     6.8 +CONFIG_PNP_DEBUG=y
     6.9  
    6.10  #
    6.11  # Protocols
    6.12  #
    6.13 -# CONFIG_PNPACPI is not set
    6.14 +CONFIG_PNPACPI=y
    6.15  
    6.16  #
    6.17  # Block devices
     7.1 --- a/buildconfigs/linux-defconfig_xen_x86_64	Mon Apr 10 16:28:52 2006 +0100
     7.2 +++ b/buildconfigs/linux-defconfig_xen_x86_64	Mon Apr 10 16:36:03 2006 +0100
     7.3 @@ -776,7 +776,13 @@ CONFIG_PARPORT_1284=y
     7.4  #
     7.5  # Plug and Play support
     7.6  #
     7.7 -# CONFIG_PNP is not set
     7.8 +CONFIG_PNP=y
     7.9 +CONFIG_PNP_DEBUG=y
    7.10 +
    7.11 +#
    7.12 +# Protocols
    7.13 +#
    7.14 +CONFIG_PNPACPI=y
    7.15  
    7.16  #
    7.17  # Block devices
    7.18 @@ -857,6 +863,7 @@ CONFIG_BLK_DEV_IDESCSI=m
    7.19  CONFIG_IDE_GENERIC=y
    7.20  CONFIG_BLK_DEV_CMD640=y
    7.21  CONFIG_BLK_DEV_CMD640_ENHANCED=y
    7.22 +CONFIG_BLK_DEV_IDEPNP=y
    7.23  CONFIG_BLK_DEV_IDEPCI=y
    7.24  CONFIG_IDEPCI_SHARE_IRQ=y
    7.25  # CONFIG_BLK_DEV_OFFBOARD is not set
    7.26 @@ -1088,6 +1095,7 @@ CONFIG_DUMMY=m
    7.27  CONFIG_BONDING=m
    7.28  CONFIG_EQUALIZER=m
    7.29  CONFIG_TUN=m
    7.30 +CONFIG_NET_SB1000=m
    7.31  
    7.32  #
    7.33  # ARCnet devices
     8.1 --- a/buildconfigs/mk.linux-2.6-xen	Mon Apr 10 16:28:52 2006 +0100
     8.2 +++ b/buildconfigs/mk.linux-2.6-xen	Mon Apr 10 16:36:03 2006 +0100
     8.3 @@ -1,13 +1,10 @@
     8.4 -
     8.5 -OS           = linux
     8.6 -
     8.7  LINUX_SERIES = 2.6
     8.8  LINUX_VER    = 2.6.16
     8.9  LINUX_SRCS = linux-2.6.16.tar.bz2
    8.10  
    8.11  EXTRAVERSION ?= xen
    8.12  
    8.13 -LINUX_DIR    = $(OS)-$(LINUX_VER)-$(EXTRAVERSION)
    8.14 +LINUX_DIR    = linux-$(LINUX_VER)-$(EXTRAVERSION)
    8.15  
    8.16  include buildconfigs/Rules.mk
    8.17  
    8.18 @@ -22,7 +19,7 @@ build: $(LINUX_DIR)/include/linux/autoco
    8.19  	$(MAKE) -C $(LINUX_DIR) ARCH=$(LINUX_ARCH) INSTALL_PATH=$(DESTDIR) vmlinuz
    8.20  	$(MAKE) -C $(LINUX_DIR) ARCH=$(LINUX_ARCH) INSTALL_PATH=$(DESTDIR) install
    8.21  
    8.22 -$(LINUX_DIR)/include/linux/autoconf.h: ref-$(OS)-$(LINUX_VER)/.valid-ref
    8.23 +$(LINUX_DIR)/include/linux/autoconf.h: ref-linux-$(LINUX_VER)/.valid-ref
    8.24  	rm -rf $(LINUX_DIR)
    8.25  	cp -al $(<D) $(LINUX_DIR)
    8.26  	# Apply arch-xen patches
    8.27 @@ -52,4 +49,4 @@ clean::
    8.28  	$(MAKE) -C $(LINUX_DIR) ARCH=$(LINUX_ARCH) clean
    8.29  
    8.30  delete: 
    8.31 -	rm -rf tmp-$(OS)-$(LINUX_VER) $(LINUX_DIR) 
    8.32 +	rm -rf tmp-linux-$(LINUX_VER) $(LINUX_DIR) 
     9.1 --- a/linux-2.6-xen-sparse/arch/i386/kernel/pci-dma-xen.c	Mon Apr 10 16:28:52 2006 +0100
     9.2 +++ b/linux-2.6-xen-sparse/arch/i386/kernel/pci-dma-xen.c	Mon Apr 10 16:36:03 2006 +0100
     9.3 @@ -69,7 +69,7 @@ dma_map_sg(struct device *hwdev, struct 
     9.4  	} else {
     9.5  		for (i = 0; i < nents; i++ ) {
     9.6  			sg[i].dma_address =
     9.7 -				page_to_phys(sg[i].page) + sg[i].offset;
     9.8 +				page_to_bus(sg[i].page) + sg[i].offset;
     9.9  			sg[i].dma_length  = sg[i].length;
    9.10  			BUG_ON(!sg[i].page);
    9.11  			IOMMU_BUG_ON(address_needs_mapping(
    9.12 @@ -105,7 +105,7 @@ dma_map_page(struct device *dev, struct 
    9.13  		dma_addr = swiotlb_map_page(
    9.14  			dev, page, offset, size, direction);
    9.15  	} else {
    9.16 -		dma_addr = page_to_phys(page) + offset;
    9.17 +		dma_addr = page_to_bus(page) + offset;
    9.18  		IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
    9.19  	}
    9.20  
    10.1 --- a/linux-2.6-xen-sparse/arch/i386/kernel/setup-xen.c	Mon Apr 10 16:28:52 2006 +0100
    10.2 +++ b/linux-2.6-xen-sparse/arch/i386/kernel/setup-xen.c	Mon Apr 10 16:36:03 2006 +0100
    10.3 @@ -1848,10 +1848,6 @@ void __init setup_arch(char **cmdline_p)
    10.4  		get_smp_config();
    10.5  #endif
    10.6  
    10.7 -	/* XXX Disable irqdebug until we have a way to avoid interrupt
    10.8 -	 * conflicts. */
    10.9 -	noirqdebug_setup("");
   10.10 -
   10.11  	register_memory();
   10.12  
   10.13  	if (xen_start_info->flags & SIF_INITDOMAIN) {
    11.1 --- a/linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c	Mon Apr 10 16:28:52 2006 +0100
    11.2 +++ b/linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c	Mon Apr 10 16:36:03 2006 +0100
    11.3 @@ -32,7 +32,7 @@ EXPORT_SYMBOL(swiotlb);
    11.4  
    11.5  #define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))
    11.6  
    11.7 -#define SG_ENT_PHYS_ADDRESS(sg)	(page_to_phys((sg)->page) + (sg)->offset)
    11.8 +#define SG_ENT_PHYS_ADDRESS(sg)	(page_to_bus((sg)->page) + (sg)->offset)
    11.9  
   11.10  /*
   11.11   * Maximum allowable number of contiguous slabs to map,
   11.12 @@ -607,7 +607,7 @@ swiotlb_map_page(struct device *hwdev, s
   11.13  	dma_addr_t dev_addr;
   11.14  	char *map;
   11.15  
   11.16 -	dev_addr = page_to_phys(page) + offset;
   11.17 +	dev_addr = page_to_bus(page) + offset;
   11.18  	if (address_needs_mapping(hwdev, dev_addr)) {
   11.19  		buffer.page   = page;
   11.20  		buffer.offset = offset;
    12.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/reboot.c	Mon Apr 10 16:28:52 2006 +0100
    12.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/reboot.c	Mon Apr 10 16:36:03 2006 +0100
    12.3 @@ -59,6 +59,10 @@ void machine_power_off(void)
    12.4  {
    12.5  	/* We really want to get pending console data out before we die. */
    12.6  	xencons_force_flush();
    12.7 +#if defined(__i386__) || defined(__x86_64__)
    12.8 +	if (pm_power_off)
    12.9 +		pm_power_off();
   12.10 +#endif
   12.11  	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
   12.12  }
   12.13  
    13.1 --- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/io.h	Mon Apr 10 16:28:52 2006 +0100
    13.2 +++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/io.h	Mon Apr 10 16:36:03 2006 +0100
    13.3 @@ -102,6 +102,7 @@ static inline void * phys_to_virt(unsign
    13.4   */
    13.5  #define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
    13.6  #define page_to_phys(page)	 (phys_to_machine(page_to_pseudophys(page)))
    13.7 +#define page_to_bus(page)	 (phys_to_machine(page_to_pseudophys(page)))
    13.8  
    13.9  #define bio_to_pseudophys(bio)	 (page_to_pseudophys(bio_page((bio))) + \
   13.10  				  (unsigned long) bio_offset((bio)))
    14.1 --- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/io.h	Mon Apr 10 16:28:52 2006 +0100
    14.2 +++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/io.h	Mon Apr 10 16:36:03 2006 +0100
    14.3 @@ -130,6 +130,7 @@ static inline void * phys_to_virt(unsign
    14.4   */
    14.5  #define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
    14.6  #define page_to_phys(page)	 (phys_to_machine(page_to_pseudophys(page)))
    14.7 +#define page_to_bus(page)	 (phys_to_machine(page_to_pseudophys(page)))
    14.8  
    14.9  #define bio_to_pseudophys(bio)	 (page_to_pseudophys(bio_page((bio))) + \
   14.10  				  (unsigned long) bio_offset((bio)))
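
Note (illustrative sketch, not part of the changeset): the two io.h hunks above add page_to_bus(), defined identically to page_to_phys(); both translate a guest pseudo-physical page into a machine (bus) address via phys_to_machine(). The pci-dma-xen.c and swiotlb.c hunks then compute DMA addresses with page_to_bus(), making explicit that the value handed to a device is a bus address. A minimal sketch of the resulting usage, assuming the usual scatterlist fields:

    /* Illustration only: the device-visible DMA address of one scatterlist
     * entry, computed the way dma_map_sg() above now does it. */
    static inline dma_addr_t sg_bus_address(struct scatterlist *sg)
    {
        return page_to_bus(sg->page) + sg->offset;
    }
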
    15.1 --- a/tools/ioemu/hw/vga.c	Mon Apr 10 16:28:52 2006 +0100
    15.2 +++ b/tools/ioemu/hw/vga.c	Mon Apr 10 16:36:03 2006 +0100
    15.3 @@ -1369,10 +1369,10 @@ static inline unsigned int cpuid_edx(uns
    15.4  {
    15.5      unsigned int eax, edx;
    15.6  
    15.7 -    __asm__("cpuid"
    15.8 +    __asm__("pushl %%ebx; cpuid; popl %%ebx"
    15.9              : "=a" (eax), "=d" (edx)
   15.10              : "0" (op)
   15.11 -            : "bx", "cx");
   15.12 +            : "cx");
   15.13  
   15.14      return edx;
   15.15  }
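
Note (illustrative sketch, not part of the changeset): the vga.c hunk stops declaring "bx" as a clobber and instead saves and restores %ebx around CPUID. On i386, %ebx holds the GOT pointer when the file is compiled as position-independent code, so it must not be clobbered (and some compilers reject an %ebx clobber outright under -fPIC); CPUID overwrites %ebx, hence the explicit push/pop. The pattern in isolation, with a hypothetical helper name:

    /* Illustration only: read CPUID EDX without clobbering %ebx
     * (safe when building with -fPIC, where %ebx is the GOT pointer). */
    static inline unsigned int cpuid_edx_pic_safe(unsigned int op)
    {
        unsigned int eax, edx;
        __asm__("pushl %%ebx; cpuid; popl %%ebx"
                : "=a" (eax), "=d" (edx)
                : "0" (op)
                : "cx");
        return edx;
    }
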
    16.1 --- a/tools/libxc/xc_linux_build.c	Mon Apr 10 16:28:52 2006 +0100
    16.2 +++ b/tools/libxc/xc_linux_build.c	Mon Apr 10 16:36:03 2006 +0100
    16.3 @@ -110,10 +110,10 @@ static int parse_features(
    16.4  
    16.5          if ( i == XENFEAT_NR_SUBMAPS*32 )
    16.6          {
    16.7 -            ERROR("Unknown feature \"%.*s\".\n", (int)(p-feats), feats);
    16.8 +            ERROR("Unknown feature \"%.*s\".", (int)(p-feats), feats);
    16.9              if ( req )
   16.10              {
   16.11 -                ERROR("Kernel requires an unknown hypervisor feature.\n");
   16.12 +                ERROR("Kernel requires an unknown hypervisor feature.");
   16.13                  return -EINVAL;
   16.14              }
   16.15          }
   16.16 @@ -579,6 +579,31 @@ static int setup_guest(int xc_handle,
   16.17      return -1;
   16.18  }
   16.19  #else /* x86 */
   16.20 +
   16.21 +/* Check if the platform supports the guest kernel format */
   16.22 +static int compat_check(int xc_handle, struct domain_setup_info *dsi)
   16.23 +{
   16.24 +    xen_capabilities_info_t xen_caps = "";
   16.25 +
   16.26 +    if (xc_version(xc_handle, XENVER_capabilities, &xen_caps) != 0) {
   16.27 +        ERROR("Cannot determine host capabilities.");
   16.28 +        return 0;
   16.29 +    }
   16.30 +
   16.31 +    if (strstr(xen_caps, "xen-3.0-x86_32p")) {
   16.32 +        if (!dsi->pae_kernel) {
   16.33 +            ERROR("Non PAE-kernel on PAE host.");
   16.34 +            return 0;
   16.35 +        }
   16.36 +    } else if (dsi->pae_kernel) {
   16.37 +        ERROR("PAE-kernel on non-PAE host.");
   16.38 +        return 0;
   16.39 +    }
   16.40 +
   16.41 +    return 1;
   16.42 +}
   16.43 +
   16.44 +
   16.45  static int setup_guest(int xc_handle,
   16.46                         uint32_t dom,
   16.47                         const char *image, unsigned long image_size,
   16.48 @@ -635,10 +660,13 @@ static int setup_guest(int xc_handle,
   16.49  
   16.50      if ( (dsi.v_start & (PAGE_SIZE-1)) != 0 )
   16.51      {
   16.52 -        PERROR("Guest OS must load to a page boundary.\n");
   16.53 +        PERROR("Guest OS must load to a page boundary.");
   16.54          goto error_out;
   16.55      }
   16.56  
   16.57 +    if (!compat_check(xc_handle, &dsi))
   16.58 +        goto error_out;
   16.59 +
   16.60      /* Parse and validate kernel features. */
   16.61      p = strstr(dsi.xen_guest_string, "FEATURES=");
   16.62      if ( p != NULL )
   16.63 @@ -647,7 +675,7 @@ static int setup_guest(int xc_handle,
   16.64                               supported_features,
   16.65                               required_features) )
   16.66          {
   16.67 -            ERROR("Failed to parse guest kernel features.\n");
   16.68 +            ERROR("Failed to parse guest kernel features.");
   16.69              goto error_out;
   16.70          }
   16.71  
   16.72 @@ -659,7 +687,7 @@ static int setup_guest(int xc_handle,
   16.73      {
   16.74          if ( (supported_features[i]&required_features[i]) != required_features[i] )
   16.75          {
   16.76 -            ERROR("Guest kernel does not support a required feature.\n");
   16.77 +            ERROR("Guest kernel does not support a required feature.");
   16.78              goto error_out;
   16.79          }
   16.80      }
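
Note (illustrative sketch, not part of the changeset): the new compat_check() queries the hypervisor capability string via xc_version(XENVER_capabilities, ...) and rejects a PAE/non-PAE mismatch between the guest kernel and the host before any domain memory is set up. The capability string lists the guest ABIs the running hypervisor supports; the "p" suffix in "xen-3.0-x86_32p" indicates a PAE hypervisor. The core of the test, assuming a capability string already fetched:

    /* Illustration only: does this 32-bit hypervisor expect PAE guests? */
    static int host_is_pae(const char *xen_caps)
    {
        return strstr(xen_caps, "xen-3.0-x86_32p") != NULL;
    }
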
    17.1 --- a/tools/libxc/xc_load_elf.c	Mon Apr 10 16:28:52 2006 +0100
    17.2 +++ b/tools/libxc/xc_load_elf.c	Mon Apr 10 16:36:03 2006 +0100
    17.3 @@ -69,6 +69,21 @@ static int parseelfimage(const char *ima
    17.4          return -EINVAL;
    17.5      }
    17.6  
    17.7 +    if (
    17.8 +#if defined(__i386__)
    17.9 +        (ehdr->e_ident[EI_CLASS] != ELFCLASS32) ||
   17.10 +        (ehdr->e_machine != EM_386) ||
   17.11 +#elif defined(__x86_64__)
   17.12 +        (ehdr->e_ident[EI_CLASS] != ELFCLASS64) ||
   17.13 +        (ehdr->e_machine != EM_X86_64) ||
   17.14 +#endif
   17.15 +        (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) ||
   17.16 +        (ehdr->e_type != ET_EXEC) )
   17.17 +    {
   17.18 +        ERROR("Kernel not a Xen-compatible Elf image.");
   17.19 +        return -EINVAL;
   17.20 +    }
   17.21 +
   17.22      if ( (ehdr->e_phoff + (ehdr->e_phnum * ehdr->e_phentsize)) > elfsize )
   17.23      {
   17.24          ERROR("ELF program headers extend beyond end of image.");
    18.1 --- a/tools/python/xen/xend/server/pciif.py	Mon Apr 10 16:28:52 2006 +0100
    18.2 +++ b/tools/python/xen/xend/server/pciif.py	Mon Apr 10 16:36:03 2006 +0100
    18.3 @@ -115,7 +115,7 @@ class PciController(DevController):
    18.4              dev = PciDevice(domain, bus, slot, func)
    18.5          except Exception, e:
    18.6              raise VmError("pci: failed to locate device and "+
    18.7 -                    "parse it's resources - %s"+str(e))
    18.8 +                    "parse it's resources - "+str(e))
    18.9  
   18.10          if dev.driver!='pciback':
   18.11              raise VmError(("pci: PCI Backend does not own device "+ \
   18.12 @@ -131,7 +131,7 @@ class PciController(DevController):
   18.13                      nr_ports = size, allow_access = True)
   18.14              if rc<0:
   18.15                  raise VmError(('pci: failed to configure I/O ports on device '+
   18.16 -                            '%s - errno=%d')&(dev.name,rc))
   18.17 +                            '%s - errno=%d')%(dev.name,rc))
   18.18              
   18.19          for (start, size) in dev.iomem:
   18.20              # Convert start/size from bytes to page frame sizes
   18.21 @@ -147,7 +147,7 @@ class PciController(DevController):
   18.22                      allow_access = True)
   18.23              if rc<0:
   18.24                  raise VmError(('pci: failed to configure I/O memory on device '+
   18.25 -                            '%s - errno=%d')&(dev.name,rc))
   18.26 +                            '%s - errno=%d')%(dev.name,rc))
   18.27  
   18.28          if dev.irq>0:
   18.29              log.debug('pci: enabling irq %d'%dev.irq)
   18.30 @@ -155,7 +155,7 @@ class PciController(DevController):
   18.31                      allow_access = True)
   18.32              if rc<0:
   18.33                  raise VmError(('pci: failed to configure irq on device '+
   18.34 -                            '%s - errno=%d')&(dev.name,rc))
   18.35 +                            '%s - errno=%d')%(dev.name,rc))
   18.36  
   18.37      def waitForBackend(self,devid):
   18.38          return (0, "ok - no hotplug")
    19.1 --- a/xen/Rules.mk	Mon Apr 10 16:28:52 2006 +0100
    19.2 +++ b/xen/Rules.mk	Mon Apr 10 16:36:03 2006 +0100
    19.3 @@ -74,7 +74,7 @@ subdir-n := $(patsubst %,%/,$(patsubst %
    19.4  subdir-y := $(patsubst %,%/,$(patsubst %/,%,$(subdir-y)))
    19.5  
    19.6  # Add explicitly declared subdirectories to the object list.
    19.7 -obj-y += $(patsubst %,%/built_in.o,$(subdir-y))
    19.8 +obj-y += $(patsubst %/,%/built_in.o,$(subdir-y))
    19.9  
   19.10  # Add implicitly declared subdirectories (in the object list) to the
   19.11  # subdirectory list, and rewrite the object-list entry.
    20.1 --- a/xen/arch/ia64/xen/irq.c	Mon Apr 10 16:28:52 2006 +0100
    20.2 +++ b/xen/arch/ia64/xen/irq.c	Mon Apr 10 16:36:03 2006 +0100
    20.3 @@ -1358,25 +1358,20 @@ static void __do_IRQ_guest(int irq)
    20.4  int pirq_guest_unmask(struct domain *d)
    20.5  {
    20.6      irq_desc_t    *desc;
    20.7 -    int            i, j, pirq;
    20.8 -    u32            m;
    20.9 +    int            pirq;
   20.10      shared_info_t *s = d->shared_info;
   20.11  
   20.12 -    for ( i = 0; i < ARRAY_SIZE(d->pirq_mask); i++ )
   20.13 +    for ( pirq = find_first_bit(d->pirq_mask, NR_PIRQS);
   20.14 +          pirq < NR_PIRQS;
   20.15 +          pirq = find_next_bit(d->pirq_mask, NR_PIRQS, pirq+1) )
   20.16      {
   20.17 -        m = d->pirq_mask[i];
   20.18 -        while ( (j = ffs(m)) != 0 )
   20.19 -        {
   20.20 -            m &= ~(1 << --j);
   20.21 -            pirq = (i << 5) + j;
   20.22 -            desc = &irq_desc[pirq];
   20.23 -            spin_lock_irq(&desc->lock);
   20.24 -            if ( !test_bit(d->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
   20.25 -                 test_and_clear_bit(pirq, &d->pirq_mask) &&
   20.26 -                 (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
   20.27 -                desc->handler->end(pirq);
   20.28 -            spin_unlock_irq(&desc->lock);
   20.29 -        }
   20.30 +        desc = &irq_desc[pirq];
   20.31 +        spin_lock_irq(&desc->lock);
   20.32 +        if ( !test_bit(d->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
   20.33 +             test_and_clear_bit(pirq, &d->pirq_mask) &&
   20.34 +             (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
   20.35 +            desc->handler->end(pirq);
   20.36 +        spin_unlock_irq(&desc->lock);
   20.37      }
   20.38  
   20.39      return 0;
    21.1 --- a/xen/arch/x86/io_apic.c	Mon Apr 10 16:28:52 2006 +0100
    21.2 +++ b/xen/arch/x86/io_apic.c	Mon Apr 10 16:36:03 2006 +0100
    21.3 @@ -75,6 +75,7 @@ int disable_timer_pin_1 __initdata;
    21.4  static struct irq_pin_list {
    21.5      int apic, pin, next;
    21.6  } irq_2_pin[PIN_MAP_SIZE];
    21.7 +static int irq_2_pin_free_entry = NR_IRQS;
    21.8  
    21.9  int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
   21.10  
   21.11 @@ -85,22 +86,59 @@ int vector_irq[NR_VECTORS] __read_mostly
   21.12   */
   21.13  static void add_pin_to_irq(unsigned int irq, int apic, int pin)
   21.14  {
   21.15 -    static int first_free_entry = NR_IRQS;
   21.16      struct irq_pin_list *entry = irq_2_pin + irq;
   21.17  
   21.18 -    while (entry->next)
   21.19 +    while (entry->next) {
   21.20 +        BUG_ON((entry->apic == apic) && (entry->pin == pin));
   21.21          entry = irq_2_pin + entry->next;
   21.22 +    }
   21.23 +
   21.24 +    BUG_ON((entry->apic == apic) && (entry->pin == pin));
   21.25  
   21.26      if (entry->pin != -1) {
   21.27 -        entry->next = first_free_entry;
   21.28 +        if (irq_2_pin_free_entry >= PIN_MAP_SIZE)
   21.29 +            panic("io_apic.c: whoops");
   21.30 +        entry->next = irq_2_pin_free_entry;
   21.31          entry = irq_2_pin + entry->next;
   21.32 -        if (++first_free_entry >= PIN_MAP_SIZE)
   21.33 -            panic("io_apic.c: whoops");
   21.34 +        irq_2_pin_free_entry = entry->next;
   21.35 +        entry->next = 0;
   21.36      }
   21.37      entry->apic = apic;
   21.38      entry->pin = pin;
   21.39  }
   21.40  
   21.41 +static void remove_pin_at_irq(unsigned int irq, int apic, int pin)
   21.42 +{
   21.43 +    struct irq_pin_list *entry, *prev;
   21.44 +
   21.45 +    for (entry = &irq_2_pin[irq]; ; entry = &irq_2_pin[entry->next]) {
   21.46 +        if ((entry->apic == apic) && (entry->pin == pin))
   21.47 +            break;
   21.48 +        if (!entry->next)
   21.49 +            BUG();
   21.50 +    }
   21.51 +
   21.52 +    entry->pin = entry->apic = -1;
   21.53 +    
   21.54 +    if (entry != &irq_2_pin[irq]) {
   21.55 +        /* Removed entry is not at head of list. */
   21.56 +        prev = &irq_2_pin[irq];
   21.57 +        while (&irq_2_pin[prev->next] != entry)
   21.58 +            prev = &irq_2_pin[prev->next];
   21.59 +        prev->next = entry->next;
   21.60 +        entry->next = irq_2_pin_free_entry;
   21.61 +        irq_2_pin_free_entry = entry - irq_2_pin;
   21.62 +    } else if (entry->next != 0) {
   21.63 +        /* Removed entry is at head of multi-item list. */
   21.64 +        prev  = entry;
   21.65 +        entry = &irq_2_pin[entry->next];
   21.66 +        *prev = *entry;
   21.67 +        entry->pin = entry->apic = -1;
   21.68 +        entry->next = irq_2_pin_free_entry;
   21.69 +        irq_2_pin_free_entry = entry - irq_2_pin;
   21.70 +    }
   21.71 +}
   21.72 +
   21.73  /*
   21.74   * Reroute an IRQ to a different pin.
   21.75   */
   21.76 @@ -959,6 +997,10 @@ static void __init enable_IO_APIC(void)
   21.77          irq_2_pin[i].next = 0;
   21.78      }
   21.79  
   21.80 +    /* Initialise dynamic irq_2_pin free list. */
   21.81 +    for (i = NR_IRQS; i < PIN_MAP_SIZE; i++)
   21.82 +        irq_2_pin[i].next = i + 1;
   21.83 +
   21.84      /*
   21.85       * The number of IO-APIC IRQ registers (== #pins):
   21.86       */
   21.87 @@ -1854,11 +1896,17 @@ int ioapic_guest_read(unsigned long phys
   21.88      return 0;
   21.89  }
   21.90  
   21.91 +#define WARN_BOGUS_WRITE(f, a...)                                       \
   21.92 +    DPRINTK("\n%s: apic=%d, pin=%d, old_irq=%d, new_irq=%d\n"           \
   21.93 +            "%s: old_entry=%08x, new_entry=%08x\n"                      \
   21.94 +            "%s: " f, __FUNCTION__, apic, pin, old_irq, new_irq,        \
   21.95 +            __FUNCTION__, *(u32 *)&old_rte, *(u32 *)&new_rte,           \
   21.96 +            __FUNCTION__ , ##a )
   21.97 +
   21.98  int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
   21.99  {
  21.100 -    int apic, pin, irq;
  21.101 -    struct IO_APIC_route_entry rte = { 0 };
  21.102 -    struct irq_pin_list *entry;
  21.103 +    int apic, pin, old_irq = -1, new_irq = -1;
  21.104 +    struct IO_APIC_route_entry old_rte = { 0 }, new_rte = { 0 };
  21.105      unsigned long flags;
  21.106  
  21.107      if ( (apic = ioapic_physbase_to_id(physbase)) < 0 )
  21.108 @@ -1870,8 +1918,9 @@ int ioapic_guest_write(unsigned long phy
  21.109      
  21.110      pin = (reg - 0x10) >> 1;
  21.111  
  21.112 -    *(u32 *)&rte = val;
  21.113 -    rte.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
  21.114 +    /* Write first half from guest; second half is target info. */
  21.115 +    *(u32 *)&new_rte = val;
  21.116 +    new_rte.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
  21.117  
  21.118      /*
  21.119       * What about weird destination types?
  21.120 @@ -1881,7 +1930,7 @@ int ioapic_guest_write(unsigned long phy
  21.121       *  ExtINT: Ignore? Linux only asserts this at start of day.
  21.122       * For now, print a message and return an error. We can fix up on demand.
  21.123       */
  21.124 -    if ( rte.delivery_mode > dest_LowestPrio )
  21.125 +    if ( new_rte.delivery_mode > dest_LowestPrio )
  21.126      {
  21.127          printk("ERROR: Attempt to write weird IOAPIC destination mode!\n");
  21.128          printk("       APIC=%d/%d, lo-reg=%x\n", apic, pin, val);
  21.129 @@ -1892,36 +1941,69 @@ int ioapic_guest_write(unsigned long phy
  21.130       * The guest does not know physical APIC arrangement (flat vs. cluster).
  21.131       * Apply genapic conventions for this platform.
  21.132       */
  21.133 -    rte.delivery_mode = INT_DELIVERY_MODE;
  21.134 -    rte.dest_mode     = INT_DEST_MODE;
  21.135 -
  21.136 -    if ( rte.vector >= FIRST_DEVICE_VECTOR )
  21.137 -    {
  21.138 -        /* Is there a valid irq mapped to this vector? */
  21.139 -        irq = vector_irq[rte.vector];
  21.140 -        if ( !IO_APIC_IRQ(irq) )
  21.141 -            return 0;
  21.142 +    new_rte.delivery_mode = INT_DELIVERY_MODE;
  21.143 +    new_rte.dest_mode     = INT_DEST_MODE;
  21.144  
  21.145 -        /* Set the correct irq-handling type. */
  21.146 -        irq_desc[IO_APIC_VECTOR(irq)].handler = rte.trigger ? 
  21.147 -            &ioapic_level_type: &ioapic_edge_type;
  21.148 +    spin_lock_irqsave(&ioapic_lock, flags);
  21.149  
  21.150 -        /* Record the pin<->irq mapping. */
  21.151 -        for ( entry = &irq_2_pin[irq]; ; entry = &irq_2_pin[entry->next] )
  21.152 -        {
  21.153 -            if ( (entry->apic == apic) && (entry->pin == pin) )
  21.154 -                break;
  21.155 -            if ( !entry->next )
  21.156 -            {
  21.157 -                add_pin_to_irq(irq, apic, pin);
  21.158 -                break;
  21.159 -            }
  21.160 -        }
  21.161 +    /* Read first (interesting) half of current routing entry. */
  21.162 +    *(u32 *)&old_rte = io_apic_read(apic, 0x10 + 2 * pin);
  21.163 +
  21.164 +    /* No change to the first half of the routing entry? Bail quietly. */
  21.165 +    if ( *(u32 *)&old_rte == *(u32 *)&new_rte )
  21.166 +    {
  21.167 +        spin_unlock_irqrestore(&ioapic_lock, flags);
  21.168 +        return 0;
  21.169      }
  21.170  
  21.171 -    spin_lock_irqsave(&ioapic_lock, flags);
  21.172 -    io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&rte) + 0));
  21.173 -    io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&rte) + 1));
  21.174 +    if ( old_rte.vector >= FIRST_DEVICE_VECTOR )
  21.175 +        old_irq = vector_irq[old_rte.vector];
  21.176 +    if ( new_rte.vector >= FIRST_DEVICE_VECTOR )
  21.177 +        new_irq = vector_irq[new_rte.vector];
  21.178 +
  21.179 +    if ( (old_irq != new_irq) && (old_irq != -1) && IO_APIC_IRQ(old_irq) )
  21.180 +    {
  21.181 +        if ( irq_desc[IO_APIC_VECTOR(old_irq)].action )
  21.182 +        {
  21.183 +            WARN_BOGUS_WRITE("Attempt to remove IO-APIC pin of in-use IRQ!\n");
  21.184 +            spin_unlock_irqrestore(&ioapic_lock, flags);
  21.185 +            return 0;
  21.186 +        }
  21.187 +
  21.188 +        remove_pin_at_irq(old_irq, apic, pin);
  21.189 +    }
  21.190 +
  21.191 +    if ( (new_irq != -1) && IO_APIC_IRQ(new_irq) )
  21.192 +    {
  21.193 +        if ( irq_desc[IO_APIC_VECTOR(new_irq)].action )
  21.194 +        {
  21.195 +            WARN_BOGUS_WRITE("Attempt to %s IO-APIC pin for in-use IRQ!\n",
  21.196 +                             (old_irq != new_irq) ? "add" : "modify");
  21.197 +            spin_unlock_irqrestore(&ioapic_lock, flags);
  21.198 +            return 0;
  21.199 +        }
  21.200 +        
  21.201 +        /* Set the correct irq-handling type. */
  21.202 +        irq_desc[IO_APIC_VECTOR(new_irq)].handler = new_rte.trigger ? 
  21.203 +            &ioapic_level_type: &ioapic_edge_type;
  21.204 +        
  21.205 +        if ( old_irq != new_irq )
  21.206 +            add_pin_to_irq(new_irq, apic, pin);
  21.207 +
  21.208 +        /* Mask iff level triggered. */
  21.209 +        new_rte.mask = new_rte.trigger;
  21.210 +    }
  21.211 +    else if ( !new_rte.mask )
  21.212 +    {
  21.213 +        /* This pin leads nowhere but the guest has not masked it. */
  21.214 +        WARN_BOGUS_WRITE("Installing bogus unmasked IO-APIC entry!\n");
  21.215 +        new_rte.mask = 1;
  21.216 +    }
  21.217 +
  21.218 +
  21.219 +    io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&new_rte) + 0));
  21.220 +    io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&new_rte) + 1));
  21.221 +
  21.222      spin_unlock_irqrestore(&ioapic_lock, flags);
  21.223  
  21.224      return 0;
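
Note (illustrative sketch, not part of the changeset): the io_apic.c changes turn the grow-only allocator for shared irq_2_pin entries into a real free list (irq_2_pin_free_entry, initialised in enable_IO_APIC()), so that the new remove_pin_at_irq() can give entries back when ioapic_guest_write() moves a pin from one IRQ to another. The table layout, restated with comments:

    /* Illustration only: irq_2_pin[0..NR_IRQS-1] are per-IRQ list heads;
     * irq_2_pin[NR_IRQS..PIN_MAP_SIZE-1] are chained through ->next to form
     * the free pool that add_pin_to_irq() allocates from and
     * remove_pin_at_irq() returns entries to.  next == 0 ends a chain. */
    struct irq_pin_list {
        int apic, pin, next;
    };
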
    22.1 --- a/xen/arch/x86/irq.c	Mon Apr 10 16:28:52 2006 +0100
    22.2 +++ b/xen/arch/x86/irq.c	Mon Apr 10 16:36:03 2006 +0100
    22.3 @@ -171,26 +171,20 @@ static void __do_IRQ_guest(int vector)
    22.4  int pirq_guest_unmask(struct domain *d)
    22.5  {
    22.6      irq_desc_t    *desc;
    22.7 -    unsigned int   i, j, pirq;
    22.8 -    u32            m;
    22.9 +    unsigned int   pirq;
   22.10      shared_info_t *s = d->shared_info;
   22.11  
   22.12 -    for ( i = 0; i < ARRAY_SIZE(d->pirq_mask); i++ )
   22.13 +    for ( pirq = find_first_bit(d->pirq_mask, NR_PIRQS);
   22.14 +          pirq < NR_PIRQS;
   22.15 +          pirq = find_next_bit(d->pirq_mask, NR_PIRQS, pirq+1) )
   22.16      {
   22.17 -        m = d->pirq_mask[i];
   22.18 -        while ( m != 0 )
   22.19 -        {
   22.20 -            j = find_first_set_bit(m);
   22.21 -            m &= ~(1 << j);
   22.22 -            pirq = (i << 5) + j;
   22.23 -            desc = &irq_desc[irq_to_vector(pirq)];
   22.24 -            spin_lock_irq(&desc->lock);
   22.25 -            if ( !test_bit(d->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
   22.26 -                 test_and_clear_bit(pirq, &d->pirq_mask) &&
   22.27 -                 (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
   22.28 -                desc->handler->end(irq_to_vector(pirq));
   22.29 -            spin_unlock_irq(&desc->lock);
   22.30 -        }
   22.31 +        desc = &irq_desc[irq_to_vector(pirq)];
   22.32 +        spin_lock_irq(&desc->lock);
   22.33 +        if ( !test_bit(d->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
   22.34 +             test_and_clear_bit(pirq, &d->pirq_mask) &&
   22.35 +             (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
   22.36 +            desc->handler->end(irq_to_vector(pirq));
   22.37 +        spin_unlock_irq(&desc->lock);
   22.38      }
   22.39  
   22.40      return 0;
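
Note (illustrative sketch, not part of the changeset): both pirq_guest_unmask() rewrites (ia64 and x86) drop the manual word-by-word scan of d->pirq_mask, which hard-coded 32-bit mask words via "(i << 5) + j", in favour of the generic bitmap iterators. The iteration pattern on its own, assuming a bitmap of NR_PIRQS bits:

    /* Illustration only: visit every set bit in an NR_PIRQS-bit bitmap. */
    unsigned int pirq;

    for ( pirq = find_first_bit(mask, NR_PIRQS);
          pirq < NR_PIRQS;
          pirq = find_next_bit(mask, NR_PIRQS, pirq + 1) )
    {
        /* ...handle pirq... */
    }
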
    23.1 --- a/xen/common/sched_sedf.c	Mon Apr 10 16:28:52 2006 +0100
    23.2 +++ b/xen/common/sched_sedf.c	Mon Apr 10 16:36:03 2006 +0100
    23.3 @@ -15,35 +15,24 @@
    23.4  
    23.5  /*verbosity settings*/
    23.6  #define SEDFLEVEL 0
    23.7 -#define PRINT(_f, _a...)  \
    23.8 -    if ((_f)<=SEDFLEVEL) printk(_a );
    23.9 +#define PRINT(_f, _a...)                        \
   23.10 +    do {                                        \
   23.11 +        if ( (_f) <= SEDFLEVEL )                \
   23.12 +            printk(_a );                        \
   23.13 +    } while ( 0 )
   23.14  
   23.15  #ifndef NDEBUG
   23.16  #define SEDF_STATS
   23.17 -#define CHECK(_p) if ( !(_p) ) \
   23.18 - { printk("Check '%s' failed, line %d, file %s\n", #_p , __LINE__,\
   23.19 - __FILE__);}
   23.20 +#define CHECK(_p)                                           \
   23.21 +    do {                                                    \
   23.22 +        if ( !(_p) )                                        \
   23.23 +            printk("Check '%s' failed, line %d, file %s\n", \
   23.24 +                   #_p , __LINE__, __FILE__);               \
   23.25 +    } while ( 0 )
   23.26  #else
   23.27  #define CHECK(_p) ((void)0)
   23.28  #endif
   23.29  
   23.30 -/*various ways of unblocking domains*/
   23.31 -#define UNBLOCK_ISOCHRONOUS_EDF 1
   23.32 -#define UNBLOCK_EDF 2
   23.33 -#define UNBLOCK_ATROPOS 3
   23.34 -#define UNBLOCK_SHORT_RESUME 4
   23.35 -#define UNBLOCK_BURST 5
   23.36 -#define UNBLOCK_EXTRA_SUPPORT 6
   23.37 -#define UNBLOCK UNBLOCK_EXTRA_SUPPORT
   23.38 -
   23.39 -/*various ways of treating extra-time*/
   23.40 -#define EXTRA_OFF 1
   23.41 -#define EXTRA_ROUNDR 2
   23.42 -#define EXTRA_SLICE_WEIGHT 3
   23.43 -#define EXTRA_BLOCK_WEIGHT 4
   23.44 -
   23.45 -#define EXTRA EXTRA_BLOCK_WEIGHT
   23.46 -
   23.47  #define EXTRA_NONE (0)
   23.48  #define EXTRA_AWARE (1)
   23.49  #define EXTRA_RUN_PEN (2)
   23.50 @@ -68,8 +57,8 @@
   23.51  struct sedf_dom_info {
   23.52      struct domain  *domain;
   23.53  };
   23.54 -struct sedf_vcpu_info
   23.55 -{
   23.56 +
   23.57 +struct sedf_vcpu_info {
   23.58      struct vcpu *vcpu;
   23.59      struct list_head list;
   23.60      struct list_head extralist[2];
   23.61 @@ -85,10 +74,10 @@ struct sedf_vcpu_info
   23.62      s_time_t  latency;
   23.63   
   23.64      /*status of domain*/
   23.65 -    int   status;
   23.66 +    int       status;
   23.67      /*weights for "Scheduling for beginners/ lazy/ etc." ;)*/
   23.68 -    short   weight;
   23.69 -    short                   extraweight;
   23.70 +    short     weight;
   23.71 +    short     extraweight;
   23.72      /*Bookkeeping*/
   23.73      s_time_t  deadl_abs;
   23.74      s_time_t  sched_start_abs;
   23.75 @@ -123,28 +112,29 @@ struct sedf_cpu_info {
   23.76      s_time_t         current_slice_expires;
   23.77  };
   23.78  
   23.79 -#define EDOM_INFO(d)  ((struct sedf_vcpu_info *)((d)->sched_priv))
   23.80 -#define CPU_INFO(cpu) ((struct sedf_cpu_info *)schedule_data[cpu].sched_priv)
   23.81 -#define LIST(d)   (&EDOM_INFO(d)->list)
   23.82 -#define EXTRALIST(d,i)  (&(EDOM_INFO(d)->extralist[i]))
   23.83 -#define RUNQ(cpu)     (&CPU_INFO(cpu)->runnableq)
   23.84 +#define EDOM_INFO(d)   ((struct sedf_vcpu_info *)((d)->sched_priv))
   23.85 +#define CPU_INFO(cpu)  ((struct sedf_cpu_info *)schedule_data[cpu].sched_priv)
   23.86 +#define LIST(d)        (&EDOM_INFO(d)->list)
   23.87 +#define EXTRALIST(d,i) (&(EDOM_INFO(d)->extralist[i]))
   23.88 +#define RUNQ(cpu)      (&CPU_INFO(cpu)->runnableq)
   23.89  #define WAITQ(cpu)     (&CPU_INFO(cpu)->waitq)
   23.90 -#define EXTRAQ(cpu,i)    (&(CPU_INFO(cpu)->extraq[i]))
   23.91 +#define EXTRAQ(cpu,i)  (&(CPU_INFO(cpu)->extraq[i]))
   23.92  #define IDLETASK(cpu)  ((struct vcpu *)schedule_data[cpu].idle)
   23.93  
   23.94  #define PERIOD_BEGIN(inf) ((inf)->deadl_abs - (inf)->period)
   23.95  
   23.96 -#define MIN(x,y) (((x)<(y))?(x):(y))
   23.97 +#define MIN(x,y)    (((x)<(y))?(x):(y))
   23.98  #define DIV_UP(x,y) (((x) + (y) - 1) / y)
   23.99  
  23.100 -#define extra_runs(inf) ((inf->status) & 6)
  23.101 +#define extra_runs(inf)      ((inf->status) & 6)
  23.102  #define extra_get_cur_q(inf) (((inf->status & 6) >> 1)-1)
  23.103 -#define sedf_runnable(edom) (!(EDOM_INFO(edom)->status & SEDF_ASLEEP))
  23.104 +#define sedf_runnable(edom)  (!(EDOM_INFO(edom)->status & SEDF_ASLEEP))
  23.105  
  23.106  
  23.107  static void sedf_dump_cpu_state(int i);
  23.108  
  23.109 -static inline int extraq_on(struct vcpu *d, int i) {
  23.110 +static inline int extraq_on(struct vcpu *d, int i)
  23.111 +{
  23.112      return ((EXTRALIST(d,i)->next != NULL) &&
  23.113              (EXTRALIST(d,i)->next != EXTRALIST(d,i)));
  23.114  }
  23.115 @@ -165,8 +155,8 @@ static inline void extraq_del(struct vcp
  23.116  {
  23.117      struct list_head *list = EXTRALIST(d,i);
  23.118      ASSERT(extraq_on(d,i));
  23.119 -    PRINT(3, "Removing domain %i.%i from L%i extraq\n", d->domain->domain_id,
  23.120 -          d->vcpu_id, i); 
  23.121 +    PRINT(3, "Removing domain %i.%i from L%i extraq\n",
  23.122 +          d->domain->domain_id, d->vcpu_id, i); 
  23.123      list_del(list);
  23.124      list->next = NULL;
  23.125      ASSERT(!extraq_on(d, i));
  23.126 @@ -178,94 +168,96 @@ static inline void extraq_del(struct vcp
  23.127     each entry, in order to avoid overflow. The algorithm works by simply
  23.128     charging each domain that recieved extratime with an inverse of its weight.
  23.129   */ 
  23.130 -static inline void extraq_add_sort_update(struct vcpu *d, int i, int sub) {
  23.131 +static inline void extraq_add_sort_update(struct vcpu *d, int i, int sub)
  23.132 +{
  23.133      struct list_head      *cur;
  23.134      struct sedf_vcpu_info *curinf;
  23.135   
  23.136      ASSERT(!extraq_on(d,i));
  23.137 +
  23.138      PRINT(3, "Adding domain %i.%i (score= %i, short_pen= %"PRIi64")"
  23.139            " to L%i extraq\n",
  23.140            d->domain->domain_id, d->vcpu_id, EDOM_INFO(d)->score[i],
  23.141            EDOM_INFO(d)->short_block_lost_tot, i); 
  23.142 -    /*iterate through all elements to find our "hole" and on our way
  23.143 -      update all the other scores*/
  23.144 -    list_for_each(cur,EXTRAQ(d->processor,i)){
  23.145 +
  23.146 +    /*
  23.147 +     * Iterate through all elements to find our "hole" and on our way
  23.148 +     * update all the other scores.
  23.149 +     */
  23.150 +    list_for_each ( cur, EXTRAQ(d->processor, i) )
  23.151 +    {
  23.152          curinf = list_entry(cur,struct sedf_vcpu_info,extralist[i]);
  23.153          curinf->score[i] -= sub;
  23.154 -        if (EDOM_INFO(d)->score[i] < curinf->score[i])
  23.155 +        if ( EDOM_INFO(d)->score[i] < curinf->score[i] )
  23.156              break;
  23.157 -        else
  23.158 -            PRINT(4,"\tbehind domain %i.%i (score= %i)\n",
  23.159 -                  curinf->vcpu->domain->domain_id,
  23.160 -                  curinf->vcpu->vcpu_id, curinf->score[i]);
  23.161 +        PRINT(4,"\tbehind domain %i.%i (score= %i)\n",
  23.162 +              curinf->vcpu->domain->domain_id,
  23.163 +              curinf->vcpu->vcpu_id, curinf->score[i]);
  23.164      }
  23.165 -    /*cur now contains the element, before which we'll enqueue*/
  23.166 +
  23.167 +    /* cur now contains the element, before which we'll enqueue. */
  23.168      PRINT(3, "\tlist_add to %p\n", cur->prev);
  23.169      list_add(EXTRALIST(d,i),cur->prev);
  23.170   
  23.171 -    /*continue updating the extraq*/
  23.172 -    if ((cur != EXTRAQ(d->processor,i)) && sub)
  23.173 -        for (cur = cur->next; cur != EXTRAQ(d->processor,i);
  23.174 -             cur = cur-> next) {
  23.175 -            curinf = list_entry(cur,struct sedf_vcpu_info,
  23.176 -                                extralist[i]);
  23.177 +    /* Continue updating the extraq. */
  23.178 +    if ( (cur != EXTRAQ(d->processor,i)) && sub )
  23.179 +    {
  23.180 +        for ( cur = cur->next; cur != EXTRAQ(d->processor,i); cur = cur->next )
  23.181 +        {
  23.182 +            curinf = list_entry(cur,struct sedf_vcpu_info, extralist[i]);
  23.183              curinf->score[i] -= sub;
  23.184              PRINT(4, "\tupdating domain %i.%i (score= %u)\n",
  23.185                    curinf->vcpu->domain->domain_id, 
  23.186                    curinf->vcpu->vcpu_id, curinf->score[i]);
  23.187          }
  23.188 +    }
  23.189 +
  23.190      ASSERT(extraq_on(d,i));
  23.191  }
  23.192 -static inline void extraq_check(struct vcpu *d) {
  23.193 -    if (extraq_on(d, EXTRA_UTIL_Q)) {
  23.194 -        PRINT(2,"Dom %i.%i is on L1 extraQ\n",d->domain->domain_id, d->vcpu_id);
  23.195 -        if (!(EDOM_INFO(d)->status & EXTRA_AWARE) &&
  23.196 -            !extra_runs(EDOM_INFO(d))) {
  23.197 +static inline void extraq_check(struct vcpu *d)
  23.198 +{
  23.199 +    if ( extraq_on(d, EXTRA_UTIL_Q) )
  23.200 +    {
  23.201 +        PRINT(2,"Dom %i.%i is on L1 extraQ\n",
  23.202 +              d->domain->domain_id, d->vcpu_id);
  23.203 +
  23.204 +        if ( !(EDOM_INFO(d)->status & EXTRA_AWARE) &&
  23.205 +             !extra_runs(EDOM_INFO(d)) )
  23.206 +        {
  23.207              extraq_del(d, EXTRA_UTIL_Q);
  23.208              PRINT(2,"Removed dom %i.%i from L1 extraQ\n",
  23.209                    d->domain->domain_id, d->vcpu_id);
  23.210          }
  23.211 -    } else {
  23.212 -        PRINT(2,"Dom %i.%i is NOT on L1 extraQ\n",d->domain->domain_id,
  23.213 +    }
  23.214 +    else
  23.215 +    {
  23.216 +        PRINT(2, "Dom %i.%i is NOT on L1 extraQ\n",
  23.217 +              d->domain->domain_id,
  23.218                d->vcpu_id);
  23.219 -        if ((EDOM_INFO(d)->status & EXTRA_AWARE) && sedf_runnable(d))
  23.220 +
  23.221 +        if ( (EDOM_INFO(d)->status & EXTRA_AWARE) && sedf_runnable(d) )
  23.222          {
  23.223 -#if (EXTRA == EXTRA_ROUNDR)
  23.224 -            extraq_add_tail(d, EXTRA_UTIL_Q);
  23.225 -#elif (EXTRA == EXTRA_SLICE_WEIGHT || \
  23.226 -          EXTRA == EXTRA_BLOCK_WEIGHT)
  23.227              extraq_add_sort_update(d, EXTRA_UTIL_Q, 0);
  23.228 -#elif
  23.229 -            ;
  23.230 -#endif
  23.231 -            PRINT(2,"Added dom %i.%i to L1 extraQ\n",d->domain->domain_id,
  23.232 -                  d->vcpu_id);
  23.233 +            PRINT(2,"Added dom %i.%i to L1 extraQ\n",
  23.234 +                  d->domain->domain_id, d->vcpu_id);
  23.235          }
  23.236      }
  23.237  }
  23.238  
  23.239 -static inline void extraq_check_add_unblocked(struct vcpu *d, 
  23.240 -                                              int priority) {
  23.241 +static inline void extraq_check_add_unblocked(struct vcpu *d, int priority)
  23.242 +{
  23.243      struct sedf_vcpu_info *inf = EDOM_INFO(d);
  23.244 -    if (inf->status & EXTRA_AWARE) 
  23.245 -#if (EXTRA == EXTRA_ROUNDR)
  23.246 -        if (priority)
  23.247 -            extraq_add_head(d,EXTRA_UTIL_Q);
  23.248 -        else
  23.249 -            extraq_add_tail(d,EXTRA_UTIL_Q);
  23.250 -#elif (EXTRA == EXTRA_SLICE_WEIGHT \
  23.251 -     || EXTRA == EXTRA_BLOCK_WEIGHT)
  23.252 -    /*put in on the weighted extraq, 
  23.253 -    without updating any scores*/
  23.254 -    extraq_add_sort_update(d, EXTRA_UTIL_Q, 0);
  23.255 -#else
  23.256 -    ;
  23.257 -#endif
  23.258 +
  23.259 +    if ( inf->status & EXTRA_AWARE )
  23.260 +        /* Put on the weighted extraq without updating any scores. */
  23.261 +        extraq_add_sort_update(d, EXTRA_UTIL_Q, 0);
  23.262  }
  23.263  
  23.264 -static inline int __task_on_queue(struct vcpu *d) {
  23.265 +static inline int __task_on_queue(struct vcpu *d)
  23.266 +{
  23.267      return (((LIST(d))->next != NULL) && (LIST(d)->next != LIST(d)));
  23.268  }
  23.269 +
  23.270  static inline void __del_from_queue(struct vcpu *d)
  23.271  {
  23.272      struct list_head *list = LIST(d);
  23.273 @@ -279,42 +271,47 @@ static inline void __del_from_queue(stru
  23.274  
  23.275  typedef int(*list_comparer)(struct list_head* el1, struct list_head* el2);
  23.276  
  23.277 -static inline void list_insert_sort(struct list_head *list,
  23.278 -                                    struct list_head *element, list_comparer comp) {
  23.279 +static inline void list_insert_sort(
  23.280 +    struct list_head *list, struct list_head *element, list_comparer comp)
  23.281 +{
  23.282      struct list_head     *cur;
  23.283 -    /*iterate through all elements to find our "hole"*/
  23.284 -    list_for_each(cur,list){
  23.285 -        if (comp(element, cur) < 0)
  23.286 +
  23.287 +    /* Iterate through all elements to find our "hole". */
  23.288 +    list_for_each( cur, list )
  23.289 +        if ( comp(element, cur) < 0 )
  23.290              break;
  23.291 -    }
  23.292 -    /*cur now contains the element, before which we'll enqueue*/
  23.293 +
  23.294 +    /* cur now contains the element, before which we'll enqueue. */
  23.295      PRINT(3,"\tlist_add to %p\n",cur->prev);
  23.296      list_add(element, cur->prev);
  23.297 -}  
  23.298 +}
  23.299 +
  23.300  #define DOMAIN_COMPARER(name, field, comp1, comp2)          \
  23.301  int name##_comp(struct list_head* el1, struct list_head* el2) \
  23.302  {                                                           \
  23.303 - struct sedf_vcpu_info *d1, *d2;                     \
  23.304 - d1 = list_entry(el1,struct sedf_vcpu_info, field);  \
  23.305 - d2 = list_entry(el2,struct sedf_vcpu_info, field);  \
  23.306 - if ((comp1) == (comp2))                             \
  23.307 -  return 0;                                   \
  23.308 - if ((comp1) < (comp2))                              \
  23.309 -  return -1;                                  \
  23.310 - else                                                \
  23.311 -  return 1;                                   \
  23.312 +    struct sedf_vcpu_info *d1, *d2;                     \
  23.313 +    d1 = list_entry(el1,struct sedf_vcpu_info, field);  \
  23.314 +    d2 = list_entry(el2,struct sedf_vcpu_info, field);  \
  23.315 +    if ( (comp1) == (comp2) )                             \
  23.316 +        return 0;                                   \
  23.317 +    if ( (comp1) < (comp2) )                              \
  23.318 +        return -1;                                  \
  23.319 +    else                                                \
  23.320 +        return 1;                                   \
  23.321  }
  23.322 +
  23.323  /* adds a domain to the queue of processes which wait for the beginning of the
  23.324     next period; this list is therefore sortet by this time, which is simply
  23.325     absol. deadline - period
  23.326   */ 
  23.327 -DOMAIN_COMPARER(waitq, list, PERIOD_BEGIN(d1), PERIOD_BEGIN(d2))
  23.328 -    static inline void __add_to_waitqueue_sort(struct vcpu *d) {
  23.329 -    ASSERT(!__task_on_queue(d));
  23.330 +DOMAIN_COMPARER(waitq, list, PERIOD_BEGIN(d1), PERIOD_BEGIN(d2));
  23.331 +static inline void __add_to_waitqueue_sort(struct vcpu *v)
  23.332 +{
  23.333 +    ASSERT(!__task_on_queue(v));
  23.334      PRINT(3,"Adding domain %i.%i (bop= %"PRIu64") to waitq\n",
  23.335 -          d->domain->domain_id, d->vcpu_id, PERIOD_BEGIN(EDOM_INFO(d)));
  23.336 -    list_insert_sort(WAITQ(d->processor), LIST(d), waitq_comp);
  23.337 -    ASSERT(__task_on_queue(d));
  23.338 +          v->domain->domain_id, v->vcpu_id, PERIOD_BEGIN(EDOM_INFO(v)));
  23.339 +    list_insert_sort(WAITQ(v->processor), LIST(v), waitq_comp);
  23.340 +    ASSERT(__task_on_queue(v));
  23.341  }
  23.342  
  23.343  /* adds a domain to the queue of processes which have started their current
  23.344 @@ -322,60 +319,62 @@ DOMAIN_COMPARER(waitq, list, PERIOD_BEGI
  23.345     on this list is running on the processor, if the list is empty the idle
  23.346     task will run. As we are implementing EDF, this list is sorted by deadlines.
  23.347   */ 
  23.348 -DOMAIN_COMPARER(runq, list, d1->deadl_abs, d2->deadl_abs)
  23.349 -    static inline void __add_to_runqueue_sort(struct vcpu *d) {
  23.350 +DOMAIN_COMPARER(runq, list, d1->deadl_abs, d2->deadl_abs);
  23.351 +static inline void __add_to_runqueue_sort(struct vcpu *v)
  23.352 +{
  23.353      PRINT(3,"Adding domain %i.%i (deadl= %"PRIu64") to runq\n",
  23.354 -          d->domain->domain_id, d->vcpu_id, EDOM_INFO(d)->deadl_abs);
  23.355 -    list_insert_sort(RUNQ(d->processor), LIST(d), runq_comp);
  23.356 +          v->domain->domain_id, v->vcpu_id, EDOM_INFO(v)->deadl_abs);
  23.357 +    list_insert_sort(RUNQ(v->processor), LIST(v), runq_comp);
  23.358  }
  23.359  
  23.360  
  23.361  /* Allocates memory for per domain private scheduling data*/
  23.362 -static int sedf_alloc_task(struct vcpu *d)
  23.363 +static int sedf_alloc_task(struct vcpu *v)
  23.364  {
  23.365      PRINT(2, "sedf_alloc_task was called, domain-id %i.%i\n",
  23.366 -          d->domain->domain_id, d->vcpu_id);
  23.367 +          v->domain->domain_id, v->vcpu_id);
  23.368  
  23.369 -    if ( d->domain->sched_priv == NULL )
  23.370 +    if ( v->domain->sched_priv == NULL )
  23.371      {
  23.372 -        d->domain->sched_priv = xmalloc(struct sedf_dom_info);
  23.373 -        if ( d->domain->sched_priv == NULL )
  23.374 +        v->domain->sched_priv = xmalloc(struct sedf_dom_info);
  23.375 +        if ( v->domain->sched_priv == NULL )
  23.376              return -1;
  23.377 -        memset(d->domain->sched_priv, 0, sizeof(struct sedf_dom_info));
  23.378 +        memset(v->domain->sched_priv, 0, sizeof(struct sedf_dom_info));
  23.379      }
  23.380  
  23.381 -    if ( (d->sched_priv = xmalloc(struct sedf_vcpu_info)) == NULL )
  23.382 +    if ( (v->sched_priv = xmalloc(struct sedf_vcpu_info)) == NULL )
  23.383          return -1;
  23.384  
  23.385 -    memset(d->sched_priv, 0, sizeof(struct sedf_vcpu_info));
  23.386 +    memset(v->sched_priv, 0, sizeof(struct sedf_vcpu_info));
  23.387  
  23.388      return 0;
  23.389  }
  23.390  
  23.391  
  23.392  /* Setup the sedf_dom_info */
  23.393 -static void sedf_add_task(struct vcpu *d)
  23.394 +static void sedf_add_task(struct vcpu *v)
  23.395  {
  23.396 -    struct sedf_vcpu_info *inf = EDOM_INFO(d);
  23.397 -    inf->vcpu = d;
  23.398 +    struct sedf_vcpu_info *inf = EDOM_INFO(v);
  23.399 +
  23.400 +    inf->vcpu = v;
  23.401   
  23.402 -    PRINT(2,"sedf_add_task was called, domain-id %i.%i\n",d->domain->domain_id,
  23.403 -          d->vcpu_id);
  23.404 +    PRINT(2,"sedf_add_task was called, domain-id %i.%i\n",
  23.405 +          v->domain->domain_id, v->vcpu_id);
  23.406  
  23.407      /* Allocate per-CPU context if this is the first domain to be added. */
  23.408 -    if ( unlikely(schedule_data[d->processor].sched_priv == NULL) )
  23.409 +    if ( unlikely(schedule_data[v->processor].sched_priv == NULL) )
  23.410      {
  23.411 -        schedule_data[d->processor].sched_priv = 
  23.412 +        schedule_data[v->processor].sched_priv = 
  23.413              xmalloc(struct sedf_cpu_info);
  23.414 -        BUG_ON(schedule_data[d->processor].sched_priv == NULL);
  23.415 -        memset(CPU_INFO(d->processor), 0, sizeof(*CPU_INFO(d->processor)));
  23.416 -        INIT_LIST_HEAD(WAITQ(d->processor));
  23.417 -        INIT_LIST_HEAD(RUNQ(d->processor));
  23.418 -        INIT_LIST_HEAD(EXTRAQ(d->processor,EXTRA_PEN_Q));
  23.419 -        INIT_LIST_HEAD(EXTRAQ(d->processor,EXTRA_UTIL_Q));
  23.420 +        BUG_ON(schedule_data[v->processor].sched_priv == NULL);
  23.421 +        memset(CPU_INFO(v->processor), 0, sizeof(*CPU_INFO(v->processor)));
  23.422 +        INIT_LIST_HEAD(WAITQ(v->processor));
  23.423 +        INIT_LIST_HEAD(RUNQ(v->processor));
  23.424 +        INIT_LIST_HEAD(EXTRAQ(v->processor,EXTRA_PEN_Q));
  23.425 +        INIT_LIST_HEAD(EXTRAQ(v->processor,EXTRA_UTIL_Q));
  23.426      }
  23.427         
  23.428 -    if ( d->domain->domain_id == 0 )
  23.429 +    if ( v->domain->domain_id == 0 )
  23.430      {
  23.431          /*set dom0 to something useful to boot the machine*/
  23.432          inf->period    = MILLISECS(20);
  23.433 @@ -400,14 +399,14 @@ static void sedf_add_task(struct vcpu *d
  23.434      INIT_LIST_HEAD(&(inf->extralist[EXTRA_PEN_Q]));
  23.435      INIT_LIST_HEAD(&(inf->extralist[EXTRA_UTIL_Q]));
  23.436   
  23.437 -    if ( !is_idle_vcpu(d) )
  23.438 +    if ( !is_idle_vcpu(v) )
  23.439      {
  23.440 -        extraq_check(d);
  23.441 +        extraq_check(v);
  23.442      }
  23.443      else
  23.444      {
  23.445 -        EDOM_INFO(d)->deadl_abs = 0;
  23.446 -        EDOM_INFO(d)->status &= ~SEDF_ASLEEP;
  23.447 +        EDOM_INFO(v)->deadl_abs = 0;
  23.448 +        EDOM_INFO(v)->status &= ~SEDF_ASLEEP;
  23.449      }
  23.450  }
  23.451  
  23.452 @@ -418,17 +417,11 @@ static void sedf_free_task(struct domain
  23.453  
  23.454      PRINT(2,"sedf_free_task was called, domain-id %i\n",d->domain_id);
  23.455  
  23.456 -    ASSERT(d->sched_priv != NULL);
  23.457      xfree(d->sched_priv);
  23.458   
  23.459      for ( i = 0; i < MAX_VIRT_CPUS; i++ )
  23.460 -    {
  23.461          if ( d->vcpu[i] )
  23.462 -        {
  23.463 -            ASSERT(d->vcpu[i]->sched_priv != NULL);
  23.464              xfree(d->vcpu[i]->sched_priv);
  23.465 -        }
  23.466 -    }
  23.467  }
  23.468  
  23.469  /*
  23.470 @@ -438,64 +431,60 @@ static void sedf_free_task(struct domain
  23.471  static void desched_edf_dom(s_time_t now, struct vcpu* d)
  23.472  {
  23.473      struct sedf_vcpu_info* inf = EDOM_INFO(d);
  23.474 -    /*current domain is running in real time mode*/
  23.475 - 
  23.476 +
  23.477 +    /* Current domain is running in real time mode. */
  23.478      ASSERT(__task_on_queue(d));
  23.479 -    /*update the domains cputime*/
  23.480 +
  23.481 +    /* Update the domain's cputime. */
  23.482      inf->cputime += now - inf->sched_start_abs;
  23.483  
  23.484 -    /*scheduling decisions, which don't remove the running domain
  23.485 -      from the runq*/
  23.486 +    /*
  23.487 +     * Scheduling decisions which don't remove the running domain from the
  23.488 +     * runq. 
  23.489 +     */
  23.490      if ( (inf->cputime < inf->slice) && sedf_runnable(d) )
  23.491          return;
  23.492    
  23.493      __del_from_queue(d);
  23.494    
  23.495 -    /*manage bookkeeping (i.e. calculate next deadline,
  23.496 -      memorize overun-time of slice) of finished domains*/
  23.497 +    /*
  23.498 +     * Manage bookkeeping (i.e. calculate next deadline, memorise
  23.499 +     * overrun-time of slice) of finished domains.
  23.500 +     */
  23.501      if ( inf->cputime >= inf->slice )
  23.502      {
  23.503          inf->cputime -= inf->slice;
  23.504    
  23.505          if ( inf->period < inf->period_orig )
  23.506          {
  23.507 -            /*this domain runs in latency scaling or burst mode*/
  23.508 -#if (UNBLOCK == UNBLOCK_BURST)
  23.509 -            /*if we are runnig in burst scaling wait for two periods
  23.510 -              before scaling periods up again*/ 
  23.511 -            if ( (now - inf->unblock_abs) >= (2 * inf->period) )
  23.512 -#endif
  23.513 +            /* This domain runs in latency scaling or burst mode. */
  23.514 +            inf->period *= 2;
  23.515 +            inf->slice  *= 2;
  23.516 +            if ( (inf->period > inf->period_orig) ||
  23.517 +                 (inf->slice > inf->slice_orig) )
  23.518              {
  23.519 -                inf->period *= 2; inf->slice *= 2;
  23.520 -                if ( (inf->period > inf->period_orig) ||
  23.521 -                     (inf->slice > inf->slice_orig) )
  23.522 -                {
  23.523 -                    /*reset slice & period*/
  23.524 -                    inf->period = inf->period_orig;
  23.525 -                    inf->slice = inf->slice_orig;
  23.526 -                }
  23.527 +                /* Reset slice and period. */
  23.528 +                inf->period = inf->period_orig;
  23.529 +                inf->slice = inf->slice_orig;
  23.530              }
  23.531          }
  23.532 -        /*set next deadline*/
  23.533 +
  23.534 +        /* Set next deadline. */
  23.535          inf->deadl_abs += inf->period;
  23.536      }
  23.537   
  23.538 -    /*add a runnable domain to the waitqueue*/
  23.539 +    /* Add a runnable domain to the waitqueue. */
  23.540      if ( sedf_runnable(d) )
  23.541      {
  23.542          __add_to_waitqueue_sort(d);
  23.543      }
  23.544      else
  23.545      {
  23.546 -        /*we have a blocked realtime task -> remove it from exqs too*/
  23.547 -#if (EXTRA > EXTRA_OFF)
  23.548 -#if (EXTRA == EXTRA_BLOCK_WEIGHT)
  23.549 +        /* We have a blocked realtime task -> remove it from exqs too. */
  23.550          if ( extraq_on(d, EXTRA_PEN_Q) )
  23.551              extraq_del(d, EXTRA_PEN_Q);
  23.552 -#endif
  23.553          if ( extraq_on(d, EXTRA_UTIL_Q) )
  23.554              extraq_del(d, EXTRA_UTIL_Q);
  23.555 -#endif
  23.556      }
  23.557  
  23.558      ASSERT(EQ(sedf_runnable(d), __task_on_queue(d)));
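The scaling-back-up step above doubles a temporarily shortened period and slice once per completed slice until they overshoot the configured values, at which point both snap back to the originals. A small sketch of that trajectory (the nanosecond figures are illustrative; in the scheduler each doubling happens on a separate pass through desched_edf_dom, the loop here only replays successive periods):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t period_orig = 20000000, slice_orig = 5000000;  /* 20ms / 5ms */
        uint64_t period      =  3000000, slice      =  750000;  /* shortened  */

        while (period < period_orig) {
            period *= 2;
            slice  *= 2;
            if (period > period_orig || slice > slice_orig) {
                /* Overshot the configured values: reset slice and period. */
                period = period_orig;
                slice  = slice_orig;
            }
            printf("period=%8llu slice=%8llu\n",
                   (unsigned long long)period, (unsigned long long)slice);
        }
        return 0;
    }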
  23.559 @@ -513,58 +502,57 @@ static void update_queues(
  23.560   
  23.561      PRINT(3,"Updating waitq..\n");
  23.562  
  23.563 -    /*check for the first elements of the waitqueue, whether their
  23.564 -      next period has already started*/
  23.565 -    list_for_each_safe(cur, tmp, waitq) {
  23.566 +    /*
  23.567 +     * Check for the first elements of the waitqueue, whether their
  23.568 +     * next period has already started.
  23.569 +     */
  23.570 +    list_for_each_safe ( cur, tmp, waitq )
  23.571 +    {
  23.572          curinf = list_entry(cur, struct sedf_vcpu_info, list);
  23.573          PRINT(4,"\tLooking @ dom %i.%i\n",
  23.574                curinf->vcpu->domain->domain_id, curinf->vcpu->vcpu_id);
  23.575 -        if ( PERIOD_BEGIN(curinf) <= now )
  23.576 -        {
  23.577 -            __del_from_queue(curinf->vcpu);
  23.578 -            __add_to_runqueue_sort(curinf->vcpu);
  23.579 -        }
  23.580 -        else
  23.581 +        if ( PERIOD_BEGIN(curinf) > now )
  23.582              break;
  23.583 +        __del_from_queue(curinf->vcpu);
  23.584 +        __add_to_runqueue_sort(curinf->vcpu);
  23.585      }
  23.586   
  23.587      PRINT(3,"Updating runq..\n");
  23.588  
  23.589 -    /*process the runq, find domains that are on
  23.590 -      the runqueue which shouldn't be there*/
  23.591 -    list_for_each_safe(cur, tmp, runq) {
  23.592 +    /* Process the runq, finding domains that are on it but shouldn't be. */
  23.593 +    list_for_each_safe ( cur, tmp, runq )
  23.594 +    {
  23.595          curinf = list_entry(cur,struct sedf_vcpu_info,list);
  23.596          PRINT(4,"\tLooking @ dom %i.%i\n",
  23.597                curinf->vcpu->domain->domain_id, curinf->vcpu->vcpu_id);
  23.598  
  23.599          if ( unlikely(curinf->slice == 0) )
  23.600          {
  23.601 -            /*ignore domains with empty slice*/
  23.602 +            /* Ignore domains with empty slice. */
  23.603              PRINT(4,"\tUpdating zero-slice domain %i.%i\n",
  23.604                    curinf->vcpu->domain->domain_id,
  23.605                    curinf->vcpu->vcpu_id);
  23.606              __del_from_queue(curinf->vcpu);
  23.607  
  23.608 -            /*move them to their next period*/
  23.609 +            /* Move them to their next period. */
  23.610              curinf->deadl_abs += curinf->period;
  23.611 -            /*ensure that the start of the next period is in the future*/
  23.612 +
  23.613 +            /* Ensure that the start of the next period is in the future. */
  23.614              if ( unlikely(PERIOD_BEGIN(curinf) < now) )
  23.615 -            {
  23.616                  curinf->deadl_abs += 
  23.617                      (DIV_UP(now - PERIOD_BEGIN(curinf),
  23.618 -                           curinf->period)) * curinf->period;
  23.619 -            }
  23.620 -            /*and put them back into the queue*/
  23.621 +                            curinf->period)) * curinf->period;
  23.622 +
  23.623 +            /* Put them back into the queue. */
  23.624              __add_to_waitqueue_sort(curinf->vcpu);
  23.625 -            continue;
  23.626          }
  23.627 -
  23.628 -        if ( unlikely((curinf->deadl_abs < now) ||
  23.629 -                      (curinf->cputime > curinf->slice)) )
  23.630 +        else if ( unlikely((curinf->deadl_abs < now) ||
  23.631 +                           (curinf->cputime > curinf->slice)) )
  23.632          {
  23.633 -            /*we missed the deadline or the slice was
  23.634 -              already finished... might hapen because
  23.635 -              of dom_adj.*/
  23.636 +            /*
  23.637 +             * We missed the deadline or the slice was already finished.
  23.638 +             * Might happen because of dom_adj.
  23.639 +             */
  23.640              PRINT(4,"\tDomain %i.%i exceeded its deadline/"
  23.641                    "slice (%"PRIu64" / %"PRIu64") now: %"PRIu64
  23.642                    " cputime: %"PRIu64"\n",
  23.643 @@ -573,20 +561,23 @@ static void update_queues(
  23.644                    curinf->deadl_abs, curinf->slice, now,
  23.645                    curinf->cputime);
  23.646              __del_from_queue(curinf->vcpu);
  23.647 -            /*common case: we miss one period!*/
  23.648 +
  23.649 +            /* Common case: we miss one period. */
  23.650              curinf->deadl_abs += curinf->period;
  23.651     
  23.652 -            /*if we are still behind: modulo arithmetic,
  23.653 -              force deadline to be in future and
  23.654 -              aligned to period borders!*/
  23.655 -            if (unlikely(curinf->deadl_abs < now))
  23.656 +            /*
  23.657 +             * If we are still behind: modulo arithmetic, force deadline
  23.658 +             * to be in future and aligned to period borders.
  23.659 +             */
  23.660 +            if ( unlikely(curinf->deadl_abs < now) )
  23.661                  curinf->deadl_abs += 
  23.662                      DIV_UP(now - curinf->deadl_abs,
  23.663                             curinf->period) * curinf->period;
  23.664              ASSERT(curinf->deadl_abs >= now);
  23.665 -            /*give a fresh slice*/
  23.666 +
  23.667 +            /* Give a fresh slice. */
  23.668              curinf->cputime = 0;
  23.669 -            if (PERIOD_BEGIN(curinf) > now)
  23.670 +            if ( PERIOD_BEGIN(curinf) > now )
  23.671                  __add_to_waitqueue_sort(curinf->vcpu);
  23.672              else
  23.673                  __add_to_runqueue_sort(curinf->vcpu);
  23.674 @@ -594,43 +585,36 @@ static void update_queues(
  23.675          else
  23.676              break;
  23.677      }
  23.678 +
  23.679      PRINT(3,"done updating the queues\n");
  23.680  }
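Both realignment steps above rely on the same arithmetic: push a stale absolute deadline forward by a whole number of periods so that the result lies in the future and stays on the domain's period grid. A worked example, assuming DIV_UP is the usual round-up integer division (its definition is not part of this hunk):

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_UP(x, y) (((x) + (y) - 1) / (y))

    int main(void)
    {
        uint64_t period    = 20;    /* arbitrary time units           */
        uint64_t deadl_abs = 100;   /* stale: already in the past     */
        uint64_t now       = 177;

        if (deadl_abs < now)
            deadl_abs += DIV_UP(now - deadl_abs, period) * period;

        /* 177 - 100 = 77; DIV_UP(77, 20) = 4; 100 + 4*20 = 180 >= now. */
        printf("new deadline: %llu\n", (unsigned long long)deadl_abs);
        return 0;
    }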
  23.681  
  23.682  
  23.683 -#if (EXTRA > EXTRA_OFF)
  23.684  /* removes a domain from the head of the corresponding extraQ and
  23.685     requeues it at a specified position:
  23.686       round-robin extratime: end of extraQ
  23.687       weighted ext.: insert in sorted list by score
  23.688     if the domain is blocked / has regained its short-block-loss
  23.689     time, it is not put on any queue */
  23.690 -static void desched_extra_dom(s_time_t now, struct vcpu* d)
  23.691 +static void desched_extra_dom(s_time_t now, struct vcpu *d)
  23.692  {
  23.693      struct sedf_vcpu_info *inf = EDOM_INFO(d);
  23.694      int i = extra_get_cur_q(inf);
  23.695 - 
  23.696 -#if (EXTRA == EXTRA_SLICE_WEIGHT || EXTRA == EXTRA_BLOCK_WEIGHT)
  23.697 -    unsigned long         oldscore;
  23.698 -#endif
  23.699 +    unsigned long oldscore;
  23.700 +
  23.701      ASSERT(extraq_on(d, i));
  23.702 -    /*unset all running flags*/
  23.703 +
  23.704 +    /* Unset all running flags. */
  23.705      inf->status  &= ~(EXTRA_RUN_PEN | EXTRA_RUN_UTIL);
  23.706 -    /*fresh slice for the next run*/
  23.707 +    /* Fresh slice for the next run. */
  23.708      inf->cputime = 0;
  23.709 -    /*accumulate total extratime*/
  23.710 +    /* Accumulate total extratime. */
  23.711      inf->extra_time_tot += now - inf->sched_start_abs;
  23.712 -    /*remove extradomain from head of the queue*/
  23.713 +    /* Remove extradomain from head of the queue. */
  23.714      extraq_del(d, i);
  23.715  
  23.716 -#if (EXTRA == EXTRA_ROUNDR)
  23.717 -    if ( sedf_runnable(d) && (inf->status & EXTRA_AWARE) )
  23.718 -        /*add to the tail if it is runnable => round-robin*/
  23.719 -        extraq_add_tail(d, EXTRA_UTIL_Q);
  23.720 -#elif (EXTRA == EXTRA_SLICE_WEIGHT || EXTRA == EXTRA_BLOCK_WEIGHT)
  23.721 -    /*update the score*/
  23.722 +    /* Update the score. */
  23.723      oldscore = inf->score[i];
  23.724 -#if (EXTRA == EXTRA_BLOCK_WEIGHT)
  23.725      if ( i == EXTRA_PEN_Q )
  23.726      {
  23.727          /*domain was running in L0 extraq*/
  23.728 @@ -640,7 +624,8 @@ static void desched_extra_dom(s_time_t n
  23.729          PRINT(3,"Domain %i.%i: Short_block_loss: %"PRIi64"\n", 
  23.730                inf->vcpu->domain->domain_id, inf->vcpu->vcpu_id,
  23.731                inf->short_block_lost_tot);
  23.732 -        if (inf->short_block_lost_tot <= 0) {
  23.733 +        if ( inf->short_block_lost_tot <= 0 )
  23.734 +        {
  23.735              PRINT(4,"Domain %i.%i compensated short block loss!\n",
  23.736                    inf->vcpu->domain->domain_id, inf->vcpu->vcpu_id);
  23.737              /*we have (over-)compensated our block penalty*/
  23.738 @@ -649,6 +634,7 @@ static void desched_extra_dom(s_time_t n
  23.739              inf->status &= ~EXTRA_WANT_PEN_Q;
  23.740              goto check_extra_queues;
  23.741          }
  23.742 +
  23.743          /*we have to go again for another try in the block-extraq,
  23.744            the score is not used incrementally here, as this is
  23.745            already done by recalculating the block_lost*/
  23.746 @@ -657,7 +643,6 @@ static void desched_extra_dom(s_time_t n
  23.747          oldscore = 0;
  23.748      }
  23.749      else
  23.750 -#endif
  23.751      {
  23.752          /*domain was running in L1 extraq => score is inverse of
  23.753            utilization and is used somewhat incrementally!*/
  23.754 @@ -684,7 +669,6 @@ static void desched_extra_dom(s_time_t n
  23.755      {
  23.756          /*remove this blocked domain from the waitq!*/
  23.757          __del_from_queue(d);
  23.758 -#if (EXTRA == EXTRA_BLOCK_WEIGHT)
  23.759          /*make sure that we remove a blocked domain from the other
  23.760            extraq too*/
  23.761          if ( i == EXTRA_PEN_Q )
  23.762 @@ -697,14 +681,12 @@ static void desched_extra_dom(s_time_t n
  23.763              if ( extraq_on(d, EXTRA_PEN_Q) )
  23.764                  extraq_del(d, EXTRA_PEN_Q);
  23.765          }
  23.766 -#endif
  23.767      }
  23.768 -#endif
  23.769 +
  23.770      ASSERT(EQ(sedf_runnable(d), __task_on_queue(d)));
  23.771      ASSERT(IMPLY(extraq_on(d, EXTRA_UTIL_Q) || extraq_on(d, EXTRA_PEN_Q), 
  23.772                   sedf_runnable(d)));
  23.773  }
  23.774 -#endif
  23.775  
  23.776  
  23.777  static struct task_slice sedf_do_extra_schedule(
  23.778 @@ -718,7 +700,6 @@ static struct task_slice sedf_do_extra_s
  23.779      if ( end_xt - now < EXTRA_QUANTUM )
  23.780          goto return_idle;
  23.781  
  23.782 -#if (EXTRA == EXTRA_BLOCK_WEIGHT)
  23.783      if ( !list_empty(extraq[EXTRA_PEN_Q]) )
  23.784      {
  23.785          /*we still have elements on the level 0 extraq 
  23.786 @@ -733,7 +714,6 @@ static struct task_slice sedf_do_extra_s
  23.787  #endif
  23.788      }
  23.789      else
  23.790 -#endif
  23.791      {
  23.792          if ( !list_empty(extraq[EXTRA_UTIL_Q]) )
  23.793          {
  23.794 @@ -772,11 +752,9 @@ static struct task_slice sedf_do_schedul
  23.795      int                   cpu      = smp_processor_id();
  23.796      struct list_head     *runq     = RUNQ(cpu);
  23.797      struct list_head     *waitq    = WAITQ(cpu);
  23.798 -#if (EXTRA > EXTRA_OFF)
  23.799      struct sedf_vcpu_info *inf     = EDOM_INFO(current);
  23.800      struct list_head      *extraq[] = {
  23.801          EXTRAQ(cpu, EXTRA_PEN_Q), EXTRAQ(cpu, EXTRA_UTIL_Q)};
  23.802 -#endif
  23.803      struct sedf_vcpu_info *runinf, *waitinf;
  23.804      struct task_slice      ret;
  23.805  
  23.806 @@ -793,14 +771,12 @@ static struct task_slice sedf_do_schedul
  23.807      if ( inf->status & SEDF_ASLEEP )
  23.808          inf->block_abs = now;
  23.809  
  23.810 -#if (EXTRA > EXTRA_OFF)
  23.811      if ( unlikely(extra_runs(inf)) )
  23.812      {
  23.813          /*special treatment of domains running in extra time*/
  23.814          desched_extra_dom(now, current);
  23.815      }
  23.816      else 
  23.817 -#endif
  23.818      {
  23.819          desched_edf_dom(now, current);
  23.820      }
  23.821 @@ -837,13 +813,8 @@ static struct task_slice sedf_do_schedul
  23.822          waitinf  = list_entry(waitq->next,struct sedf_vcpu_info, list);
  23.823          /*we could not find any suitable domain 
  23.824            => look for domains that are aware of extratime*/
  23.825 -#if (EXTRA > EXTRA_OFF)
  23.826          ret = sedf_do_extra_schedule(now, PERIOD_BEGIN(waitinf),
  23.827                                       extraq, cpu);
  23.828 -#else
  23.829 -        ret.task = IDLETASK(cpu);
  23.830 -        ret.time = PERIOD_BEGIN(waitinf) - now;
  23.831 -#endif
  23.832          CHECK(ret.time > 0);
  23.833      }
  23.834      else
  23.835 @@ -891,14 +862,10 @@ static void sedf_sleep(struct vcpu *d)
  23.836      {
  23.837          if ( __task_on_queue(d) )
  23.838              __del_from_queue(d);
  23.839 -#if (EXTRA > EXTRA_OFF)
  23.840          if ( extraq_on(d, EXTRA_UTIL_Q) ) 
  23.841              extraq_del(d, EXTRA_UTIL_Q);
  23.842 -#endif
  23.843 -#if (EXTRA == EXTRA_BLOCK_WEIGHT)
  23.844          if ( extraq_on(d, EXTRA_PEN_Q) )
  23.845              extraq_del(d, EXTRA_PEN_Q);
  23.846 -#endif
  23.847      }
  23.848  }
  23.849  
  23.850 @@ -939,7 +906,7 @@ static void sedf_sleep(struct vcpu *d)
  23.851   *     -addition: experiments have shown that this may have a HUGE impact on
  23.852   *      performance of other domains, because it can lead to excessive context
  23.853   *      switches
  23.854 - 
  23.855 + *
  23.856   *    Part2: Long Unblocking
  23.857   *    Part 2a
  23.858   *     -it is obvious that such accounting of block time, applied when
  23.859 @@ -974,32 +941,6 @@ static void sedf_sleep(struct vcpu *d)
  23.860   *     -either behaviour can lead to missed deadlines in other domains as
  23.861   *      opposed to approaches 1,2a,2b
  23.862   */
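Of the long-unblock policies discussed above, this changeset keeps only approach 2b and deletes the compile-time alternatives below. The removed isochronous variant keeps deadlines on the original period grid, while conservative 2b treats the wake-up as the start of a fresh period. A sketch of the two side by side (the isochronous body mirrors the removed unblock_long_vcons; the 2b body is not shown in this hunk, so it is reconstructed from the surrounding comments and should be read as an assumption):

    #include <stdint.h>

    #define DIV_UP(x, y) (((x) + (y) - 1) / (y))

    struct vinfo { uint64_t deadl_abs, period, cputime; };

    /* Removed variant: align the new deadline to the next future period. */
    static void unblock_long_isochronous(struct vinfo *inf, uint64_t now)
    {
        inf->deadl_abs += (DIV_UP(now - inf->deadl_abs, inf->period) + 1)
            * inf->period;
        inf->cputime = 0;
    }

    /* Conservative 2b (assumed): treat the wake-up as a fresh period start. */
    static void unblock_long_cons_b_sketch(struct vinfo *inf, uint64_t now)
    {
        inf->deadl_abs = now + inf->period;
        inf->cputime = 0;
    }

    int main(void)
    {
        struct vinfo a = { 100, 20, 7 }, b = { 100, 20, 7 };
        unblock_long_isochronous(&a, 177);    /* a.deadl_abs == 200 */
        unblock_long_cons_b_sketch(&b, 177);  /* b.deadl_abs == 197 */
        return 0;
    }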
  23.863 -#if (UNBLOCK <= UNBLOCK_SHORT_RESUME)
  23.864 -static void unblock_short_vcons(struct sedf_vcpu_info* inf, s_time_t now)
  23.865 -{
  23.866 -    inf->deadl_abs += inf->period;
  23.867 -    inf->cputime = 0;
  23.868 -}
  23.869 -#endif
  23.870 -
  23.871 -#if (UNBLOCK == UNBLOCK_SHORT_RESUME)
  23.872 -static void unblock_short_cons(struct sedf_vcpu_info* inf, s_time_t now)
  23.873 -{
  23.874 -    /*treat blocked time as consumed by the domain*/
  23.875 -    inf->cputime += now - inf->block_abs; 
  23.876 -    if ( (inf->cputime + EXTRA_QUANTUM) > inf->slice )
  23.877 -    {
  23.878 -        /*we don't have a reasonable amount of time in 
  23.879 -          our slice left :( => start in next period!*/
  23.880 -        unblock_short_vcons(inf, now);
  23.881 -    }
  23.882 -#ifdef SEDF_STATS
  23.883 -    else
  23.884 -        inf->short_cont++;
  23.885 -#endif
  23.886 -}
  23.887 -#endif
  23.888 -
  23.889  static void unblock_short_extra_support(
  23.890      struct sedf_vcpu_info* inf, s_time_t now)
  23.891  {
  23.892 @@ -1051,33 +992,6 @@ static void unblock_short_extra_support(
  23.893  }
  23.894  
  23.895  
  23.896 -#if (UNBLOCK == UNBLOCK_ISOCHRONOUS_EDF)
  23.897 -static void unblock_long_vcons(struct sedf_vcpu_info* inf, s_time_t now)
  23.898 -{
  23.899 -    /* align to next future period */
  23.900 -    inf->deadl_abs += (DIV_UP(now - inf->deadl_abs, inf->period) +1)
  23.901 -        * inf->period;
  23.902 -    inf->cputime = 0;
  23.903 -}
  23.904 -#endif
  23.905 -
  23.906 -
  23.907 -#if 0
  23.908 -static void unblock_long_cons_a (struct sedf_vcpu_info* inf, s_time_t now)
  23.909 -{
  23.910 -    /*treat the time the domain was blocked in the
  23.911 -     CURRENT period as consumed by the domain*/
  23.912 -    inf->cputime = (now - inf->deadl_abs) % inf->period; 
  23.913 -    if ( (inf->cputime + EXTRA_QUANTUM) > inf->slice )
  23.914 -    {
  23.915 -        /*we don't have a reasonable amount of time in our slice
  23.916 -          left :( => start in next period!*/
  23.917 -        unblock_long_vcons(inf, now);
  23.918 -    }
  23.919 -}
  23.920 -#endif
  23.921 -
  23.922 -
  23.923  static void unblock_long_cons_b(struct sedf_vcpu_info* inf,s_time_t now)
  23.924  {
  23.925      /*Conservative 2b*/
  23.926 @@ -1087,110 +1001,6 @@ static void unblock_long_cons_b(struct s
  23.927  }
  23.928  
  23.929  
  23.930 -#if (UNBLOCK == UNBLOCK_ATROPOS)
  23.931 -static void unblock_long_cons_c(struct sedf_vcpu_info* inf,s_time_t now)
  23.932 -{
  23.933 -    if ( likely(inf->latency) )
  23.934 -    {
  23.935 -        /*scale the slice and period accordingly to the latency hint*/
  23.936 -        /*reduce period temporarily to the latency hint*/
  23.937 -        inf->period = inf->latency;
  23.938 -        /*this results in max. 4s slice/period length*/
  23.939 -        ASSERT((inf->period < ULONG_MAX)
  23.940 -               && (inf->slice_orig < ULONG_MAX));
  23.941 -        /*scale slice accordingly, so that utilisation stays the same*/
  23.942 -        inf->slice = (inf->period * inf->slice_orig)
  23.943 -            / inf->period_orig;
  23.944 -        inf->deadl_abs = now + inf->period;
  23.945 -        inf->cputime = 0;
  23.946 -    } 
  23.947 -    else
  23.948 -    {
  23.949 -        /*we don't have a latency hint.. use some other technique*/
  23.950 -        unblock_long_cons_b(inf, now);
  23.951 -    }
  23.952 -}
  23.953 -#endif
  23.954 -
  23.955 -
  23.956 -#if (UNBLOCK == UNBLOCK_BURST)
  23.957 -/*a new idea of dealing with short blocks: burst period scaling*/
  23.958 -static void unblock_short_burst(struct sedf_vcpu_info* inf, s_time_t now)
  23.959 -{
  23.960 -    /*treat blocked time as consumed by the domain*/
  23.961 -    inf->cputime += now - inf->block_abs;
  23.962 - 
  23.963 -    if ( (inf->cputime + EXTRA_QUANTUM) <= inf->slice )
  23.964 -    {
  23.965 -        /*if we can still use some time in the current slice
  23.966 -          then use it!*/
  23.967 -#ifdef SEDF_STATS
  23.968 -        /*we let the domain run in the current period*/
  23.969 -        inf->short_cont++;
  23.970 -#endif
  23.971 -    }
  23.972 -    else
  23.973 -    {
  23.974 -        /*we don't have a reasonable amount of time in
  23.975 -          our slice left => switch to burst mode*/
  23.976 -        if ( likely(inf->unblock_abs) )
  23.977 -        {
  23.978 -            /*set the period-length to the current blocking
  23.979 -              interval, possible enhancements: average over last
  23.980 -              blocking intervals, user-specified minimum,...*/
  23.981 -            inf->period = now - inf->unblock_abs;
  23.982 -            /*check for overflow on multiplication*/
  23.983 -            ASSERT((inf->period < ULONG_MAX) 
  23.984 -                   && (inf->slice_orig < ULONG_MAX));
  23.985 -            /*scale slice accordingly, so that utilisation
  23.986 -              stays the same*/
  23.987 -            inf->slice = (inf->period * inf->slice_orig)
  23.988 -                / inf->period_orig;
  23.989 -            /*set new (shorter) deadline*/
  23.990 -            inf->deadl_abs += inf->period;
  23.991 -        }
  23.992 -        else
  23.993 -        {
  23.994 -            /*in case we haven't unblocked before
  23.995 -              start in next period!*/
  23.996 -            inf->cputime=0;
  23.997 -            inf->deadl_abs += inf->period;
  23.998 -        }
  23.999 -    }
 23.1000 -
 23.1001 -    inf->unblock_abs = now;
 23.1002 -}
 23.1003 -
 23.1004 -
 23.1005 -static void unblock_long_burst(struct sedf_vcpu_info* inf, s_time_t now)
 23.1006 -{
 23.1007 -    if ( unlikely(inf->latency && (inf->period > inf->latency)) )
 23.1008 -    {
 23.1009 -        /*scale the slice and period accordingly to the latency hint*/
 23.1010 -        inf->period = inf->latency;
 23.1011 -        /*check for overflows on multiplication*/
 23.1012 -        ASSERT((inf->period < ULONG_MAX)
 23.1013 -               && (inf->slice_orig < ULONG_MAX));
 23.1014 -        /*scale slice accordingly, so that utilisation stays the same*/
 23.1015 -        inf->slice = (inf->period * inf->slice_orig)
 23.1016 -            / inf->period_orig;
 23.1017 -        inf->deadl_abs = now + inf->period;
 23.1018 -        inf->cputime = 0;
 23.1019 -    }
 23.1020 -    else
 23.1021 -    {
 23.1022 -        /*we don't have a latency hint.. or we are currently in 
 23.1023 -          "burst mode": use some other technique
 23.1024 -          NB: this should be in fact the normal way of operation,
 23.1025 -          when we are in sync with the device!*/
 23.1026 -        unblock_long_cons_b(inf, now);
 23.1027 -    }
 23.1028 -
 23.1029 -    inf->unblock_abs = now;
 23.1030 -}
 23.1031 -#endif /* UNBLOCK == UNBLOCK_BURST */
 23.1032 -
 23.1033 -
 23.1034  #define DOMAIN_EDF   1
 23.1035  #define DOMAIN_EXTRA_PEN  2
 23.1036  #define DOMAIN_EXTRA_UTIL  3
 23.1037 @@ -1225,32 +1035,31 @@ static inline int should_switch(struct v
 23.1038      cur_inf   = EDOM_INFO(cur);
 23.1039      other_inf = EDOM_INFO(other);
 23.1040   
 23.1041 - /*check whether we need to make an earlier sched-decision*/
 23.1042 -    if (PERIOD_BEGIN(other_inf) < 
 23.1043 -        CPU_INFO(other->processor)->current_slice_expires)
 23.1044 +    /* Check whether we need to make an earlier scheduling decision. */
 23.1045 +    if ( PERIOD_BEGIN(other_inf) < 
 23.1046 +         CPU_INFO(other->processor)->current_slice_expires )
 23.1047          return 1;
 23.1048 -    /*no timing-based switches need to be taken into account here*/
 23.1049 -    switch (get_run_type(cur)) {
 23.1050 +
 23.1051 +    /* No timing-based switches need to be taken into account here. */
 23.1052 +    switch ( get_run_type(cur) )
 23.1053 +    {
 23.1054      case DOMAIN_EDF:
 23.1055 -        /* do not interrupt a running EDF domain */ 
 23.1056 +        /* Do not interrupt a running EDF domain. */
 23.1057          return 0;
 23.1058      case DOMAIN_EXTRA_PEN:
 23.1059 -        /*check whether we also want 
 23.1060 -          the L0 ex-q with lower score*/
 23.1061 -        if ((other_inf->status & EXTRA_WANT_PEN_Q)
 23.1062 -            &&  (other_inf->score[EXTRA_PEN_Q] < 
 23.1063 -                 cur_inf->score[EXTRA_PEN_Q]))
 23.1064 -            return 1;
 23.1065 -        else return 0;
 23.1066 +        /* Check whether we also want the L0 ex-q with lower score. */
 23.1067 +        return ((other_inf->status & EXTRA_WANT_PEN_Q) &&
 23.1068 +                (other_inf->score[EXTRA_PEN_Q] < 
 23.1069 +                 cur_inf->score[EXTRA_PEN_Q]));
 23.1070      case DOMAIN_EXTRA_UTIL:
 23.1071 -        /*check whether we want the L0 extraq, don't
 23.1072 -          switch if both domains want L1 extraq */
 23.1073 -        if (other_inf->status & EXTRA_WANT_PEN_Q)
 23.1074 -            return 1;
 23.1075 -        else return 0;
 23.1076 +        /* Check whether we want the L0 extraq. Don't
 23.1077 +         * switch if both domains want L1 extraq.
 23.1078 +         */
 23.1079 +        return !!(other_inf->status & EXTRA_WANT_PEN_Q);
 23.1080      case DOMAIN_IDLE:
 23.1081          return 1;
 23.1082      }
 23.1083 +
 23.1084      return 1;
 23.1085  }
 23.1086  
 23.1087 @@ -1295,7 +1104,6 @@ void sedf_wake(struct vcpu *d)
 23.1088      {
 23.1089          PRINT(4,"extratime unblock\n");
 23.1090          /* unblocking in extra-time! */
 23.1091 -#if (EXTRA == EXTRA_BLOCK_WEIGHT)
 23.1092          if ( inf->status & EXTRA_WANT_PEN_Q )
 23.1093          {
 23.1094              /*we have a domain that wants compensation
 23.1095 @@ -1304,7 +1112,6 @@ void sedf_wake(struct vcpu *d)
 23.1096                chance!*/
 23.1097              extraq_add_sort_update(d, EXTRA_PEN_Q, 0);
 23.1098          }
 23.1099 -#endif
 23.1100          extraq_check_add_unblocked(d, 0);
 23.1101      }  
 23.1102      else
 23.1103 @@ -1316,15 +1123,7 @@ void sedf_wake(struct vcpu *d)
 23.1104  #ifdef SEDF_STATS
 23.1105              inf->short_block_tot++;
 23.1106  #endif
 23.1107 -#if (UNBLOCK <= UNBLOCK_ATROPOS)
 23.1108 -            unblock_short_vcons(inf, now);
 23.1109 -#elif (UNBLOCK == UNBLOCK_SHORT_RESUME)
 23.1110 -            unblock_short_cons(inf, now);
 23.1111 -#elif (UNBLOCK == UNBLOCK_BURST)
 23.1112 -            unblock_short_burst(inf, now);
 23.1113 -#elif (UNBLOCK == UNBLOCK_EXTRA_SUPPORT)
 23.1114              unblock_short_extra_support(inf, now);
 23.1115 -#endif
 23.1116  
 23.1117              extraq_check_add_unblocked(d, 1);
 23.1118          }
 23.1119 @@ -1335,18 +1134,7 @@ void sedf_wake(struct vcpu *d)
 23.1120  #ifdef SEDF_STATS
 23.1121              inf->long_block_tot++;
 23.1122  #endif
 23.1123 -#if (UNBLOCK == UNBLOCK_ISOCHRONOUS_EDF)
 23.1124 -            unblock_long_vcons(inf, now);
 23.1125 -#elif (UNBLOCK == UNBLOCK_EDF \
 23.1126 -       || UNBLOCK == UNBLOCK_EXTRA_SUPPORT)
 23.1127              unblock_long_cons_b(inf, now);
 23.1128 -#elif (UNBLOCK == UNBLOCK_ATROPOS)
 23.1129 -            unblock_long_cons_c(inf, now);
 23.1130 -#elif (UNBLOCK == UNBLOCK_SHORT_RESUME)
 23.1131 -            unblock_long_cons_b(inf, now);
 23.1132 -#elif (UNBLOCK == UNBLOCK_BURST)
 23.1133 -            unblock_long_burst(inf, now);
 23.1134 -#endif
 23.1135  
 23.1136              extraq_check_add_unblocked(d, 1);
 23.1137          }
 23.1138 @@ -1528,7 +1316,7 @@ static int sedf_adjust_weights(struct sc
 23.1139          sumt[cpu] = 0;
 23.1140      }
 23.1141  
 23.1142 -    /* sum up all weights */
 23.1143 +    /* Sum across all weights. */
 23.1144      for_each_domain( d )
 23.1145      {
 23.1146          for_each_vcpu( d, p )
 23.1147 @@ -1553,7 +1341,7 @@ static int sedf_adjust_weights(struct sc
 23.1148          }
 23.1149      }
 23.1150  
 23.1151 -    /* adjust all slices (and periods) to the new weight */
 23.1152 +    /* Adjust all slices (and periods) to the new weight. */
 23.1153      for_each_domain( d )
 23.1154      {
 23.1155          for_each_vcpu ( d, p )
 23.1156 @@ -1580,35 +1368,42 @@ static int sedf_adjdom(struct domain *p,
 23.1157  {
 23.1158      struct vcpu *v;
 23.1159  
 23.1160 -    PRINT(2,"sedf_adjdom was called, domain-id %i new period %"PRIu64" "\
 23.1161 +    PRINT(2,"sedf_adjdom was called, domain-id %i new period %"PRIu64" "
 23.1162            "new slice %"PRIu64"\nlatency %"PRIu64" extra:%s\n",
 23.1163            p->domain_id, cmd->u.sedf.period, cmd->u.sedf.slice,
 23.1164            cmd->u.sedf.latency, (cmd->u.sedf.extratime)?"yes":"no");
 23.1165  
 23.1166      if ( cmd->direction == SCHED_INFO_PUT )
 23.1167      {
 23.1168 -        /*check for sane parameters*/
 23.1169 -        if (!cmd->u.sedf.period && !cmd->u.sedf.weight)
 23.1170 +        /* Check for sane parameters. */
 23.1171 +        if ( !cmd->u.sedf.period && !cmd->u.sedf.weight )
 23.1172              return -EINVAL;
 23.1173 -        if (cmd->u.sedf.weight) {
 23.1174 -            if ((cmd->u.sedf.extratime & EXTRA_AWARE) &&
 23.1175 -                (! cmd->u.sedf.period)) {
 23.1176 -                /*weight driven domains with xtime ONLY!*/
 23.1177 -                for_each_vcpu(p, v) {
 23.1178 +        if ( cmd->u.sedf.weight )
 23.1179 +        {
 23.1180 +            if ( (cmd->u.sedf.extratime & EXTRA_AWARE) &&
 23.1181 +                 (!cmd->u.sedf.period) )
 23.1182 +            {
 23.1183 +                /* Weight-driven domains with extratime only. */
 23.1184 +                for_each_vcpu ( p, v )
 23.1185 +                {
 23.1186                      EDOM_INFO(v)->extraweight = cmd->u.sedf.weight;
 23.1187                      EDOM_INFO(v)->weight = 0;
 23.1188                      EDOM_INFO(v)->slice = 0;
 23.1189                      EDOM_INFO(v)->period = WEIGHT_PERIOD;
 23.1190                  }
 23.1191 -            } else {
 23.1192 -                /*weight driven domains with real-time execution*/
 23.1193 -                for_each_vcpu(p, v)
 23.1194 +            }
 23.1195 +            else
 23.1196 +            {
 23.1197 +                /* Weight-driven domains with real-time execution. */
 23.1198 +                for_each_vcpu ( p, v )
 23.1199                      EDOM_INFO(v)->weight = cmd->u.sedf.weight;
 23.1200              }
 23.1201          }
 23.1202 -        else {
 23.1203 -            /*time driven domains*/
 23.1204 -            for_each_vcpu(p, v) {
 23.1205 +        else
 23.1206 +        {
 23.1207 +            /* Time-driven domains. */
 23.1208 +            for_each_vcpu ( p, v )
 23.1209 +            {
 23.1210                  /*
 23.1211                   * Sanity checking: note that disabling extra weight requires
 23.1212                   * that we set a non-zero slice.
 23.1213 @@ -1626,10 +1421,12 @@ static int sedf_adjdom(struct domain *p,
 23.1214                      EDOM_INFO(v)->slice   = cmd->u.sedf.slice;
 23.1215              }
 23.1216          }
 23.1217 -        if (sedf_adjust_weights(cmd))
 23.1218 +
 23.1219 +        if ( sedf_adjust_weights(cmd) )
 23.1220              return -EINVAL;
 23.1221 -   
 23.1222 -        for_each_vcpu(p, v) {
 23.1223 +
 23.1224 +        for_each_vcpu ( p, v )
 23.1225 +        {
 23.1226              EDOM_INFO(v)->status  = 
 23.1227                  (EDOM_INFO(v)->status &
 23.1228                   ~EXTRA_AWARE) | (cmd->u.sedf.extratime & EXTRA_AWARE);
 23.1229 @@ -1641,11 +1438,11 @@ static int sedf_adjdom(struct domain *p,
 23.1230      {
 23.1231          cmd->u.sedf.period    = EDOM_INFO(p->vcpu[0])->period;
 23.1232          cmd->u.sedf.slice     = EDOM_INFO(p->vcpu[0])->slice;
 23.1233 -        cmd->u.sedf.extratime = EDOM_INFO(p->vcpu[0])->status
 23.1234 -            & EXTRA_AWARE;
 23.1235 +        cmd->u.sedf.extratime = EDOM_INFO(p->vcpu[0])->status & EXTRA_AWARE;
 23.1236          cmd->u.sedf.latency   = EDOM_INFO(p->vcpu[0])->latency;
 23.1237          cmd->u.sedf.weight    = EDOM_INFO(p->vcpu[0])->weight;
 23.1238      }
 23.1239 +
 23.1240      PRINT(2,"sedf_adjdom_finished\n");
 23.1241      return 0;
 23.1242  }
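The SCHED_INFO_PUT path above accepts three classes of parameters: weight-driven domains that only want extratime (weight set, extratime-aware, no period), weight-driven domains with real-time execution (weight set, period given), and purely time-driven domains (period and slice set, no weight). A minimal sketch of that classification, outside of any hypervisor context and with made-up names:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: mirrors the three accepted parameter classes. */
    enum sedf_class { SEDF_INVALID, SEDF_XTIME_ONLY, SEDF_WEIGHT_RT, SEDF_TIME_DRIVEN };

    struct sedf_params { uint64_t period, slice; int extratime_aware, weight; };

    static enum sedf_class classify(const struct sedf_params *p)
    {
        if (!p->period && !p->weight)
            return SEDF_INVALID;           /* rejected with -EINVAL            */
        if (p->weight) {
            if (p->extratime_aware && !p->period)
                return SEDF_XTIME_ONLY;    /* weight-driven, extratime only    */
            return SEDF_WEIGHT_RT;         /* weight-driven, real-time         */
        }
        return SEDF_TIME_DRIVEN;           /* explicit period/slice            */
    }

    int main(void)
    {
        struct sedf_params p = { .period = 0, .slice = 0,
                                 .extratime_aware = 1, .weight = 256 };
        printf("%d\n", classify(&p));      /* SEDF_XTIME_ONLY */
        return 0;
    }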
    24.1 --- a/xen/common/trace.c	Mon Apr 10 16:28:52 2006 +0100
    24.2 +++ b/xen/common/trace.c	Mon Apr 10 16:36:03 2006 +0100
    24.3 @@ -27,6 +27,8 @@
    24.4  #include <xen/smp.h>
    24.5  #include <xen/trace.h>
    24.6  #include <xen/errno.h>
    24.7 +#include <xen/event.h>
    24.8 +#include <xen/softirq.h>
    24.9  #include <xen/init.h>
   24.10  #include <asm/atomic.h>
   24.11  #include <public/dom0_ops.h>
   24.12 @@ -40,6 +42,11 @@ static struct t_buf *t_bufs[NR_CPUS];
   24.13  static struct t_rec *t_recs[NR_CPUS];
   24.14  static int nr_recs;
   24.15  
   24.16 +/* High water mark for trace buffers: a virtual interrupt is sent */
   24.17 +/* to the trace consumer when the buffer level reaches this point. */
   24.18 +static int t_buf_highwater;
   24.19 +
   24.20 +
   24.21  /* a flag recording whether initialization has been done */
   24.22  /* or more properly, if the tbuf subsystem is enabled right now */
   24.23  int tb_init_done;
   24.24 @@ -50,6 +57,12 @@ static unsigned long tb_cpu_mask = (~0UL
   24.25  /* which tracing events are enabled */
   24.26  static u32 tb_event_mask = TRC_ALL;
   24.27  
   24.28 +static void trace_notify_guest(void)
   24.29 +{
   24.30 +    send_guest_global_virq(dom0, VIRQ_TBUF);
   24.31 +}
   24.32 +
   24.33 +
   24.34  /**
   24.35   * alloc_trace_bufs - performs initialization of the per-cpu trace buffers.
   24.36   *
   24.37 @@ -93,6 +106,9 @@ static int alloc_trace_bufs(void)
   24.38          t_recs[i] = (struct t_rec *)(buf + 1);
   24.39      }
   24.40  
   24.41 +    t_buf_highwater = nr_recs >> 1; /* 50% high water */
   24.42 +    open_softirq(TRACE_SOFTIRQ, trace_notify_guest);
   24.43 +
   24.44      return 0;
   24.45  }
   24.46  
   24.47 @@ -272,6 +288,13 @@ void trace(u32 event, unsigned long d1, 
   24.48      buf->prod++;
   24.49  
   24.50      local_irq_restore(flags);
   24.51 +
   24.52 +    /*
   24.53 +     * Notify trace buffer consumer that we've reached the high water mark.
   24.54 +     *
   24.55 +     */
   24.56 +    if ( (buf->prod - buf->cons) == t_buf_highwater )
   24.57 +        raise_softirq(TRACE_SOFTIRQ);
   24.58  }
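Taken together with the earlier hunks, the mechanism is: alloc_trace_bufs() sets the high water mark to half the record count and registers trace_notify_guest() for TRACE_SOFTIRQ; the producer raises that softirq exactly when the fill level crosses the mark, and the handler forwards the event to dom0 as VIRQ_TBUF. A standalone sketch of the same producer-side pattern (generic ring buffer, names invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 64

    struct ring {
        uint32_t prod, cons;      /* free-running producer/consumer counters */
        int      rec[RING_SIZE];
    };

    static uint32_t highwater;    /* set once at init, e.g. 50% of capacity  */

    /* Stand-in for raise_softirq(TRACE_SOFTIRQ) -> VIRQ to the consumer. */
    static void notify_consumer(void)
    {
        printf("high water mark reached, notify consumer\n");
    }

    static void produce(struct ring *r, int rec)
    {
        r->rec[r->prod % RING_SIZE] = rec;
        r->prod++;

        /* Equality (not >=) fires the notification once per crossing. */
        if (r->prod - r->cons == highwater)
            notify_consumer();
    }

    int main(void)
    {
        struct ring r = { 0, 0, { 0 } };
        highwater = RING_SIZE / 2;
        for (int i = 0; i < 40; i++)
            produce(&r, i);       /* notifies exactly once, at 32 records */
        return 0;
    }

Testing for equality rather than >= means the softirq is raised once per crossing of the mark instead of on every subsequent record while the consumer lags.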
   24.59  
   24.60  /*
    25.1 --- a/xen/include/public/xen.h	Mon Apr 10 16:28:52 2006 +0100
    25.2 +++ b/xen/include/public/xen.h	Mon Apr 10 16:36:03 2006 +0100
    25.3 @@ -77,6 +77,7 @@
    25.4  #define VIRQ_DEBUG      1  /* V. Request guest to dump debug info.           */
    25.5  #define VIRQ_CONSOLE    2  /* G. (DOM0) Bytes received on emergency console. */
    25.6  #define VIRQ_DOM_EXC    3  /* G. (DOM0) Exceptional event for some domain.   */
    25.7 +#define VIRQ_TBUF       4  /* G. (DOM0) Trace buffer has records available.  */
    25.8  #define VIRQ_DEBUGGER   6  /* G. (DOM0) A domain has paused for debugging.   */
    25.9  #define VIRQ_XENOPROF   7  /* V. XenOprofile interrupt: new sample available */
   25.10  #define NR_VIRQS        8
    26.1 --- a/xen/include/xen/sched.h	Mon Apr 10 16:28:52 2006 +0100
    26.2 +++ b/xen/include/xen/sched.h	Mon Apr 10 16:36:03 2006 +0100
    26.3 @@ -134,7 +134,7 @@ struct domain
    26.4       */
    26.5  #define NR_PIRQS 256 /* Put this somewhere sane! */
    26.6      u16              pirq_to_evtchn[NR_PIRQS];
    26.7 -    u32              pirq_mask[NR_PIRQS/32];
    26.8 +    DECLARE_BITMAP(pirq_mask, NR_PIRQS);
    26.9  
   26.10      /* I/O capabilities (access to IRQs and memory-mapped I/O). */
   26.11      struct rangeset *iomem_caps;
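DECLARE_BITMAP replaces the hand-rolled u32 array with an unsigned-long-based bitmap sized in machine words, which is the form the generic bitmap helpers (set_bit, test_bit and friends) operate on. A sketch of what the macro conventionally expands to and how such a mask is used (simplified; the real helpers are atomic and arch-specific):

    #include <limits.h>
    #include <stdio.h>

    #define BITS_PER_LONG    (CHAR_BIT * sizeof(long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
    #define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

    #define NR_PIRQS 256

    struct demo {
        DECLARE_BITMAP(pirq_mask, NR_PIRQS);  /* replaces u32 pirq_mask[NR_PIRQS/32] */
    };

    /* Non-atomic stand-ins for the real set_bit()/test_bit() helpers. */
    static void set_bit_simple(int nr, unsigned long *addr)
    {
        addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
    }

    static int test_bit_simple(int nr, const unsigned long *addr)
    {
        return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
    }

    int main(void)
    {
        struct demo d = { { 0 } };
        set_bit_simple(130, d.pirq_mask);
        printf("%d %d\n", test_bit_simple(130, d.pirq_mask),
                          test_bit_simple(131, d.pirq_mask));  /* 1 0 */
        return 0;
    }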
    27.1 --- a/xen/include/xen/softirq.h	Mon Apr 10 16:28:52 2006 +0100
    27.2 +++ b/xen/include/xen/softirq.h	Mon Apr 10 16:36:03 2006 +0100
    27.3 @@ -9,7 +9,8 @@
    27.4  #define NMI_SOFTIRQ                       4
    27.5  #define PAGE_SCRUB_SOFTIRQ                5
    27.6  #define DOMAIN_SHUTDOWN_FINALISE_SOFTIRQ  6
    27.7 -#define NR_SOFTIRQS                       7
    27.8 +#define TRACE_SOFTIRQ                     7
    27.9 +#define NR_SOFTIRQS                       8
   27.10  
   27.11  #ifndef __ASSEMBLY__
   27.12