ia64/xen-unstable

changeset 2028:6f08c0b6c8cd

bitkeeper revision 1.1108.50.1 (410e4d841op690UkyuQmjW5t-Sx0CQ)

Merged a number of 2.4 and 2.6 files that were mostly common between the two sparse trees.
author kaf24@scramble.cl.cam.ac.uk
date Mon Aug 02 14:19:48 2004 +0000 (2004-08-02)
parents 01ddb3c0f57e
children 3b0ab2e570a0
files .rootkeys Makefile linux-2.4.26-xen-sparse/arch/xen/drivers/evtchn/evtchn.c linux-2.4.26-xen-sparse/arch/xen/kernel/ctrl_if.c linux-2.4.26-xen-sparse/arch/xen/kernel/evtchn.c linux-2.4.26-xen-sparse/arch/xen/mm/hypervisor.c linux-2.4.26-xen-sparse/include/asm-xen/ctrl_if.h linux-2.4.26-xen-sparse/include/asm-xen/evtchn.h linux-2.4.26-xen-sparse/include/asm-xen/hypervisor.h linux-2.4.26-xen-sparse/include/asm-xen/multicall.h linux-2.4.26-xen-sparse/include/asm-xen/proc_cmd.h linux-2.4.26-xen-sparse/include/asm-xen/suspend.h linux-2.4.26-xen-sparse/mkbuildtree linux-2.6.7-xen-sparse/arch/xen/i386/kernel/Makefile linux-2.6.7-xen-sparse/arch/xen/i386/kernel/cpu/common.c linux-2.6.7-xen-sparse/arch/xen/i386/kernel/evtchn.c linux-2.6.7-xen-sparse/arch/xen/i386/kernel/setup.c linux-2.6.7-xen-sparse/arch/xen/i386/mm/hypervisor.c linux-2.6.7-xen-sparse/arch/xen/i386/mm/init.c linux-2.6.7-xen-sparse/arch/xen/kernel/Makefile linux-2.6.7-xen-sparse/arch/xen/kernel/ctrl_if.c linux-2.6.7-xen-sparse/arch/xen/kernel/empty.c linux-2.6.7-xen-sparse/arch/xen/kernel/evtchn.c linux-2.6.7-xen-sparse/arch/xen/kernel/reboot.c linux-2.6.7-xen-sparse/drivers/xen/blkback/common.h linux-2.6.7-xen-sparse/drivers/xen/console/console.c linux-2.6.7-xen-sparse/drivers/xen/evtchn/evtchn.c linux-2.6.7-xen-sparse/drivers/xen/netback/common.h linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/hypervisor.h linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/msr.h linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable.h linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/system.h linux-2.6.7-xen-sparse/include/asm-xen/ctrl_if.h linux-2.6.7-xen-sparse/include/asm-xen/evtchn.h linux-2.6.7-xen-sparse/include/asm-xen/hypervisor.h linux-2.6.7-xen-sparse/include/asm-xen/multicall.h linux-2.6.7-xen-sparse/include/asm-xen/suspend.h linux-2.6.7-xen-sparse/include/asm-xen/xen.h xen/common/dom_mem_ops.c
line diff
     1.1 --- a/.rootkeys	Mon Aug 02 10:30:38 2004 +0000
     1.2 +++ b/.rootkeys	Mon Aug 02 14:19:48 2004 +0000
     1.3 @@ -63,14 +63,11 @@ 4075806dibjCcfuXv6CINMhxWTw3jQ linux-2.4
     1.4  3e5a4e65G3e2s0ghPMgiJ-gBTUJ0uQ linux-2.4.26-xen-sparse/arch/xen/drivers/console/Makefile
     1.5  3e5a4e656nfFISThfbyXQOA6HN6YHw linux-2.4.26-xen-sparse/arch/xen/drivers/dom0/Makefile
     1.6  40420a6ebRqDjufoN1WSJvolEW2Wjw linux-2.4.26-xen-sparse/arch/xen/drivers/evtchn/Makefile
     1.7 -40420a73Wou6JlsZDiu6YwjYomsm7A linux-2.4.26-xen-sparse/arch/xen/drivers/evtchn/evtchn.c
     1.8  4083dc16-Kd5y9psK_yk161sme5j5Q linux-2.4.26-xen-sparse/arch/xen/drivers/netif/Makefile
     1.9  4083dc16UmHXxS9g_UFVnkUpN-oP2Q linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/Makefile
    1.10  405853f2wg7JXZJNltspMwOZJklxgw linux-2.4.26-xen-sparse/arch/xen/drivers/netif/frontend/Makefile
    1.11  3e5a4e65lWzkiPXsZdzPt2RNnJGG1g linux-2.4.26-xen-sparse/arch/xen/kernel/Makefile
    1.12 -4075806dE5mQwlVUf8-t3YXjiMMWDQ linux-2.4.26-xen-sparse/arch/xen/kernel/ctrl_if.c
    1.13  3e5a4e65_hqfuxtGG8IUy6wRM86Ecg linux-2.4.26-xen-sparse/arch/xen/kernel/entry.S
    1.14 -3e5a4e65ibVQmwlOn0j3sVH_j_6hAg linux-2.4.26-xen-sparse/arch/xen/kernel/evtchn.c
    1.15  3e5a4e65Hy_1iUvMTPsNqGNXd9uFpg linux-2.4.26-xen-sparse/arch/xen/kernel/head.S
    1.16  3e5a4e65RMGcuA-HCn3-wNx3fFQwdg linux-2.4.26-xen-sparse/arch/xen/kernel/i386_ksyms.c
    1.17  3e5a4e653U6cELGv528IxOLHvCq8iA linux-2.4.26-xen-sparse/arch/xen/kernel/irq.c
    1.18 @@ -86,7 +83,6 @@ 3e5a4e66-9_NczrVMbuQkoSLyXckIw linux-2.4
    1.19  3e5a4e6637ZDk0BvFEC-aFQs599-ng linux-2.4.26-xen-sparse/arch/xen/lib/delay.c
    1.20  3e5a4e66croVgpcJyJuF2ycQw0HuJw linux-2.4.26-xen-sparse/arch/xen/mm/Makefile
    1.21  3e5a4e66l8Q5Tv-6B3lQIRmaVbFPzg linux-2.4.26-xen-sparse/arch/xen/mm/fault.c
    1.22 -3e5a4e668SE9rixq4ahho9rNhLUUFQ linux-2.4.26-xen-sparse/arch/xen/mm/hypervisor.c
    1.23  3e5a4e661gLzzff25pJooKIIWe7IWg linux-2.4.26-xen-sparse/arch/xen/mm/init.c
    1.24  3f0bed43UUdQichXAiVNrjV-y2Kzcg linux-2.4.26-xen-sparse/arch/xen/mm/ioremap.c
    1.25  3e5a4e66qRlSTcjafidMB6ulECADvg linux-2.4.26-xen-sparse/arch/xen/vmlinux.lds
    1.26 @@ -97,31 +93,25 @@ 3e5a4e66rw65CxyolW9PKz4GG42RcA linux-2.4
    1.27  40c9c0c1pPwYE3-4i-oI3ubUu7UgvQ linux-2.4.26-xen-sparse/drivers/scsi/aic7xxx/Makefile
    1.28  3e5a4e669uzIE54VwucPYtGwXLAbzA linux-2.4.26-xen-sparse/fs/exec.c
    1.29  3e5a4e66wbeCpsJgVf_U8Jde-CNcsA linux-2.4.26-xen-sparse/include/asm-xen/bugs.h
    1.30 -4048c0ddxnIa2GpBAVR-mY6mNSdeJg linux-2.4.26-xen-sparse/include/asm-xen/ctrl_if.h
    1.31  3e5a4e66HdSkvIV6SJ1evG_xmTmXHA linux-2.4.26-xen-sparse/include/asm-xen/desc.h
    1.32 -4048c0e0_P2wUTiT6UqgPhn0s7yFcA linux-2.4.26-xen-sparse/include/asm-xen/evtchn.h
    1.33  3e5a4e66SYp_UpAVcF8Lc1wa3Qtgzw linux-2.4.26-xen-sparse/include/asm-xen/fixmap.h
    1.34  406aeeaaQvl4RNtmd9hDEugBURbFpQ linux-2.4.26-xen-sparse/include/asm-xen/highmem.h
    1.35  3e5a4e67YtcyDLQsShhCfQwPSELfvA linux-2.4.26-xen-sparse/include/asm-xen/hw_irq.h
    1.36 -3e5a4e677VBavzM1UZIEcH1B-RlXMA linux-2.4.26-xen-sparse/include/asm-xen/hypervisor.h
    1.37  4060044fVx7-tokvNLKBf_6qBB4lqQ linux-2.4.26-xen-sparse/include/asm-xen/io.h
    1.38  3e5a4e673p7PEOyHFm3nHkYX6HQYBg linux-2.4.26-xen-sparse/include/asm-xen/irq.h
    1.39  40d70c240tW7TWArl1VUgIFH2nVO1A linux-2.4.26-xen-sparse/include/asm-xen/keyboard.h
    1.40  3e5a4e678ddsQOpbSiRdy1GRcDc9WA linux-2.4.26-xen-sparse/include/asm-xen/mmu_context.h
    1.41  40d06e5b2YWInUX1Xv9amVANwd_2Xg linux-2.4.26-xen-sparse/include/asm-xen/module.h
    1.42  3f8707e7ZmZ6TxyX0ZUEfvhA2Pb_xQ linux-2.4.26-xen-sparse/include/asm-xen/msr.h
    1.43 -3e7270deQqtGPSnFxcW4AvJZuTUWfg linux-2.4.26-xen-sparse/include/asm-xen/multicall.h
    1.44  3e5a4e67mnQfh-R8KcQCaVo2Oho6yg linux-2.4.26-xen-sparse/include/asm-xen/page.h
    1.45  409ba2e7ZfV5hqTvIzxLtpClnxtIzg linux-2.4.26-xen-sparse/include/asm-xen/pci.h
    1.46  3e5a4e67uTYU5oEnIDjxuaez8njjqg linux-2.4.26-xen-sparse/include/asm-xen/pgalloc.h
    1.47  3e5a4e67X7JyupgdYkgDX19Huj2sAw linux-2.4.26-xen-sparse/include/asm-xen/pgtable-2level.h
    1.48  3e5a4e67gr4NLGtQ5CvSLimMYZlkOA linux-2.4.26-xen-sparse/include/asm-xen/pgtable.h
    1.49 -3f108af1qNv8DVSGPv4zpqIU1txCkg linux-2.4.26-xen-sparse/include/asm-xen/proc_cmd.h
    1.50  3e5a4e676uK4xErTBDH6XJREn9LSyg linux-2.4.26-xen-sparse/include/asm-xen/processor.h
    1.51  3e5a4e67AJPjW-zL7p-xWuA6IVeH1g linux-2.4.26-xen-sparse/include/asm-xen/ptrace.h
    1.52  3e5a4e68uJz-xI0IBVMD7xRLQKJDFg linux-2.4.26-xen-sparse/include/asm-xen/segment.h
    1.53  3e5a4e68Nfdh6QcOKUTGCaYkf2LmYA linux-2.4.26-xen-sparse/include/asm-xen/smp.h
    1.54 -3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ linux-2.4.26-xen-sparse/include/asm-xen/suspend.h
    1.55  4062f7e2PzFOUGT0PaE7A0VprTU3JQ linux-2.4.26-xen-sparse/include/asm-xen/synch_bitops.h
    1.56  3e5a4e68mTr0zcp9SXDbnd-XLrrfxw linux-2.4.26-xen-sparse/include/asm-xen/system.h
    1.57  3f1056a9L_kqHcFheV00KbKBzv9j5w linux-2.4.26-xen-sparse/include/asm-xen/vga.h
    1.58 @@ -153,7 +143,6 @@ 40f56238eczveJ86k_4hNxCLRQIF-g linux-2.6
    1.59  40f56238rXVTJQKbBuXXLH52qEArcg linux-2.6.7-xen-sparse/arch/xen/i386/kernel/cpu/Makefile
    1.60  40f562385s4lr6Zg92gExe7UQ4A76Q linux-2.6.7-xen-sparse/arch/xen/i386/kernel/cpu/common.c
    1.61  40f56238XDtHSijkAFlbv1PT8Bhw_Q linux-2.6.7-xen-sparse/arch/xen/i386/kernel/entry.S
    1.62 -40f56238xFQe9T7M_U_FItM-bZIpLw linux-2.6.7-xen-sparse/arch/xen/i386/kernel/evtchn.c
    1.63  40f56238bnvciAuyzAiMkdzGErYt1A linux-2.6.7-xen-sparse/arch/xen/i386/kernel/head.S
    1.64  40f58a0d31M2EkuPbG94ns_nOi0PVA linux-2.6.7-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c
    1.65  40faa751_zbZlAmLyQgCXdYekVFdWA linux-2.6.7-xen-sparse/arch/xen/i386/kernel/ioport.c
    1.66 @@ -183,6 +172,7 @@ 4107adf1s5u6249DNPUViX1YNagbUQ linux-2.6
    1.67  40f56239zOksGg_H4XD4ye6iZNtoZA linux-2.6.7-xen-sparse/arch/xen/kernel/Makefile
    1.68  40f56239bvOjuuuViZ0XMlNiREFC0A linux-2.6.7-xen-sparse/arch/xen/kernel/ctrl_if.c
    1.69  40f56239pYRq5yshPTkv3ujXKc8K6g linux-2.6.7-xen-sparse/arch/xen/kernel/empty.c
    1.70 +40f56238xFQe9T7M_U_FItM-bZIpLw linux-2.6.7-xen-sparse/arch/xen/kernel/evtchn.c
    1.71  40f56239sFcjHiIRmnObRIDF-zaeKQ linux-2.6.7-xen-sparse/arch/xen/kernel/process.c
    1.72  40f562392LBhwmOxVPsYdkYXMxI_ZQ linux-2.6.7-xen-sparse/arch/xen/kernel/reboot.c
    1.73  3f68905c5eiA-lBMQSvXLMWS1ikDEA linux-2.6.7-xen-sparse/arch/xen/kernel/xen_proc.c
    1.74 @@ -217,7 +207,6 @@ 40f56239YAjS52QG2FIAQpHDZAdGHg linux-2.6
    1.75  4107adf1E5O4ztGHNGMzCCNhcvqNow linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h
    1.76  40f5623anSzpuEHgiNmQ56fIRfCoaQ linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/e820.h
    1.77  40f5623akIoBsQ3KxSB2kufkbgONXQ linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/fixmap.h
    1.78 -40f5623aGPlsm0u1LTO-NVZ6AGzNRQ linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/hypervisor.h
    1.79  40f5623aJVXQwpJMOLE99XgvGsfQ8Q linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/io.h
    1.80  40f5623am9BzluYFuV6EQfTd-so3dA linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/do_timer.h
    1.81  40f5623adZQ1IZGPxbDXONjyZGYuTA linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/mach-xen/io_ports.h
    1.82 @@ -249,9 +238,10 @@ 41062ab7uFxnCq-KtPeAm-aV8CicgA linux-2.6
    1.83  40f5623bxUbeGjkRrjDguCy_Gm8RLw linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/xor.h
    1.84  40f5623bYNP7tHE2zX6YQxp9Zq2utQ linux-2.6.7-xen-sparse/include/asm-xen/ctrl_if.h
    1.85  40f5623b3Eqs8pAc5WpPX8_jTzV2qw linux-2.6.7-xen-sparse/include/asm-xen/evtchn.h
    1.86 +40f5623aGPlsm0u1LTO-NVZ6AGzNRQ linux-2.6.7-xen-sparse/include/asm-xen/hypervisor.h
    1.87  40f5623cndVUFlkxpf7Lfx7xu8madQ linux-2.6.7-xen-sparse/include/asm-xen/multicall.h
    1.88  3f108af1ylCIm82H052FVTfXACBHrw linux-2.6.7-xen-sparse/include/asm-xen/proc_cmd.h
    1.89 -40f5623cBiQhPHILVLrl3xa6bDBaRg linux-2.6.7-xen-sparse/include/asm-xen/xen.h
    1.90 +3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ linux-2.6.7-xen-sparse/include/asm-xen/suspend.h
    1.91  3f689063BoW-HWV3auUJ-OqXfcGArw linux-2.6.7-xen-sparse/include/asm-xen/xen_proc.h
    1.92  40f56a0ddHCSs3501MY4hRf22tctOw linux-2.6.7-xen-sparse/mkbuildtree
    1.93  410a94a4KT6I6X0LVc7djB39tRDp4g linux-2.6.7-xen-sparse/mm/page_alloc.c
     2.1 --- a/Makefile	Mon Aug 02 10:30:38 2004 +0000
     2.2 +++ b/Makefile	Mon Aug 02 14:19:48 2004 +0000
     2.3 @@ -30,7 +30,7 @@ dist: all
     2.4  LINUX_RELEASE    ?= 2.4
     2.5  LINUX_VER        ?= $(shell ( /bin/ls -ld linux-$(LINUX_RELEASE).*-xen-sparse ) 2>/dev/null | \
     2.6  		      sed -e 's!^.*linux-\(.\+\)-xen-sparse!\1!' )
     2.7 -LINUX24_VER      ?= $(shell ( /bin/ls -ld linux-2.4.*-xen-sparse ) 2>/dev/null | \
     2.8 +LINUX26_VER      ?= $(shell ( /bin/ls -ld linux-2.6.*-xen-sparse ) 2>/dev/null | \
     2.9  		      sed -e 's!^.*linux-\(.\+\)-xen-sparse!\1!' )
    2.10  LINUX_CONFIG_DIR ?= $(INSTALL_DIR)/boot
    2.11  LINUX_SRC_PATH   ?= .:..
    2.12 @@ -133,7 +133,7 @@ mrproper: clean
    2.13  	rm -rf install/* patches $(LINUX_TREES) linux-$(LINUX_VER).tar.*
    2.14  
    2.15  make-symlinks: delete-symlinks
    2.16 -	ln -sf linux-$(LINUX24_VER)-xen-sparse linux-xen-sparse
    2.17 +	ln -sf linux-$(LINUX26_VER)-xen-sparse linux-xen-sparse
    2.18  
    2.19  delete-symlinks:
    2.20  	$(RM) linux-xen-sparse
     3.1 --- a/linux-2.4.26-xen-sparse/arch/xen/drivers/evtchn/evtchn.c	Mon Aug 02 10:30:38 2004 +0000
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,366 +0,0 @@
     3.4 -/******************************************************************************
     3.5 - * evtchn.c
     3.6 - * 
     3.7 - * Xenolinux driver for receiving and demuxing event-channel signals.
     3.8 - * 
     3.9 - * Copyright (c) 2004, K A Fraser
    3.10 - */
    3.11 -
    3.12 -#include <linux/config.h>
    3.13 -#include <linux/module.h>
    3.14 -#include <linux/kernel.h>
    3.15 -#include <linux/sched.h>
    3.16 -#include <linux/slab.h>
    3.17 -#include <linux/string.h>
    3.18 -#include <linux/errno.h>
    3.19 -#include <linux/fs.h>
    3.20 -#include <linux/errno.h>
    3.21 -#include <linux/miscdevice.h>
    3.22 -#include <linux/major.h>
    3.23 -#include <linux/proc_fs.h>
    3.24 -#include <linux/devfs_fs_kernel.h>
    3.25 -#include <linux/stat.h>
    3.26 -#include <linux/poll.h>
    3.27 -#include <linux/irq.h>
    3.28 -#include <asm/evtchn.h>
    3.29 -
    3.30 -/* NB. This must be shared amongst drivers if more things go in /dev/xen */
    3.31 -static devfs_handle_t xen_dev_dir;
    3.32 -
    3.33 -/* Only one process may open /dev/xen/evtchn at any time. */
    3.34 -static unsigned long evtchn_dev_inuse;
    3.35 -
    3.36 -/* Notification ring, accessed via /dev/xen/evtchn. */
    3.37 -#define RING_SIZE     2048  /* 2048 16-bit entries */
    3.38 -#define RING_MASK(_i) ((_i)&(RING_SIZE-1))
    3.39 -static u16 *ring;
    3.40 -static unsigned int ring_cons, ring_prod, ring_overflow;
    3.41 -
    3.42 -/* Processes wait on this queue via /dev/xen/evtchn when ring is empty. */
    3.43 -static DECLARE_WAIT_QUEUE_HEAD(evtchn_wait);
    3.44 -static struct fasync_struct *evtchn_async_queue;
    3.45 -
    3.46 -/* Which ports is user-space bound to? */
    3.47 -static u32 bound_ports[32];
    3.48 -
    3.49 -static spinlock_t lock;
    3.50 -
    3.51 -void evtchn_device_upcall(int port)
    3.52 -{
    3.53 -    shared_info_t *s = HYPERVISOR_shared_info;
    3.54 -
    3.55 -    spin_lock(&lock);
    3.56 -
    3.57 -    mask_evtchn(port);
    3.58 -    clear_evtchn(port);
    3.59 -
    3.60 -    if ( ring != NULL )
    3.61 -    {
    3.62 -        if ( (ring_prod - ring_cons) < RING_SIZE )
    3.63 -        {
    3.64 -            ring[RING_MASK(ring_prod)] = (u16)port;
    3.65 -            if ( ring_cons == ring_prod++ )
    3.66 -            {
    3.67 -                wake_up_interruptible(&evtchn_wait);
    3.68 -                kill_fasync(&evtchn_async_queue, SIGIO, POLL_IN);
    3.69 -            }
    3.70 -        }
    3.71 -        else
    3.72 -        {
    3.73 -            ring_overflow = 1;
    3.74 -        }
    3.75 -    }
    3.76 -
    3.77 -    spin_unlock(&lock);
    3.78 -}
    3.79 -
    3.80 -static void __evtchn_reset_buffer_ring(void)
    3.81 -{
    3.82 -    /* Initialise the ring to empty. Clear errors. */
    3.83 -    ring_cons = ring_prod = ring_overflow = 0;
    3.84 -}
    3.85 -
    3.86 -static ssize_t evtchn_read(struct file *file, char *buf,
    3.87 -                           size_t count, loff_t *ppos)
    3.88 -{
    3.89 -    int rc;
    3.90 -    unsigned int c, p, bytes1 = 0, bytes2 = 0;
    3.91 -    DECLARE_WAITQUEUE(wait, current);
    3.92 -
    3.93 -    add_wait_queue(&evtchn_wait, &wait);
    3.94 -
    3.95 -    count &= ~1; /* even number of bytes */
    3.96 -
    3.97 -    if ( count == 0 )
    3.98 -    {
    3.99 -        rc = 0;
   3.100 -        goto out;
   3.101 -    }
   3.102 -
   3.103 -    if ( count > PAGE_SIZE )
   3.104 -        count = PAGE_SIZE;
   3.105 -
   3.106 -    for ( ; ; )
   3.107 -    {
   3.108 -        set_current_state(TASK_INTERRUPTIBLE);
   3.109 -
   3.110 -        if ( (c = ring_cons) != (p = ring_prod) )
   3.111 -            break;
   3.112 -
   3.113 -        if ( ring_overflow )
   3.114 -        {
   3.115 -            rc = -EFBIG;
   3.116 -            goto out;
   3.117 -        }
   3.118 -
   3.119 -        if ( file->f_flags & O_NONBLOCK )
   3.120 -        {
   3.121 -            rc = -EAGAIN;
   3.122 -            goto out;
   3.123 -        }
   3.124 -
   3.125 -        if ( signal_pending(current) )
   3.126 -        {
   3.127 -            rc = -ERESTARTSYS;
   3.128 -            goto out;
   3.129 -        }
   3.130 -
   3.131 -        schedule();
   3.132 -    }
   3.133 -
   3.134 -    /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
   3.135 -    if ( ((c ^ p) & RING_SIZE) != 0 )
   3.136 -    {
   3.137 -        bytes1 = (RING_SIZE - RING_MASK(c)) * sizeof(u16);
   3.138 -        bytes2 = RING_MASK(p) * sizeof(u16);
   3.139 -    }
   3.140 -    else
   3.141 -    {
   3.142 -        bytes1 = (p - c) * sizeof(u16);
   3.143 -        bytes2 = 0;
   3.144 -    }
   3.145 -
   3.146 -    /* Truncate chunks according to caller's maximum byte count. */
   3.147 -    if ( bytes1 > count )
   3.148 -    {
   3.149 -        bytes1 = count;
   3.150 -        bytes2 = 0;
   3.151 -    }
   3.152 -    else if ( (bytes1 + bytes2) > count )
   3.153 -    {
   3.154 -        bytes2 = count - bytes1;
   3.155 -    }
   3.156 -
   3.157 -    if ( copy_to_user(buf, &ring[RING_MASK(c)], bytes1) ||
   3.158 -         ((bytes2 != 0) && copy_to_user(&buf[bytes1], &ring[0], bytes2)) )
   3.159 -    {
   3.160 -        rc = -EFAULT;
   3.161 -        goto out;
   3.162 -    }
   3.163 -
   3.164 -    ring_cons += (bytes1 + bytes2) / sizeof(u16);
   3.165 -
   3.166 -    rc = bytes1 + bytes2;
   3.167 -
   3.168 - out:
   3.169 -    __set_current_state(TASK_RUNNING);
   3.170 -    remove_wait_queue(&evtchn_wait, &wait);
   3.171 -    return rc;
   3.172 -}
   3.173 -
   3.174 -static ssize_t evtchn_write(struct file *file, const char *buf,
   3.175 -                            size_t count, loff_t *ppos)
   3.176 -{
   3.177 -    int  rc, i;
   3.178 -    u16 *kbuf = (u16 *)get_free_page(GFP_KERNEL);
   3.179 -
   3.180 -    if ( kbuf == NULL )
   3.181 -        return -ENOMEM;
   3.182 -
   3.183 -    count &= ~1; /* even number of bytes */
   3.184 -
   3.185 -    if ( count == 0 )
   3.186 -    {
   3.187 -        rc = 0;
   3.188 -        goto out;
   3.189 -    }
   3.190 -
   3.191 -    if ( count > PAGE_SIZE )
   3.192 -        count = PAGE_SIZE;
   3.193 -
   3.194 -    if ( copy_from_user(kbuf, buf, count) != 0 )
   3.195 -    {
   3.196 -        rc = -EFAULT;
   3.197 -        goto out;
   3.198 -    }
   3.199 -
   3.200 -    spin_lock_irq(&lock);
   3.201 -    for ( i = 0; i < (count/2); i++ )
   3.202 -        if ( test_bit(kbuf[i], &bound_ports[0]) )
   3.203 -            unmask_evtchn(kbuf[i]);
   3.204 -    spin_unlock_irq(&lock);
   3.205 -
   3.206 -    rc = count;
   3.207 -
   3.208 - out:
   3.209 -    free_page((unsigned long)kbuf);
   3.210 -    return rc;
   3.211 -}
   3.212 -
   3.213 -static int evtchn_ioctl(struct inode *inode, struct file *file,
   3.214 -                        unsigned int cmd, unsigned long arg)
   3.215 -{
   3.216 -    int rc = 0;
   3.217 -    
   3.218 -    spin_lock_irq(&lock);
   3.219 -    
   3.220 -    switch ( cmd )
   3.221 -    {
   3.222 -    case EVTCHN_RESET:
   3.223 -        __evtchn_reset_buffer_ring();
   3.224 -        break;
   3.225 -    case EVTCHN_BIND:
   3.226 -        if ( !test_and_set_bit(arg, &bound_ports[0]) )
   3.227 -            unmask_evtchn(arg);
   3.228 -        else
   3.229 -            rc = -EINVAL;
   3.230 -        break;
   3.231 -    case EVTCHN_UNBIND:
   3.232 -        if ( test_and_clear_bit(arg, &bound_ports[0]) )
   3.233 -            mask_evtchn(arg);
   3.234 -        else
   3.235 -            rc = -EINVAL;
   3.236 -        break;
   3.237 -    default:
   3.238 -        rc = -ENOSYS;
   3.239 -        break;
   3.240 -    }
   3.241 -
   3.242 -    spin_unlock_irq(&lock);   
   3.243 -
   3.244 -    return rc;
   3.245 -}
   3.246 -
   3.247 -static unsigned int evtchn_poll(struct file *file, poll_table *wait)
   3.248 -{
   3.249 -    unsigned int mask = POLLOUT | POLLWRNORM;
   3.250 -    poll_wait(file, &evtchn_wait, wait);
   3.251 -    if ( ring_cons != ring_prod )
   3.252 -        mask |= POLLIN | POLLRDNORM;
   3.253 -    if ( ring_overflow )
   3.254 -        mask = POLLERR;
   3.255 -    return mask;
   3.256 -}
   3.257 -
   3.258 -static int evtchn_fasync(int fd, struct file *filp, int on)
   3.259 -{
   3.260 -    return fasync_helper(fd, filp, on, &evtchn_async_queue);
   3.261 -}
   3.262 -
   3.263 -static int evtchn_open(struct inode *inode, struct file *filp)
   3.264 -{
   3.265 -    u16 *_ring;
   3.266 -
   3.267 -    if ( test_and_set_bit(0, &evtchn_dev_inuse) )
   3.268 -        return -EBUSY;
   3.269 -
   3.270 -    /* Allocate outside locked region so that we can use GFP_KERNEL. */
   3.271 -    if ( (_ring = (u16 *)get_free_page(GFP_KERNEL)) == NULL )
   3.272 -        return -ENOMEM;
   3.273 -
   3.274 -    spin_lock_irq(&lock);
   3.275 -    ring = _ring;
   3.276 -    __evtchn_reset_buffer_ring();
   3.277 -    spin_unlock_irq(&lock);
   3.278 -
   3.279 -    MOD_INC_USE_COUNT;
   3.280 -
   3.281 -    return 0;
   3.282 -}
   3.283 -
   3.284 -static int evtchn_release(struct inode *inode, struct file *filp)
   3.285 -{
   3.286 -    int i;
   3.287 -
   3.288 -    spin_lock_irq(&lock);
   3.289 -    if ( ring != NULL )
   3.290 -    {
   3.291 -        free_page((unsigned long)ring);
   3.292 -        ring = NULL;
   3.293 -    }
   3.294 -    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
   3.295 -        if ( test_and_clear_bit(i, &bound_ports[0]) )
   3.296 -            mask_evtchn(i);
   3.297 -    spin_unlock_irq(&lock);
   3.298 -
   3.299 -    evtchn_dev_inuse = 0;
   3.300 -
   3.301 -    MOD_DEC_USE_COUNT;
   3.302 -
   3.303 -    return 0;
   3.304 -}
   3.305 -
   3.306 -static struct file_operations evtchn_fops = {
   3.307 -    owner:    THIS_MODULE,
   3.308 -    read:     evtchn_read,
   3.309 -    write:    evtchn_write,
   3.310 -    ioctl:    evtchn_ioctl,
   3.311 -    poll:     evtchn_poll,
   3.312 -    fasync:   evtchn_fasync,
   3.313 -    open:     evtchn_open,
   3.314 -    release:  evtchn_release
   3.315 -};
   3.316 -
   3.317 -static struct miscdevice evtchn_miscdev = {
   3.318 -    minor:    EVTCHN_MINOR,
   3.319 -    name:     "evtchn",
   3.320 -    fops:     &evtchn_fops
   3.321 -};
   3.322 -
   3.323 -static int __init init_module(void)
   3.324 -{
   3.325 -    devfs_handle_t symlink_handle;
   3.326 -    int            err, pos;
   3.327 -    char           link_dest[64];
   3.328 -
   3.329 -    /* (DEVFS) create '/dev/misc/evtchn'. */
   3.330 -    err = misc_register(&evtchn_miscdev);
   3.331 -    if ( err != 0 )
   3.332 -    {
   3.333 -        printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
   3.334 -        return err;
   3.335 -    }
   3.336 -
   3.337 -    /* (DEVFS) create directory '/dev/xen'. */
   3.338 -    xen_dev_dir = devfs_mk_dir(NULL, "xen", NULL);
   3.339 -
   3.340 -    /* (DEVFS) &link_dest[pos] == '../misc/evtchn'. */
   3.341 -    pos = devfs_generate_path(evtchn_miscdev.devfs_handle, 
   3.342 -                              &link_dest[3], 
   3.343 -                              sizeof(link_dest) - 3);
   3.344 -    if ( pos >= 0 )
   3.345 -        strncpy(&link_dest[pos], "../", 3);
   3.346 -
   3.347 -    /* (DEVFS) symlink '/dev/xen/evtchn' -> '../misc/evtchn'. */
   3.348 -    (void)devfs_mk_symlink(xen_dev_dir, 
   3.349 -                           "evtchn", 
   3.350 -                           DEVFS_FL_DEFAULT, 
   3.351 -                           &link_dest[pos],
   3.352 -                           &symlink_handle, 
   3.353 -                           NULL);
   3.354 -
   3.355 -    /* (DEVFS) automatically destroy the symlink with its destination. */
   3.356 -    devfs_auto_unregister(evtchn_miscdev.devfs_handle, symlink_handle);
   3.357 -
   3.358 -    printk("Event-channel device installed.\n");
   3.359 -
   3.360 -    return 0;
   3.361 -}
   3.362 -
   3.363 -static void cleanup_module(void)
   3.364 -{
   3.365 -    misc_deregister(&evtchn_miscdev);
   3.366 -}
   3.367 -
   3.368 -module_init(init_module);
   3.369 -module_exit(cleanup_module);
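
The 2.4 copy of the /dev/xen/evtchn driver is deleted above; the files list keeps the 2.6 copy at linux-2.6.7-xen-sparse/drivers/xen/evtchn/evtchn.c, which the 2.4 build presumably now picks up via mkbuildtree. As a reading aid only, here is a minimal sketch of a user-space client of the interface the removed driver implements: bind a port, read pending 16-bit port numbers, and write them back to re-unmask them. It is not part of the changeset; the EVTCHN_* ioctl numbers normally come from asm/evtchn.h (not shown in this diff) and the bound port number is purely illustrative.

    /*
     * Illustrative sketch only -- not part of this changeset. A user-space
     * consumer of the (removed) /dev/xen/evtchn driver: bind a port, read
     * pending 16-bit port numbers, write them back to re-unmask them.
     * The EVTCHN_* ioctl values below are hypothetical stand-ins for the
     * definitions in asm/evtchn.h, and port 4 is an arbitrary example.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    #ifndef EVTCHN_RESET                    /* normally from asm/evtchn.h */
    #define EVTCHN_RESET _IO('E', 1)        /* hypothetical value */
    #define EVTCHN_BIND  _IO('E', 2)        /* hypothetical value */
    #endif

    int main(void)
    {
        uint16_t ports[64];
        ssize_t  n;
        int      i, fd = open("/dev/xen/evtchn", O_RDWR);

        if ( fd < 0 )
            return 1;

        (void)ioctl(fd, EVTCHN_RESET, 0);   /* empty the notification ring */
        (void)ioctl(fd, EVTCHN_BIND, 4);    /* bind (and unmask) port 4    */

        for ( ; ; )
        {
            n = read(fd, ports, sizeof(ports)); /* blocks until events arrive */
            if ( n <= 0 )
                break;
            for ( i = 0; i < (n / 2); i++ )
                printf("event on port %u\n", (unsigned)ports[i]);
            (void)write(fd, ports, n);          /* re-unmask handled ports */
        }

        close(fd);
        return 0;
    }

A ring overflow (read() failing with EFBIG, matching the driver's ring_overflow path) would be cleared by issuing EVTCHN_RESET again.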
     4.1 --- a/linux-2.4.26-xen-sparse/arch/xen/kernel/ctrl_if.c	Mon Aug 02 10:30:38 2004 +0000
     4.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.3 @@ -1,447 +0,0 @@
     4.4 -/******************************************************************************
     4.5 - * ctrl_if.c
     4.6 - * 
     4.7 - * Management functions for special interface to the domain controller.
     4.8 - * 
     4.9 - * Copyright (c) 2004, K A Fraser
    4.10 - */
    4.11 -
    4.12 -#include <linux/config.h>
    4.13 -#include <linux/kernel.h>
    4.14 -#include <linux/sched.h>
    4.15 -#include <linux/slab.h>
    4.16 -#include <linux/string.h>
    4.17 -#include <linux/errno.h>
    4.18 -#include <linux/irq.h>
    4.19 -#include <linux/interrupt.h>
    4.20 -#include <asm/ctrl_if.h>
    4.21 -#include <asm/evtchn.h>
    4.22 -
    4.23 -#if 0
    4.24 -#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
    4.25 -                           __FILE__ , __LINE__ , ## _a )
    4.26 -#else
    4.27 -#define DPRINTK(_f, _a...) ((void)0)
    4.28 -#endif
    4.29 -
    4.30 -/*
    4.31 - * Only used by initial domain which must create its own control-interface
    4.32 - * event channel. This value is picked up by the user-space domain controller
    4.33 - * via an ioctl.
    4.34 - */
    4.35 -int initdom_ctrlif_domcontroller_port = -1;
    4.36 -
    4.37 -static int        ctrl_if_evtchn;
    4.38 -static int        ctrl_if_irq;
    4.39 -static spinlock_t ctrl_if_lock;
    4.40 -
    4.41 -static struct irqaction ctrl_if_irq_action;
    4.42 -
    4.43 -static CONTROL_RING_IDX ctrl_if_tx_resp_cons;
    4.44 -static CONTROL_RING_IDX ctrl_if_rx_req_cons;
    4.45 -
    4.46 -/* Incoming message requests. */
    4.47 -    /* Primary message type -> message handler. */
    4.48 -static ctrl_msg_handler_t ctrl_if_rxmsg_handler[256];
    4.49 -    /* Primary message type -> callback in process context? */
    4.50 -static unsigned long ctrl_if_rxmsg_blocking_context[256/sizeof(unsigned long)];
    4.51 -    /* Is it late enough during bootstrap to use schedule_task()? */
    4.52 -static int safe_to_schedule_task;
    4.53 -    /* Passed to schedule_task(). */
    4.54 -static struct tq_struct ctrl_if_rxmsg_deferred_tq;
    4.55 -    /* Queue up messages to be handled in process context. */
    4.56 -static ctrl_msg_t ctrl_if_rxmsg_deferred[CONTROL_RING_SIZE];
    4.57 -static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_prod;
    4.58 -static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_cons;
    4.59 -
    4.60 -/* Incoming message responses: message identifier -> message handler/id. */
    4.61 -static struct {
    4.62 -    ctrl_msg_handler_t fn;
    4.63 -    unsigned long      id;
    4.64 -} ctrl_if_txmsg_id_mapping[CONTROL_RING_SIZE];
    4.65 -
    4.66 -static DECLARE_TASK_QUEUE(ctrl_if_tx_tq);
    4.67 -static DECLARE_WAIT_QUEUE_HEAD(ctrl_if_tx_wait);
    4.68 -static void __ctrl_if_tx_tasklet(unsigned long data);
    4.69 -static DECLARE_TASKLET(ctrl_if_tx_tasklet, __ctrl_if_tx_tasklet, 0);
    4.70 -
    4.71 -static void __ctrl_if_rx_tasklet(unsigned long data);
    4.72 -static DECLARE_TASKLET(ctrl_if_rx_tasklet, __ctrl_if_rx_tasklet, 0);
    4.73 -
    4.74 -#define get_ctrl_if() ((control_if_t *)((char *)HYPERVISOR_shared_info + 2048))
    4.75 -#define TX_FULL(_c)   \
    4.76 -    (((_c)->tx_req_prod - ctrl_if_tx_resp_cons) == CONTROL_RING_SIZE)
    4.77 -
    4.78 -static void ctrl_if_notify_controller(void)
    4.79 -{
    4.80 -    notify_via_evtchn(ctrl_if_evtchn);
    4.81 -}
    4.82 -
    4.83 -static void ctrl_if_rxmsg_default_handler(ctrl_msg_t *msg, unsigned long id)
    4.84 -{
    4.85 -    msg->length = 0;
    4.86 -    ctrl_if_send_response(msg);
    4.87 -}
    4.88 -
    4.89 -static void __ctrl_if_tx_tasklet(unsigned long data)
    4.90 -{
    4.91 -    control_if_t *ctrl_if = get_ctrl_if();
    4.92 -    ctrl_msg_t   *msg;
    4.93 -    int           was_full = TX_FULL(ctrl_if);
    4.94 -
    4.95 -    while ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
    4.96 -    {
    4.97 -        msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if_tx_resp_cons)];
    4.98 -
    4.99 -        DPRINTK("Rx-Rsp %u/%u :: %d/%d\n", 
   4.100 -                ctrl_if_tx_resp_cons,
   4.101 -                ctrl_if->tx_resp_prod,
   4.102 -                msg->type, msg->subtype);
   4.103 -
   4.104 -        /* Execute the callback handler, if one was specified. */
   4.105 -        if ( msg->id != 0xFF )
   4.106 -        {
   4.107 -            (*ctrl_if_txmsg_id_mapping[msg->id].fn)(
   4.108 -                msg, ctrl_if_txmsg_id_mapping[msg->id].id);
   4.109 -            smp_mb(); /* Execute, /then/ free. */
   4.110 -            ctrl_if_txmsg_id_mapping[msg->id].fn = NULL;
   4.111 -        }
   4.112 -
   4.113 -        /*
   4.114 -         * Step over the message in the ring /after/ finishing reading it. As 
   4.115 -         * soon as the index is updated then the message may get blown away.
   4.116 -         */
   4.117 -        smp_mb();
   4.118 -        ctrl_if_tx_resp_cons++;
   4.119 -    }
   4.120 -
   4.121 -    if ( was_full && !TX_FULL(ctrl_if) )
   4.122 -    {
   4.123 -        wake_up(&ctrl_if_tx_wait);
   4.124 -        run_task_queue(&ctrl_if_tx_tq);
   4.125 -    }
   4.126 -}
   4.127 -
   4.128 -static void __ctrl_if_rxmsg_deferred(void *unused)
   4.129 -{
   4.130 -    ctrl_msg_t *msg;
   4.131 -
   4.132 -    while ( ctrl_if_rxmsg_deferred_cons != ctrl_if_rxmsg_deferred_prod )
   4.133 -    {
   4.134 -        msg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
   4.135 -            ctrl_if_rxmsg_deferred_cons++)];
   4.136 -        (*ctrl_if_rxmsg_handler[msg->type])(msg, 0);
   4.137 -    }
   4.138 -}
   4.139 -
   4.140 -static void __ctrl_if_rx_tasklet(unsigned long data)
   4.141 -{
   4.142 -    control_if_t *ctrl_if = get_ctrl_if();
   4.143 -    ctrl_msg_t    msg, *pmsg;
   4.144 -
   4.145 -    while ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
   4.146 -    {
   4.147 -        pmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if_rx_req_cons++)];
   4.148 -        memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));
   4.149 -
   4.150 -        DPRINTK("Rx-Req %u/%u :: %d/%d\n", 
   4.151 -                ctrl_if_rx_req_cons-1,
   4.152 -                ctrl_if->rx_req_prod,
   4.153 -                msg.type, msg.subtype);
   4.154 -
   4.155 -        if ( msg.length != 0 )
   4.156 -            memcpy(msg.msg, pmsg->msg, msg.length);
   4.157 -
   4.158 -        if ( test_bit(msg.type, &ctrl_if_rxmsg_blocking_context) )
   4.159 -        {
   4.160 -            pmsg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
   4.161 -                ctrl_if_rxmsg_deferred_prod++)];
   4.162 -            memcpy(pmsg, &msg, offsetof(ctrl_msg_t, msg) + msg.length);
   4.163 -            schedule_task(&ctrl_if_rxmsg_deferred_tq);
   4.164 -        }
   4.165 -        else
   4.166 -        {
   4.167 -            (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
   4.168 -        }
   4.169 -    }
   4.170 -}
   4.171 -
   4.172 -static void ctrl_if_interrupt(int irq, void *dev_id, struct pt_regs *regs)
   4.173 -{
   4.174 -    control_if_t *ctrl_if = get_ctrl_if();
   4.175 -
   4.176 -    if ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
   4.177 -        tasklet_schedule(&ctrl_if_tx_tasklet);
   4.178 -
   4.179 -    if ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
   4.180 -        tasklet_schedule(&ctrl_if_rx_tasklet);
   4.181 -}
   4.182 -
   4.183 -int ctrl_if_send_message_noblock(
   4.184 -    ctrl_msg_t *msg, 
   4.185 -    ctrl_msg_handler_t hnd,
   4.186 -    unsigned long id)
   4.187 -{
   4.188 -    control_if_t *ctrl_if = get_ctrl_if();
   4.189 -    unsigned long flags;
   4.190 -    int           i;
   4.191 -
   4.192 -    spin_lock_irqsave(&ctrl_if_lock, flags);
   4.193 -
   4.194 -    if ( TX_FULL(ctrl_if) )
   4.195 -    {
   4.196 -        spin_unlock_irqrestore(&ctrl_if_lock, flags);
   4.197 -        return -EAGAIN;
   4.198 -    }
   4.199 -
   4.200 -    msg->id = 0xFF;
   4.201 -    if ( hnd != NULL )
   4.202 -    {
   4.203 -        for ( i = 0; ctrl_if_txmsg_id_mapping[i].fn != NULL; i++ )
   4.204 -            continue;
   4.205 -        ctrl_if_txmsg_id_mapping[i].fn = hnd;
   4.206 -        ctrl_if_txmsg_id_mapping[i].id = id;
   4.207 -        msg->id = i;
   4.208 -    }
   4.209 -
   4.210 -    DPRINTK("Tx-Req %u/%u :: %d/%d\n", 
   4.211 -            ctrl_if->tx_req_prod, 
   4.212 -            ctrl_if_tx_resp_cons,
   4.213 -            msg->type, msg->subtype);
   4.214 -
   4.215 -    memcpy(&ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if->tx_req_prod)], 
   4.216 -           msg, sizeof(*msg));
   4.217 -    wmb(); /* Write the message before letting the controller peek at it. */
   4.218 -    ctrl_if->tx_req_prod++;
   4.219 -
   4.220 -    spin_unlock_irqrestore(&ctrl_if_lock, flags);
   4.221 -
   4.222 -    ctrl_if_notify_controller();
   4.223 -
   4.224 -    return 0;
   4.225 -}
   4.226 -
   4.227 -int ctrl_if_send_message_block(
   4.228 -    ctrl_msg_t *msg, 
   4.229 -    ctrl_msg_handler_t hnd, 
   4.230 -    unsigned long id,
   4.231 -    long wait_state)
   4.232 -{
   4.233 -    DECLARE_WAITQUEUE(wait, current);
   4.234 -    int rc;
   4.235 -
   4.236 -    /* Fast path. */
   4.237 -    if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
   4.238 -        return rc;
   4.239 -
   4.240 -    add_wait_queue(&ctrl_if_tx_wait, &wait);
   4.241 -
   4.242 -    for ( ; ; )
   4.243 -    {
   4.244 -        set_current_state(wait_state);
   4.245 -
   4.246 -        if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
   4.247 -            break;
   4.248 -
   4.249 -        rc = -ERESTARTSYS;
   4.250 -        if ( signal_pending(current) && (wait_state == TASK_INTERRUPTIBLE) )
   4.251 -            break;
   4.252 -
   4.253 -        schedule();
   4.254 -    }
   4.255 -
   4.256 -    set_current_state(TASK_RUNNING);
   4.257 -    remove_wait_queue(&ctrl_if_tx_wait, &wait);
   4.258 -
   4.259 -    return rc;
   4.260 -}
   4.261 -
   4.262 -int ctrl_if_enqueue_space_callback(struct tq_struct *task)
   4.263 -{
   4.264 -    control_if_t *ctrl_if = get_ctrl_if();
   4.265 -
   4.266 -    /* Fast path. */
   4.267 -    if ( !TX_FULL(ctrl_if) )
   4.268 -        return 0;
   4.269 -
   4.270 -    (void)queue_task(task, &ctrl_if_tx_tq);
   4.271 -
   4.272 -    /*
   4.273 -     * We may race execution of the task queue, so return re-checked status. If
   4.274 -     * the task is not executed despite the ring being non-full then we will
   4.275 -     * certainly return 'not full'.
   4.276 -     */
   4.277 -    smp_mb();
   4.278 -    return TX_FULL(ctrl_if);
   4.279 -}
   4.280 -
   4.281 -void ctrl_if_send_response(ctrl_msg_t *msg)
   4.282 -{
   4.283 -    control_if_t *ctrl_if = get_ctrl_if();
   4.284 -    unsigned long flags;
   4.285 -    ctrl_msg_t   *dmsg;
   4.286 -
   4.287 -    /*
    4.288 -     * NB. The response may be the original request message, modified in-place.
   4.289 -     * In this situation we may have src==dst, so no copying is required.
   4.290 -     */
   4.291 -    spin_lock_irqsave(&ctrl_if_lock, flags);
   4.292 -
   4.293 -    DPRINTK("Tx-Rsp %u :: %d/%d\n", 
   4.294 -            ctrl_if->rx_resp_prod, 
   4.295 -            msg->type, msg->subtype);
   4.296 -
   4.297 -    dmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if->rx_resp_prod)];
   4.298 -    if ( dmsg != msg )
   4.299 -        memcpy(dmsg, msg, sizeof(*msg));
   4.300 -
   4.301 -    wmb(); /* Write the message before letting the controller peek at it. */
   4.302 -    ctrl_if->rx_resp_prod++;
   4.303 -
   4.304 -    spin_unlock_irqrestore(&ctrl_if_lock, flags);
   4.305 -
   4.306 -    ctrl_if_notify_controller();
   4.307 -}
   4.308 -
   4.309 -int ctrl_if_register_receiver(
   4.310 -    u8 type, 
   4.311 -    ctrl_msg_handler_t hnd, 
   4.312 -    unsigned int flags)
   4.313 -{
   4.314 -    unsigned long _flags;
   4.315 -    int inuse;
   4.316 -
   4.317 -    spin_lock_irqsave(&ctrl_if_lock, _flags);
   4.318 -
   4.319 -    inuse = (ctrl_if_rxmsg_handler[type] != ctrl_if_rxmsg_default_handler);
   4.320 -
   4.321 -    if ( inuse )
   4.322 -    {
   4.323 -        printk(KERN_INFO "Receiver %p already established for control "
   4.324 -               "messages of type %d.\n", ctrl_if_rxmsg_handler[type], type);
   4.325 -    }
   4.326 -    else
   4.327 -    {
   4.328 -        ctrl_if_rxmsg_handler[type] = hnd;
   4.329 -        clear_bit(type, &ctrl_if_rxmsg_blocking_context);
   4.330 -        if ( flags == CALLBACK_IN_BLOCKING_CONTEXT )
   4.331 -        {
   4.332 -            set_bit(type, &ctrl_if_rxmsg_blocking_context);
   4.333 -            if ( !safe_to_schedule_task )
   4.334 -                BUG();
   4.335 -        }
   4.336 -    }
   4.337 -
   4.338 -    spin_unlock_irqrestore(&ctrl_if_lock, _flags);
   4.339 -
   4.340 -    return !inuse;
   4.341 -}
   4.342 -
   4.343 -void ctrl_if_unregister_receiver(u8 type, ctrl_msg_handler_t hnd)
   4.344 -{
   4.345 -    unsigned long flags;
   4.346 -
   4.347 -    spin_lock_irqsave(&ctrl_if_lock, flags);
   4.348 -
   4.349 -    if ( ctrl_if_rxmsg_handler[type] != hnd )
   4.350 -        printk(KERN_INFO "Receiver %p is not registered for control "
   4.351 -               "messages of type %d.\n", hnd, type);
   4.352 -    else
   4.353 -        ctrl_if_rxmsg_handler[type] = ctrl_if_rxmsg_default_handler;
   4.354 -
   4.355 -    spin_unlock_irqrestore(&ctrl_if_lock, flags);
   4.356 -
   4.357 -    /* Ensure that @hnd will not be executed after this function returns. */
   4.358 -    tasklet_unlock_wait(&ctrl_if_rx_tasklet);
   4.359 -}
   4.360 -
   4.361 -void ctrl_if_suspend(void)
   4.362 -{
   4.363 -    free_irq(ctrl_if_irq, NULL);
   4.364 -    unbind_evtchn_from_irq(ctrl_if_evtchn);
   4.365 -}
   4.366 -
   4.367 -/** Reset the control interface progress pointers.
   4.368 - * Marks the queues empty if 'clear' non-zero.
   4.369 - */
   4.370 -void ctrl_if_reset(int clear){
   4.371 -    control_if_t *ctrl_if = get_ctrl_if();
   4.372 -
   4.373 -    if(clear){
   4.374 -        *ctrl_if = (control_if_t){};
   4.375 -    }
   4.376 -    ctrl_if_tx_resp_cons = ctrl_if->tx_resp_prod;
   4.377 -    ctrl_if_rx_req_cons  = ctrl_if->rx_resp_prod;
   4.378 -}
   4.379 -
   4.380 -void ctrl_if_resume(void)
   4.381 -{
   4.382 -    if ( start_info.flags & SIF_INITDOMAIN )
   4.383 -    {
   4.384 -        /*
   4.385 -         * The initial domain must create its own domain-controller link.
   4.386 -         * The controller is probably not running at this point, but will
    4.387 -         * pick up its end of the event channel from initdom_ctrlif_domcontroller_port via an ioctl.
   4.388 -         */
   4.389 -        evtchn_op_t op;
   4.390 -        op.cmd = EVTCHNOP_bind_interdomain;
   4.391 -        op.u.bind_interdomain.dom1 = DOMID_SELF;
   4.392 -        op.u.bind_interdomain.dom2 = DOMID_SELF;
   4.393 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
   4.394 -            BUG();
   4.395 -        start_info.domain_controller_evtchn = op.u.bind_interdomain.port1;
   4.396 -        initdom_ctrlif_domcontroller_port   = op.u.bind_interdomain.port2;
   4.397 -    }
   4.398 -
   4.399 -    ctrl_if_reset(0);
   4.400 -
   4.401 -    ctrl_if_evtchn = start_info.domain_controller_evtchn;
   4.402 -    ctrl_if_irq    = bind_evtchn_to_irq(ctrl_if_evtchn);
   4.403 -
   4.404 -#define SA_STATIC_ACTION 0x01000000 /* so that free_irq() doesn't do kfree() */
   4.405 -    memset(&ctrl_if_irq_action, 0, sizeof(ctrl_if_irq_action));
   4.406 -    ctrl_if_irq_action.handler = ctrl_if_interrupt;
   4.407 -    ctrl_if_irq_action.name    = "ctrl-if";
   4.408 -    ctrl_if_irq_action.flags   = SA_STATIC_ACTION;
   4.409 -    (void)setup_irq(ctrl_if_irq, &ctrl_if_irq_action);
   4.410 -}
   4.411 -
   4.412 -void __init ctrl_if_init(void)
   4.413 -{
    4.414 -    int i;
   4.415 -
   4.416 -    for ( i = 0; i < 256; i++ )
   4.417 -        ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
   4.418 -    ctrl_if_rxmsg_deferred_tq.routine = __ctrl_if_rxmsg_deferred;
   4.419 -
   4.420 -    spin_lock_init(&ctrl_if_lock);
   4.421 -
   4.422 -    ctrl_if_reset(1);
   4.423 -    ctrl_if_resume();
   4.424 -}
   4.425 -
   4.426 -
   4.427 -/* This is called after it is safe to call schedule_task(). */
   4.428 -static int __init ctrl_if_late_setup(void)
   4.429 -{
   4.430 -    safe_to_schedule_task = 1;
   4.431 -    return 0;
   4.432 -}
   4.433 -__initcall(ctrl_if_late_setup);
   4.434 -
   4.435 -
   4.436 -/*
   4.437 - * !! The following are DANGEROUS FUNCTIONS !!
   4.438 - * Use with care [for example, see xencons_force_flush()].
   4.439 - */
   4.440 -
   4.441 -int ctrl_if_transmitter_empty(void)
   4.442 -{
   4.443 -    return (get_ctrl_if()->tx_req_prod == ctrl_if_tx_resp_cons);
   4.444 -}
   4.445 -
   4.446 -void ctrl_if_discard_responses(void)
   4.447 -{
   4.448 -    ctrl_if_tx_resp_cons = get_ctrl_if()->tx_resp_prod;
   4.449 -}
   4.450 -
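
The 2.4 control-interface code is likewise dropped in favour of the shared copy at linux-2.6.7-xen-sparse/arch/xen/kernel/ctrl_if.c. The hedged sketch below shows how a driver typically uses the API defined in the removed file: register a receiver for one message type and send a request whose completion arrives through a callback. The function signatures and the CALLBACK_IN_BLOCKING_CONTEXT flag match the source above; CMSG_EXAMPLE, the handler bodies and the module hook are hypothetical.

    /*
     * Illustrative sketch only -- not part of this changeset. It exercises
     * the control-interface API defined in the file removed above.
     * CMSG_EXAMPLE, the handler bodies and the module hook are hypothetical;
     * the signatures and CALLBACK_IN_BLOCKING_CONTEXT match the removed code.
     */
    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/sched.h>
    #include <linux/string.h>
    #include <asm/ctrl_if.h>

    #define CMSG_EXAMPLE 200                 /* hypothetical message type */

    /* Handles incoming CMSG_EXAMPLE requests; runs in process context
     * because the receiver is registered with CALLBACK_IN_BLOCKING_CONTEXT. */
    static void example_rx(ctrl_msg_t *msg, unsigned long id)
    {
        /* ... act on msg->subtype and msg->msg[] ... */
        msg->length = 0;                     /* empty reply */
        ctrl_if_send_response(msg);          /* may reuse the request in place */
    }

    /* Completion callback for requests we originate. */
    static void example_tx_done(ctrl_msg_t *msg, unsigned long id)
    {
        /* ... inspect the controller's response ... */
    }

    static int __init example_init(void)
    {
        ctrl_msg_t msg;

        if ( !ctrl_if_register_receiver(CMSG_EXAMPLE, example_rx,
                                        CALLBACK_IN_BLOCKING_CONTEXT) )
            return -EBUSY;

        memset(&msg, 0, sizeof(msg));
        msg.type   = CMSG_EXAMPLE;
        msg.length = 0;

        /* Sleeps interruptibly if the tx ring is full. */
        return ctrl_if_send_message_block(&msg, example_tx_done, 0,
                                          TASK_INTERRUPTIBLE);
    }
    module_init(example_init);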
     5.1 --- a/linux-2.4.26-xen-sparse/arch/xen/kernel/evtchn.c	Mon Aug 02 10:30:38 2004 +0000
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,477 +0,0 @@
     5.4 -/******************************************************************************
     5.5 - * evtchn.c
     5.6 - * 
     5.7 - * Communication via Xen event channels.
     5.8 - * 
     5.9 - * Copyright (c) 2002-2004, K A Fraser
    5.10 - */
    5.11 -
    5.12 -#include <linux/config.h>
    5.13 -#include <linux/irq.h>
    5.14 -#include <linux/interrupt.h>
    5.15 -#include <linux/sched.h>
    5.16 -#include <linux/kernel_stat.h>
    5.17 -#include <asm/atomic.h>
    5.18 -#include <asm/system.h>
    5.19 -#include <asm/ptrace.h>
    5.20 -#include <asm/synch_bitops.h>
    5.21 -#include <asm/ctrl_if.h>
    5.22 -#include <asm/hypervisor.h>
    5.23 -#include <asm/hypervisor-ifs/event_channel.h>
    5.24 -#include <asm/hypervisor-ifs/physdev.h>
    5.25 -
    5.26 -/*
    5.27 - * This lock protects updates to the following mapping and reference-count
    5.28 - * arrays. The lock does not need to be acquired to read the mapping tables.
    5.29 - */
    5.30 -static spinlock_t irq_mapping_update_lock;
    5.31 -
    5.32 -/* IRQ <-> event-channel mappings. */
    5.33 -static int evtchn_to_irq[NR_EVENT_CHANNELS];
    5.34 -static int irq_to_evtchn[NR_IRQS];
    5.35 -
    5.36 -/* IRQ <-> VIRQ mapping. */
    5.37 -static int virq_to_irq[NR_VIRQS];
    5.38 -
    5.39 -/* Reference counts for bindings to IRQs. */
    5.40 -static int irq_bindcount[NR_IRQS];
    5.41 -
    5.42 -/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
    5.43 -static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
    5.44 -
    5.45 -/* Upcall to generic IRQ layer. */
    5.46 -extern asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs);
    5.47 -
    5.48 -#define VALID_EVTCHN(_chn) ((_chn) != -1)
    5.49 -
    5.50 -void evtchn_do_upcall(struct pt_regs *regs)
    5.51 -{
    5.52 -    unsigned long  l1, l2;
    5.53 -    unsigned int   l1i, l2i, port;
    5.54 -    int            irq;
    5.55 -    unsigned long  flags;
    5.56 -    shared_info_t *s = HYPERVISOR_shared_info;
    5.57 -
    5.58 -    local_irq_save(flags);
    5.59 -    
    5.60 -    while ( s->vcpu_data[0].evtchn_upcall_pending )
    5.61 -    {
    5.62 -        s->vcpu_data[0].evtchn_upcall_pending = 0;
    5.63 -        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
    5.64 -        l1 = xchg(&s->evtchn_pending_sel, 0);
    5.65 -        while ( (l1i = ffs(l1)) != 0 )
    5.66 -        {
    5.67 -            l1i--;
    5.68 -            l1 &= ~(1 << l1i);
    5.69 -        
    5.70 -            l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
    5.71 -            while ( (l2i = ffs(l2)) != 0 )
    5.72 -            {
    5.73 -                l2i--;
    5.74 -                l2 &= ~(1 << l2i);
    5.75 -            
    5.76 -                port = (l1i << 5) + l2i;
    5.77 -                if ( (irq = evtchn_to_irq[port]) != -1 )
    5.78 -                    do_IRQ(irq, regs);
    5.79 -                else
    5.80 -                    evtchn_device_upcall(port);
    5.81 -            }
    5.82 -        }
    5.83 -    }
    5.84 -
    5.85 -    local_irq_restore(flags);
    5.86 -}
    5.87 -
    5.88 -
    5.89 -static int find_unbound_irq(void)
    5.90 -{
    5.91 -    int irq;
    5.92 -
    5.93 -    for ( irq = 0; irq < NR_IRQS; irq++ )
    5.94 -        if ( irq_bindcount[irq] == 0 )
    5.95 -            break;
    5.96 -
    5.97 -    if ( irq == NR_IRQS )
    5.98 -        panic("No available IRQ to bind to: increase NR_IRQS!\n");
    5.99 -
   5.100 -    return irq;
   5.101 -}
   5.102 -
   5.103 -int bind_virq_to_irq(int virq)
   5.104 -{
   5.105 -    evtchn_op_t op;
   5.106 -    int evtchn, irq;
   5.107 -
   5.108 -    spin_lock(&irq_mapping_update_lock);
   5.109 -
   5.110 -    if ( (irq = virq_to_irq[virq]) == -1 )
   5.111 -    {
   5.112 -        op.cmd              = EVTCHNOP_bind_virq;
   5.113 -        op.u.bind_virq.virq = virq;
   5.114 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
   5.115 -            panic("Failed to bind virtual IRQ %d\n", virq);
   5.116 -        evtchn = op.u.bind_virq.port;
   5.117 -
   5.118 -        irq = find_unbound_irq();
   5.119 -        evtchn_to_irq[evtchn] = irq;
   5.120 -        irq_to_evtchn[irq]    = evtchn;
   5.121 -
   5.122 -        virq_to_irq[virq] = irq;
   5.123 -    }
   5.124 -
   5.125 -    irq_bindcount[irq]++;
   5.126 -
   5.127 -    spin_unlock(&irq_mapping_update_lock);
   5.128 -    
   5.129 -    return irq;
   5.130 -}
   5.131 -
   5.132 -void unbind_virq_from_irq(int virq)
   5.133 -{
   5.134 -    evtchn_op_t op;
   5.135 -    int irq    = virq_to_irq[virq];
   5.136 -    int evtchn = irq_to_evtchn[irq];
   5.137 -
   5.138 -    spin_lock(&irq_mapping_update_lock);
   5.139 -
   5.140 -    if ( --irq_bindcount[irq] == 0 )
   5.141 -    {
   5.142 -        op.cmd          = EVTCHNOP_close;
   5.143 -        op.u.close.dom  = DOMID_SELF;
   5.144 -        op.u.close.port = evtchn;
   5.145 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
   5.146 -            panic("Failed to unbind virtual IRQ %d\n", virq);
   5.147 -
   5.148 -        evtchn_to_irq[evtchn] = -1;
   5.149 -        irq_to_evtchn[irq]    = -1;
   5.150 -        virq_to_irq[virq]     = -1;
   5.151 -    }
   5.152 -
   5.153 -    spin_unlock(&irq_mapping_update_lock);
   5.154 -}
   5.155 -
   5.156 -int bind_evtchn_to_irq(int evtchn)
   5.157 -{
   5.158 -    int irq;
   5.159 -
   5.160 -    spin_lock(&irq_mapping_update_lock);
   5.161 -
   5.162 -    if ( (irq = evtchn_to_irq[evtchn]) == -1 )
   5.163 -    {
   5.164 -        irq = find_unbound_irq();
   5.165 -        evtchn_to_irq[evtchn] = irq;
   5.166 -        irq_to_evtchn[irq]    = evtchn;
   5.167 -    }
   5.168 -
   5.169 -    irq_bindcount[irq]++;
   5.170 -
   5.171 -    spin_unlock(&irq_mapping_update_lock);
   5.172 -    
   5.173 -    return irq;
   5.174 -}
   5.175 -
   5.176 -void unbind_evtchn_from_irq(int evtchn)
   5.177 -{
   5.178 -    int irq = evtchn_to_irq[evtchn];
   5.179 -
   5.180 -    spin_lock(&irq_mapping_update_lock);
   5.181 -
   5.182 -    if ( --irq_bindcount[irq] == 0 )
   5.183 -    {
   5.184 -        evtchn_to_irq[evtchn] = -1;
   5.185 -        irq_to_evtchn[irq]    = -1;
   5.186 -    }
   5.187 -
   5.188 -    spin_unlock(&irq_mapping_update_lock);
   5.189 -}
   5.190 -
   5.191 -
   5.192 -/*
   5.193 - * Interface to generic handling in irq.c
   5.194 - */
   5.195 -
   5.196 -static unsigned int startup_dynirq(unsigned int irq)
   5.197 -{
   5.198 -    unmask_evtchn(irq_to_evtchn[irq]);
   5.199 -    return 0;
   5.200 -}
   5.201 -
   5.202 -static void shutdown_dynirq(unsigned int irq)
   5.203 -{
   5.204 -    mask_evtchn(irq_to_evtchn[irq]);
   5.205 -}
   5.206 -
   5.207 -static void enable_dynirq(unsigned int irq)
   5.208 -{
   5.209 -    unmask_evtchn(irq_to_evtchn[irq]);
   5.210 -}
   5.211 -
   5.212 -static void disable_dynirq(unsigned int irq)
   5.213 -{
   5.214 -    mask_evtchn(irq_to_evtchn[irq]);
   5.215 -}
   5.216 -
   5.217 -static void ack_dynirq(unsigned int irq)
   5.218 -{
   5.219 -    mask_evtchn(irq_to_evtchn[irq]);
   5.220 -    clear_evtchn(irq_to_evtchn[irq]);
   5.221 -}
   5.222 -
   5.223 -static void end_dynirq(unsigned int irq)
   5.224 -{
   5.225 -    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
   5.226 -        unmask_evtchn(irq_to_evtchn[irq]);
   5.227 -}
   5.228 -
   5.229 -static struct hw_interrupt_type dynirq_type = {
   5.230 -    "Dynamic-irq",
   5.231 -    startup_dynirq,
   5.232 -    shutdown_dynirq,
   5.233 -    enable_dynirq,
   5.234 -    disable_dynirq,
   5.235 -    ack_dynirq,
   5.236 -    end_dynirq,
   5.237 -    NULL
   5.238 -};
   5.239 -
   5.240 -static inline void pirq_unmask_notify(int pirq)
   5.241 -{
   5.242 -    physdev_op_t op;
   5.243 -    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
   5.244 -    {
   5.245 -        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
   5.246 -        (void)HYPERVISOR_physdev_op(&op);
   5.247 -    }
   5.248 -}
   5.249 -
   5.250 -static inline void pirq_query_unmask(int pirq)
   5.251 -{
   5.252 -    physdev_op_t op;
   5.253 -    op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
   5.254 -    op.u.irq_status_query.irq = pirq;
   5.255 -    (void)HYPERVISOR_physdev_op(&op);
   5.256 -    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
   5.257 -    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
   5.258 -        set_bit(pirq, &pirq_needs_unmask_notify[0]);
   5.259 -}
   5.260 -
   5.261 -/*
   5.262 - * On startup, if there is no action associated with the IRQ then we are
   5.263 - * probing. In this case we should not share with others as it will confuse us.
   5.264 - */
   5.265 -#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
   5.266 -
   5.267 -static unsigned int startup_pirq(unsigned int irq)
   5.268 -{
   5.269 -    evtchn_op_t op;
   5.270 -    int evtchn;
   5.271 -
   5.272 -    op.cmd               = EVTCHNOP_bind_pirq;
   5.273 -    op.u.bind_pirq.pirq  = irq;
   5.274 -    /* NB. We are happy to share unless we are probing. */
   5.275 -    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
   5.276 -    if ( HYPERVISOR_event_channel_op(&op) != 0 )
   5.277 -    {
   5.278 -        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
   5.279 -            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
   5.280 -        return 0;
   5.281 -    }
   5.282 -    evtchn = op.u.bind_pirq.port;
   5.283 -
   5.284 -    pirq_query_unmask(irq_to_pirq(irq));
   5.285 -
   5.286 -    evtchn_to_irq[evtchn] = irq;
   5.287 -    irq_to_evtchn[irq]    = evtchn;
   5.288 -
   5.289 -    unmask_evtchn(evtchn);
   5.290 -    pirq_unmask_notify(irq_to_pirq(irq));
   5.291 -
   5.292 -    return 0;
   5.293 -}
   5.294 -
   5.295 -static void shutdown_pirq(unsigned int irq)
   5.296 -{
   5.297 -    evtchn_op_t op;
   5.298 -    int evtchn = irq_to_evtchn[irq];
   5.299 -
   5.300 -    if ( !VALID_EVTCHN(evtchn) )
   5.301 -        return;
   5.302 -
   5.303 -    mask_evtchn(evtchn);
   5.304 -
   5.305 -    op.cmd          = EVTCHNOP_close;
   5.306 -    op.u.close.dom  = DOMID_SELF;
   5.307 -    op.u.close.port = evtchn;
   5.308 -    if ( HYPERVISOR_event_channel_op(&op) != 0 )
   5.309 -        panic("Failed to unbind physical IRQ %d\n", irq);
   5.310 -
   5.311 -    evtchn_to_irq[evtchn] = -1;
   5.312 -    irq_to_evtchn[irq]    = -1;
   5.313 -}
   5.314 -
   5.315 -static void enable_pirq(unsigned int irq)
   5.316 -{
   5.317 -    int evtchn = irq_to_evtchn[irq];
   5.318 -    if ( !VALID_EVTCHN(evtchn) )
   5.319 -        return;
   5.320 -    unmask_evtchn(evtchn);
   5.321 -    pirq_unmask_notify(irq_to_pirq(irq));
   5.322 -}
   5.323 -
   5.324 -static void disable_pirq(unsigned int irq)
   5.325 -{
   5.326 -    int evtchn = irq_to_evtchn[irq];
   5.327 -    if ( !VALID_EVTCHN(evtchn) )
   5.328 -        return;
   5.329 -    mask_evtchn(evtchn);
   5.330 -}
   5.331 -
   5.332 -static void ack_pirq(unsigned int irq)
   5.333 -{
   5.334 -    int evtchn = irq_to_evtchn[irq];
   5.335 -    if ( !VALID_EVTCHN(evtchn) )
   5.336 -        return;
   5.337 -    mask_evtchn(evtchn);
   5.338 -    clear_evtchn(evtchn);
   5.339 -}
   5.340 -
   5.341 -static void end_pirq(unsigned int irq)
   5.342 -{
   5.343 -    int evtchn = irq_to_evtchn[irq];
   5.344 -    if ( !VALID_EVTCHN(evtchn) )
   5.345 -        return;
   5.346 -    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
   5.347 -    {
   5.348 -        unmask_evtchn(evtchn);
   5.349 -        pirq_unmask_notify(irq_to_pirq(irq));
   5.350 -    }
   5.351 -}
   5.352 -
   5.353 -static struct hw_interrupt_type pirq_type = {
   5.354 -    "Phys-irq",
   5.355 -    startup_pirq,
   5.356 -    shutdown_pirq,
   5.357 -    enable_pirq,
   5.358 -    disable_pirq,
   5.359 -    ack_pirq,
   5.360 -    end_pirq,
   5.361 -    NULL
   5.362 -};
   5.363 -
   5.364 -static void misdirect_interrupt(int irq, void *dev_id, struct pt_regs *regs)
   5.365 -{
   5.366 -    /* nothing */
   5.367 -}
   5.368 -
   5.369 -static struct irqaction misdirect_action = {
   5.370 -    misdirect_interrupt, 
   5.371 -    SA_INTERRUPT, 
   5.372 -    0, 
   5.373 -    "misdirect", 
   5.374 -    NULL, 
   5.375 -    NULL
   5.376 -};
   5.377 -
   5.378 -void irq_suspend(void)
   5.379 -{
   5.380 -    int virq, irq, evtchn;
   5.381 -
   5.382 -    /* Unbind VIRQs from event channels. */
   5.383 -    for ( virq = 0; virq < NR_VIRQS; virq++ )
   5.384 -    {
   5.385 -        if ( (irq = virq_to_irq[virq]) == -1 )
   5.386 -            continue;
   5.387 -        evtchn = irq_to_evtchn[irq];
   5.388 -
   5.389 -        /* Mark the event channel as unused in our table. */
   5.390 -        evtchn_to_irq[evtchn] = -1;
   5.391 -        irq_to_evtchn[irq]    = -1;
   5.392 -    }
   5.393 -
   5.394 -    /*
   5.395 -     * We should now be unbound from all event channels. Stale bindings to 
   5.396 -     * PIRQs and/or inter-domain event channels will cause us to barf here.
   5.397 -     */
   5.398 -    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
   5.399 -        if ( evtchn_to_irq[evtchn] != -1 )
   5.400 -            panic("Suspend attempted while bound to evtchn %d.\n", evtchn);
   5.401 -}
   5.402 -
   5.403 -
   5.404 -void irq_resume(void)
   5.405 -{
   5.406 -    evtchn_op_t op;
   5.407 -    int         virq, irq, evtchn;
   5.408 -
   5.409 -    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
   5.410 -        mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
   5.411 -
   5.412 -    for ( virq = 0; virq < NR_VIRQS; virq++ )
   5.413 -    {
   5.414 -        if ( (irq = virq_to_irq[virq]) == -1 )
   5.415 -            continue;
   5.416 -
   5.417 -        /* Get a new binding from Xen. */
   5.418 -        op.cmd              = EVTCHNOP_bind_virq;
   5.419 -        op.u.bind_virq.virq = virq;
   5.420 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
   5.421 -            panic("Failed to bind virtual IRQ %d\n", virq);
   5.422 -        evtchn = op.u.bind_virq.port;
   5.423 -        
   5.424 -        /* Record the new mapping. */
   5.425 -        evtchn_to_irq[evtchn] = irq;
   5.426 -        irq_to_evtchn[irq]    = evtchn;
   5.427 -
   5.428 -        /* Ready for use. */
   5.429 -        unmask_evtchn(evtchn);
   5.430 -    }
   5.431 -}
   5.432 -
   5.433 -void __init init_IRQ(void)
   5.434 -{
   5.435 -    int i;
   5.436 -
   5.437 -    spin_lock_init(&irq_mapping_update_lock);
   5.438 -
   5.439 -    /* No VIRQ -> IRQ mappings. */
   5.440 -    for ( i = 0; i < NR_VIRQS; i++ )
   5.441 -        virq_to_irq[i] = -1;
   5.442 -
   5.443 -    /* No event-channel -> IRQ mappings. */
   5.444 -    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
   5.445 -    {
   5.446 -        evtchn_to_irq[i] = -1;
   5.447 -        mask_evtchn(i); /* No event channels are 'live' right now. */
   5.448 -    }
   5.449 -
   5.450 -    /* No IRQ -> event-channel mappings. */
   5.451 -    for ( i = 0; i < NR_IRQS; i++ )
   5.452 -        irq_to_evtchn[i] = -1;
   5.453 -
   5.454 -    for ( i = 0; i < NR_DYNIRQS; i++ )
   5.455 -    {
   5.456 -        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
   5.457 -        irq_bindcount[dynirq_to_irq(i)] = 0;
   5.458 -
   5.459 -        irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
   5.460 -        irq_desc[dynirq_to_irq(i)].action  = 0;
   5.461 -        irq_desc[dynirq_to_irq(i)].depth   = 1;
   5.462 -        irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
   5.463 -    }
   5.464 -
   5.465 -    for ( i = 0; i < NR_PIRQS; i++ )
   5.466 -    {
   5.467 -        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
   5.468 -        irq_bindcount[pirq_to_irq(i)] = 1;
   5.469 -
   5.470 -        irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
   5.471 -        irq_desc[pirq_to_irq(i)].action  = 0;
   5.472 -        irq_desc[pirq_to_irq(i)].depth   = 1;
   5.473 -        irq_desc[pirq_to_irq(i)].handler = &pirq_type;
   5.474 -    }
   5.475 -
   5.476 -    (void)setup_irq(bind_virq_to_irq(VIRQ_MISDIRECT), &misdirect_action);
   5.477 -
   5.478 -    /* This needs to be done early, but after the IRQ subsystem is alive. */
   5.479 -    ctrl_if_init();
   5.480 -}
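
The file removed above is the 2.4 copy of the event-channel demultiplexer; mkbuildtree, later in this changeset, re-links the shared 2.6 version in its place. Drivers consumed it with the same two-step pattern init_IRQ() uses for VIRQ_MISDIRECT: bind a virtual IRQ to a dynamic Linux IRQ, then install an ordinary irqaction. A minimal hedged sketch using only calls visible in this changeset (the handler body and names are hypothetical, and the binding helpers are assumed to be declared to the caller):

    /* Sketch: bind a Xen VIRQ and attach a 2.4-style handler. */
    static void example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
    {
        /* A real driver would inspect its shared ring or device state here. */
    }

    static struct irqaction example_action = {
        example_interrupt, SA_INTERRUPT, 0, "example", NULL, NULL
    };

    static void __init example_bind(void)
    {
        int irq = bind_virq_to_irq(VIRQ_MISDIRECT);  /* returns a dynamic IRQ */
        (void)setup_irq(irq, &example_action);       /* upcalls now reach us */
    }
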
     6.1 --- a/linux-2.4.26-xen-sparse/arch/xen/mm/hypervisor.c	Mon Aug 02 10:30:38 2004 +0000
     6.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.3 @@ -1,348 +0,0 @@
     6.4 -/******************************************************************************
     6.5 - * xen/mm/hypervisor.c
     6.6 - * 
     6.7 - * Update page tables via the hypervisor.
     6.8 - * 
     6.9 - * Copyright (c) 2002, K A Fraser
    6.10 - */
    6.11 -
    6.12 -#include <linux/config.h>
    6.13 -#include <linux/sched.h>
    6.14 -#include <linux/mm.h>
    6.15 -#include <linux/vmalloc.h>
    6.16 -#include <asm/hypervisor.h>
    6.17 -#include <asm/page.h>
    6.18 -#include <asm/pgtable.h>
    6.19 -#include <asm/multicall.h>
    6.20 -
    6.21 -/*
    6.22 - * This suffices to protect us if we ever move to SMP domains.
    6.23 - * Further, it protects us against interrupts. At the very least, this is
    6.24 - * required for the network driver which flushes the update queue before
    6.25 - * pushing new receive buffers.
    6.26 - */
    6.27 -static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
    6.28 -
    6.29 -#define QUEUE_SIZE 2048
    6.30 -static mmu_update_t update_queue[QUEUE_SIZE];
    6.31 -unsigned int mmu_update_queue_idx = 0;
    6.32 -#define idx mmu_update_queue_idx
    6.33 -
    6.34 -#if MMU_UPDATE_DEBUG > 0
    6.35 -page_update_debug_t update_debug_queue[QUEUE_SIZE] = {{0}};
    6.36 -#undef queue_l1_entry_update
    6.37 -#undef queue_l2_entry_update
    6.38 -static void DEBUG_allow_pt_reads(void)
    6.39 -{
    6.40 -    pte_t *pte;
    6.41 -    mmu_update_t update;
    6.42 -    int i;
    6.43 -    for ( i = idx-1; i >= 0; i-- )
    6.44 -    {
    6.45 -        pte = update_debug_queue[i].ptep;
    6.46 -        if ( pte == NULL ) continue;
    6.47 -        update_debug_queue[i].ptep = NULL;
    6.48 -        update.ptr = virt_to_machine(pte);
    6.49 -        update.val = update_debug_queue[i].pteval;
    6.50 -        HYPERVISOR_mmu_update(&update, 1, NULL);
    6.51 -    }
    6.52 -}
    6.53 -static void DEBUG_disallow_pt_read(unsigned long va)
    6.54 -{
    6.55 -    pte_t *pte;
    6.56 -    pmd_t *pmd;
    6.57 -    pgd_t *pgd;
    6.58 -    unsigned long pteval;
    6.59 -    /*
    6.60 -     * We may fault because of an already outstanding update.
    6.61 -     * That's okay -- it'll get fixed up in the fault handler.
    6.62 -     */
    6.63 -    mmu_update_t update;
    6.64 -    pgd = pgd_offset_k(va);
    6.65 -    pmd = pmd_offset(pgd, va);
    6.66 -    pte = pte_offset(pmd, va);
    6.67 -    update.ptr = virt_to_machine(pte);
    6.68 -    pteval = *(unsigned long *)pte;
    6.69 -    update.val = pteval & ~_PAGE_PRESENT;
    6.70 -    HYPERVISOR_mmu_update(&update, 1, NULL);
    6.71 -    update_debug_queue[idx].ptep = pte;
    6.72 -    update_debug_queue[idx].pteval = pteval;
    6.73 -}
    6.74 -#endif
    6.75 -
    6.76 -#if MMU_UPDATE_DEBUG > 1
    6.77 -#undef queue_pt_switch
    6.78 -#undef queue_tlb_flush
    6.79 -#undef queue_invlpg
    6.80 -#undef queue_pgd_pin
    6.81 -#undef queue_pgd_unpin
    6.82 -#undef queue_pte_pin
    6.83 -#undef queue_pte_unpin
    6.84 -#endif
    6.85 -
    6.86 -
    6.87 -/*
    6.88 - * MULTICALL_flush_page_update_queue:
    6.89 - *   This is a version of the flush which queues as part of a multicall.
    6.90 - */
    6.91 -void MULTICALL_flush_page_update_queue(void)
    6.92 -{
    6.93 -    unsigned long flags;
    6.94 -    unsigned int _idx;
    6.95 -    spin_lock_irqsave(&update_lock, flags);
    6.96 -    if ( (_idx = idx) != 0 ) 
    6.97 -    {
    6.98 -#if MMU_UPDATE_DEBUG > 1
    6.99 -        printk("Flushing %d entries from pt update queue\n", idx);
   6.100 -#endif
   6.101 -#if MMU_UPDATE_DEBUG > 0
   6.102 -        DEBUG_allow_pt_reads();
   6.103 -#endif
   6.104 -        idx = 0;
   6.105 -        wmb(); /* Make sure index is cleared first to avoid double updates. */
   6.106 -        queue_multicall3(__HYPERVISOR_mmu_update, 
   6.107 -                         (unsigned long)update_queue, 
   6.108 -                         (unsigned long)_idx, 
   6.109 -                         (unsigned long)NULL);
   6.110 -    }
   6.111 -    spin_unlock_irqrestore(&update_lock, flags);
   6.112 -}
   6.113 -
   6.114 -static inline void __flush_page_update_queue(void)
   6.115 -{
   6.116 -    unsigned int _idx = idx;
   6.117 -#if MMU_UPDATE_DEBUG > 1
   6.118 -    printk("Flushing %d entries from pt update queue\n", idx);
   6.119 -#endif
   6.120 -#if MMU_UPDATE_DEBUG > 0
   6.121 -    DEBUG_allow_pt_reads();
   6.122 -#endif
   6.123 -    idx = 0;
   6.124 -    wmb(); /* Make sure index is cleared first to avoid double updates. */
   6.125 -    if ( unlikely(HYPERVISOR_mmu_update(update_queue, _idx, NULL) < 0) )
   6.126 -    {
   6.127 -        printk(KERN_ALERT "Failed to execute MMU updates.\n");
   6.128 -        BUG();
   6.129 -    }
   6.130 -}
   6.131 -
   6.132 -void _flush_page_update_queue(void)
   6.133 -{
   6.134 -    unsigned long flags;
   6.135 -    spin_lock_irqsave(&update_lock, flags);
   6.136 -    if ( idx != 0 ) __flush_page_update_queue();
   6.137 -    spin_unlock_irqrestore(&update_lock, flags);
   6.138 -}
   6.139 -
   6.140 -static inline void increment_index(void)
   6.141 -{
   6.142 -    idx++;
   6.143 -    if ( unlikely(idx == QUEUE_SIZE) ) __flush_page_update_queue();
   6.144 -}
   6.145 -
   6.146 -void queue_l1_entry_update(pte_t *ptr, unsigned long val)
   6.147 -{
   6.148 -    unsigned long flags;
   6.149 -    spin_lock_irqsave(&update_lock, flags);
   6.150 -#if MMU_UPDATE_DEBUG > 0
   6.151 -    DEBUG_disallow_pt_read((unsigned long)ptr);
   6.152 -#endif
   6.153 -    update_queue[idx].ptr = virt_to_machine(ptr);
   6.154 -    update_queue[idx].val = val;
   6.155 -    increment_index();
   6.156 -    spin_unlock_irqrestore(&update_lock, flags);
   6.157 -}
   6.158 -
   6.159 -void queue_l2_entry_update(pmd_t *ptr, unsigned long val)
   6.160 -{
   6.161 -    unsigned long flags;
   6.162 -    spin_lock_irqsave(&update_lock, flags);
   6.163 -    update_queue[idx].ptr = virt_to_machine(ptr);
   6.164 -    update_queue[idx].val = val;
   6.165 -    increment_index();
   6.166 -    spin_unlock_irqrestore(&update_lock, flags);
   6.167 -}
   6.168 -
   6.169 -void queue_pt_switch(unsigned long ptr)
   6.170 -{
   6.171 -    unsigned long flags;
   6.172 -    spin_lock_irqsave(&update_lock, flags);
   6.173 -    update_queue[idx].ptr  = phys_to_machine(ptr);
   6.174 -    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
   6.175 -    update_queue[idx].val  = MMUEXT_NEW_BASEPTR;
   6.176 -    increment_index();
   6.177 -    spin_unlock_irqrestore(&update_lock, flags);
   6.178 -}
   6.179 -
   6.180 -void queue_tlb_flush(void)
   6.181 -{
   6.182 -    unsigned long flags;
   6.183 -    spin_lock_irqsave(&update_lock, flags);
   6.184 -    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND;
   6.185 -    update_queue[idx].val  = MMUEXT_TLB_FLUSH;
   6.186 -    increment_index();
   6.187 -    spin_unlock_irqrestore(&update_lock, flags);
   6.188 -}
   6.189 -
   6.190 -void queue_invlpg(unsigned long ptr)
   6.191 -{
   6.192 -    unsigned long flags;
   6.193 -    spin_lock_irqsave(&update_lock, flags);
   6.194 -    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND;
   6.195 -    update_queue[idx].ptr |= ptr & PAGE_MASK;
   6.196 -    update_queue[idx].val  = MMUEXT_INVLPG;
   6.197 -    increment_index();
   6.198 -    spin_unlock_irqrestore(&update_lock, flags);
   6.199 -}
   6.200 -
   6.201 -void queue_pgd_pin(unsigned long ptr)
   6.202 -{
   6.203 -    unsigned long flags;
   6.204 -    spin_lock_irqsave(&update_lock, flags);
   6.205 -    update_queue[idx].ptr  = phys_to_machine(ptr);
   6.206 -    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
   6.207 -    update_queue[idx].val  = MMUEXT_PIN_L2_TABLE;
   6.208 -    increment_index();
   6.209 -    spin_unlock_irqrestore(&update_lock, flags);
   6.210 -}
   6.211 -
   6.212 -void queue_pgd_unpin(unsigned long ptr)
   6.213 -{
   6.214 -    unsigned long flags;
   6.215 -    spin_lock_irqsave(&update_lock, flags);
   6.216 -    update_queue[idx].ptr  = phys_to_machine(ptr);
   6.217 -    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
   6.218 -    update_queue[idx].val  = MMUEXT_UNPIN_TABLE;
   6.219 -    increment_index();
   6.220 -    spin_unlock_irqrestore(&update_lock, flags);
   6.221 -}
   6.222 -
   6.223 -void queue_pte_pin(unsigned long ptr)
   6.224 -{
   6.225 -    unsigned long flags;
   6.226 -    spin_lock_irqsave(&update_lock, flags);
   6.227 -    update_queue[idx].ptr  = phys_to_machine(ptr);
   6.228 -    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
   6.229 -    update_queue[idx].val  = MMUEXT_PIN_L1_TABLE;
   6.230 -    increment_index();
   6.231 -    spin_unlock_irqrestore(&update_lock, flags);
   6.232 -}
   6.233 -
   6.234 -void queue_pte_unpin(unsigned long ptr)
   6.235 -{
   6.236 -    unsigned long flags;
   6.237 -    spin_lock_irqsave(&update_lock, flags);
   6.238 -    update_queue[idx].ptr  = phys_to_machine(ptr);
   6.239 -    update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
   6.240 -    update_queue[idx].val  = MMUEXT_UNPIN_TABLE;
   6.241 -    increment_index();
   6.242 -    spin_unlock_irqrestore(&update_lock, flags);
   6.243 -}
   6.244 -
   6.245 -void queue_set_ldt(unsigned long ptr, unsigned long len)
   6.246 -{
   6.247 -    unsigned long flags;
   6.248 -    spin_lock_irqsave(&update_lock, flags);
   6.249 -    update_queue[idx].ptr  = MMU_EXTENDED_COMMAND | ptr;
   6.250 -    update_queue[idx].val  = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
   6.251 -    increment_index();
   6.252 -    spin_unlock_irqrestore(&update_lock, flags);
   6.253 -}
   6.254 -
   6.255 -void queue_machphys_update(unsigned long mfn, unsigned long pfn)
   6.256 -{
   6.257 -    unsigned long flags;
   6.258 -    spin_lock_irqsave(&update_lock, flags);
   6.259 -    update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
   6.260 -    update_queue[idx].val = pfn;
   6.261 -    increment_index();
   6.262 -    spin_unlock_irqrestore(&update_lock, flags);
   6.263 -}
   6.264 -
   6.265 -#ifdef CONFIG_XEN_PHYSDEV_ACCESS
   6.266 -
   6.267 -unsigned long allocate_empty_lowmem_region(unsigned long pages)
   6.268 -{
   6.269 -    pgd_t         *pgd; 
   6.270 -    pmd_t         *pmd;
   6.271 -    pte_t         *pte;
   6.272 -    unsigned long *pfn_array;
   6.273 -    unsigned long  vstart;
   6.274 -    unsigned long  i;
   6.275 -    int            ret;
   6.276 -    unsigned int   order = get_order(pages*PAGE_SIZE);
   6.277 -
   6.278 -    vstart = __get_free_pages(GFP_KERNEL, order);
   6.279 -    if ( vstart == 0 )
   6.280 -        return 0UL;
   6.281 -
   6.282 -    pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
   6.283 -    if ( pfn_array == NULL )
   6.284 -        BUG();
   6.285 -
   6.286 -    for ( i = 0; i < (1<<order); i++ )
   6.287 -    {
   6.288 -        pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
   6.289 -        pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
   6.290 -        pte = pte_offset(pmd, (vstart + (i*PAGE_SIZE))); 
   6.291 -        pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
   6.292 -        queue_l1_entry_update(pte, 0);
    6.293 -        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = 0xdeadbeef;
   6.294 -    }
   6.295 -
   6.296 -    flush_page_update_queue();
   6.297 -
   6.298 -    ret = HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
   6.299 -                                pfn_array, 1<<order);
   6.300 -    if ( unlikely(ret != (1<<order)) )
   6.301 -    {
   6.302 -        printk(KERN_WARNING "Unable to reduce memory reservation (%d)\n", ret);
   6.303 -        BUG();
   6.304 -    }
   6.305 -
   6.306 -    vfree(pfn_array);
   6.307 -
   6.308 -    return vstart;
   6.309 -}
   6.310 -
   6.311 -void deallocate_lowmem_region(unsigned long vstart, unsigned long pages)
   6.312 -{
   6.313 -    pgd_t         *pgd; 
   6.314 -    pmd_t         *pmd;
   6.315 -    pte_t         *pte;
   6.316 -    unsigned long *pfn_array;
   6.317 -    unsigned long  i;
   6.318 -    int            ret;
   6.319 -    unsigned int   order = get_order(pages*PAGE_SIZE);
   6.320 -
   6.321 -    pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
   6.322 -    if ( pfn_array == NULL )
   6.323 -        BUG();
   6.324 -
   6.325 -    ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
   6.326 -                                pfn_array, 1<<order);
   6.327 -    if ( unlikely(ret != (1<<order)) )
   6.328 -    {
   6.329 -        printk(KERN_WARNING "Unable to increase memory reservation (%d)\n",
   6.330 -               ret);
   6.331 -        BUG();
   6.332 -    }
   6.333 -
   6.334 -    for ( i = 0; i < (1<<order); i++ )
   6.335 -    {
   6.336 -        pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
   6.337 -        pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
   6.338 -        pte = pte_offset(pmd, (vstart + (i*PAGE_SIZE)));
   6.339 -        queue_l1_entry_update(pte, (pfn_array[i]<<PAGE_SHIFT)|__PAGE_KERNEL);
    6.340 -        queue_machphys_update(pfn_array[i], (__pa(vstart)>>PAGE_SHIFT)+i);
    6.341 -        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = pfn_array[i];
   6.342 -    }
   6.343 -
   6.344 -    flush_page_update_queue();
   6.345 -
   6.346 -    vfree(pfn_array);
   6.347 -
   6.348 -    free_pages(vstart, order);
   6.349 -}
   6.350 -
   6.351 -#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
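
Everything in the deleted file funnels page-table writes through update_queue so that many updates cost a single mmu_update hypercall. A hedged caller sketch, using only the interfaces this changeset declares in asm-xen/hypervisor.h (the function and variable names are illustrative):

    /* Sketch: batch a PTE rewrite plus a single-page TLB flush. */
    #include <asm/hypervisor.h>   /* queue_*() and flush_page_update_queue() */

    static void example_remap(pte_t *ptep, unsigned long new_val, unsigned long va)
    {
        queue_l1_entry_update(ptep, new_val); /* queued; not yet visible to the MMU */
        queue_invlpg(va);                     /* queued INVLPG for this address */
        flush_page_update_queue();            /* one hypercall applies both */
    }
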
     7.1 --- a/linux-2.4.26-xen-sparse/include/asm-xen/ctrl_if.h	Mon Aug 02 10:30:38 2004 +0000
     7.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.3 @@ -1,121 +0,0 @@
     7.4 -/******************************************************************************
     7.5 - * ctrl_if.h
     7.6 - * 
     7.7 - * Management functions for special interface to the domain controller.
     7.8 - * 
     7.9 - * Copyright (c) 2004, K A Fraser
    7.10 - */
    7.11 -
    7.12 -#ifndef __ASM_XEN__CTRL_IF_H__
    7.13 -#define __ASM_XEN__CTRL_IF_H__
    7.14 -
    7.15 -#include <linux/tqueue.h>
    7.16 -#include <asm/hypervisor.h>
    7.17 -
    7.18 -typedef control_msg_t ctrl_msg_t;
    7.19 -
    7.20 -/*
    7.21 - * Callback function type. Called for asynchronous processing of received
    7.22 - * request messages, and responses to previously-transmitted request messages.
    7.23 - * The parameters are (@msg, @id).
    7.24 - *  @msg: Original request/response message (not a copy). The message can be
    7.25 - *        modified in-place by the handler (e.g., a response callback can
    7.26 - *        turn a request message into a response message in place). The message
    7.27 - *        is no longer accessible after the callback handler returns -- if the
    7.28 - *        message is required to persist for longer then it must be copied.
    7.29 - *  @id:  (Response callbacks only) The 'id' that was specified when the
    7.30 - *        original request message was queued for transmission.
    7.31 - */
    7.32 -typedef void (*ctrl_msg_handler_t)(ctrl_msg_t *, unsigned long);
    7.33 -
    7.34 -/*
    7.35 - * Send @msg to the domain controller. Execute @hnd when a response is
    7.36 - * received, passing the response message and the specified @id. This
    7.37 - * operation will not block: it will return -EAGAIN if there is no space.
    7.38 - * Notes:
    7.39 - *  1. The @msg is copied if it is transmitted and so can be freed after this
    7.40 - *     function returns.
    7.41 - *  2. If @hnd is NULL then no callback is executed.
    7.42 - */
    7.43 -int ctrl_if_send_message_noblock(
    7.44 -    ctrl_msg_t *msg, 
    7.45 -    ctrl_msg_handler_t hnd,
    7.46 -    unsigned long id);
    7.47 -
    7.48 -/*
    7.49 - * Send @msg to the domain controller. Execute @hnd when a response is
    7.50 - * received, passing the response message and the specified @id. This
    7.51 - * operation will block until the message is sent, or a signal is received
    7.52 - * for the calling process (unless @wait_state is TASK_UNINTERRUPTIBLE).
    7.53 - * Notes:
    7.54 - *  1. The @msg is copied if it is transmitted and so can be freed after this
    7.55 - *     function returns.
    7.56 - *  2. If @hnd is NULL then no callback is executed.
    7.57 - */
    7.58 -int ctrl_if_send_message_block(
    7.59 -    ctrl_msg_t *msg, 
    7.60 -    ctrl_msg_handler_t hnd, 
    7.61 -    unsigned long id, 
    7.62 -    long wait_state);
    7.63 -
    7.64 -/*
    7.65 - * Request a callback when there is /possibly/ space to immediately send a
    7.66 - * message to the domain controller. This function returns 0 if there is
     7.67 - * already space to transmit a message --- in this case the callback task /may/
    7.68 - * still be executed. If this function returns 1 then the callback /will/ be
    7.69 - * executed when space becomes available.
    7.70 - */
    7.71 -int ctrl_if_enqueue_space_callback(struct tq_struct *task);
    7.72 -
    7.73 -/*
    7.74 - * Send a response (@msg) to a message from the domain controller. This will 
    7.75 - * never block.
    7.76 - * Notes:
    7.77 - *  1. The @msg is copied and so can be freed after this function returns.
    7.78 - *  2. The @msg may be the original request message, modified in-place.
    7.79 - */
    7.80 -void ctrl_if_send_response(ctrl_msg_t *msg);
    7.81 -
    7.82 -/*
    7.83 - * Register a receiver for typed messages from the domain controller. The 
    7.84 - * handler (@hnd) is called for every received message of specified @type.
    7.85 - * Returns TRUE (non-zero) if the handler was successfully registered.
     7.86 - * If CALLBACK_IN_BLOCKING_CONTEXT is specified in @flags then callbacks will
    7.87 - * occur in a context in which it is safe to yield (i.e., process context).
    7.88 - */
    7.89 -#define CALLBACK_IN_BLOCKING_CONTEXT 1
    7.90 -int ctrl_if_register_receiver(
    7.91 -    u8 type, 
    7.92 -    ctrl_msg_handler_t hnd,
    7.93 -    unsigned int flags);
    7.94 -
    7.95 -/*
    7.96 - * Unregister a receiver for typed messages from the domain controller. The 
    7.97 - * handler (@hnd) will not be executed after this function returns.
    7.98 - */
    7.99 -void ctrl_if_unregister_receiver(u8 type, ctrl_msg_handler_t hnd);
   7.100 -
   7.101 -/* Suspend/resume notifications. */
   7.102 -void ctrl_if_suspend(void);
   7.103 -void ctrl_if_resume(void);
   7.104 -
   7.105 -/* Start-of-day setup. */
   7.106 -void ctrl_if_init(void);
   7.107 -
   7.108 -/*
   7.109 - * Returns TRUE if there are no outstanding message requests at the domain
   7.110 - * controller. This can be used to ensure that messages have really flushed
   7.111 - * through when it is not possible to use the response-callback interface.
   7.112 - * WARNING: If other subsystems are using the control interface then this
   7.113 - * function might never return TRUE!
   7.114 - */
   7.115 -int ctrl_if_transmitter_empty(void);  /* !! DANGEROUS FUNCTION !! */
   7.116 -
   7.117 -/*
   7.118 - * Manually discard response messages from the domain controller. 
   7.119 - * WARNING: This is usually done automatically -- this function should only
   7.120 - * be called when normal interrupt mechanisms are disabled!
   7.121 - */
   7.122 -void ctrl_if_discard_responses(void); /* !! DANGEROUS FUNCTION !! */
   7.123 -
    7.124 -#endif /* __ASM_XEN__CTRL_IF_H__ */
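
The header above is the entire contract a 2.4 frontend had with the domain controller; the 2.6 copy it now links to is the same interface. A hedged sketch of typical use (the message type CMSG_EXAMPLE and both function names are made up for illustration):

    /* Sketch: transmit a request and register for unsolicited messages. */
    #include <asm-xen/ctrl_if.h>

    static void example_rx(ctrl_msg_t *msg, unsigned long id)
    {
        /* msg is only valid inside the callback; copy whatever must persist. */
    }

    static int example_send(ctrl_msg_t *req)
    {
        /* Non-blocking: returns -EAGAIN when the transmit ring is full. */
        return ctrl_if_send_message_noblock(req, example_rx, 0 /* id */);
    }

    static void __init example_register(void)
    {
        /* Receive one message type in a context where sleeping is allowed. */
        (void)ctrl_if_register_receiver(CMSG_EXAMPLE, example_rx,
                                        CALLBACK_IN_BLOCKING_CONTEXT);
    }
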
     8.1 --- a/linux-2.4.26-xen-sparse/include/asm-xen/evtchn.h	Mon Aug 02 10:30:38 2004 +0000
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,83 +0,0 @@
     8.4 -/******************************************************************************
     8.5 - * evtchn.h
     8.6 - * 
     8.7 - * Communication via Xen event channels.
     8.8 - * Also definitions for the device that demuxes notifications to userspace.
     8.9 - * 
    8.10 - * Copyright (c) 2004, K A Fraser
    8.11 - */
    8.12 -
    8.13 -#ifndef __ASM_EVTCHN_H__
    8.14 -#define __ASM_EVTCHN_H__
    8.15 -
    8.16 -#include <linux/config.h>
    8.17 -#include <asm/hypervisor.h>
    8.18 -#include <asm/ptrace.h>
    8.19 -#include <asm/synch_bitops.h>
    8.20 -#include <asm/hypervisor-ifs/event_channel.h>
    8.21 -
    8.22 -/*
    8.23 - * LOW-LEVEL DEFINITIONS
    8.24 - */
    8.25 -
    8.26 -/* Entry point for notifications into Linux subsystems. */
    8.27 -void evtchn_do_upcall(struct pt_regs *regs);
    8.28 -
    8.29 -/* Entry point for notifications into the userland character device. */
    8.30 -void evtchn_device_upcall(int port);
    8.31 -
    8.32 -static inline void mask_evtchn(int port)
    8.33 -{
    8.34 -    shared_info_t *s = HYPERVISOR_shared_info;
    8.35 -    synch_set_bit(port, &s->evtchn_mask[0]);
    8.36 -}
    8.37 -
    8.38 -static inline void unmask_evtchn(int port)
    8.39 -{
    8.40 -    shared_info_t *s = HYPERVISOR_shared_info;
    8.41 -
    8.42 -    synch_clear_bit(port, &s->evtchn_mask[0]);
    8.43 -
    8.44 -    /*
    8.45 -     * The following is basically the equivalent of 'hw_resend_irq'. Just like
    8.46 -     * a real IO-APIC we 'lose the interrupt edge' if the channel is masked.
    8.47 -     */
    8.48 -    if (  synch_test_bit        (port,    &s->evtchn_pending[0]) && 
    8.49 -         !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
    8.50 -    {
    8.51 -        s->vcpu_data[0].evtchn_upcall_pending = 1;
    8.52 -        if ( !s->vcpu_data[0].evtchn_upcall_mask )
    8.53 -            evtchn_do_upcall(NULL);
    8.54 -    }
    8.55 -}
    8.56 -
    8.57 -static inline void clear_evtchn(int port)
    8.58 -{
    8.59 -    shared_info_t *s = HYPERVISOR_shared_info;
    8.60 -    synch_clear_bit(port, &s->evtchn_pending[0]);
    8.61 -}
    8.62 -
    8.63 -static inline void notify_via_evtchn(int port)
    8.64 -{
    8.65 -    evtchn_op_t op;
    8.66 -    op.cmd = EVTCHNOP_send;
    8.67 -    op.u.send.local_port = port;
    8.68 -    (void)HYPERVISOR_event_channel_op(&op);
    8.69 -}
    8.70 -
    8.71 -/*
    8.72 - * CHARACTER-DEVICE DEFINITIONS
    8.73 - */
    8.74 -
    8.75 -/* /dev/xen/evtchn resides at device number major=10, minor=200 */
    8.76 -#define EVTCHN_MINOR 200
    8.77 -
    8.78 -/* /dev/xen/evtchn ioctls: */
    8.79 -/* EVTCHN_RESET: Clear and reinit the event buffer. Clear error condition. */
    8.80 -#define EVTCHN_RESET  _IO('E', 1)
     8.81 -/* EVTCHN_BIND: Bind to the specified event-channel port. */
    8.82 -#define EVTCHN_BIND   _IO('E', 2)
    8.83 -/* EVTCHN_UNBIND: Unbind from the specified event-channel port. */
    8.84 -#define EVTCHN_UNBIND _IO('E', 3)
    8.85 -
    8.86 -#endif /* __ASM_EVTCHN_H__ */
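
The inline helpers above are the whole kernel-side API for a bound port. A common acknowledge-then-notify sequence looks like the sketch below (purely illustrative; `port` is assumed to come from one of the EVTCHNOP bindings shown elsewhere in this changeset):

    /* Sketch: service one event and signal the peer end of the channel. */
    #include <asm-xen/evtchn.h>

    static void example_service(int port)
    {
        mask_evtchn(port);        /* no further upcalls for this port */
        clear_evtchn(port);       /* acknowledge the pending bit */
        /* ... drain whatever work this channel signals ... */
        notify_via_evtchn(port);  /* EVTCHNOP_send to the remote end */
        unmask_evtchn(port);      /* may replay a lost edge, as noted above */
    }
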
     9.1 --- a/linux-2.4.26-xen-sparse/include/asm-xen/hypervisor.h	Mon Aug 02 10:30:38 2004 +0000
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,452 +0,0 @@
     9.4 -/******************************************************************************
     9.5 - * hypervisor.h
     9.6 - * 
     9.7 - * Linux-specific hypervisor handling.
     9.8 - * 
     9.9 - * Copyright (c) 2002, K A Fraser
    9.10 - */
    9.11 -
    9.12 -#ifndef __HYPERVISOR_H__
    9.13 -#define __HYPERVISOR_H__
    9.14 -
    9.15 -#include <linux/types.h>
    9.16 -#include <linux/kernel.h>
    9.17 -#include <asm/hypervisor-ifs/hypervisor-if.h>
    9.18 -#include <asm/hypervisor-ifs/dom0_ops.h>
    9.19 -#include <asm/hypervisor-ifs/io/domain_controller.h>
    9.20 -#include <asm/ptrace.h>
    9.21 -#include <asm/page.h>
    9.22 -
    9.23 -/* arch/xen/kernel/setup.c */
    9.24 -union start_info_union
    9.25 -{
    9.26 -    extended_start_info_t start_info;
    9.27 -    char padding[512];
    9.28 -};
    9.29 -extern union start_info_union start_info_union;
    9.30 -#define start_info (start_info_union.start_info)
    9.31 -
    9.32 -/* arch/xen/mm/hypervisor.c */
    9.33 -/*
     9.34 - * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
     9.35 - * be MACHINE addresses.
    9.36 - */
    9.37 -
    9.38 -extern unsigned int mmu_update_queue_idx;
    9.39 -
    9.40 -void queue_l1_entry_update(pte_t *ptr, unsigned long val);
    9.41 -void queue_l2_entry_update(pmd_t *ptr, unsigned long val);
    9.42 -void queue_pt_switch(unsigned long ptr);
    9.43 -void queue_tlb_flush(void);
    9.44 -void queue_invlpg(unsigned long ptr);
    9.45 -void queue_pgd_pin(unsigned long ptr);
    9.46 -void queue_pgd_unpin(unsigned long ptr);
    9.47 -void queue_pte_pin(unsigned long ptr);
    9.48 -void queue_pte_unpin(unsigned long ptr);
    9.49 -void queue_set_ldt(unsigned long ptr, unsigned long bytes);
    9.50 -void queue_machphys_update(unsigned long mfn, unsigned long pfn);
    9.51 -#define MMU_UPDATE_DEBUG 0
    9.52 -
    9.53 -#if MMU_UPDATE_DEBUG > 0
    9.54 -typedef struct {
    9.55 -    void *ptr;
    9.56 -    unsigned long val, pteval;
    9.57 -    void *ptep;
    9.58 -    int line; char *file;
    9.59 -} page_update_debug_t;
    9.60 -extern page_update_debug_t update_debug_queue[];
    9.61 -#define queue_l1_entry_update(_p,_v) ({                           \
    9.62 - update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
    9.63 - update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
    9.64 - update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
    9.65 - update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
    9.66 - queue_l1_entry_update((_p),(_v));                                \
    9.67 -})
    9.68 -#define queue_l2_entry_update(_p,_v) ({                           \
    9.69 - update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
    9.70 - update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
    9.71 - update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
    9.72 - update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
    9.73 - queue_l2_entry_update((_p),(_v));                                \
    9.74 -})
    9.75 -#endif
    9.76 -
    9.77 -#if MMU_UPDATE_DEBUG > 1
    9.78 -#undef queue_l1_entry_update
    9.79 -#undef queue_l2_entry_update
    9.80 -#define queue_l1_entry_update(_p,_v) ({                           \
    9.81 - update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
    9.82 - update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
    9.83 - update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
    9.84 - update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
    9.85 - printk("L1 %s %d: %08lx (%08lx -> %08lx)\n", __FILE__, __LINE__, \
    9.86 -        (_p), pte_val(_p),                                        \
    9.87 -        (unsigned long)(_v));                                     \
    9.88 - queue_l1_entry_update((_p),(_v));                                \
    9.89 -})
    9.90 -#define queue_l2_entry_update(_p,_v) ({                           \
    9.91 - update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
    9.92 - update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
    9.93 - update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
    9.94 - update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
    9.95 - printk("L2 %s %d: %08lx (%08lx -> %08lx)\n", __FILE__, __LINE__, \
    9.96 -        (_p), pmd_val(_p),                                        \
    9.97 -        (unsigned long)(_v));                                     \
    9.98 - queue_l2_entry_update((_p),(_v));                                \
    9.99 -})
   9.100 -#define queue_pt_switch(_p) ({                                    \
   9.101 - printk("PTSWITCH %s %d: %08lx\n", __FILE__, __LINE__, (_p));     \
   9.102 - queue_pt_switch(_p);                                             \
   9.103 -})   
   9.104 -#define queue_tlb_flush() ({                                      \
   9.105 - printk("TLB FLUSH %s %d\n", __FILE__, __LINE__);                 \
   9.106 - queue_tlb_flush();                                               \
   9.107 -})   
   9.108 -#define queue_invlpg(_p) ({                                       \
   9.109 - printk("INVLPG %s %d: %08lx\n", __FILE__, __LINE__, (_p));       \
   9.110 - queue_invlpg(_p);                                                \
   9.111 -})   
   9.112 -#define queue_pgd_pin(_p) ({                                      \
   9.113 - printk("PGD PIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));      \
   9.114 - queue_pgd_pin(_p);                                               \
   9.115 -})   
   9.116 -#define queue_pgd_unpin(_p) ({                                    \
   9.117 - printk("PGD UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));    \
   9.118 - queue_pgd_unpin(_p);                                             \
   9.119 -})   
   9.120 -#define queue_pte_pin(_p) ({                                      \
   9.121 - printk("PTE PIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));      \
   9.122 - queue_pte_pin(_p);                                               \
   9.123 -})   
   9.124 -#define queue_pte_unpin(_p) ({                                    \
   9.125 - printk("PTE UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));    \
   9.126 - queue_pte_unpin(_p);                                             \
   9.127 -})   
   9.128 -#define queue_set_ldt(_p,_l) ({                                        \
    9.129 - printk("SET LDT %s %d: %08lx %d\n", __FILE__, __LINE__, (_p), (_l)); \
   9.130 - queue_set_ldt((_p), (_l));                                            \
   9.131 -})   
   9.132 -#endif
   9.133 -
   9.134 -void _flush_page_update_queue(void);
   9.135 -static inline int flush_page_update_queue(void)
   9.136 -{
   9.137 -    unsigned int idx = mmu_update_queue_idx;
   9.138 -    if ( idx != 0 ) _flush_page_update_queue();
   9.139 -    return idx;
   9.140 -}
   9.141 -#define XEN_flush_page_update_queue() (_flush_page_update_queue())
   9.142 -void MULTICALL_flush_page_update_queue(void);
   9.143 -
   9.144 -#ifdef CONFIG_XEN_PHYSDEV_ACCESS
   9.145 -/* Allocate a contiguous empty region of low memory. Return virtual start. */
   9.146 -unsigned long allocate_empty_lowmem_region(unsigned long pages);
   9.147 -/* Deallocate a contiguous region of low memory. Return it to the allocator. */
   9.148 -void deallocate_lowmem_region(unsigned long vstart, unsigned long pages);
   9.149 -#endif
   9.150 -
   9.151 -/*
   9.152 - * Assembler stubs for hyper-calls.
   9.153 - */
   9.154 -
   9.155 -static inline int HYPERVISOR_set_trap_table(trap_info_t *table)
   9.156 -{
   9.157 -    int ret;
   9.158 -    __asm__ __volatile__ (
   9.159 -        TRAP_INSTR
   9.160 -        : "=a" (ret) : "0" (__HYPERVISOR_set_trap_table),
   9.161 -        "b" (table) : "memory" );
   9.162 -
   9.163 -    return ret;
   9.164 -}
   9.165 -
   9.166 -static inline int HYPERVISOR_mmu_update(mmu_update_t *req, 
   9.167 -                                        int count, 
   9.168 -                                        int *success_count)
   9.169 -{
   9.170 -    int ret;
   9.171 -    __asm__ __volatile__ (
   9.172 -        TRAP_INSTR
   9.173 -        : "=a" (ret) : "0" (__HYPERVISOR_mmu_update), 
   9.174 -        "b" (req), "c" (count), "d" (success_count) : "memory" );
   9.175 -
   9.176 -    return ret;
   9.177 -}
   9.178 -
   9.179 -static inline int HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
   9.180 -{
   9.181 -    int ret;
   9.182 -    __asm__ __volatile__ (
   9.183 -        TRAP_INSTR
   9.184 -        : "=a" (ret) : "0" (__HYPERVISOR_set_gdt), 
   9.185 -        "b" (frame_list), "c" (entries) : "memory" );
   9.186 -
   9.187 -
   9.188 -    return ret;
   9.189 -}
   9.190 -
   9.191 -static inline int HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
   9.192 -{
   9.193 -    int ret;
   9.194 -    __asm__ __volatile__ (
   9.195 -        TRAP_INSTR
   9.196 -        : "=a" (ret) : "0" (__HYPERVISOR_stack_switch),
   9.197 -        "b" (ss), "c" (esp) : "memory" );
   9.198 -
   9.199 -    return ret;
   9.200 -}
   9.201 -
   9.202 -static inline int HYPERVISOR_set_callbacks(
   9.203 -    unsigned long event_selector, unsigned long event_address,
   9.204 -    unsigned long failsafe_selector, unsigned long failsafe_address)
   9.205 -{
   9.206 -    int ret;
   9.207 -    __asm__ __volatile__ (
   9.208 -        TRAP_INSTR
   9.209 -        : "=a" (ret) : "0" (__HYPERVISOR_set_callbacks),
   9.210 -        "b" (event_selector), "c" (event_address), 
   9.211 -        "d" (failsafe_selector), "S" (failsafe_address) : "memory" );
   9.212 -
   9.213 -    return ret;
   9.214 -}
   9.215 -
   9.216 -static inline int HYPERVISOR_fpu_taskswitch(void)
   9.217 -{
   9.218 -    int ret;
   9.219 -    __asm__ __volatile__ (
   9.220 -        TRAP_INSTR
   9.221 -        : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) : "memory" );
   9.222 -
   9.223 -    return ret;
   9.224 -}
   9.225 -
   9.226 -static inline int HYPERVISOR_yield(void)
   9.227 -{
   9.228 -    int ret;
   9.229 -    __asm__ __volatile__ (
   9.230 -        TRAP_INSTR
   9.231 -        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
   9.232 -        "b" (SCHEDOP_yield) : "memory" );
   9.233 -
   9.234 -    return ret;
   9.235 -}
   9.236 -
   9.237 -static inline int HYPERVISOR_block(void)
   9.238 -{
   9.239 -    int ret;
   9.240 -    __asm__ __volatile__ (
   9.241 -        TRAP_INSTR
   9.242 -        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
   9.243 -        "b" (SCHEDOP_block) : "memory" );
   9.244 -
   9.245 -    return ret;
   9.246 -}
   9.247 -
   9.248 -static inline int HYPERVISOR_shutdown(void)
   9.249 -{
   9.250 -    int ret;
   9.251 -    __asm__ __volatile__ (
   9.252 -        TRAP_INSTR
   9.253 -        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
   9.254 -        "b" (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
   9.255 -        : "memory" );
   9.256 -
   9.257 -    return ret;
   9.258 -}
   9.259 -
   9.260 -static inline int HYPERVISOR_reboot(void)
   9.261 -{
   9.262 -    int ret;
   9.263 -    __asm__ __volatile__ (
   9.264 -        TRAP_INSTR
   9.265 -        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
   9.266 -        "b" (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
   9.267 -        : "memory" );
   9.268 -
   9.269 -    return ret;
   9.270 -}
   9.271 -
   9.272 -static inline int HYPERVISOR_suspend(unsigned long srec)
   9.273 -{
   9.274 -    int ret;
   9.275 -    /* NB. On suspend, control software expects a suspend record in %esi. */
   9.276 -    __asm__ __volatile__ (
   9.277 -        TRAP_INSTR
   9.278 -        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
   9.279 -        "b" (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)), 
   9.280 -        "S" (srec) : "memory" );
   9.281 -
   9.282 -    return ret;
   9.283 -}
   9.284 -
   9.285 -static inline long HYPERVISOR_set_timer_op(u64 timeout)
   9.286 -{
   9.287 -    int ret;
   9.288 -    unsigned long timeout_hi = (unsigned long)(timeout>>32);
   9.289 -    unsigned long timeout_lo = (unsigned long)timeout;
   9.290 -    __asm__ __volatile__ (
   9.291 -        TRAP_INSTR
   9.292 -        : "=a" (ret) : "0" (__HYPERVISOR_set_timer_op),
   9.293 -        "b" (timeout_hi), "c" (timeout_lo) : "memory" );
   9.294 -
   9.295 -    return ret;
   9.296 -}
   9.297 -
   9.298 -static inline int HYPERVISOR_dom0_op(dom0_op_t *dom0_op)
   9.299 -{
   9.300 -    int ret;
   9.301 -    dom0_op->interface_version = DOM0_INTERFACE_VERSION;
   9.302 -    __asm__ __volatile__ (
   9.303 -        TRAP_INSTR
   9.304 -        : "=a" (ret) : "0" (__HYPERVISOR_dom0_op),
   9.305 -        "b" (dom0_op) : "memory" );
   9.306 -
   9.307 -    return ret;
   9.308 -}
   9.309 -
   9.310 -static inline int HYPERVISOR_set_debugreg(int reg, unsigned long value)
   9.311 -{
   9.312 -    int ret;
   9.313 -    __asm__ __volatile__ (
   9.314 -        TRAP_INSTR
   9.315 -        : "=a" (ret) : "0" (__HYPERVISOR_set_debugreg),
   9.316 -        "b" (reg), "c" (value) : "memory" );
   9.317 -
   9.318 -    return ret;
   9.319 -}
   9.320 -
   9.321 -static inline unsigned long HYPERVISOR_get_debugreg(int reg)
   9.322 -{
   9.323 -    unsigned long ret;
   9.324 -    __asm__ __volatile__ (
   9.325 -        TRAP_INSTR
   9.326 -        : "=a" (ret) : "0" (__HYPERVISOR_get_debugreg),
   9.327 -        "b" (reg) : "memory" );
   9.328 -
   9.329 -    return ret;
   9.330 -}
   9.331 -
   9.332 -static inline int HYPERVISOR_update_descriptor(
   9.333 -    unsigned long pa, unsigned long word1, unsigned long word2)
   9.334 -{
   9.335 -    int ret;
   9.336 -    __asm__ __volatile__ (
   9.337 -        TRAP_INSTR
   9.338 -        : "=a" (ret) : "0" (__HYPERVISOR_update_descriptor), 
   9.339 -        "b" (pa), "c" (word1), "d" (word2) : "memory" );
   9.340 -
   9.341 -    return ret;
   9.342 -}
   9.343 -
   9.344 -static inline int HYPERVISOR_set_fast_trap(int idx)
   9.345 -{
   9.346 -    int ret;
   9.347 -    __asm__ __volatile__ (
   9.348 -        TRAP_INSTR
   9.349 -        : "=a" (ret) : "0" (__HYPERVISOR_set_fast_trap), 
   9.350 -        "b" (idx) : "memory" );
   9.351 -
   9.352 -    return ret;
   9.353 -}
   9.354 -
   9.355 -static inline int HYPERVISOR_dom_mem_op(unsigned int   op,
   9.356 -                                        unsigned long *pages,
   9.357 -                                        unsigned long  nr_pages)
   9.358 -{
   9.359 -    int ret;
   9.360 -    __asm__ __volatile__ (
   9.361 -        TRAP_INSTR
   9.362 -        : "=a" (ret) : "0" (__HYPERVISOR_dom_mem_op),
   9.363 -        "b" (op), "c" (pages), "d" (nr_pages) : "memory" );
   9.364 -
   9.365 -    return ret;
   9.366 -}
   9.367 -
   9.368 -static inline int HYPERVISOR_multicall(void *call_list, int nr_calls)
   9.369 -{
   9.370 -    int ret;
   9.371 -    __asm__ __volatile__ (
   9.372 -        TRAP_INSTR
   9.373 -        : "=a" (ret) : "0" (__HYPERVISOR_multicall),
   9.374 -        "b" (call_list), "c" (nr_calls) : "memory" );
   9.375 -
   9.376 -    return ret;
   9.377 -}
   9.378 -
   9.379 -static inline int HYPERVISOR_update_va_mapping(
   9.380 -    unsigned long page_nr, pte_t new_val, unsigned long flags)
   9.381 -{
   9.382 -    int ret;
   9.383 -    __asm__ __volatile__ (
   9.384 -        TRAP_INSTR
   9.385 -        : "=a" (ret) : "0" (__HYPERVISOR_update_va_mapping), 
   9.386 -        "b" (page_nr), "c" ((new_val).pte_low), "d" (flags) : "memory" );
   9.387 -
   9.388 -    if ( unlikely(ret < 0) )
   9.389 -    {
    9.390 -        printk(KERN_ALERT "Failed to update VA mapping: %08lx, %08lx, %08lx\n",
   9.391 -               page_nr, (new_val).pte_low, flags);
   9.392 -        BUG();
   9.393 -    }
   9.394 -
   9.395 -    return ret;
   9.396 -}
   9.397 -
   9.398 -static inline int HYPERVISOR_event_channel_op(void *op)
   9.399 -{
   9.400 -    int ret;
   9.401 -    __asm__ __volatile__ (
   9.402 -        TRAP_INSTR
   9.403 -        : "=a" (ret) : "0" (__HYPERVISOR_event_channel_op),
   9.404 -        "b" (op) : "memory" );
   9.405 -
   9.406 -    return ret;
   9.407 -}
   9.408 -
   9.409 -static inline int HYPERVISOR_xen_version(int cmd)
   9.410 -{
   9.411 -    int ret;
   9.412 -    __asm__ __volatile__ (
   9.413 -        TRAP_INSTR
   9.414 -        : "=a" (ret) : "0" (__HYPERVISOR_xen_version), 
   9.415 -        "b" (cmd) : "memory" );
   9.416 -
   9.417 -    return ret;
   9.418 -}
   9.419 -
   9.420 -static inline int HYPERVISOR_console_io(int cmd, int count, char *str)
   9.421 -{
   9.422 -    int ret;
   9.423 -    __asm__ __volatile__ (
   9.424 -        TRAP_INSTR
   9.425 -        : "=a" (ret) : "0" (__HYPERVISOR_console_io),
   9.426 -        "b" (cmd), "c" (count), "d" (str) : "memory" );
   9.427 -
   9.428 -    return ret;
   9.429 -}
   9.430 -
   9.431 -static inline int HYPERVISOR_physdev_op(void *physdev_op)
   9.432 -{
   9.433 -    int ret;
   9.434 -    __asm__ __volatile__ (
   9.435 -        TRAP_INSTR
   9.436 -        : "=a" (ret) : "0" (__HYPERVISOR_physdev_op),
   9.437 -        "b" (physdev_op) : "memory" );
   9.438 -
   9.439 -    return ret;
   9.440 -}
   9.441 -
   9.442 -static inline int HYPERVISOR_update_va_mapping_otherdomain(
   9.443 -    unsigned long page_nr, pte_t new_val, unsigned long flags, domid_t domid)
   9.444 -{
   9.445 -    int ret;
   9.446 -    __asm__ __volatile__ (
   9.447 -        TRAP_INSTR
   9.448 -        : "=a" (ret) : "0" (__HYPERVISOR_update_va_mapping_otherdomain), 
   9.449 -        "b" (page_nr), "c" ((new_val).pte_low), "d" (flags), "S" (domid) :
   9.450 -        "memory" );
   9.451 -    
   9.452 -    return ret;
   9.453 -}
   9.454 -
   9.455 -#endif /* __HYPERVISOR_H__ */
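
All of the stubs above share one shape: the hypercall number goes in %eax, up to four arguments in %ebx/%ecx/%edx/%esi, TRAP_INSTR traps into Xen, and the result comes back in %eax. A trivial hedged example of calling one of them (CONSOLEIO_write is the usual subcommand name from the hypervisor interface headers, assumed to be available here):

    /* Sketch: push a line to Xen's console through HYPERVISOR_console_io(). */
    #include <asm/hypervisor.h>

    static void example_console_hello(void)
    {
        static char msg[] = "hello from this domain\n";
        (void)HYPERVISOR_console_io(CONSOLEIO_write, sizeof(msg) - 1, msg);
    }
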
    10.1 --- a/linux-2.4.26-xen-sparse/include/asm-xen/multicall.h	Mon Aug 02 10:30:38 2004 +0000
    10.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.3 @@ -1,84 +0,0 @@
    10.4 -/******************************************************************************
    10.5 - * multicall.h
    10.6 - */
    10.7 -
    10.8 -#ifndef __MULTICALL_H__
    10.9 -#define __MULTICALL_H__
   10.10 -
   10.11 -#include <asm/hypervisor.h>
   10.12 -
   10.13 -extern multicall_entry_t multicall_list[];
   10.14 -extern int nr_multicall_ents;
   10.15 -
   10.16 -static inline void queue_multicall0(unsigned long op)
   10.17 -{
   10.18 -    int i = nr_multicall_ents;
   10.19 -    multicall_list[i].op      = op;
   10.20 -    nr_multicall_ents = i+1;
   10.21 -}
   10.22 -
   10.23 -static inline void queue_multicall1(unsigned long op, unsigned long arg1)
   10.24 -{
   10.25 -    int i = nr_multicall_ents;
   10.26 -    multicall_list[i].op      = op;
   10.27 -    multicall_list[i].args[0] = arg1;
   10.28 -    nr_multicall_ents = i+1;
   10.29 -}
   10.30 -
   10.31 -static inline void queue_multicall2(
   10.32 -    unsigned long op, unsigned long arg1, unsigned long arg2)
   10.33 -{
   10.34 -    int i = nr_multicall_ents;
   10.35 -    multicall_list[i].op      = op;
   10.36 -    multicall_list[i].args[0] = arg1;
   10.37 -    multicall_list[i].args[1] = arg2;
   10.38 -    nr_multicall_ents = i+1;
   10.39 -}
   10.40 -
   10.41 -static inline void queue_multicall3(
   10.42 -    unsigned long op, unsigned long arg1, unsigned long arg2,
   10.43 -    unsigned long arg3)
   10.44 -{
   10.45 -    int i = nr_multicall_ents;
   10.46 -    multicall_list[i].op      = op;
   10.47 -    multicall_list[i].args[0] = arg1;
   10.48 -    multicall_list[i].args[1] = arg2;
   10.49 -    multicall_list[i].args[2] = arg3;
   10.50 -    nr_multicall_ents = i+1;
   10.51 -}
   10.52 -
   10.53 -static inline void queue_multicall4(
   10.54 -    unsigned long op, unsigned long arg1, unsigned long arg2,
   10.55 -    unsigned long arg3, unsigned long arg4)
   10.56 -{
   10.57 -    int i = nr_multicall_ents;
   10.58 -    multicall_list[i].op      = op;
   10.59 -    multicall_list[i].args[0] = arg1;
   10.60 -    multicall_list[i].args[1] = arg2;
   10.61 -    multicall_list[i].args[2] = arg3;
   10.62 -    multicall_list[i].args[3] = arg4;
   10.63 -    nr_multicall_ents = i+1;
   10.64 -}
   10.65 -
   10.66 -static inline void queue_multicall5(
   10.67 -    unsigned long op, unsigned long arg1, unsigned long arg2,
   10.68 -    unsigned long arg3, unsigned long arg4, unsigned long arg5)
   10.69 -{
   10.70 -    int i = nr_multicall_ents;
   10.71 -    multicall_list[i].op      = op;
   10.72 -    multicall_list[i].args[0] = arg1;
   10.73 -    multicall_list[i].args[1] = arg2;
   10.74 -    multicall_list[i].args[2] = arg3;
   10.75 -    multicall_list[i].args[3] = arg4;
   10.76 -    multicall_list[i].args[4] = arg5;
   10.77 -    nr_multicall_ents = i+1;
   10.78 -}
   10.79 -
   10.80 -static inline void execute_multicall_list(void)
   10.81 -{
   10.82 -    if ( unlikely(nr_multicall_ents == 0) ) return;
   10.83 -    (void)HYPERVISOR_multicall(multicall_list, nr_multicall_ents);
   10.84 -    nr_multicall_ents = 0;
   10.85 -}
   10.86 -
   10.87 -#endif /* __MULTICALL_H__ */
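
The queue_multicallN() helpers only append to multicall_list; nothing reaches Xen until execute_multicall_list() issues a single HYPERVISOR_multicall. A hedged sketch combining them with the MULTICALL_ variant of the page-update flush from mm/hypervisor.c above (the pairing is illustrative, not a requirement):

    /* Sketch: fold outstanding MMU updates and a stack switch into one trap. */
    #include <asm/multicall.h>

    static void example_switch(unsigned long ss, unsigned long esp)
    {
        MULTICALL_flush_page_update_queue();            /* queues mmu_update */
        queue_multicall2(__HYPERVISOR_stack_switch, ss, esp);
        execute_multicall_list();                       /* one trap for both */
    }
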
    11.1 --- a/linux-2.4.26-xen-sparse/include/asm-xen/proc_cmd.h	Mon Aug 02 10:30:38 2004 +0000
    11.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.3 @@ -1,63 +0,0 @@
    11.4 -/******************************************************************************
    11.5 - * proc_cmd.h
    11.6 - * 
    11.7 - * Interface to /proc/cmd and /proc/xen/privcmd.
    11.8 - */
    11.9 -
   11.10 -#ifndef __PROC_CMD_H__
   11.11 -#define __PROC_CMD_H__
   11.12 -
   11.13 -typedef struct privcmd_hypercall
   11.14 -{
   11.15 -    unsigned long op;
   11.16 -    unsigned long arg[5];
   11.17 -} privcmd_hypercall_t;
   11.18 -
   11.19 -typedef struct privcmd_mmap_entry {
   11.20 -    unsigned long va;
   11.21 -    unsigned long mfn;
   11.22 -    unsigned long npages;
   11.23 -} privcmd_mmap_entry_t; 
   11.24 -
   11.25 -typedef struct privcmd_mmap {
   11.26 -    int num;
   11.27 -    domid_t dom; /* target domain */
   11.28 -    privcmd_mmap_entry_t *entry;
   11.29 -} privcmd_mmap_t; 
   11.30 -
   11.31 -typedef struct privcmd_mmapbatch {
   11.32 -    int num;     // number of pages to populate
   11.33 -    domid_t dom; // target domain 
   11.34 -    unsigned long addr;  // virtual address
   11.35 -    unsigned long *arr; // array of mfns - top nibble set on err
   11.36 -} privcmd_mmapbatch_t; 
   11.37 -
   11.38 -typedef struct privcmd_blkmsg
   11.39 -{
   11.40 -    unsigned long op;
   11.41 -    void         *buf;
   11.42 -    int           buf_size;
   11.43 -} privcmd_blkmsg_t;
   11.44 -
   11.45 -/*
   11.46 - * @cmd: IOCTL_PRIVCMD_HYPERCALL
   11.47 - * @arg: &privcmd_hypercall_t
   11.48 - * Return: Value returned from execution of the specified hypercall.
   11.49 - */
   11.50 -#define IOCTL_PRIVCMD_HYPERCALL         \
   11.51 -    _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
   11.52 -
   11.53 -/*
   11.54 - * @cmd: IOCTL_PRIVCMD_INITDOMAIN_EVTCHN
   11.55 - * @arg: n/a
   11.56 - * Return: Port associated with domain-controller end of control event channel
   11.57 - *         for the initial domain.
   11.58 - */
   11.59 -#define IOCTL_PRIVCMD_INITDOMAIN_EVTCHN \
   11.60 -    _IOC(_IOC_NONE, 'P', 1, 0)
   11.61 -#define IOCTL_PRIVCMD_MMAP             \
   11.62 -    _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
   11.63 -#define IOCTL_PRIVCMD_MMAPBATCH             \
   11.64 -    _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmapbatch_t))
   11.65 -
   11.66 -#endif /* __PROC_CMD_H__ */
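
The hypercall ioctl above is how userland tools reach Xen: fill a privcmd_hypercall_t, open /proc/xen/privcmd, and let the driver re-issue the trap. A hedged userspace sketch (error handling trimmed; __HYPERVISOR_xen_version comes from the hypervisor interface headers and is assumed to be visible to this program):

    /* Sketch: issue HYPERVISOR_xen_version from userspace via privcmd. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <asm-xen/proc_cmd.h>   /* or a local copy of this header */

    int main(void)
    {
        privcmd_hypercall_t call = { __HYPERVISOR_xen_version, { 0, 0, 0, 0, 0 } };
        int fd = open("/proc/xen/privcmd", O_RDWR);
        if ( fd < 0 )
            return 1;
        printf("xen_version hypercall returned %d\n",
               ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call));
        return 0;
    }
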
    12.1 --- a/linux-2.4.26-xen-sparse/include/asm-xen/suspend.h	Mon Aug 02 10:30:38 2004 +0000
    12.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.3 @@ -1,25 +0,0 @@
    12.4 -/******************************************************************************
    12.5 - * suspend.h
    12.6 - * 
    12.7 - * NB. This file is part of the Xenolinux interface with Xenoserver control 
    12.8 - * software. It can be included in such software without invoking the GPL.
    12.9 - * 
   12.10 - * Copyright (c) 2003, K A Fraser
   12.11 - */
   12.12 -
   12.13 -#ifndef __ASM_XEN_SUSPEND_H__
   12.14 -#define __ASM_XEN_SUSPEND_H__
   12.15 -
   12.16 -typedef struct suspend_record_st {
   12.17 -    /* To be filled in before resume. */
   12.18 -    extended_start_info_t resume_info;
   12.19 -    /*
   12.20 -     * The number of a machine frame containing, in sequence, the number of
   12.21 -     * each machine frame that contains PFN -> MFN translation table data.
   12.22 -     */
   12.23 -    unsigned long pfn_to_mfn_frame_list;
   12.24 -    /* Number of entries in the PFN -> MFN translation table. */
   12.25 -    unsigned long nr_pfns;
   12.26 -} suspend_record_t;
   12.27 -
   12.28 -#endif /* __ASM_XEN_SUSPEND_H__ */
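
This record is what the HYPERVISOR_suspend() stub earlier in the changeset hands over: the guest fills in nr_pfns and pfn_to_mfn_frame_list, and resume_info is filled back in by the control tools before the domain restarts. A hedged guest-side sketch (whether the argument is the record's machine frame number or its machine address is an assumption here; the frame number is used):

    /* Sketch: pass a prepared suspend record to Xen. */
    #include <asm-xen/suspend.h>
    #include <asm/hypervisor.h>

    static void example_suspend(suspend_record_t *rec)
    {
        /* rec->nr_pfns and rec->pfn_to_mfn_frame_list set up by the caller. */
        (void)HYPERVISOR_suspend(virt_to_machine(rec) >> PAGE_SHIFT);
    }
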
    13.1 --- a/linux-2.4.26-xen-sparse/mkbuildtree	Mon Aug 02 10:30:38 2004 +0000
    13.2 +++ b/linux-2.4.26-xen-sparse/mkbuildtree	Mon Aug 02 14:19:48 2004 +0000
    13.3 @@ -202,6 +202,12 @@ ln -sf ../asm-i386/ucontext.h
    13.4  ln -sf ../asm-i386/unaligned.h
    13.5  ln -sf ../asm-i386/unistd.h 
    13.6  ln -sf ../asm-i386/user.h 
    13.7 +ln -sf ../../${LINUX_26}/include/asm-xen/ctrl_if.h
    13.8 +ln -sf ../../${LINUX_26}/include/asm-xen/evtchn.h
    13.9 +ln -sf ../../${LINUX_26}/include/asm-xen/hypervisor.h
   13.10 +ln -sf ../../${LINUX_26}/include/asm-xen/multicall.h
   13.11 +ln -sf ../../${LINUX_26}/include/asm-xen/proc_cmd.h
   13.12 +ln -sf ../../${LINUX_26}/include/asm-xen/suspend.h
   13.13  ln -sf ../../${LINUX_26}/include/asm-xen/xen_proc.h
   13.14  
   13.15  cd ${AD}/arch/xen/kernel
   13.16 @@ -213,6 +219,8 @@ ln -sf ../../i386/kernel/pci-i386.h
   13.17  ln -sf ../../i386/kernel/ptrace.c
   13.18  ln -sf ../../i386/kernel/semaphore.c 
   13.19  ln -sf ../../i386/kernel/sys_i386.c 
   13.20 +ln -sf ../../../${LINUX_26}/arch/xen/kernel/ctrl_if.c
   13.21 +ln -sf ../../../${LINUX_26}/arch/xen/kernel/evtchn.c
   13.22  ln -sf ../../../${LINUX_26}/arch/xen/i386/kernel/ioport.c
   13.23  
   13.24  cd ${AD}/arch/xen/lib
   13.25 @@ -230,6 +238,7 @@ ln -sf ../../../${LINUX_26}/arch/xen/ker
   13.26  cd ${AD}/arch/xen/mm
   13.27  ln -sf ../../i386/mm/extable.c 
   13.28  ln -sf ../../i386/mm/pageattr.c 
   13.29 +ln -sf ../../../${LINUX_26}/arch/xen/i386/mm/hypervisor.c
   13.30  
   13.31  cd ${AD}/arch/xen/drivers/console
   13.32  ln -sf ../../../../${LINUX_26}/drivers/xen/console/console.c 
   13.33 @@ -237,6 +246,9 @@ ln -sf ../../../../${LINUX_26}/drivers/x
   13.34  cd ${AD}/arch/xen/drivers/dom0
   13.35  ln -sf ../../../../${LINUX_26}/drivers/xen/privcmd/privcmd.c core.c
   13.36  
   13.37 +cd ${AD}/arch/xen/drivers/evtchn
   13.38 +ln -sf ../../../../${LINUX_26}/drivers/xen/evtchn/evtchn.c
   13.39 +
   13.40  cd ${AD}/arch/xen/drivers/netif/frontend
   13.41  ln -sf ../../../../../${LINUX_26}/drivers/xen/netfront/netfront.c main.c
   13.42  
    14.1 --- a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/Makefile	Mon Aug 02 10:30:38 2004 +0000
    14.2 +++ b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/Makefile	Mon Aug 02 14:19:48 2004 +0000
    14.3 @@ -17,9 +17,6 @@ c-obj-y	:= semaphore.o vm86.o \
    14.4  		doublefault.o
    14.5  s-obj-y	:=
    14.6  
    14.7 -#obj-y				+= hypervisor.o
    14.8 -obj-y				+= evtchn.o
    14.9 -
   14.10  obj-y				+= cpu/
   14.11  obj-y				+= timers/
   14.12  c-obj-$(CONFIG_ACPI_BOOT)	+= acpi/
    15.1 --- a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/cpu/common.c	Mon Aug 02 10:30:38 2004 +0000
    15.2 +++ b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/cpu/common.c	Mon Aug 02 14:19:48 2004 +0000
    15.3 @@ -8,7 +8,7 @@
    15.4  #include <asm/msr.h>
    15.5  #include <asm/io.h>
    15.6  #include <asm/mmu_context.h>
    15.7 -#include <asm/hypervisor.h>
    15.8 +#include <asm-xen/hypervisor.h>
    15.9  
   15.10  #include "cpu.h"
   15.11  
    16.1 --- a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/evtchn.c	Mon Aug 02 10:30:38 2004 +0000
    16.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.3 @@ -1,479 +0,0 @@
    16.4 -/******************************************************************************
    16.5 - * evtchn.c
    16.6 - * 
    16.7 - * Communication via Xen event channels.
    16.8 - * 
    16.9 - * Copyright (c) 2002-2004, K A Fraser
   16.10 - */
   16.11 -
   16.12 -#include <linux/config.h>
   16.13 -#include <linux/irq.h>
   16.14 -#include <linux/interrupt.h>
   16.15 -#include <linux/sched.h>
   16.16 -#include <linux/kernel_stat.h>
   16.17 -#include <asm/atomic.h>
   16.18 -#include <asm/system.h>
   16.19 -#include <asm/ptrace.h>
   16.20 -#include <asm/synch_bitops.h>
   16.21 -#include <asm/hypervisor.h>
   16.22 -#include <asm/hypervisor-ifs/event_channel.h>
   16.23 -#include <asm/hypervisor-ifs/physdev.h>
   16.24 -#include <asm-xen/ctrl_if.h>
   16.25 -
   16.26 -/*
   16.27 - * This lock protects updates to the following mapping and reference-count
   16.28 - * arrays. The lock does not need to be acquired to read the mapping tables.
   16.29 - */
   16.30 -static spinlock_t irq_mapping_update_lock;
   16.31 -
   16.32 -/* IRQ <-> event-channel mappings. */
   16.33 -static int evtchn_to_irq[NR_EVENT_CHANNELS];
   16.34 -static int irq_to_evtchn[NR_IRQS];
   16.35 -
   16.36 -/* IRQ <-> VIRQ mapping. */
   16.37 -static int virq_to_irq[NR_VIRQS];
   16.38 -
   16.39 -/* Reference counts for bindings to IRQs. */
   16.40 -static int irq_bindcount[NR_IRQS];
   16.41 -
   16.42 -/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
   16.43 -static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
   16.44 -
   16.45 -/* Upcall to generic IRQ layer. */
   16.46 -extern asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs);
   16.47 -
   16.48 -#define VALID_EVTCHN(_chn) ((_chn) != -1)
   16.49 -
   16.50 -void evtchn_do_upcall(struct pt_regs *regs)
   16.51 -{
   16.52 -    unsigned long  l1, l2;
   16.53 -    unsigned int   l1i, l2i, port;
   16.54 -    int            irq;
   16.55 -    unsigned long  flags;
   16.56 -    shared_info_t *s = HYPERVISOR_shared_info;
   16.57 -
   16.58 -    local_irq_save(flags);
   16.59 -    
   16.60 -    while ( s->vcpu_data[0].evtchn_upcall_pending )
   16.61 -    {
   16.62 -        s->vcpu_data[0].evtchn_upcall_pending = 0;
   16.63 -        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
   16.64 -        l1 = xchg(&s->evtchn_pending_sel, 0);
   16.65 -        while ( (l1i = ffs(l1)) != 0 )
   16.66 -        {
   16.67 -            l1i--;
   16.68 -            l1 &= ~(1 << l1i);
   16.69 -        
   16.70 -            l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
   16.71 -            while ( (l2i = ffs(l2)) != 0 )
   16.72 -            {
   16.73 -                l2i--;
   16.74 -                l2 &= ~(1 << l2i);
   16.75 -            
   16.76 -                port = (l1i << 5) + l2i;
   16.77 -                if ( (irq = evtchn_to_irq[port]) != -1 )
   16.78 -                    do_IRQ(irq, regs);
   16.79 -                else
   16.80 -                    evtchn_device_upcall(port);
   16.81 -            }
   16.82 -        }
   16.83 -    }
   16.84 -
   16.85 -    local_irq_restore(flags);
   16.86 -}
   16.87 -
   16.88 -
   16.89 -static int find_unbound_irq(void)
   16.90 -{
   16.91 -    int irq;
   16.92 -
   16.93 -    for ( irq = 0; irq < NR_IRQS; irq++ )
   16.94 -        if ( irq_bindcount[irq] == 0 )
   16.95 -            break;
   16.96 -
   16.97 -    if ( irq == NR_IRQS )
   16.98 -        panic("No available IRQ to bind to: increase NR_IRQS!\n");
   16.99 -
  16.100 -    return irq;
  16.101 -}
  16.102 -
  16.103 -int bind_virq_to_irq(int virq)
  16.104 -{
  16.105 -    evtchn_op_t op;
  16.106 -    int evtchn, irq;
  16.107 -
  16.108 -    spin_lock(&irq_mapping_update_lock);
  16.109 -
  16.110 -    if ( (irq = virq_to_irq[virq]) == -1 )
  16.111 -    {
  16.112 -        op.cmd              = EVTCHNOP_bind_virq;
  16.113 -        op.u.bind_virq.virq = virq;
  16.114 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
  16.115 -            panic("Failed to bind virtual IRQ %d\n", virq);
  16.116 -        evtchn = op.u.bind_virq.port;
  16.117 -
  16.118 -        irq = find_unbound_irq();
  16.119 -        evtchn_to_irq[evtchn] = irq;
  16.120 -        irq_to_evtchn[irq]    = evtchn;
  16.121 -
  16.122 -        virq_to_irq[virq] = irq;
  16.123 -    }
  16.124 -
  16.125 -    irq_bindcount[irq]++;
  16.126 -
  16.127 -    spin_unlock(&irq_mapping_update_lock);
  16.128 -    
  16.129 -    return irq;
  16.130 -}
  16.131 -
  16.132 -void unbind_virq_from_irq(int virq)
  16.133 -{
  16.134 -    evtchn_op_t op;
  16.135 -    int irq    = virq_to_irq[virq];
  16.136 -    int evtchn = irq_to_evtchn[irq];
  16.137 -
  16.138 -    spin_lock(&irq_mapping_update_lock);
  16.139 -
  16.140 -    if ( --irq_bindcount[irq] == 0 )
  16.141 -    {
  16.142 -        op.cmd          = EVTCHNOP_close;
  16.143 -        op.u.close.dom  = DOMID_SELF;
  16.144 -        op.u.close.port = evtchn;
  16.145 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
  16.146 -            panic("Failed to unbind virtual IRQ %d\n", virq);
  16.147 -
  16.148 -        evtchn_to_irq[evtchn] = -1;
  16.149 -        irq_to_evtchn[irq]    = -1;
  16.150 -        virq_to_irq[virq]     = -1;
  16.151 -    }
  16.152 -
  16.153 -    spin_unlock(&irq_mapping_update_lock);
  16.154 -}
  16.155 -
  16.156 -int bind_evtchn_to_irq(int evtchn)
  16.157 -{
  16.158 -    int irq;
  16.159 -
  16.160 -    spin_lock(&irq_mapping_update_lock);
  16.161 -
  16.162 -    if ( (irq = evtchn_to_irq[evtchn]) == -1 )
  16.163 -    {
  16.164 -        irq = find_unbound_irq();
  16.165 -        evtchn_to_irq[evtchn] = irq;
  16.166 -        irq_to_evtchn[irq]    = evtchn;
  16.167 -    }
  16.168 -
  16.169 -    irq_bindcount[irq]++;
  16.170 -
  16.171 -    spin_unlock(&irq_mapping_update_lock);
  16.172 -    
  16.173 -    return irq;
  16.174 -}
  16.175 -
  16.176 -void unbind_evtchn_from_irq(int evtchn)
  16.177 -{
  16.178 -    int irq = evtchn_to_irq[evtchn];
  16.179 -
  16.180 -    spin_lock(&irq_mapping_update_lock);
  16.181 -
  16.182 -    if ( --irq_bindcount[irq] == 0 )
  16.183 -    {
  16.184 -        evtchn_to_irq[evtchn] = -1;
  16.185 -        irq_to_evtchn[irq]    = -1;
  16.186 -    }
  16.187 -
  16.188 -    spin_unlock(&irq_mapping_update_lock);
  16.189 -}
  16.190 -
  16.191 -
  16.192 -/*
  16.193 - * Interface to generic handling in irq.c
  16.194 - */
  16.195 -
  16.196 -static unsigned int startup_dynirq(unsigned int irq)
  16.197 -{
  16.198 -    unmask_evtchn(irq_to_evtchn[irq]);
  16.199 -    return 0;
  16.200 -}
  16.201 -
  16.202 -static void shutdown_dynirq(unsigned int irq)
  16.203 -{
  16.204 -    mask_evtchn(irq_to_evtchn[irq]);
  16.205 -}
  16.206 -
  16.207 -static void enable_dynirq(unsigned int irq)
  16.208 -{
  16.209 -    unmask_evtchn(irq_to_evtchn[irq]);
  16.210 -}
  16.211 -
  16.212 -static void disable_dynirq(unsigned int irq)
  16.213 -{
  16.214 -    mask_evtchn(irq_to_evtchn[irq]);
  16.215 -}
  16.216 -
  16.217 -static void ack_dynirq(unsigned int irq)
  16.218 -{
  16.219 -    mask_evtchn(irq_to_evtchn[irq]);
  16.220 -    clear_evtchn(irq_to_evtchn[irq]);
  16.221 -}
  16.222 -
  16.223 -static void end_dynirq(unsigned int irq)
  16.224 -{
  16.225 -    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
  16.226 -        unmask_evtchn(irq_to_evtchn[irq]);
  16.227 -}
  16.228 -
  16.229 -static struct hw_interrupt_type dynirq_type = {
  16.230 -    "Dynamic-irq",
  16.231 -    startup_dynirq,
  16.232 -    shutdown_dynirq,
  16.233 -    enable_dynirq,
  16.234 -    disable_dynirq,
  16.235 -    ack_dynirq,
  16.236 -    end_dynirq,
  16.237 -    NULL
  16.238 -};
  16.239 -
  16.240 -static inline void pirq_unmask_notify(int pirq)
  16.241 -{
  16.242 -    physdev_op_t op;
  16.243 -    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
  16.244 -    {
  16.245 -        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
  16.246 -        (void)HYPERVISOR_physdev_op(&op);
  16.247 -    }
  16.248 -}
  16.249 -
  16.250 -static inline void pirq_query_unmask(int pirq)
  16.251 -{
  16.252 -    physdev_op_t op;
  16.253 -    op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
  16.254 -    op.u.irq_status_query.irq = pirq;
  16.255 -    (void)HYPERVISOR_physdev_op(&op);
  16.256 -    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
  16.257 -    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
  16.258 -        set_bit(pirq, &pirq_needs_unmask_notify[0]);
  16.259 -}
  16.260 -
  16.261 -/*
  16.262 - * On startup, if there is no action associated with the IRQ then we are
  16.263 - * probing. In this case we should not share with others as it will confuse us.
  16.264 - */
  16.265 -#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
  16.266 -
  16.267 -static unsigned int startup_pirq(unsigned int irq)
  16.268 -{
  16.269 -    evtchn_op_t op;
  16.270 -    int evtchn;
  16.271 -
  16.272 -    op.cmd               = EVTCHNOP_bind_pirq;
  16.273 -    op.u.bind_pirq.pirq  = irq;
  16.274 -    /* NB. We are happy to share unless we are probing. */
  16.275 -    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
  16.276 -    if ( HYPERVISOR_event_channel_op(&op) != 0 )
  16.277 -    {
  16.278 -        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
  16.279 -            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
  16.280 -        return 0;
  16.281 -    }
  16.282 -    evtchn = op.u.bind_pirq.port;
  16.283 -
  16.284 -    pirq_query_unmask(irq_to_pirq(irq));
  16.285 -
  16.286 -    evtchn_to_irq[evtchn] = irq;
  16.287 -    irq_to_evtchn[irq]    = evtchn;
  16.288 -
  16.289 -    unmask_evtchn(evtchn);
  16.290 -    pirq_unmask_notify(irq_to_pirq(irq));
  16.291 -
  16.292 -    return 0;
  16.293 -}
  16.294 -
  16.295 -static void shutdown_pirq(unsigned int irq)
  16.296 -{
  16.297 -    evtchn_op_t op;
  16.298 -    int evtchn = irq_to_evtchn[irq];
  16.299 -
  16.300 -    if ( !VALID_EVTCHN(evtchn) )
  16.301 -        return;
  16.302 -
  16.303 -    mask_evtchn(evtchn);
  16.304 -
  16.305 -    op.cmd          = EVTCHNOP_close;
  16.306 -    op.u.close.dom  = DOMID_SELF;
  16.307 -    op.u.close.port = evtchn;
  16.308 -    if ( HYPERVISOR_event_channel_op(&op) != 0 )
  16.309 -        panic("Failed to unbind physical IRQ %d\n", irq);
  16.310 -
  16.311 -    evtchn_to_irq[evtchn] = -1;
  16.312 -    irq_to_evtchn[irq]    = -1;
  16.313 -}
  16.314 -
  16.315 -static void enable_pirq(unsigned int irq)
  16.316 -{
  16.317 -    int evtchn = irq_to_evtchn[irq];
  16.318 -    if ( !VALID_EVTCHN(evtchn) )
  16.319 -        return;
  16.320 -    unmask_evtchn(evtchn);
  16.321 -    pirq_unmask_notify(irq_to_pirq(irq));
  16.322 -}
  16.323 -
  16.324 -static void disable_pirq(unsigned int irq)
  16.325 -{
  16.326 -    int evtchn = irq_to_evtchn[irq];
  16.327 -    if ( !VALID_EVTCHN(evtchn) )
  16.328 -        return;
  16.329 -    mask_evtchn(evtchn);
  16.330 -}
  16.331 -
  16.332 -static void ack_pirq(unsigned int irq)
  16.333 -{
  16.334 -    int evtchn = irq_to_evtchn[irq];
  16.335 -    if ( !VALID_EVTCHN(evtchn) )
  16.336 -        return;
  16.337 -    mask_evtchn(evtchn);
  16.338 -    clear_evtchn(evtchn);
  16.339 -}
  16.340 -
  16.341 -static void end_pirq(unsigned int irq)
  16.342 -{
  16.343 -    int evtchn = irq_to_evtchn[irq];
  16.344 -    if ( !VALID_EVTCHN(evtchn) )
  16.345 -        return;
  16.346 -    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
  16.347 -    {
  16.348 -        unmask_evtchn(evtchn);
  16.349 -        pirq_unmask_notify(irq_to_pirq(irq));
  16.350 -    }
  16.351 -}
  16.352 -
  16.353 -static struct hw_interrupt_type pirq_type = {
  16.354 -    "Phys-irq",
  16.355 -    startup_pirq,
  16.356 -    shutdown_pirq,
  16.357 -    enable_pirq,
  16.358 -    disable_pirq,
  16.359 -    ack_pirq,
  16.360 -    end_pirq,
  16.361 -    NULL
  16.362 -};
  16.363 -
  16.364 -static irqreturn_t misdirect_interrupt(int irq, void *dev_id,
  16.365 -				       struct pt_regs *regs)
  16.366 -{
  16.367 -	/* nothing */
  16.368 -	return IRQ_HANDLED;
  16.369 -}
  16.370 -
  16.371 -static struct irqaction misdirect_action = {
  16.372 -    misdirect_interrupt, 
  16.373 -    SA_INTERRUPT, 
  16.374 -    0, 
  16.375 -    "misdirect", 
  16.376 -    NULL, 
  16.377 -    NULL
  16.378 -};
  16.379 -
  16.380 -void irq_suspend(void)
  16.381 -{
  16.382 -    int virq, irq, evtchn;
  16.383 -
  16.384 -    /* Unbind VIRQs from event channels. */
  16.385 -    for ( virq = 0; virq < NR_VIRQS; virq++ )
  16.386 -    {
  16.387 -        if ( (irq = virq_to_irq[virq]) == -1 )
  16.388 -            continue;
  16.389 -        evtchn = irq_to_evtchn[irq];
  16.390 -
  16.391 -        /* Mark the event channel as unused in our table. */
  16.392 -        evtchn_to_irq[evtchn] = -1;
  16.393 -        irq_to_evtchn[irq]    = -1;
  16.394 -    }
  16.395 -
  16.396 -    /*
  16.397 -     * We should now be unbound from all event channels. Stale bindings to 
  16.398 -     * PIRQs and/or inter-domain event channels will cause us to barf here.
  16.399 -     */
  16.400 -    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
  16.401 -        if ( evtchn_to_irq[evtchn] != -1 )
  16.402 -            panic("Suspend attempted while bound to evtchn %d.\n", evtchn);
  16.403 -}
  16.404 -
  16.405 -
  16.406 -void irq_resume(void)
  16.407 -{
  16.408 -    evtchn_op_t op;
  16.409 -    int         virq, irq, evtchn;
  16.410 -
  16.411 -    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
  16.412 -        mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
  16.413 -
  16.414 -    for ( virq = 0; virq < NR_VIRQS; virq++ )
  16.415 -    {
  16.416 -        if ( (irq = virq_to_irq[virq]) == -1 )
  16.417 -            continue;
  16.418 -
  16.419 -        /* Get a new binding from Xen. */
  16.420 -        op.cmd              = EVTCHNOP_bind_virq;
  16.421 -        op.u.bind_virq.virq = virq;
  16.422 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
  16.423 -            panic("Failed to bind virtual IRQ %d\n", virq);
  16.424 -        evtchn = op.u.bind_virq.port;
  16.425 -        
  16.426 -        /* Record the new mapping. */
  16.427 -        evtchn_to_irq[evtchn] = irq;
  16.428 -        irq_to_evtchn[irq]    = evtchn;
  16.429 -
  16.430 -        /* Ready for use. */
  16.431 -        unmask_evtchn(evtchn);
  16.432 -    }
  16.433 -}
  16.434 -
  16.435 -void __init init_IRQ(void)
  16.436 -{
  16.437 -    int i;
  16.438 -
  16.439 -    spin_lock_init(&irq_mapping_update_lock);
  16.440 -
  16.441 -    /* No VIRQ -> IRQ mappings. */
  16.442 -    for ( i = 0; i < NR_VIRQS; i++ )
  16.443 -        virq_to_irq[i] = -1;
  16.444 -
  16.445 -    /* No event-channel -> IRQ mappings. */
  16.446 -    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
  16.447 -    {
  16.448 -        evtchn_to_irq[i] = -1;
  16.449 -        mask_evtchn(i); /* No event channels are 'live' right now. */
  16.450 -    }
  16.451 -
  16.452 -    /* No IRQ -> event-channel mappings. */
  16.453 -    for ( i = 0; i < NR_IRQS; i++ )
  16.454 -        irq_to_evtchn[i] = -1;
  16.455 -
  16.456 -    for ( i = 0; i < NR_DYNIRQS; i++ )
  16.457 -    {
  16.458 -        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
  16.459 -        irq_bindcount[dynirq_to_irq(i)] = 0;
  16.460 -
  16.461 -        irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
  16.462 -        irq_desc[dynirq_to_irq(i)].action  = 0;
  16.463 -        irq_desc[dynirq_to_irq(i)].depth   = 1;
  16.464 -        irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
  16.465 -    }
  16.466 -
  16.467 -    for ( i = 0; i < NR_PIRQS; i++ )
  16.468 -    {
  16.469 -        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
  16.470 -        irq_bindcount[pirq_to_irq(i)] = 1;
  16.471 -
  16.472 -        irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
  16.473 -        irq_desc[pirq_to_irq(i)].action  = 0;
  16.474 -        irq_desc[pirq_to_irq(i)].depth   = 1;
  16.475 -        irq_desc[pirq_to_irq(i)].handler = &pirq_type;
  16.476 -    }
  16.477 -
  16.478 -    (void)setup_irq(bind_virq_to_irq(VIRQ_MISDIRECT), &misdirect_action);
  16.479 -
  16.480 -    /* This needs to be done early, but after the IRQ subsystem is alive. */
  16.481 -    ctrl_if_init();
  16.482 -}
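The loop in evtchn_do_upcall() above recovers pending ports from a two-level bitmap: evtchn_pending_sel marks which 32-bit words of evtchn_pending[] may hold work, each word is filtered against evtchn_mask[], and the port number is rebuilt as (l1i << 5) + l2i. The user-space sketch below illustrates only that decode; every name ending in _demo is invented for the illustration, and a 32-bit word size (as on i386) is assumed.

    /* Illustrative decode of a two-level pending bitmap, mirroring the loop
     * in evtchn_do_upcall(). Not kernel code; _demo names are invented. */
    #include <stdio.h>
    #include <strings.h>                       /* ffs() */

    #define WORDS_DEMO 32                      /* 32 words x 32 bits = 1024 ports */

    static unsigned long sel_demo;                 /* like evtchn_pending_sel */
    static unsigned long pending_demo[WORDS_DEMO]; /* like evtchn_pending[]   */
    static unsigned long mask_demo[WORDS_DEMO];    /* like evtchn_mask[]      */

    static void decode_pending_demo(void)
    {
        unsigned long l1, l2;
        int l1i, l2i;

        l1 = sel_demo;                         /* the real code uses xchg(&sel, 0) */
        sel_demo = 0;
        while ( (l1i = ffs(l1)) != 0 )
        {
            l1i--;
            l1 &= ~(1UL << l1i);
            l2 = pending_demo[l1i] & ~mask_demo[l1i];
            while ( (l2i = ffs(l2)) != 0 )
            {
                l2i--;
                l2 &= ~(1UL << l2i);
                printf("port %d is pending\n", (l1i << 5) + l2i);
            }
        }
    }

    int main(void)
    {
        sel_demo = 1UL << 2;                   /* word 2 has something pending */
        pending_demo[2] = 1UL << 7;            /* port (2 << 5) + 7 = 71       */
        decode_pending_demo();
        return 0;
    }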
    17.1 --- a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/setup.c	Mon Aug 02 10:30:38 2004 +0000
    17.2 +++ b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/setup.c	Mon Aug 02 14:19:48 2004 +0000
    17.3 @@ -48,7 +48,7 @@
    17.4  #include <asm/io_apic.h>
    17.5  #include <asm/ist.h>
    17.6  #include <asm/std_resources.h>
    17.7 -#include <asm/hypervisor.h>
    17.8 +#include <asm-xen/hypervisor.h>
    17.9  #include "setup_arch_pre.h"
   17.10  
   17.11  int disable_pse __initdata = 0;
    18.1 --- a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/hypervisor.c	Mon Aug 02 10:30:38 2004 +0000
    18.2 +++ b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/hypervisor.c	Mon Aug 02 14:19:48 2004 +0000
    18.3 @@ -1,18 +1,18 @@
    18.4  /******************************************************************************
    18.5 - * xen/i386/mm/hypervisor.c
    18.6 + * mm/hypervisor.c
    18.7   * 
    18.8   * Update page tables via the hypervisor.
    18.9   * 
   18.10 - * Copyright (c) 2002, K A Fraser
   18.11 + * Copyright (c) 2002-2004, K A Fraser
   18.12   */
   18.13  
   18.14  #include <linux/config.h>
   18.15  #include <linux/sched.h>
   18.16  #include <linux/mm.h>
   18.17  #include <linux/vmalloc.h>
   18.18 -#include <asm/hypervisor.h>
   18.19  #include <asm/page.h>
   18.20  #include <asm/pgtable.h>
   18.21 +#include <asm-xen/hypervisor.h>
   18.22  #include <asm-xen/multicall.h>
   18.23  
   18.24  /*
   18.25 @@ -23,11 +23,14 @@
   18.26   */
   18.27  static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
   18.28  
   18.29 -#if 0
   18.30 +/* Linux 2.6 isn't using the traditional batched interface. */
   18.31 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
   18.32  #define QUEUE_SIZE 2048
   18.33 +#define pte_offset_kernel pte_offset
   18.34  #else
   18.35  #define QUEUE_SIZE 1
   18.36  #endif
   18.37 +
   18.38  static mmu_update_t update_queue[QUEUE_SIZE];
   18.39  unsigned int mmu_update_queue_idx = 0;
   18.40  #define idx mmu_update_queue_idx
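The queue manipulated above implements batched page-table updates: callers append mmu_update_t entries to update_queue[] under update_lock, and the whole batch is handed to the hypervisor in a single HYPERVISOR_mmu_update() call when the queue fills or is flushed explicitly; a QUEUE_SIZE of 1 on 2.6 effectively disables batching. The toy below is not the body of any function in hypervisor.c; it only illustrates the accumulate-then-flush idea, with _demo names invented for the illustration and submit_batch_demo() standing in for the hypercall.

    /* User-space toy of the accumulate-then-flush pattern behind
     * update_queue[] and mmu_update_queue_idx. Illustration only. */
    #include <stdio.h>

    #define QUEUE_SIZE_DEMO 4

    typedef struct { unsigned long ptr, val; } mmu_update_demo_t;

    static mmu_update_demo_t queue_demo[QUEUE_SIZE_DEMO];
    static unsigned int idx_demo;

    static void submit_batch_demo(mmu_update_demo_t *req, unsigned int count)
    {
        /* Stands in for HYPERVISOR_mmu_update(req, count, NULL). */
        printf("flushing %u updates, first ptr %#lx\n", count, req[0].ptr);
    }

    static void queue_update_demo(unsigned long ptr, unsigned long val)
    {
        queue_demo[idx_demo].ptr = ptr;        /* address of the entry to change */
        queue_demo[idx_demo].val = val;        /* new value to install           */
        if ( ++idx_demo == QUEUE_SIZE_DEMO )   /* queue full: flush in one call  */
        {
            submit_batch_demo(queue_demo, idx_demo);
            idx_demo = 0;
        }
    }

    int main(void)
    {
        unsigned long i;
        for ( i = 0; i < 9; i++ )
            queue_update_demo(0x1000 + i * 8, i);
        if ( idx_demo != 0 )                   /* explicit flush of the remainder */
        {
            submit_batch_demo(queue_demo, idx_demo);
            idx_demo = 0;
        }
        return 0;
    }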
    19.1 --- a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/init.c	Mon Aug 02 10:30:38 2004 +0000
    19.2 +++ b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/init.c	Mon Aug 02 14:19:48 2004 +0000
    19.3 @@ -40,7 +40,7 @@
    19.4  #include <asm/tlb.h>
    19.5  #include <asm/tlbflush.h>
    19.6  #include <asm/sections.h>
    19.7 -#include <asm/hypervisor.h>
    19.8 +#include <asm-xen/hypervisor.h>
    19.9  
   19.10  DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
   19.11  unsigned long highstart_pfn, highend_pfn;
    20.1 --- a/linux-2.6.7-xen-sparse/arch/xen/kernel/Makefile	Mon Aug 02 10:30:38 2004 +0000
    20.2 +++ b/linux-2.6.7-xen-sparse/arch/xen/kernel/Makefile	Mon Aug 02 14:19:48 2004 +0000
    20.3 @@ -9,4 +9,4 @@ XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
    20.4  
    20.5  extra-y += vmlinux.lds.s
    20.6  
    20.7 -obj-y	:= ctrl_if.o process.o reboot.o xen_proc.o empty.o
    20.8 +obj-y	:= ctrl_if.o evtchn.o process.o reboot.o xen_proc.o empty.o
    21.1 --- a/linux-2.6.7-xen-sparse/arch/xen/kernel/ctrl_if.c	Mon Aug 02 10:30:38 2004 +0000
    21.2 +++ b/linux-2.6.7-xen-sparse/arch/xen/kernel/ctrl_if.c	Mon Aug 02 14:19:48 2004 +0000
    21.3 @@ -47,12 +47,6 @@ static ctrl_msg_handler_t ctrl_if_rxmsg_
    21.4  static unsigned long ctrl_if_rxmsg_blocking_context[256/sizeof(unsigned long)];
    21.5      /* Is it late enough during bootstrap to use schedule_task()? */
    21.6  static int safe_to_schedule_task;
    21.7 -#if 0                           /* XXXcl tq */
    21.8 -    /* Passed to schedule_task(). */
    21.9 -static struct tq_struct ctrl_if_rxmsg_deferred_tq;
   21.10 -#else
   21.11 -static struct work_struct ctrl_if_rxmsg_deferred_work;
   21.12 -#endif
   21.13      /* Queue up messages to be handled in process context. */
   21.14  static ctrl_msg_t ctrl_if_rxmsg_deferred[CONTROL_RING_SIZE];
   21.15  static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_prod;
   21.16 @@ -64,11 +58,14 @@ static struct {
   21.17      unsigned long      id;
   21.18  } ctrl_if_txmsg_id_mapping[CONTROL_RING_SIZE];
   21.19  
   21.20 -#if 0                           /* XXXcl tq */
   21.21 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
   21.22 +static struct tq_struct ctrl_if_rxmsg_deferred_tq;
   21.23  static DECLARE_TASK_QUEUE(ctrl_if_tx_tq);
   21.24  #else
   21.25 +static struct work_struct ctrl_if_rxmsg_deferred_work;
   21.26  static struct workqueue_struct *ctrl_if_tx_wq = NULL;
   21.27  #endif
   21.28 +
   21.29  static DECLARE_WAIT_QUEUE_HEAD(ctrl_if_tx_wait);
   21.30  static void __ctrl_if_tx_tasklet(unsigned long data);
   21.31  static DECLARE_TASKLET(ctrl_if_tx_tasklet, __ctrl_if_tx_tasklet, 0);
   21.32 @@ -125,9 +122,10 @@ static void __ctrl_if_tx_tasklet(unsigne
   21.33  
   21.34      if ( was_full && !TX_FULL(ctrl_if) )
   21.35      {
   21.36 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
   21.37 +        run_task_queue(&ctrl_if_tx_tq);
   21.38 +#else
   21.39          wake_up(&ctrl_if_tx_wait);
   21.40 -#if 0                           /* XXXcl tq */
   21.41 -        run_task_queue(&ctrl_if_tx_tq);
   21.42  #endif
   21.43      }
   21.44  }
   21.45 @@ -162,12 +160,13 @@ static void __ctrl_if_rx_tasklet(unsigne
   21.46          if ( msg.length != 0 )
   21.47              memcpy(msg.msg, pmsg->msg, msg.length);
   21.48  
   21.49 -        if ( test_bit(msg.type, (unsigned long *)&ctrl_if_rxmsg_blocking_context) )
   21.50 +        if ( test_bit(msg.type, 
   21.51 +                      (unsigned long *)&ctrl_if_rxmsg_blocking_context) )
   21.52          {
   21.53              pmsg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
   21.54                  ctrl_if_rxmsg_deferred_prod++)];
   21.55              memcpy(pmsg, &msg, offsetof(ctrl_msg_t, msg) + msg.length);
   21.56 -#if 0                           /* XXXcl tq */
   21.57 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
   21.58              schedule_task(&ctrl_if_rxmsg_deferred_tq);
   21.59  #else
   21.60              schedule_work(&ctrl_if_rxmsg_deferred_work);
   21.61 @@ -180,7 +179,8 @@ static void __ctrl_if_rx_tasklet(unsigne
   21.62      }
   21.63  }
   21.64  
   21.65 -static irqreturn_t ctrl_if_interrupt(int irq, void *dev_id, struct pt_regs *regs)
   21.66 +static irqreturn_t ctrl_if_interrupt(int irq, void *dev_id,
   21.67 +                                     struct pt_regs *regs)
   21.68  {
   21.69      control_if_t *ctrl_if = get_ctrl_if();
   21.70  
   21.71 @@ -280,10 +280,10 @@ int ctrl_if_enqueue_space_callback(struc
   21.72      if ( !TX_FULL(ctrl_if) )
   21.73          return 0;
   21.74  
   21.75 -#if 0                           /* XXXcl tq */
   21.76 -    (void)queue_task(task, &ctrl_if_tx_tq);
   21.77 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
   21.78 +    (void)queue_task(work, &ctrl_if_tx_tq);
   21.79  #else
   21.80 -    if (ctrl_if_tx_wq)
   21.81 +    if ( ctrl_if_tx_wq )
   21.82          (void)queue_work(ctrl_if_tx_wq, work);
   21.83      else
   21.84          return 1;
   21.85 @@ -435,10 +435,11 @@ void __init ctrl_if_init(void)
   21.86  
   21.87      for ( i = 0; i < 256; i++ )
   21.88          ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
   21.89 -#if 0                           /* XXXcl tq */
   21.90 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
   21.91      ctrl_if_rxmsg_deferred_tq.routine = __ctrl_if_rxmsg_deferred;
   21.92  #else
   21.93 -    INIT_WORK(&ctrl_if_rxmsg_deferred_work, (void *)__ctrl_if_rxmsg_deferred,
   21.94 +    INIT_WORK(&ctrl_if_rxmsg_deferred_work,
   21.95 +              (void *)__ctrl_if_rxmsg_deferred,
   21.96                NULL);
   21.97  #endif
   21.98  
   21.99 @@ -453,9 +454,11 @@ void __init ctrl_if_init(void)
  21.100  static int __init ctrl_if_late_setup(void)
  21.101  {
  21.102      safe_to_schedule_task = 1;
  21.103 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  21.104      ctrl_if_tx_wq = create_workqueue("ctrl_if_tx");
  21.105      if (ctrl_if_tx_wq == NULL)
  21.106          return 1;                 /* XXX */
  21.107 +#endif
  21.108      return 0;
  21.109  }
  21.110  __initcall(ctrl_if_late_setup);
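The conditionals introduced above select between two kernel facilities for deferring work to process context: the 2.4 task-queue API (tq_struct, schedule_task, run_task_queue) and the 2.6 workqueue API (work_struct, INIT_WORK, schedule_work, create_workqueue). A minimal sketch of that compatibility pattern follows; it is not code from ctrl_if.c, and the helper names are invented.

    /* Compatibility sketch: defer a handler to process context on both 2.4
     * and 2.6, selected by LINUX_VERSION_CODE. Invented helper names. */
    #include <linux/version.h>
    #include <linux/sched.h>
    #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    #include <linux/tqueue.h>
    #else
    #include <linux/workqueue.h>
    #endif

    static void deferred_handler(void *unused)
    {
        /* Runs in process context, so it may sleep. */
    }

    #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    static struct tq_struct deferred_tq;
    static void defer_init(void) { deferred_tq.routine = deferred_handler; }
    static void defer_kick(void) { schedule_task(&deferred_tq); }
    #else
    static struct work_struct deferred_work;
    /* INIT_WORK() still takes a data argument in 2.6.7, as used above. */
    static void defer_init(void) { INIT_WORK(&deferred_work, deferred_handler, NULL); }
    static void defer_kick(void) { schedule_work(&deferred_work); }
    #endif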
    22.1 --- a/linux-2.6.7-xen-sparse/arch/xen/kernel/empty.c	Mon Aug 02 10:30:38 2004 +0000
    22.2 +++ b/linux-2.6.7-xen-sparse/arch/xen/kernel/empty.c	Mon Aug 02 14:19:48 2004 +0000
    22.3 @@ -1,6 +1,6 @@
    22.4  
    22.5  #include <linux/string.h>
    22.6 -#include <asm/hypervisor.h>
    22.7 +#include <asm-xen/hypervisor.h>
    22.8  
    22.9  #if 0
   22.10  static __inline__ int HYPERVISOR_console_write(const char *str, int count)
    23.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.2 +++ b/linux-2.6.7-xen-sparse/arch/xen/kernel/evtchn.c	Mon Aug 02 14:19:48 2004 +0000
    23.3 @@ -0,0 +1,479 @@
    23.4 +/******************************************************************************
    23.5 + * evtchn.c
    23.6 + * 
    23.7 + * Communication via Xen event channels.
    23.8 + * 
    23.9 + * Copyright (c) 2002-2004, K A Fraser
   23.10 + */
   23.11 +
   23.12 +#include <linux/config.h>
   23.13 +#include <linux/irq.h>
   23.14 +#include <linux/interrupt.h>
   23.15 +#include <linux/sched.h>
   23.16 +#include <linux/kernel_stat.h>
   23.17 +#include <asm/atomic.h>
   23.18 +#include <asm/system.h>
   23.19 +#include <asm/ptrace.h>
   23.20 +#include <asm/synch_bitops.h>
   23.21 +#include <asm/hypervisor-ifs/event_channel.h>
   23.22 +#include <asm/hypervisor-ifs/physdev.h>
   23.23 +#include <asm-xen/ctrl_if.h>
   23.24 +#include <asm-xen/hypervisor.h>
   23.25 +
   23.26 +/*
   23.27 + * This lock protects updates to the following mapping and reference-count
   23.28 + * arrays. The lock does not need to be acquired to read the mapping tables.
   23.29 + */
   23.30 +static spinlock_t irq_mapping_update_lock;
   23.31 +
   23.32 +/* IRQ <-> event-channel mappings. */
   23.33 +static int evtchn_to_irq[NR_EVENT_CHANNELS];
   23.34 +static int irq_to_evtchn[NR_IRQS];
   23.35 +
   23.36 +/* IRQ <-> VIRQ mapping. */
   23.37 +static int virq_to_irq[NR_VIRQS];
   23.38 +
   23.39 +/* Reference counts for bindings to IRQs. */
   23.40 +static int irq_bindcount[NR_IRQS];
   23.41 +
   23.42 +/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
   23.43 +static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
   23.44 +
   23.45 +/* Upcall to generic IRQ layer. */
   23.46 +extern asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs);
   23.47 +
   23.48 +#define VALID_EVTCHN(_chn) ((_chn) != -1)
   23.49 +
   23.50 +void evtchn_do_upcall(struct pt_regs *regs)
   23.51 +{
   23.52 +    unsigned long  l1, l2;
   23.53 +    unsigned int   l1i, l2i, port;
   23.54 +    int            irq;
   23.55 +    unsigned long  flags;
   23.56 +    shared_info_t *s = HYPERVISOR_shared_info;
   23.57 +
   23.58 +    local_irq_save(flags);
   23.59 +    
   23.60 +    while ( s->vcpu_data[0].evtchn_upcall_pending )
   23.61 +    {
   23.62 +        s->vcpu_data[0].evtchn_upcall_pending = 0;
   23.63 +        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
   23.64 +        l1 = xchg(&s->evtchn_pending_sel, 0);
   23.65 +        while ( (l1i = ffs(l1)) != 0 )
   23.66 +        {
   23.67 +            l1i--;
   23.68 +            l1 &= ~(1 << l1i);
   23.69 +        
   23.70 +            l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
   23.71 +            while ( (l2i = ffs(l2)) != 0 )
   23.72 +            {
   23.73 +                l2i--;
   23.74 +                l2 &= ~(1 << l2i);
   23.75 +            
   23.76 +                port = (l1i << 5) + l2i;
   23.77 +                if ( (irq = evtchn_to_irq[port]) != -1 )
   23.78 +                    do_IRQ(irq, regs);
   23.79 +                else
   23.80 +                    evtchn_device_upcall(port);
   23.81 +            }
   23.82 +        }
   23.83 +    }
   23.84 +
   23.85 +    local_irq_restore(flags);
   23.86 +}
   23.87 +
   23.88 +
   23.89 +static int find_unbound_irq(void)
   23.90 +{
   23.91 +    int irq;
   23.92 +
   23.93 +    for ( irq = 0; irq < NR_IRQS; irq++ )
   23.94 +        if ( irq_bindcount[irq] == 0 )
   23.95 +            break;
   23.96 +
   23.97 +    if ( irq == NR_IRQS )
   23.98 +        panic("No available IRQ to bind to: increase NR_IRQS!\n");
   23.99 +
  23.100 +    return irq;
  23.101 +}
  23.102 +
  23.103 +int bind_virq_to_irq(int virq)
  23.104 +{
  23.105 +    evtchn_op_t op;
  23.106 +    int evtchn, irq;
  23.107 +
  23.108 +    spin_lock(&irq_mapping_update_lock);
  23.109 +
  23.110 +    if ( (irq = virq_to_irq[virq]) == -1 )
  23.111 +    {
  23.112 +        op.cmd              = EVTCHNOP_bind_virq;
  23.113 +        op.u.bind_virq.virq = virq;
  23.114 +        if ( HYPERVISOR_event_channel_op(&op) != 0 )
  23.115 +            panic("Failed to bind virtual IRQ %d\n", virq);
  23.116 +        evtchn = op.u.bind_virq.port;
  23.117 +
  23.118 +        irq = find_unbound_irq();
  23.119 +        evtchn_to_irq[evtchn] = irq;
  23.120 +        irq_to_evtchn[irq]    = evtchn;
  23.121 +
  23.122 +        virq_to_irq[virq] = irq;
  23.123 +    }
  23.124 +
  23.125 +    irq_bindcount[irq]++;
  23.126 +
  23.127 +    spin_unlock(&irq_mapping_update_lock);
  23.128 +    
  23.129 +    return irq;
  23.130 +}
  23.131 +
  23.132 +void unbind_virq_from_irq(int virq)
  23.133 +{
  23.134 +    evtchn_op_t op;
  23.135 +    int irq    = virq_to_irq[virq];
  23.136 +    int evtchn = irq_to_evtchn[irq];
  23.137 +
  23.138 +    spin_lock(&irq_mapping_update_lock);
  23.139 +
  23.140 +    if ( --irq_bindcount[irq] == 0 )
  23.141 +    {
  23.142 +        op.cmd          = EVTCHNOP_close;
  23.143 +        op.u.close.dom  = DOMID_SELF;
  23.144 +        op.u.close.port = evtchn;
  23.145 +        if ( HYPERVISOR_event_channel_op(&op) != 0 )
  23.146 +            panic("Failed to unbind virtual IRQ %d\n", virq);
  23.147 +
  23.148 +        evtchn_to_irq[evtchn] = -1;
  23.149 +        irq_to_evtchn[irq]    = -1;
  23.150 +        virq_to_irq[virq]     = -1;
  23.151 +    }
  23.152 +
  23.153 +    spin_unlock(&irq_mapping_update_lock);
  23.154 +}
  23.155 +
  23.156 +int bind_evtchn_to_irq(int evtchn)
  23.157 +{
  23.158 +    int irq;
  23.159 +
  23.160 +    spin_lock(&irq_mapping_update_lock);
  23.161 +
  23.162 +    if ( (irq = evtchn_to_irq[evtchn]) == -1 )
  23.163 +    {
  23.164 +        irq = find_unbound_irq();
  23.165 +        evtchn_to_irq[evtchn] = irq;
  23.166 +        irq_to_evtchn[irq]    = evtchn;
  23.167 +    }
  23.168 +
  23.169 +    irq_bindcount[irq]++;
  23.170 +
  23.171 +    spin_unlock(&irq_mapping_update_lock);
  23.172 +    
  23.173 +    return irq;
  23.174 +}
  23.175 +
  23.176 +void unbind_evtchn_from_irq(int evtchn)
  23.177 +{
  23.178 +    int irq = evtchn_to_irq[evtchn];
  23.179 +
  23.180 +    spin_lock(&irq_mapping_update_lock);
  23.181 +
  23.182 +    if ( --irq_bindcount[irq] == 0 )
  23.183 +    {
  23.184 +        evtchn_to_irq[evtchn] = -1;
  23.185 +        irq_to_evtchn[irq]    = -1;
  23.186 +    }
  23.187 +
  23.188 +    spin_unlock(&irq_mapping_update_lock);
  23.189 +}
  23.190 +
  23.191 +
  23.192 +/*
  23.193 + * Interface to generic handling in irq.c
  23.194 + */
  23.195 +
  23.196 +static unsigned int startup_dynirq(unsigned int irq)
  23.197 +{
  23.198 +    unmask_evtchn(irq_to_evtchn[irq]);
  23.199 +    return 0;
  23.200 +}
  23.201 +
  23.202 +static void shutdown_dynirq(unsigned int irq)
  23.203 +{
  23.204 +    mask_evtchn(irq_to_evtchn[irq]);
  23.205 +}
  23.206 +
  23.207 +static void enable_dynirq(unsigned int irq)
  23.208 +{
  23.209 +    unmask_evtchn(irq_to_evtchn[irq]);
  23.210 +}
  23.211 +
  23.212 +static void disable_dynirq(unsigned int irq)
  23.213 +{
  23.214 +    mask_evtchn(irq_to_evtchn[irq]);
  23.215 +}
  23.216 +
  23.217 +static void ack_dynirq(unsigned int irq)
  23.218 +{
  23.219 +    mask_evtchn(irq_to_evtchn[irq]);
  23.220 +    clear_evtchn(irq_to_evtchn[irq]);
  23.221 +}
  23.222 +
  23.223 +static void end_dynirq(unsigned int irq)
  23.224 +{
  23.225 +    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
  23.226 +        unmask_evtchn(irq_to_evtchn[irq]);
  23.227 +}
  23.228 +
  23.229 +static struct hw_interrupt_type dynirq_type = {
  23.230 +    "Dynamic-irq",
  23.231 +    startup_dynirq,
  23.232 +    shutdown_dynirq,
  23.233 +    enable_dynirq,
  23.234 +    disable_dynirq,
  23.235 +    ack_dynirq,
  23.236 +    end_dynirq,
  23.237 +    NULL
  23.238 +};
  23.239 +
  23.240 +static inline void pirq_unmask_notify(int pirq)
  23.241 +{
  23.242 +    physdev_op_t op;
  23.243 +    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
  23.244 +    {
  23.245 +        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
  23.246 +        (void)HYPERVISOR_physdev_op(&op);
  23.247 +    }
  23.248 +}
  23.249 +
  23.250 +static inline void pirq_query_unmask(int pirq)
  23.251 +{
  23.252 +    physdev_op_t op;
  23.253 +    op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
  23.254 +    op.u.irq_status_query.irq = pirq;
  23.255 +    (void)HYPERVISOR_physdev_op(&op);
  23.256 +    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
  23.257 +    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
  23.258 +        set_bit(pirq, &pirq_needs_unmask_notify[0]);
  23.259 +}
  23.260 +
  23.261 +/*
  23.262 + * On startup, if there is no action associated with the IRQ then we are
  23.263 + * probing. In this case we should not share with others as it will confuse us.
  23.264 + */
  23.265 +#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
  23.266 +
  23.267 +static unsigned int startup_pirq(unsigned int irq)
  23.268 +{
  23.269 +    evtchn_op_t op;
  23.270 +    int evtchn;
  23.271 +
  23.272 +    op.cmd               = EVTCHNOP_bind_pirq;
  23.273 +    op.u.bind_pirq.pirq  = irq;
  23.274 +    /* NB. We are happy to share unless we are probing. */
  23.275 +    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
  23.276 +    if ( HYPERVISOR_event_channel_op(&op) != 0 )
  23.277 +    {
  23.278 +        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
  23.279 +            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
  23.280 +        return 0;
  23.281 +    }
  23.282 +    evtchn = op.u.bind_pirq.port;
  23.283 +
  23.284 +    pirq_query_unmask(irq_to_pirq(irq));
  23.285 +
  23.286 +    evtchn_to_irq[evtchn] = irq;
  23.287 +    irq_to_evtchn[irq]    = evtchn;
  23.288 +
  23.289 +    unmask_evtchn(evtchn);
  23.290 +    pirq_unmask_notify(irq_to_pirq(irq));
  23.291 +
  23.292 +    return 0;
  23.293 +}
  23.294 +
  23.295 +static void shutdown_pirq(unsigned int irq)
  23.296 +{
  23.297 +    evtchn_op_t op;
  23.298 +    int evtchn = irq_to_evtchn[irq];
  23.299 +
  23.300 +    if ( !VALID_EVTCHN(evtchn) )
  23.301 +        return;
  23.302 +
  23.303 +    mask_evtchn(evtchn);
  23.304 +
  23.305 +    op.cmd          = EVTCHNOP_close;
  23.306 +    op.u.close.dom  = DOMID_SELF;
  23.307 +    op.u.close.port = evtchn;
  23.308 +    if ( HYPERVISOR_event_channel_op(&op) != 0 )
  23.309 +        panic("Failed to unbind physical IRQ %d\n", irq);
  23.310 +
  23.311 +    evtchn_to_irq[evtchn] = -1;
  23.312 +    irq_to_evtchn[irq]    = -1;
  23.313 +}
  23.314 +
  23.315 +static void enable_pirq(unsigned int irq)
  23.316 +{
  23.317 +    int evtchn = irq_to_evtchn[irq];
  23.318 +    if ( !VALID_EVTCHN(evtchn) )
  23.319 +        return;
  23.320 +    unmask_evtchn(evtchn);
  23.321 +    pirq_unmask_notify(irq_to_pirq(irq));
  23.322 +}
  23.323 +
  23.324 +static void disable_pirq(unsigned int irq)
  23.325 +{
  23.326 +    int evtchn = irq_to_evtchn[irq];
  23.327 +    if ( !VALID_EVTCHN(evtchn) )
  23.328 +        return;
  23.329 +    mask_evtchn(evtchn);
  23.330 +}
  23.331 +
  23.332 +static void ack_pirq(unsigned int irq)
  23.333 +{
  23.334 +    int evtchn = irq_to_evtchn[irq];
  23.335 +    if ( !VALID_EVTCHN(evtchn) )
  23.336 +        return;
  23.337 +    mask_evtchn(evtchn);
  23.338 +    clear_evtchn(evtchn);
  23.339 +}
  23.340 +
  23.341 +static void end_pirq(unsigned int irq)
  23.342 +{
  23.343 +    int evtchn = irq_to_evtchn[irq];
  23.344 +    if ( !VALID_EVTCHN(evtchn) )
  23.345 +        return;
  23.346 +    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
  23.347 +    {
  23.348 +        unmask_evtchn(evtchn);
  23.349 +        pirq_unmask_notify(irq_to_pirq(irq));
  23.350 +    }
  23.351 +}
  23.352 +
  23.353 +static struct hw_interrupt_type pirq_type = {
  23.354 +    "Phys-irq",
  23.355 +    startup_pirq,
  23.356 +    shutdown_pirq,
  23.357 +    enable_pirq,
  23.358 +    disable_pirq,
  23.359 +    ack_pirq,
  23.360 +    end_pirq,
  23.361 +    NULL
  23.362 +};
  23.363 +
  23.364 +static irqreturn_t misdirect_interrupt(int irq, void *dev_id,
  23.365 +                                       struct pt_regs *regs)
  23.366 +{
  23.367 +    /* nothing */
  23.368 +    return IRQ_HANDLED;
  23.369 +}
  23.370 +
  23.371 +static struct irqaction misdirect_action = {
  23.372 +    misdirect_interrupt, 
  23.373 +    SA_INTERRUPT, 
  23.374 +    0, 
  23.375 +    "misdirect", 
  23.376 +    NULL, 
  23.377 +    NULL
  23.378 +};
  23.379 +
  23.380 +void irq_suspend(void)
  23.381 +{
  23.382 +    int virq, irq, evtchn;
  23.383 +
  23.384 +    /* Unbind VIRQs from event channels. */
  23.385 +    for ( virq = 0; virq < NR_VIRQS; virq++ )
  23.386 +    {
  23.387 +        if ( (irq = virq_to_irq[virq]) == -1 )
  23.388 +            continue;
  23.389 +        evtchn = irq_to_evtchn[irq];
  23.390 +
  23.391 +        /* Mark the event channel as unused in our table. */
  23.392 +        evtchn_to_irq[evtchn] = -1;
  23.393 +        irq_to_evtchn[irq]    = -1;
  23.394 +    }
  23.395 +
  23.396 +    /*
  23.397 +     * We should now be unbound from all event channels. Stale bindings to 
  23.398 +     * PIRQs and/or inter-domain event channels will cause us to barf here.
  23.399 +     */
  23.400 +    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
  23.401 +        if ( evtchn_to_irq[evtchn] != -1 )
  23.402 +            panic("Suspend attempted while bound to evtchn %d.\n", evtchn);
  23.403 +}
  23.404 +
  23.405 +
  23.406 +void irq_resume(void)
  23.407 +{
  23.408 +    evtchn_op_t op;
  23.409 +    int         virq, irq, evtchn;
  23.410 +
  23.411 +    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
  23.412 +        mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
  23.413 +
  23.414 +    for ( virq = 0; virq < NR_VIRQS; virq++ )
  23.415 +    {
  23.416 +        if ( (irq = virq_to_irq[virq]) == -1 )
  23.417 +            continue;
  23.418 +
  23.419 +        /* Get a new binding from Xen. */
  23.420 +        op.cmd              = EVTCHNOP_bind_virq;
  23.421 +        op.u.bind_virq.virq = virq;
  23.422 +        if ( HYPERVISOR_event_channel_op(&op) != 0 )
  23.423 +            panic("Failed to bind virtual IRQ %d\n", virq);
  23.424 +        evtchn = op.u.bind_virq.port;
  23.425 +        
  23.426 +        /* Record the new mapping. */
  23.427 +        evtchn_to_irq[evtchn] = irq;
  23.428 +        irq_to_evtchn[irq]    = evtchn;
  23.429 +
  23.430 +        /* Ready for use. */
  23.431 +        unmask_evtchn(evtchn);
  23.432 +    }
  23.433 +}
  23.434 +
  23.435 +void __init init_IRQ(void)
  23.436 +{
  23.437 +    int i;
  23.438 +
  23.439 +    spin_lock_init(&irq_mapping_update_lock);
  23.440 +
  23.441 +    /* No VIRQ -> IRQ mappings. */
  23.442 +    for ( i = 0; i < NR_VIRQS; i++ )
  23.443 +        virq_to_irq[i] = -1;
  23.444 +
  23.445 +    /* No event-channel -> IRQ mappings. */
  23.446 +    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
  23.447 +    {
  23.448 +        evtchn_to_irq[i] = -1;
  23.449 +        mask_evtchn(i); /* No event channels are 'live' right now. */
  23.450 +    }
  23.451 +
  23.452 +    /* No IRQ -> event-channel mappings. */
  23.453 +    for ( i = 0; i < NR_IRQS; i++ )
  23.454 +        irq_to_evtchn[i] = -1;
  23.455 +
  23.456 +    for ( i = 0; i < NR_DYNIRQS; i++ )
  23.457 +    {
  23.458 +        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
  23.459 +        irq_bindcount[dynirq_to_irq(i)] = 0;
  23.460 +
  23.461 +        irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
  23.462 +        irq_desc[dynirq_to_irq(i)].action  = 0;
  23.463 +        irq_desc[dynirq_to_irq(i)].depth   = 1;
  23.464 +        irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
  23.465 +    }
  23.466 +
  23.467 +    for ( i = 0; i < NR_PIRQS; i++ )
  23.468 +    {
  23.469 +        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
  23.470 +        irq_bindcount[pirq_to_irq(i)] = 1;
  23.471 +
  23.472 +        irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
  23.473 +        irq_desc[pirq_to_irq(i)].action  = 0;
  23.474 +        irq_desc[pirq_to_irq(i)].depth   = 1;
  23.475 +        irq_desc[pirq_to_irq(i)].handler = &pirq_type;
  23.476 +    }
  23.477 +
  23.478 +    (void)setup_irq(bind_virq_to_irq(VIRQ_MISDIRECT), &misdirect_action);
  23.479 +
  23.480 +    /* This needs to be done early, but after the IRQ subsystem is alive. */
  23.481 +    ctrl_if_init();
  23.482 +}
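A driver consumes the interface added above by first mapping a VIRQ or event channel to a dynamic Linux IRQ and then registering a handler on that IRQ as usual. The fragment below is a hypothetical usage sketch, not code from this changeset: the handler, function names and error handling are invented, VIRQ_TIMER merely stands in for whichever VIRQ a driver cares about, and the binding declarations are assumed to be reachable via <asm-xen/evtchn.h>.

    #include <linux/interrupt.h>
    #include <asm-xen/evtchn.h>        /* assumed home of bind_virq_to_irq() etc. */

    static irqreturn_t example_virq_handler(int irq, void *dev_id,
                                            struct pt_regs *regs)
    {
        /* The event channel was masked/cleared by ack_dynirq() already. */
        return IRQ_HANDLED;
    }

    static int example_bind(void)
    {
        int err;
        int irq = bind_virq_to_irq(VIRQ_TIMER);      /* illustrative VIRQ */

        err = request_irq(irq, example_virq_handler, SA_INTERRUPT,
                          "example-virq", NULL);
        if ( err != 0 )
            unbind_virq_from_irq(VIRQ_TIMER);        /* undo the binding */
        return err;
    }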
    24.1 --- a/linux-2.6.7-xen-sparse/arch/xen/kernel/reboot.c	Mon Aug 02 10:30:38 2004 +0000
    24.2 +++ b/linux-2.6.7-xen-sparse/arch/xen/kernel/reboot.c	Mon Aug 02 14:19:48 2004 +0000
    24.3 @@ -1,6 +1,6 @@
    24.4  
    24.5  #include <linux/module.h>
    24.6 -#include <asm/hypervisor.h>
    24.7 +#include <asm-xen/hypervisor.h>
    24.8  
    24.9  int reboot_thru_bios = 0;	/* for dmi_scan.c */
   24.10  
    25.1 --- a/linux-2.6.7-xen-sparse/drivers/xen/blkback/common.h	Mon Aug 02 10:30:38 2004 +0000
    25.2 +++ b/linux-2.6.7-xen-sparse/drivers/xen/blkback/common.h	Mon Aug 02 14:19:48 2004 +0000
    25.3 @@ -12,17 +12,13 @@
    25.4  #include <linux/interrupt.h>
    25.5  #include <linux/slab.h>
    25.6  #include <linux/blkdev.h>
    25.7 -#include <asm-xen/ctrl_if.h>
    25.8  #include <asm/io.h>
    25.9  #include <asm/setup.h>
   25.10  #include <asm/pgalloc.h>
   25.11 +#include <asm-xen/ctrl_if.h>
   25.12 +#include <asm-xen/hypervisor.h>
   25.13  #include <asm-xen/hypervisor-ifs/io/blkif.h>
   25.14  
   25.15 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
   25.16 -#define irqreturn_t void
   25.17 -#define IRQ_HANDLED
   25.18 -#endif
   25.19 -
   25.20  #if 0
   25.21  #define ASSERT(_p) \
   25.22      if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
    26.1 --- a/linux-2.6.7-xen-sparse/drivers/xen/console/console.c	Mon Aug 02 10:30:38 2004 +0000
    26.2 +++ b/linux-2.6.7-xen-sparse/drivers/xen/console/console.c	Mon Aug 02 14:19:48 2004 +0000
    26.3 @@ -26,8 +26,8 @@
    26.4  #include <asm/io.h>
    26.5  #include <asm/irq.h>
    26.6  #include <asm/uaccess.h>
    26.7 -#include <asm/hypervisor.h>
    26.8  #include <asm/hypervisor-ifs/event_channel.h>
    26.9 +#include <asm-xen/hypervisor.h>
   26.10  #include <asm-xen/evtchn.h>
   26.11  #include <asm-xen/ctrl_if.h>
   26.12  
   26.13 @@ -80,8 +80,6 @@ static struct tty_driver xencons_driver;
   26.14  static struct tq_struct xencons_tx_flush_task = {
   26.15      routine: xencons_tx_flush_task_routine
   26.16  };
   26.17 -#define irqreturn_t void
   26.18 -#define IRQ_HANDLED
   26.19  #endif
   26.20  
   26.21  
    27.1 --- a/linux-2.6.7-xen-sparse/drivers/xen/evtchn/evtchn.c	Mon Aug 02 10:30:38 2004 +0000
    27.2 +++ b/linux-2.6.7-xen-sparse/drivers/xen/evtchn/evtchn.c	Mon Aug 02 14:19:48 2004 +0000
    27.3 @@ -22,10 +22,16 @@
    27.4  #include <linux/poll.h>
    27.5  #include <linux/irq.h>
    27.6  #include <linux/init.h>
    27.7 -#include <linux/gfp.h>
    27.8  #include <asm-xen/evtchn.h>
    27.9  
   27.10 -#if 0
   27.11 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
   27.12 +#include <linux/devfs_fs_kernel.h>
   27.13 +#define OLD_DEVFS
   27.14 +#else
   27.15 +#include <linux/gfp.h>
   27.16 +#endif
   27.17 +
   27.18 +#ifdef OLD_DEVFS
   27.19  /* NB. This must be shared amongst drivers if more things go in /dev/xen */
   27.20  static devfs_handle_t xen_dev_dir;
   27.21  #endif
   27.22 @@ -50,7 +56,6 @@ static spinlock_t lock;
   27.23  
   27.24  void evtchn_device_upcall(int port)
   27.25  {
   27.26 -
   27.27      spin_lock(&lock);
   27.28  
   27.29      mask_evtchn(port);
   27.30 @@ -314,17 +319,19 @@ static struct file_operations evtchn_fop
   27.31  };
   27.32  
   27.33  static struct miscdevice evtchn_miscdev = {
   27.34 -	.minor		= EVTCHN_MINOR,
   27.35 -	.name		= "evtchn",
   27.36 -	.devfs_name 	= "misc/evtchn",
   27.37 -	.fops		= &evtchn_fops
   27.38 +    .minor        = EVTCHN_MINOR,
   27.39 +    .name         = "evtchn",
   27.40 +    .fops         = &evtchn_fops,
   27.41 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
   27.42 +    .devfs_name   = "misc/evtchn",
   27.43 +#endif
   27.44  };
   27.45  
   27.46  static int __init evtchn_init(void)
   27.47  {
   27.48 -#if 0
   27.49 +#ifdef OLD_DEVFS
   27.50      devfs_handle_t symlink_handle;
   27.51 -    int            err, pos;
   27.52 +    int            pos;
   27.53      char           link_dest[64];
   27.54  #endif
   27.55      int err;
   27.56 @@ -337,7 +344,7 @@ static int __init evtchn_init(void)
   27.57          return err;
   27.58      }
   27.59  
   27.60 -#if 0
   27.61 +#ifdef OLD_DEVFS
   27.62      /* (DEVFS) create directory '/dev/xen'. */
   27.63      xen_dev_dir = devfs_mk_dir(NULL, "xen", NULL);
   27.64  
    28.1 --- a/linux-2.6.7-xen-sparse/drivers/xen/netback/common.h	Mon Aug 02 10:30:38 2004 +0000
    28.2 +++ b/linux-2.6.7-xen-sparse/drivers/xen/netback/common.h	Mon Aug 02 14:19:48 2004 +0000
    28.3 @@ -19,11 +19,6 @@
    28.4  #include <asm/io.h>
    28.5  #include <asm/pgalloc.h>
    28.6  
    28.7 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    28.8 -#define irqreturn_t void
    28.9 -#define IRQ_HANDLED
   28.10 -#endif
   28.11 -
   28.12  #if 0
   28.13  #define ASSERT(_p) \
   28.14      if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
    29.1 --- a/linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c	Mon Aug 02 10:30:38 2004 +0000
    29.2 +++ b/linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c	Mon Aug 02 14:19:48 2004 +0000
    29.3 @@ -25,11 +25,6 @@
    29.4  #include <asm-xen/hypervisor-ifs/io/netif.h>
    29.5  #include <asm/page.h>
    29.6  
    29.7 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    29.8 -#define irqreturn_t void
    29.9 -#define IRQ_HANDLED
   29.10 -#endif
   29.11 -
   29.12  #define RX_BUF_SIZE ((PAGE_SIZE/2)+1) /* Fool the slab allocator :-) */
   29.13  
   29.14  static void network_tx_buf_gc(struct net_device *dev);
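Every stub in the header removed in the next hunk (its contents move under asm-xen/, matching the include changes earlier in this changeset) follows one inline-asm shape: the hypercall number is forced into EAX through the "0" constraint, which aliases the "=a" output; up to a few arguments travel in EBX, ECX, EDX and ESI; the return value comes back in EAX; and the "memory" clobber stops the compiler from caching memory values across the trap. A generic two-argument sketch of that shape, using an invented name, is:

    /* Shape of the hypercall stubs below; two_arg_hypercall_sketch() is an
     * invented name, not part of the header. */
    static inline int two_arg_hypercall_sketch(int nr, unsigned long a1,
                                               unsigned long a2)
    {
        int ret;
        __asm__ __volatile__ (
            TRAP_INSTR                       /* the hypercall trap instruction */
            : "=a" (ret)
            : "0" (nr), "b" (a1), "c" (a2)
            : "memory" );
        return ret;
    }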
    30.1 --- a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/hypervisor.h	Mon Aug 02 10:30:38 2004 +0000
    30.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    30.3 @@ -1,464 +0,0 @@
    30.4 -/******************************************************************************
    30.5 - * hypervisor.h
    30.6 - * 
    30.7 - * Linux-specific hypervisor handling.
    30.8 - * 
    30.9 - * Copyright (c) 2002, K A Fraser
   30.10 - */
   30.11 -
   30.12 -#ifndef __HYPERVISOR_H__
   30.13 -#define __HYPERVISOR_H__
   30.14 -
   30.15 -#include <linux/types.h>
   30.16 -#include <linux/kernel.h>
   30.17 -#include <asm/hypervisor-ifs/hypervisor-if.h>
   30.18 -#include <asm/hypervisor-ifs/dom0_ops.h>
   30.19 -#include <asm/hypervisor-ifs/io/domain_controller.h>
   30.20 -#include <asm/ptrace.h>
   30.21 -#include <asm/page.h>
   30.22 -#include <asm-xen/xen.h>
   30.23 -
   30.24 -/* arch/xen/i386/kernel/setup.c */
   30.25 -union start_info_union
   30.26 -{
   30.27 -    extended_start_info_t start_info;
   30.28 -    char padding[512];
   30.29 -};
   30.30 -extern union start_info_union start_info_union;
   30.31 -#define start_info (start_info_union.start_info)
   30.32 -
   30.33 -/* arch/xen/i386/kernel/hypervisor.c */
   30.34 -void do_hypervisor_callback(struct pt_regs *regs);
   30.35 -
   30.36 -/* arch/xen/i386/mm/init.c */
   30.37 -void wrprotect_bootpt(pgd_t *, void *, int);
   30.38 -
   30.39 -/* arch/xen/i386/kernel/head.S */
   30.40 -void lgdt_finish(void);
   30.41 -
   30.42 -/* arch/xen/i386/mm/hypervisor.c */
   30.43 -/*
   30.44 - * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should be already
   30.45 - * be MACHINE addresses.
   30.46 - */
   30.47 -
   30.48 -extern unsigned int mmu_update_queue_idx;
   30.49 -
   30.50 -void queue_l1_entry_update(pte_t *ptr, unsigned long val);
   30.51 -void queue_l2_entry_update(pmd_t *ptr, unsigned long val);
   30.52 -void queue_pt_switch(unsigned long ptr);
   30.53 -void queue_tlb_flush(void);
   30.54 -void queue_invlpg(unsigned long ptr);
   30.55 -void queue_pgd_pin(unsigned long ptr);
   30.56 -void queue_pgd_unpin(unsigned long ptr);
   30.57 -void queue_pte_pin(unsigned long ptr);
   30.58 -void queue_pte_unpin(unsigned long ptr);
   30.59 -void queue_set_ldt(unsigned long ptr, unsigned long bytes);
   30.60 -void queue_machphys_update(unsigned long mfn, unsigned long pfn);
   30.61 -#define MMU_UPDATE_DEBUG 0
   30.62 -
   30.63 -#if MMU_UPDATE_DEBUG > 0
   30.64 -typedef struct {
   30.65 -    void *ptr;
   30.66 -    unsigned long val, pteval;
   30.67 -    void *ptep;
   30.68 -    int line; char *file;
   30.69 -} page_update_debug_t;
   30.70 -extern page_update_debug_t update_debug_queue[];
   30.71 -#define queue_l1_entry_update(_p,_v) ({                           \
   30.72 - update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
   30.73 - update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
   30.74 - update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
   30.75 - update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
   30.76 - queue_l1_entry_update((_p),(_v));                                \
   30.77 -})
   30.78 -#define queue_l2_entry_update(_p,_v) ({                           \
   30.79 - update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
   30.80 - update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
   30.81 - update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
   30.82 - update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
   30.83 - queue_l2_entry_update((_p),(_v));                                \
   30.84 -})
   30.85 -#endif
   30.86 -
   30.87 -#if MMU_UPDATE_DEBUG > 1
   30.88 -#if MMU_UPDATE_DEBUG > 2
   30.89 -#undef queue_l1_entry_update
   30.90 -#define queue_l1_entry_update(_p,_v) ({                           \
   30.91 - update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
   30.92 - update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
   30.93 - update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
   30.94 - update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
   30.95 - printk("L1 %s %d: %p/%08lx (%08lx -> %08lx)\n", __FILE__, __LINE__,  \
   30.96 -        (_p), virt_to_machine(_p), pte_val(*(_p)),                 \
   30.97 -        (unsigned long)(_v));                                     \
   30.98 - queue_l1_entry_update((_p),(_v));                                \
   30.99 -})
  30.100 -#endif
  30.101 -#undef queue_l2_entry_update
  30.102 -#define queue_l2_entry_update(_p,_v) ({                           \
  30.103 - update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
  30.104 - update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
  30.105 - update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
  30.106 - update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
  30.107 - printk("L2 %s %d: %p/%08lx (%08lx -> %08lx)\n", __FILE__, __LINE__,  \
  30.108 -        (_p), virt_to_machine(_p), pmd_val(*_p),                  \
  30.109 -        (unsigned long)(_v));                                     \
  30.110 - queue_l2_entry_update((_p),(_v));                                \
  30.111 -})
  30.112 -#define queue_pt_switch(_p) ({                                    \
  30.113 - printk("PTSWITCH %s %d: %08lx\n", __FILE__, __LINE__, (_p));     \
  30.114 - queue_pt_switch(_p);                                             \
  30.115 -})   
  30.116 -#define queue_tlb_flush() ({                                      \
  30.117 - printk("TLB FLUSH %s %d\n", __FILE__, __LINE__);                 \
  30.118 - queue_tlb_flush();                                               \
  30.119 -})   
  30.120 -#define queue_invlpg(_p) ({                                       \
  30.121 - printk("INVLPG %s %d: %08lx\n", __FILE__, __LINE__, (_p));       \
  30.122 - queue_invlpg(_p);                                                \
  30.123 -})   
  30.124 -#define queue_pgd_pin(_p) ({                                      \
  30.125 - printk("PGD PIN %s %d: %08lx/%08lx\n", __FILE__, __LINE__, (_p), \
  30.126 -	phys_to_machine(_p));                                     \
  30.127 - queue_pgd_pin(_p);                                               \
  30.128 -})   
  30.129 -#define queue_pgd_unpin(_p) ({                                    \
  30.130 - printk("PGD UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));    \
  30.131 - queue_pgd_unpin(_p);                                             \
  30.132 -})   
  30.133 -#define queue_pte_pin(_p) ({                                      \
  30.134 - printk("PTE PIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));      \
  30.135 - queue_pte_pin(_p);                                               \
  30.136 -})   
  30.137 -#define queue_pte_unpin(_p) ({                                    \
  30.138 - printk("PTE UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));    \
  30.139 - queue_pte_unpin(_p);                                             \
  30.140 -})   
  30.141 -#define queue_set_ldt(_p,_l) ({                                        \
  30.142 - printk("SETL LDT %s %d: %08lx %d\n", __FILE__, __LINE__, (_p), (_l)); \
  30.143 - queue_set_ldt((_p), (_l));                                            \
  30.144 -})   
  30.145 -#endif
  30.146 -
  30.147 -void _flush_page_update_queue(void);
  30.148 -static inline int flush_page_update_queue(void)
  30.149 -{
  30.150 -    unsigned int idx = mmu_update_queue_idx;
  30.151 -    if ( idx != 0 ) _flush_page_update_queue();
  30.152 -    return idx;
  30.153 -}
  30.154 -#define xen_flush_page_update_queue() (_flush_page_update_queue())
  30.155 -void MULTICALL_flush_page_update_queue(void);
  30.156 -
  30.157 -#ifdef CONFIG_XEN_PHYSDEV_ACCESS
  30.158 -/* Allocate a contiguous empty region of low memory. Return virtual start. */
  30.159 -unsigned long allocate_empty_lowmem_region(unsigned long pages);
  30.160 -/* Deallocate a contiguous region of low memory. Return it to the allocator. */
  30.161 -void deallocate_lowmem_region(unsigned long vstart, unsigned long pages);
  30.162 -#endif
  30.163 -
  30.164 -/*
  30.165 - * Assembler stubs for hyper-calls.
  30.166 - */
  30.167 -
  30.168 -static inline int HYPERVISOR_set_trap_table(trap_info_t *table)
  30.169 -{
  30.170 -    int ret;
  30.171 -    __asm__ __volatile__ (
  30.172 -        TRAP_INSTR
  30.173 -        : "=a" (ret) : "0" (__HYPERVISOR_set_trap_table),
  30.174 -        "b" (table) : "memory" );
  30.175 -
  30.176 -    return ret;
  30.177 -}
  30.178 -
  30.179 -static inline int HYPERVISOR_mmu_update(mmu_update_t *req, int count,
  30.180 -					int *success_count)
  30.181 -{
  30.182 -    int ret;
  30.183 -    __asm__ __volatile__ (
  30.184 -        TRAP_INSTR
  30.185 -        : "=a" (ret) : "0" (__HYPERVISOR_mmu_update), 
  30.186 -        "b" (req), "c" (count), "d" (success_count) : "memory" );
  30.187 -
  30.188 -    return ret;
  30.189 -}
  30.190 -
  30.191 -static inline int HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
  30.192 -{
  30.193 -    int ret;
  30.194 -    __asm__ __volatile__ (
  30.195 -        TRAP_INSTR
  30.196 -        : "=a" (ret) : "0" (__HYPERVISOR_set_gdt), 
  30.197 -        "b" (frame_list), "c" (entries) : "memory" );
  30.198 -
  30.199 -
  30.200 -    return ret;
  30.201 -}
  30.202 -
  30.203 -static inline int HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
  30.204 -{
  30.205 -    int ret;
  30.206 -    __asm__ __volatile__ (
  30.207 -        TRAP_INSTR
  30.208 -        : "=a" (ret) : "0" (__HYPERVISOR_stack_switch),
  30.209 -        "b" (ss), "c" (esp) : "memory" );
  30.210 -
  30.211 -    return ret;
  30.212 -}
  30.213 -
  30.214 -static inline int HYPERVISOR_set_callbacks(
  30.215 -    unsigned long event_selector, unsigned long event_address,
  30.216 -    unsigned long failsafe_selector, unsigned long failsafe_address)
  30.217 -{
  30.218 -    int ret;
  30.219 -    __asm__ __volatile__ (
  30.220 -        TRAP_INSTR
  30.221 -        : "=a" (ret) : "0" (__HYPERVISOR_set_callbacks),
  30.222 -        "b" (event_selector), "c" (event_address), 
  30.223 -        "d" (failsafe_selector), "S" (failsafe_address) : "memory" );
  30.224 -
  30.225 -    return ret;
  30.226 -}
  30.227 -
  30.228 -static inline int HYPERVISOR_fpu_taskswitch(void)
  30.229 -{
  30.230 -    int ret;
  30.231 -    __asm__ __volatile__ (
  30.232 -        TRAP_INSTR
  30.233 -        : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) : "memory" );
  30.234 -
  30.235 -    return ret;
  30.236 -}
  30.237 -
  30.238 -static inline int HYPERVISOR_yield(void)
  30.239 -{
  30.240 -    int ret;
  30.241 -    __asm__ __volatile__ (
  30.242 -        TRAP_INSTR
  30.243 -        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
  30.244 -        "b" (SCHEDOP_yield) : "memory" );
  30.245 -
  30.246 -    return ret;
  30.247 -}
  30.248 -
  30.249 -static inline int HYPERVISOR_block(void)
  30.250 -{
  30.251 -    int ret;
  30.252 -    __asm__ __volatile__ (
  30.253 -        TRAP_INSTR
  30.254 -        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
  30.255 -        "b" (SCHEDOP_block) : "memory" );
  30.256 -
  30.257 -    return ret;
  30.258 -}
  30.259 -
  30.260 -static inline int HYPERVISOR_shutdown(void)
  30.261 -{
  30.262 -    int ret;
  30.263 -    __asm__ __volatile__ (
  30.264 -        TRAP_INSTR
  30.265 -        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
  30.266 -        "b" (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
  30.267 -        : "memory" );
  30.268 -
  30.269 -    return ret;
  30.270 -}
  30.271 -
  30.272 -static inline int HYPERVISOR_reboot(void)
  30.273 -{
  30.274 -    int ret;
  30.275 -    __asm__ __volatile__ (
  30.276 -        TRAP_INSTR
  30.277 -        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
  30.278 -        "b" (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
  30.279 -        : "memory" );
  30.280 -
  30.281 -    return ret;
  30.282 -}
  30.283 -
  30.284 -static inline int HYPERVISOR_suspend(unsigned long srec)
  30.285 -{
  30.286 -    int ret;
  30.287 -    /* NB. On suspend, control software expects a suspend record in %esi. */
  30.288 -    __asm__ __volatile__ (
  30.289 -        TRAP_INSTR
  30.290 -        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
  30.291 -        "b" (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)), 
  30.292 -        "S" (srec) : "memory" );
  30.293 -
  30.294 -    return ret;
  30.295 -}
  30.296 -
  30.297 -static inline long HYPERVISOR_set_timer_op(u64 timeout)
  30.298 -{
  30.299 -    int ret;
  30.300 -    unsigned long timeout_hi = (unsigned long)(timeout>>32);
  30.301 -    unsigned long timeout_lo = (unsigned long)timeout;
  30.302 -    __asm__ __volatile__ (
  30.303 -        TRAP_INSTR
  30.304 -        : "=a" (ret) : "0" (__HYPERVISOR_set_timer_op),
  30.305 -        "b" (timeout_hi), "c" (timeout_lo) : "memory" );
  30.306 -
  30.307 -    return ret;
  30.308 -}
  30.309 -
  30.310 -static inline int HYPERVISOR_dom0_op(dom0_op_t *dom0_op)
  30.311 -{
  30.312 -    int ret;
  30.313 -    dom0_op->interface_version = DOM0_INTERFACE_VERSION;
  30.314 -    __asm__ __volatile__ (
  30.315 -        TRAP_INSTR
  30.316 -        : "=a" (ret) : "0" (__HYPERVISOR_dom0_op),
  30.317 -        "b" (dom0_op) : "memory" );
  30.318 -
  30.319 -    return ret;
  30.320 -}
  30.321 -
  30.322 -static inline int HYPERVISOR_set_debugreg(int reg, unsigned long value)
  30.323 -{
  30.324 -    int ret;
  30.325 -    __asm__ __volatile__ (
  30.326 -        TRAP_INSTR
  30.327 -        : "=a" (ret) : "0" (__HYPERVISOR_set_debugreg),
  30.328 -        "b" (reg), "c" (value) : "memory" );
  30.329 -
  30.330 -    return ret;
  30.331 -}
  30.332 -
  30.333 -static inline unsigned long HYPERVISOR_get_debugreg(int reg)
  30.334 -{
  30.335 -    unsigned long ret;
  30.336 -    __asm__ __volatile__ (
  30.337 -        TRAP_INSTR
  30.338 -        : "=a" (ret) : "0" (__HYPERVISOR_get_debugreg),
  30.339 -        "b" (reg) : "memory" );
  30.340 -
  30.341 -    return ret;
  30.342 -}
  30.343 -
  30.344 -static inline int HYPERVISOR_update_descriptor(
  30.345 -    unsigned long ma, unsigned long word1, unsigned long word2)
  30.346 -{
  30.347 -    int ret;
  30.348 -    __asm__ __volatile__ (
  30.349 -        TRAP_INSTR
  30.350 -        : "=a" (ret) : "0" (__HYPERVISOR_update_descriptor), 
  30.351 -        "b" (ma), "c" (word1), "d" (word2) : "memory" );
  30.352 -
  30.353 -    return ret;
  30.354 -}
  30.355 -
  30.356 -static inline int HYPERVISOR_set_fast_trap(int idx)
  30.357 -{
  30.358 -    int ret;
  30.359 -    __asm__ __volatile__ (
  30.360 -        TRAP_INSTR
  30.361 -        : "=a" (ret) : "0" (__HYPERVISOR_set_fast_trap), 
  30.362 -        "b" (idx) : "memory" );
  30.363 -
  30.364 -    return ret;
  30.365 -}
  30.366 -
  30.367 -static inline int HYPERVISOR_dom_mem_op(unsigned int   op,
  30.368 -                                        unsigned long *pages,
  30.369 -                                        unsigned long  nr_pages)
  30.370 -{
  30.371 -    int ret;
  30.372 -    __asm__ __volatile__ (
  30.373 -        TRAP_INSTR
  30.374 -        : "=a" (ret) : "0" (__HYPERVISOR_dom_mem_op),
  30.375 -        "b" (op), "c" (pages), "d" (nr_pages) : "memory" );
  30.376 -
  30.377 -    return ret;
  30.378 -}
  30.379 -
  30.380 -static inline int HYPERVISOR_multicall(void *call_list, int nr_calls)
  30.381 -{
  30.382 -    int ret;
  30.383 -    __asm__ __volatile__ (
  30.384 -        TRAP_INSTR
  30.385 -        : "=a" (ret) : "0" (__HYPERVISOR_multicall),
  30.386 -        "b" (call_list), "c" (nr_calls) : "memory" );
  30.387 -
  30.388 -    return ret;
  30.389 -}
  30.390 -
  30.391 -static inline int HYPERVISOR_update_va_mapping(
  30.392 -    unsigned long page_nr, pte_t new_val, unsigned long flags)
  30.393 -{
  30.394 -    int ret;
  30.395 -    __asm__ __volatile__ (
  30.396 -        TRAP_INSTR
  30.397 -        : "=a" (ret) : "0" (__HYPERVISOR_update_va_mapping), 
  30.398 -        "b" (page_nr), "c" ((new_val).pte_low), "d" (flags) : "memory" );
  30.399 -
  30.400 -    if ( unlikely(ret < 0) )
  30.401 -    {
  30.402 -        printk(KERN_ALERT "Failed update VA mapping: %08lx, %08lx, %08lx\n",
  30.403 -               page_nr, (new_val).pte_low, flags);
  30.404 -        BUG();
  30.405 -    }
  30.406 -
  30.407 -    return ret;
  30.408 -}
  30.409 -
  30.410 -static inline int HYPERVISOR_event_channel_op(void *op)
  30.411 -{
  30.412 -    int ret;
  30.413 -    __asm__ __volatile__ (
  30.414 -        TRAP_INSTR
  30.415 -        : "=a" (ret) : "0" (__HYPERVISOR_event_channel_op),
  30.416 -        "b" (op) : "memory" );
  30.417 -
  30.418 -    return ret;
  30.419 -}
  30.420 -
  30.421 -static inline int HYPERVISOR_xen_version(int cmd)
  30.422 -{
  30.423 -    int ret;
  30.424 -    __asm__ __volatile__ (
  30.425 -        TRAP_INSTR
  30.426 -        : "=a" (ret) : "0" (__HYPERVISOR_xen_version), 
  30.427 -        "b" (cmd) : "memory" );
  30.428 -
  30.429 -    return ret;
  30.430 -}
  30.431 -
  30.432 -static inline int HYPERVISOR_console_io(int cmd, int count, char *str)
  30.433 -{
  30.434 -    int ret;
  30.435 -    __asm__ __volatile__ (
  30.436 -        TRAP_INSTR
  30.437 -        : "=a" (ret) : "0" (__HYPERVISOR_console_io),
  30.438 -        "b" (cmd), "c" (count), "d" (str) : "memory" );
  30.439 -
  30.440 -    return ret;
  30.441 -}
  30.442 -
  30.443 -static inline int HYPERVISOR_physdev_op(void *physdev_op)
  30.444 -{
  30.445 -    int ret;
  30.446 -    __asm__ __volatile__ (
  30.447 -        TRAP_INSTR
  30.448 -        : "=a" (ret) : "0" (__HYPERVISOR_physdev_op),
  30.449 -        "b" (physdev_op) : "memory" );
  30.450 -
  30.451 -    return ret;
  30.452 -}
  30.453 -
  30.454 -static inline int HYPERVISOR_update_va_mapping_otherdomain(
  30.455 -    unsigned long page_nr, pte_t new_val, unsigned long flags, domid_t domid)
  30.456 -{
  30.457 -    int ret;
  30.458 -    __asm__ __volatile__ (
  30.459 -        TRAP_INSTR
  30.460 -        : "=a" (ret) : "0" (__HYPERVISOR_update_va_mapping_otherdomain), 
  30.461 -        "b" (page_nr), "c" ((new_val).pte_low), "d" (flags), "S" (domid) :
  30.462 -        "memory" );
  30.463 -    
  30.464 -    return ret;
  30.465 -}
  30.466 -
  30.467 -#endif /* __HYPERVISOR_H__ */
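
All of the hypercall stubs removed here (and re-added under asm-xen/hypervisor.h further down) share one calling convention: the hypercall number is loaded into %eax, up to four arguments go in %ebx, %ecx, %edx and %esi, TRAP_INSTR traps into Xen, and the result comes back in %eax. A minimal sketch of that shape, assuming TRAP_INSTR and the __HYPERVISOR_* numbers from hypervisor-if.h are in scope (the helper name is illustrative, not part of the changeset):

    /*
     * Minimal sketch of the common stub shape; the name _hypercall1 is
     * illustrative only.
     */
    static inline int _hypercall1(unsigned int op, unsigned long arg)
    {
        int ret;
        __asm__ __volatile__ (
            TRAP_INSTR                /* trap into Xen (int $0x82 in this era) */
            : "=a" (ret)              /* result returned in %eax               */
            : "0" (op), "b" (arg)     /* hypercall number in %eax, arg in %ebx */
            : "memory" );
        return ret;
    }
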
    31.1 --- a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/msr.h	Mon Aug 02 10:30:38 2004 +0000
    31.2 +++ b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/msr.h	Mon Aug 02 14:19:48 2004 +0000
    31.3 @@ -2,7 +2,7 @@
    31.4  #define __ASM_MSR_H
    31.5  
    31.6  #include <linux/smp.h>
    31.7 -#include <asm/hypervisor.h>
    31.8 +#include <asm-xen/hypervisor.h>
    31.9  
   31.10  /*
   31.11   * Access to machine-specific registers (available on 586 and better only)
    32.1 --- a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable.h	Mon Aug 02 10:30:38 2004 +0000
    32.2 +++ b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable.h	Mon Aug 02 14:19:48 2004 +0000
    32.3 @@ -2,7 +2,7 @@
    32.4  #define _I386_PGTABLE_H
    32.5  
    32.6  #include <linux/config.h>
    32.7 -#include <asm/hypervisor.h>
    32.8 +#include <asm-xen/hypervisor.h>
    32.9  
   32.10  /*
   32.11   * The Linux memory management assumes a three-level page table setup. On
    33.1 --- a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/system.h	Mon Aug 02 10:30:38 2004 +0000
    33.2 +++ b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/system.h	Mon Aug 02 14:19:48 2004 +0000
    33.3 @@ -7,7 +7,7 @@
    33.4  #include <asm/synch_bitops.h>
    33.5  #include <asm/segment.h>
    33.6  #include <asm/cpufeature.h>
    33.7 -#include <asm/hypervisor.h>
    33.8 +#include <asm-xen/hypervisor.h>
    33.9  #include <asm-xen/evtchn.h>
   33.10  
   33.11  #ifdef __KERNEL__
    34.1 --- a/linux-2.6.7-xen-sparse/include/asm-xen/ctrl_if.h	Mon Aug 02 10:30:38 2004 +0000
    34.2 +++ b/linux-2.6.7-xen-sparse/include/asm-xen/ctrl_if.h	Mon Aug 02 14:19:48 2004 +0000
    34.3 @@ -9,7 +9,12 @@
    34.4  #ifndef __ASM_XEN__CTRL_IF_H__
    34.5  #define __ASM_XEN__CTRL_IF_H__
    34.6  
    34.7 -#include <asm/hypervisor.h>
    34.8 +#include <asm-xen/hypervisor.h>
    34.9 +
   34.10 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
   34.11 +#include <linux/tqueue.h>
   34.12 +#define work_struct tq_struct
   34.13 +#endif
   34.14  
   34.15  typedef control_msg_t ctrl_msg_t;
   34.16  
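
The 2.4 branch has no struct work_struct, so the guard added above pulls in <linux/tqueue.h> and aliases the type name, letting the shared control-interface code declare its deferred-work object once. Initialisation and scheduling still differ per kernel; a hedged sketch under that assumption (function and variable names are illustrative, not taken from the changeset):

    #include <linux/version.h>
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
    #include <linux/workqueue.h>
    #endif
    #include <asm-xen/ctrl_if.h>          /* pulls in <linux/tqueue.h> on 2.4 */

    static struct work_struct ctrl_work;  /* struct tq_struct on 2.4 via the alias */

    static void ctrl_work_fn(void *unused) { /* deferred processing here */ }

    static void kick_ctrl_work(void)
    {
    #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
        INIT_TQUEUE(&ctrl_work, ctrl_work_fn, NULL);
        schedule_task(&ctrl_work);        /* 2.4: keventd task queue */
    #else
        INIT_WORK(&ctrl_work, ctrl_work_fn, NULL);
        schedule_work(&ctrl_work);        /* 2.6: shared workqueue */
    #endif
    }
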
    35.1 --- a/linux-2.6.7-xen-sparse/include/asm-xen/evtchn.h	Mon Aug 02 10:30:38 2004 +0000
    35.2 +++ b/linux-2.6.7-xen-sparse/include/asm-xen/evtchn.h	Mon Aug 02 14:19:48 2004 +0000
    35.3 @@ -11,7 +11,7 @@
    35.4  #define __ASM_EVTCHN_H__
    35.5  
    35.6  #include <linux/config.h>
    35.7 -#include <asm/hypervisor.h>
    35.8 +#include <asm-xen/hypervisor.h>
    35.9  #include <asm/ptrace.h>
   35.10  #include <asm/synch_bitops.h>
   35.11  #include <asm/hypervisor-ifs/event_channel.h>
    36.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    36.2 +++ b/linux-2.6.7-xen-sparse/include/asm-xen/hypervisor.h	Mon Aug 02 14:19:48 2004 +0000
    36.3 @@ -0,0 +1,468 @@
    36.4 +/******************************************************************************
    36.5 + * hypervisor.h
    36.6 + * 
    36.7 + * Linux-specific hypervisor handling.
    36.8 + * 
    36.9 + * Copyright (c) 2002, K A Fraser
   36.10 + */
   36.11 +
   36.12 +#ifndef __HYPERVISOR_H__
   36.13 +#define __HYPERVISOR_H__
   36.14 +
   36.15 +#include <linux/types.h>
   36.16 +#include <linux/kernel.h>
   36.17 +#include <linux/version.h>
   36.18 +#include <asm/hypervisor-ifs/hypervisor-if.h>
   36.19 +#include <asm/hypervisor-ifs/dom0_ops.h>
   36.20 +#include <asm/hypervisor-ifs/io/domain_controller.h>
   36.21 +#include <asm/ptrace.h>
   36.22 +#include <asm/page.h>
   36.23 +
   36.24 +/* arch/xen/i386/kernel/setup.c */
   36.25 +union start_info_union
   36.26 +{
   36.27 +    extended_start_info_t start_info;
   36.28 +    char padding[512];
   36.29 +};
   36.30 +extern union start_info_union start_info_union;
   36.31 +#define start_info (start_info_union.start_info)
   36.32 +
   36.33 +/* arch/xen/kernel/process.c */
   36.34 +void xen_cpu_idle (void);
   36.35 +
   36.36 +/* arch/xen/i386/kernel/hypervisor.c */
   36.37 +void do_hypervisor_callback(struct pt_regs *regs);
   36.38 +
   36.39 +/* arch/xen/i386/mm/init.c */
   36.40 +void wrprotect_bootpt(pgd_t *, void *, int);
   36.41 +
   36.42 +/* arch/xen/i386/kernel/head.S */
   36.43 +void lgdt_finish(void);
   36.44 +
   36.45 +/* arch/xen/i386/mm/hypervisor.c */
   36.46 +/*
   36.47 + * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
   36.48 + * be MACHINE addresses.
   36.49 + */
   36.50 +
   36.51 +extern unsigned int mmu_update_queue_idx;
   36.52 +
   36.53 +void queue_l1_entry_update(pte_t *ptr, unsigned long val);
   36.54 +void queue_l2_entry_update(pmd_t *ptr, unsigned long val);
   36.55 +void queue_pt_switch(unsigned long ptr);
   36.56 +void queue_tlb_flush(void);
   36.57 +void queue_invlpg(unsigned long ptr);
   36.58 +void queue_pgd_pin(unsigned long ptr);
   36.59 +void queue_pgd_unpin(unsigned long ptr);
   36.60 +void queue_pte_pin(unsigned long ptr);
   36.61 +void queue_pte_unpin(unsigned long ptr);
   36.62 +void queue_set_ldt(unsigned long ptr, unsigned long bytes);
   36.63 +void queue_machphys_update(unsigned long mfn, unsigned long pfn);
   36.64 +#define MMU_UPDATE_DEBUG 0
   36.65 +
   36.66 +#if MMU_UPDATE_DEBUG > 0
   36.67 +typedef struct {
   36.68 +    void *ptr;
   36.69 +    unsigned long val, pteval;
   36.70 +    void *ptep;
   36.71 +    int line; char *file;
   36.72 +} page_update_debug_t;
   36.73 +extern page_update_debug_t update_debug_queue[];
   36.74 +#define queue_l1_entry_update(_p,_v) ({                           \
   36.75 + update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
   36.76 + update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
   36.77 + update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
   36.78 + update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
   36.79 + queue_l1_entry_update((_p),(_v));                                \
   36.80 +})
   36.81 +#define queue_l2_entry_update(_p,_v) ({                           \
   36.82 + update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
   36.83 + update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
   36.84 + update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
   36.85 + update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
   36.86 + queue_l2_entry_update((_p),(_v));                                \
   36.87 +})
   36.88 +#endif
   36.89 +
   36.90 +#if MMU_UPDATE_DEBUG > 1
   36.91 +#if MMU_UPDATE_DEBUG > 2
   36.92 +#undef queue_l1_entry_update
   36.93 +#define queue_l1_entry_update(_p,_v) ({                           \
   36.94 + update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
   36.95 + update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
   36.96 + update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
   36.97 + update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
   36.98 + printk("L1 %s %d: %p/%08lx (%08lx -> %08lx)\n", __FILE__, __LINE__,  \
   36.99 +        (_p), virt_to_machine(_p), pte_val(*(_p)),                 \
  36.100 +        (unsigned long)(_v));                                     \
  36.101 + queue_l1_entry_update((_p),(_v));                                \
  36.102 +})
  36.103 +#endif
  36.104 +#undef queue_l2_entry_update
  36.105 +#define queue_l2_entry_update(_p,_v) ({                           \
  36.106 + update_debug_queue[mmu_update_queue_idx].ptr  = (_p);             \
  36.107 + update_debug_queue[mmu_update_queue_idx].val  = (_v);             \
  36.108 + update_debug_queue[mmu_update_queue_idx].line = __LINE__;         \
  36.109 + update_debug_queue[mmu_update_queue_idx].file = __FILE__;         \
  36.110 + printk("L2 %s %d: %p/%08lx (%08lx -> %08lx)\n", __FILE__, __LINE__,  \
  36.111 +        (_p), virt_to_machine(_p), pmd_val(*_p),                  \
  36.112 +        (unsigned long)(_v));                                     \
  36.113 + queue_l2_entry_update((_p),(_v));                                \
  36.114 +})
  36.115 +#define queue_pt_switch(_p) ({                                    \
  36.116 + printk("PTSWITCH %s %d: %08lx\n", __FILE__, __LINE__, (_p));     \
  36.117 + queue_pt_switch(_p);                                             \
  36.118 +})   
  36.119 +#define queue_tlb_flush() ({                                      \
  36.120 + printk("TLB FLUSH %s %d\n", __FILE__, __LINE__);                 \
  36.121 + queue_tlb_flush();                                               \
  36.122 +})   
  36.123 +#define queue_invlpg(_p) ({                                       \
  36.124 + printk("INVLPG %s %d: %08lx\n", __FILE__, __LINE__, (_p));       \
  36.125 + queue_invlpg(_p);                                                \
  36.126 +})   
  36.127 +#define queue_pgd_pin(_p) ({                                      \
  36.128 + printk("PGD PIN %s %d: %08lx/%08lx\n", __FILE__, __LINE__, (_p), \
  36.129 +	phys_to_machine(_p));                                     \
  36.130 + queue_pgd_pin(_p);                                               \
  36.131 +})   
  36.132 +#define queue_pgd_unpin(_p) ({                                    \
  36.133 + printk("PGD UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));    \
  36.134 + queue_pgd_unpin(_p);                                             \
  36.135 +})   
  36.136 +#define queue_pte_pin(_p) ({                                      \
  36.137 + printk("PTE PIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));      \
  36.138 + queue_pte_pin(_p);                                               \
  36.139 +})   
  36.140 +#define queue_pte_unpin(_p) ({                                    \
  36.141 + printk("PTE UNPIN %s %d: %08lx\n", __FILE__, __LINE__, (_p));    \
  36.142 + queue_pte_unpin(_p);                                             \
  36.143 +})   
  36.144 +#define queue_set_ldt(_p,_l) ({                                        \
  36.145 + printk("SETL LDT %s %d: %08lx %d\n", __FILE__, __LINE__, (_p), (_l)); \
  36.146 + queue_set_ldt((_p), (_l));                                            \
  36.147 +})   
  36.148 +#endif
  36.149 +
  36.150 +void _flush_page_update_queue(void);
  36.151 +static inline int flush_page_update_queue(void)
  36.152 +{
  36.153 +    unsigned int idx = mmu_update_queue_idx;
  36.154 +    if ( idx != 0 ) _flush_page_update_queue();
  36.155 +    return idx;
  36.156 +}
  36.157 +#define xen_flush_page_update_queue() (_flush_page_update_queue())
  36.158 +#define XEN_flush_page_update_queue() (_flush_page_update_queue())
  36.159 +void MULTICALL_flush_page_update_queue(void);
  36.160 +
  36.161 +#ifdef CONFIG_XEN_PHYSDEV_ACCESS
  36.162 +/* Allocate a contiguous empty region of low memory. Return virtual start. */
  36.163 +unsigned long allocate_empty_lowmem_region(unsigned long pages);
  36.164 +/* Deallocate a contiguous region of low memory. Return it to the allocator. */
  36.165 +void deallocate_lowmem_region(unsigned long vstart, unsigned long pages);
  36.166 +#endif
  36.167 +
  36.168 +/*
  36.169 + * Assembler stubs for hyper-calls.
  36.170 + */
  36.171 +
  36.172 +static inline int HYPERVISOR_set_trap_table(trap_info_t *table)
  36.173 +{
  36.174 +    int ret;
  36.175 +    __asm__ __volatile__ (
  36.176 +        TRAP_INSTR
  36.177 +        : "=a" (ret) : "0" (__HYPERVISOR_set_trap_table),
  36.178 +        "b" (table) : "memory" );
  36.179 +
  36.180 +    return ret;
  36.181 +}
  36.182 +
  36.183 +static inline int HYPERVISOR_mmu_update(mmu_update_t *req, int count,
  36.184 +					int *success_count)
  36.185 +{
  36.186 +    int ret;
  36.187 +    __asm__ __volatile__ (
  36.188 +        TRAP_INSTR
  36.189 +        : "=a" (ret) : "0" (__HYPERVISOR_mmu_update), 
  36.190 +        "b" (req), "c" (count), "d" (success_count) : "memory" );
  36.191 +
  36.192 +    return ret;
  36.193 +}
  36.194 +
  36.195 +static inline int HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
  36.196 +{
  36.197 +    int ret;
  36.198 +    __asm__ __volatile__ (
  36.199 +        TRAP_INSTR
  36.200 +        : "=a" (ret) : "0" (__HYPERVISOR_set_gdt), 
  36.201 +        "b" (frame_list), "c" (entries) : "memory" );
  36.202 +
  36.203 +
  36.204 +    return ret;
  36.205 +}
  36.206 +
  36.207 +static inline int HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
  36.208 +{
  36.209 +    int ret;
  36.210 +    __asm__ __volatile__ (
  36.211 +        TRAP_INSTR
  36.212 +        : "=a" (ret) : "0" (__HYPERVISOR_stack_switch),
  36.213 +        "b" (ss), "c" (esp) : "memory" );
  36.214 +
  36.215 +    return ret;
  36.216 +}
  36.217 +
  36.218 +static inline int HYPERVISOR_set_callbacks(
  36.219 +    unsigned long event_selector, unsigned long event_address,
  36.220 +    unsigned long failsafe_selector, unsigned long failsafe_address)
  36.221 +{
  36.222 +    int ret;
  36.223 +    __asm__ __volatile__ (
  36.224 +        TRAP_INSTR
  36.225 +        : "=a" (ret) : "0" (__HYPERVISOR_set_callbacks),
  36.226 +        "b" (event_selector), "c" (event_address), 
  36.227 +        "d" (failsafe_selector), "S" (failsafe_address) : "memory" );
  36.228 +
  36.229 +    return ret;
  36.230 +}
  36.231 +
  36.232 +static inline int HYPERVISOR_fpu_taskswitch(void)
  36.233 +{
  36.234 +    int ret;
  36.235 +    __asm__ __volatile__ (
  36.236 +        TRAP_INSTR
  36.237 +        : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) : "memory" );
  36.238 +
  36.239 +    return ret;
  36.240 +}
  36.241 +
  36.242 +static inline int HYPERVISOR_yield(void)
  36.243 +{
  36.244 +    int ret;
  36.245 +    __asm__ __volatile__ (
  36.246 +        TRAP_INSTR
  36.247 +        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
  36.248 +        "b" (SCHEDOP_yield) : "memory" );
  36.249 +
  36.250 +    return ret;
  36.251 +}
  36.252 +
  36.253 +static inline int HYPERVISOR_block(void)
  36.254 +{
  36.255 +    int ret;
  36.256 +    __asm__ __volatile__ (
  36.257 +        TRAP_INSTR
  36.258 +        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
  36.259 +        "b" (SCHEDOP_block) : "memory" );
  36.260 +
  36.261 +    return ret;
  36.262 +}
  36.263 +
  36.264 +static inline int HYPERVISOR_shutdown(void)
  36.265 +{
  36.266 +    int ret;
  36.267 +    __asm__ __volatile__ (
  36.268 +        TRAP_INSTR
  36.269 +        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
  36.270 +        "b" (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
  36.271 +        : "memory" );
  36.272 +
  36.273 +    return ret;
  36.274 +}
  36.275 +
  36.276 +static inline int HYPERVISOR_reboot(void)
  36.277 +{
  36.278 +    int ret;
  36.279 +    __asm__ __volatile__ (
  36.280 +        TRAP_INSTR
  36.281 +        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
  36.282 +        "b" (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
  36.283 +        : "memory" );
  36.284 +
  36.285 +    return ret;
  36.286 +}
  36.287 +
  36.288 +static inline int HYPERVISOR_suspend(unsigned long srec)
  36.289 +{
  36.290 +    int ret;
  36.291 +    /* NB. On suspend, control software expects a suspend record in %esi. */
  36.292 +    __asm__ __volatile__ (
  36.293 +        TRAP_INSTR
  36.294 +        : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
  36.295 +        "b" (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)), 
  36.296 +        "S" (srec) : "memory" );
  36.297 +
  36.298 +    return ret;
  36.299 +}
  36.300 +
  36.301 +static inline long HYPERVISOR_set_timer_op(u64 timeout)
  36.302 +{
  36.303 +    int ret;
  36.304 +    unsigned long timeout_hi = (unsigned long)(timeout>>32);
  36.305 +    unsigned long timeout_lo = (unsigned long)timeout;
  36.306 +    __asm__ __volatile__ (
  36.307 +        TRAP_INSTR
  36.308 +        : "=a" (ret) : "0" (__HYPERVISOR_set_timer_op),
  36.309 +        "b" (timeout_hi), "c" (timeout_lo) : "memory" );
  36.310 +
  36.311 +    return ret;
  36.312 +}
  36.313 +
  36.314 +static inline int HYPERVISOR_dom0_op(dom0_op_t *dom0_op)
  36.315 +{
  36.316 +    int ret;
  36.317 +    dom0_op->interface_version = DOM0_INTERFACE_VERSION;
  36.318 +    __asm__ __volatile__ (
  36.319 +        TRAP_INSTR
  36.320 +        : "=a" (ret) : "0" (__HYPERVISOR_dom0_op),
  36.321 +        "b" (dom0_op) : "memory" );
  36.322 +
  36.323 +    return ret;
  36.324 +}
  36.325 +
  36.326 +static inline int HYPERVISOR_set_debugreg(int reg, unsigned long value)
  36.327 +{
  36.328 +    int ret;
  36.329 +    __asm__ __volatile__ (
  36.330 +        TRAP_INSTR
  36.331 +        : "=a" (ret) : "0" (__HYPERVISOR_set_debugreg),
  36.332 +        "b" (reg), "c" (value) : "memory" );
  36.333 +
  36.334 +    return ret;
  36.335 +}
  36.336 +
  36.337 +static inline unsigned long HYPERVISOR_get_debugreg(int reg)
  36.338 +{
  36.339 +    unsigned long ret;
  36.340 +    __asm__ __volatile__ (
  36.341 +        TRAP_INSTR
  36.342 +        : "=a" (ret) : "0" (__HYPERVISOR_get_debugreg),
  36.343 +        "b" (reg) : "memory" );
  36.344 +
  36.345 +    return ret;
  36.346 +}
  36.347 +
  36.348 +static inline int HYPERVISOR_update_descriptor(
  36.349 +    unsigned long ma, unsigned long word1, unsigned long word2)
  36.350 +{
  36.351 +    int ret;
  36.352 +    __asm__ __volatile__ (
  36.353 +        TRAP_INSTR
  36.354 +        : "=a" (ret) : "0" (__HYPERVISOR_update_descriptor), 
  36.355 +        "b" (ma), "c" (word1), "d" (word2) : "memory" );
  36.356 +
  36.357 +    return ret;
  36.358 +}
  36.359 +
  36.360 +static inline int HYPERVISOR_set_fast_trap(int idx)
  36.361 +{
  36.362 +    int ret;
  36.363 +    __asm__ __volatile__ (
  36.364 +        TRAP_INSTR
  36.365 +        : "=a" (ret) : "0" (__HYPERVISOR_set_fast_trap), 
  36.366 +        "b" (idx) : "memory" );
  36.367 +
  36.368 +    return ret;
  36.369 +}
  36.370 +
  36.371 +static inline int HYPERVISOR_dom_mem_op(unsigned int   op,
  36.372 +                                        unsigned long *pages,
  36.373 +                                        unsigned long  nr_pages)
  36.374 +{
  36.375 +    int ret;
  36.376 +    __asm__ __volatile__ (
  36.377 +        TRAP_INSTR
  36.378 +        : "=a" (ret) : "0" (__HYPERVISOR_dom_mem_op),
  36.379 +        "b" (op), "c" (pages), "d" (nr_pages) : "memory" );
  36.380 +
  36.381 +    return ret;
  36.382 +}
  36.383 +
  36.384 +static inline int HYPERVISOR_multicall(void *call_list, int nr_calls)
  36.385 +{
  36.386 +    int ret;
  36.387 +    __asm__ __volatile__ (
  36.388 +        TRAP_INSTR
  36.389 +        : "=a" (ret) : "0" (__HYPERVISOR_multicall),
  36.390 +        "b" (call_list), "c" (nr_calls) : "memory" );
  36.391 +
  36.392 +    return ret;
  36.393 +}
  36.394 +
  36.395 +static inline int HYPERVISOR_update_va_mapping(
  36.396 +    unsigned long page_nr, pte_t new_val, unsigned long flags)
  36.397 +{
  36.398 +    int ret;
  36.399 +    __asm__ __volatile__ (
  36.400 +        TRAP_INSTR
  36.401 +        : "=a" (ret) : "0" (__HYPERVISOR_update_va_mapping), 
  36.402 +        "b" (page_nr), "c" ((new_val).pte_low), "d" (flags) : "memory" );
  36.403 +
  36.404 +    if ( unlikely(ret < 0) )
  36.405 +    {
  36.406 +        printk(KERN_ALERT "Failed update VA mapping: %08lx, %08lx, %08lx\n",
  36.407 +               page_nr, (new_val).pte_low, flags);
  36.408 +        BUG();
  36.409 +    }
  36.410 +
  36.411 +    return ret;
  36.412 +}
  36.413 +
  36.414 +static inline int HYPERVISOR_event_channel_op(void *op)
  36.415 +{
  36.416 +    int ret;
  36.417 +    __asm__ __volatile__ (
  36.418 +        TRAP_INSTR
  36.419 +        : "=a" (ret) : "0" (__HYPERVISOR_event_channel_op),
  36.420 +        "b" (op) : "memory" );
  36.421 +
  36.422 +    return ret;
  36.423 +}
  36.424 +
  36.425 +static inline int HYPERVISOR_xen_version(int cmd)
  36.426 +{
  36.427 +    int ret;
  36.428 +    __asm__ __volatile__ (
  36.429 +        TRAP_INSTR
  36.430 +        : "=a" (ret) : "0" (__HYPERVISOR_xen_version), 
  36.431 +        "b" (cmd) : "memory" );
  36.432 +
  36.433 +    return ret;
  36.434 +}
  36.435 +
  36.436 +static inline int HYPERVISOR_console_io(int cmd, int count, char *str)
  36.437 +{
  36.438 +    int ret;
  36.439 +    __asm__ __volatile__ (
  36.440 +        TRAP_INSTR
  36.441 +        : "=a" (ret) : "0" (__HYPERVISOR_console_io),
  36.442 +        "b" (cmd), "c" (count), "d" (str) : "memory" );
  36.443 +
  36.444 +    return ret;
  36.445 +}
  36.446 +
  36.447 +static inline int HYPERVISOR_physdev_op(void *physdev_op)
  36.448 +{
  36.449 +    int ret;
  36.450 +    __asm__ __volatile__ (
  36.451 +        TRAP_INSTR
  36.452 +        : "=a" (ret) : "0" (__HYPERVISOR_physdev_op),
  36.453 +        "b" (physdev_op) : "memory" );
  36.454 +
  36.455 +    return ret;
  36.456 +}
  36.457 +
  36.458 +static inline int HYPERVISOR_update_va_mapping_otherdomain(
  36.459 +    unsigned long page_nr, pte_t new_val, unsigned long flags, domid_t domid)
  36.460 +{
  36.461 +    int ret;
  36.462 +    __asm__ __volatile__ (
  36.463 +        TRAP_INSTR
  36.464 +        : "=a" (ret) : "0" (__HYPERVISOR_update_va_mapping_otherdomain), 
  36.465 +        "b" (page_nr), "c" ((new_val).pte_low), "d" (flags), "S" (domid) :
  36.466 +        "memory" );
  36.467 +    
  36.468 +    return ret;
  36.469 +}
  36.470 +
  36.471 +#endif /* __HYPERVISOR_H__ */
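
The header added above batches page-table writes: the queue_* helpers stash requests, and flush_page_update_queue() pushes everything pending to Xen in a single HYPERVISOR_mmu_update() trap. A minimal caller-side sketch (names are placeholders, not taken from the changeset):

    #include <asm-xen/hypervisor.h>

    /* Illustrative caller only. */
    static void set_one_pte(pte_t *ptep, unsigned long new_machine_val)
    {
        /* Queue the write.  Per the NB. above, the new value must already
         * be a machine address; the pointer refers to the PTE itself. */
        queue_l1_entry_update(ptep, new_machine_val);

        /* Nothing reaches Xen until the queue is flushed, which issues one
         * HYPERVISOR_mmu_update() for everything pending. */
        flush_page_update_queue();
    }
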
    37.1 --- a/linux-2.6.7-xen-sparse/include/asm-xen/multicall.h	Mon Aug 02 10:30:38 2004 +0000
    37.2 +++ b/linux-2.6.7-xen-sparse/include/asm-xen/multicall.h	Mon Aug 02 14:19:48 2004 +0000
    37.3 @@ -5,7 +5,7 @@
    37.4  #ifndef __MULTICALL_H__
    37.5  #define __MULTICALL_H__
    37.6  
    37.7 -#include <asm/hypervisor.h>
    37.8 +#include <asm-xen/hypervisor.h>
    37.9  
   37.10  extern multicall_entry_t multicall_list[];
   37.11  extern int nr_multicall_ents;
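
multicall_list feeds HYPERVISOR_multicall(), which lets a guest issue several hypercalls with a single trap. A sketch of direct use, assuming multicall_entry_t exposes op and args[] fields as in the hypervisor-ifs headers of this era (an assumption, not confirmed by this changeset):

    #include <asm-xen/hypervisor.h>
    #include <asm-xen/multicall.h>

    static void two_ops_one_trap(void)
    {
        multicall_entry_t calls[2];      /* field names assumed: op, args[] */

        calls[0].op      = __HYPERVISOR_fpu_taskswitch;   /* takes no arguments */
        calls[1].op      = __HYPERVISOR_sched_op;
        calls[1].args[0] = SCHEDOP_yield;

        (void)HYPERVISOR_multicall(calls, 2);             /* one trap, two ops */
    }
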
    38.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    38.2 +++ b/linux-2.6.7-xen-sparse/include/asm-xen/suspend.h	Mon Aug 02 14:19:48 2004 +0000
    38.3 @@ -0,0 +1,25 @@
    38.4 +/******************************************************************************
    38.5 + * suspend.h
    38.6 + * 
    38.7 + * NB. This file is part of the Xenolinux interface with Xenoserver control 
    38.8 + * software. It can be included in such software without invoking the GPL.
    38.9 + * 
   38.10 + * Copyright (c) 2003, K A Fraser
   38.11 + */
   38.12 +
   38.13 +#ifndef __ASM_XEN_SUSPEND_H__
   38.14 +#define __ASM_XEN_SUSPEND_H__
   38.15 +
   38.16 +typedef struct suspend_record_st {
   38.17 +    /* To be filled in before resume. */
   38.18 +    extended_start_info_t resume_info;
   38.19 +    /*
    38.20 +     * Machine frame number of a frame that lists, in order, the machine
    38.21 +     * frame numbers of the frames holding the PFN -> MFN translation table.
   38.22 +     */
   38.23 +    unsigned long pfn_to_mfn_frame_list;
   38.24 +    /* Number of entries in the PFN -> MFN translation table. */
   38.25 +    unsigned long nr_pfns;
   38.26 +} suspend_record_t;
   38.27 +
   38.28 +#endif /* __ASM_XEN_SUSPEND_H__ */
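
Before suspending, the kernel fills in this record and hands its machine frame to HYPERVISOR_suspend(), whose stub places it in %esi as noted in hypervisor.h above. A sketch of that flow; the helper name is illustrative, the frame-list construction is elided, and the virt_to_machine()/PAGE_SHIFT conversion is an assumption:

    #include <asm-xen/hypervisor.h>
    #include <asm-xen/suspend.h>

    /* Illustrative only. */
    static void do_suspend_sketch(suspend_record_t *rec, unsigned long nr_pfns,
                                  unsigned long p2m_frame_list_mfn)
    {
        rec->nr_pfns               = nr_pfns;
        rec->pfn_to_mfn_frame_list = p2m_frame_list_mfn;

        /* The stub passes the record's machine frame in %esi (see the NB.
         * in hypervisor.h above). */
        HYPERVISOR_suspend(virt_to_machine(rec) >> PAGE_SHIFT);
    }
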
    39.1 --- a/linux-2.6.7-xen-sparse/include/asm-xen/xen.h	Mon Aug 02 10:30:38 2004 +0000
    39.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    39.3 @@ -1,3 +0,0 @@
    39.4 -
    39.5 -/* arch/xen/kernel/process.c */
    39.6 -void xen_cpu_idle (void);
    40.1 --- a/xen/common/dom_mem_ops.c	Mon Aug 02 10:30:38 2004 +0000
    40.2 +++ b/xen/common/dom_mem_ops.c	Mon Aug 02 14:19:48 2004 +0000
    40.3 @@ -3,7 +3,7 @@
    40.4   *
    40.5   * Code to handle memory related requests from domains eg. balloon driver.
    40.6   *
    40.7 - * Copyright (c) 2003, B Dragovic & K A Fraser.
    40.8 + * Copyright (c) 2003-2004, B Dragovic & K A Fraser.
    40.9   */
   40.10  
   40.11  #include <xen/config.h>
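
dom_mem_ops.c services reservation changes requested from inside a domain, e.g. by a balloon driver calling HYPERVISOR_dom_mem_op(). A guest-side sketch, assuming the MEMOP_decrease_reservation constant from the hypervisor-ifs headers of this era (an assumption; the helper name is illustrative):

    #include <asm-xen/hypervisor.h>

    /* Guest-side sketch only. */
    static int release_one_frame(unsigned long mfn)
    {
        unsigned long mfn_list[1] = { mfn };

        /* Ask Xen to drop this machine frame from the domain's reservation. */
        return HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, mfn_list, 1);
    }
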