ia64/xen-unstable

changeset 7140:61b3b357d827

Merge xen-unstable into xen-ia64-unstable
author djm@kirby.fc.hp.com
date Thu Sep 29 17:28:44 2005 -0600 (2005-09-29)
parents 93e27f7ca8a8 d88e98fd4f7a
children c22741d000a5
files .hgignore Makefile buildconfigs/Rules.mk docs/src/user/installation.tex linux-2.6-xen-sparse/arch/ia64/Kconfig linux-2.6-xen-sparse/arch/ia64/xen-mkbuildtree-pre linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_ia64 linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32 linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c linux-2.6-xen-sparse/arch/xen/kernel/devmem.c linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c linux-2.6-xen-sparse/arch/xen/kernel/fixup.c linux-2.6-xen-sparse/arch/xen/kernel/gnttab.c linux-2.6-xen-sparse/arch/xen/kernel/reboot.c linux-2.6-xen-sparse/arch/xen/kernel/smp.c linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/entry.S linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic_xen.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c linux-2.6-xen-sparse/drivers/xen/blkback/common.h linux-2.6-xen-sparse/drivers/xen/blkback/interface.c linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c linux-2.6-xen-sparse/drivers/xen/blkfront/block.h linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c linux-2.6-xen-sparse/drivers/xen/blktap/common.h linux-2.6-xen-sparse/drivers/xen/blktap/interface.c linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c linux-2.6-xen-sparse/drivers/xen/netback/common.h linux-2.6-xen-sparse/drivers/xen/netback/interface.c linux-2.6-xen-sparse/drivers/xen/netback/netback.c linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c linux-2.6-xen-sparse/drivers/xen/tpmback/common.h linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypervisor.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h linux-2.6-xen-sparse/include/asm-xen/balloon.h linux-2.6-xen-sparse/include/asm-xen/driver_util.h linux-2.6-xen-sparse/include/asm-xen/evtchn.h linux-2.6-xen-sparse/include/asm-xen/foreign_page.h linux-2.6-xen-sparse/include/asm-xen/gnttab.h linux-2.6-xen-sparse/include/asm-xen/xen_proc.h linux-2.6-xen-sparse/include/asm-xen/xenbus.h tools/check/check_hotplug tools/console/daemon/io.c tools/debugger/gdb/README tools/examples/Makefile tools/examples/xmexample.vmx tools/firmware/vmxassist/Makefile tools/firmware/vmxassist/gen.c tools/firmware/vmxassist/head.S tools/firmware/vmxassist/setup.c tools/firmware/vmxassist/vm86.c tools/firmware/vmxassist/vmxloader.c tools/ioemu/hw/cirrus_vga.c tools/ioemu/hw/pc.c tools/ioemu/hw/vga.c tools/ioemu/target-i386-dm/helper2.c tools/ioemu/vl.c tools/libxc/xc_vmx_build.c tools/libxc/xenguest.h tools/libxc/xg_private.h tools/python/xen/lowlevel/xc/xc.c tools/python/xen/lowlevel/xs/xs.c tools/python/xen/web/SrvBase.py tools/python/xen/web/http.py 
tools/python/xen/xend/PrettyPrint.py tools/python/xen/xend/XendCheckpoint.py tools/python/xen/xend/XendClient.py tools/python/xen/xend/XendDomain.py tools/python/xen/xend/XendDomainInfo.py tools/python/xen/xend/image.py tools/python/xen/xend/server/DevController.py tools/python/xen/xend/server/SrvDaemon.py tools/python/xen/xend/server/SrvDmesg.py tools/python/xen/xend/server/SrvDomain.py tools/python/xen/xend/server/SrvDomainDir.py tools/python/xen/xend/server/SrvNode.py tools/python/xen/xend/xenstore/xsnode.py tools/python/xen/xend/xenstore/xstransact.py tools/python/xen/xm/main.py tools/xenstore/Makefile tools/xenstore/speedtest.c tools/xenstore/tdb.c tools/xenstore/tdb.h tools/xenstore/testsuite/04rm.test tools/xenstore/testsuite/08transaction.slowtest tools/xenstore/testsuite/08transaction.test tools/xenstore/testsuite/12readonly.test tools/xenstore/testsuite/14complexperms.test tools/xenstore/testsuite/16block-watch-crash.test tools/xenstore/xenstore_client.c tools/xenstore/xenstored.h tools/xenstore/xenstored_core.c tools/xenstore/xenstored_core.h tools/xenstore/xenstored_domain.c tools/xenstore/xenstored_transaction.c tools/xenstore/xenstored_transaction.h tools/xenstore/xenstored_watch.c tools/xenstore/xenstored_watch.h tools/xenstore/xs.c tools/xenstore/xs.h tools/xenstore/xs_lib.c tools/xenstore/xs_lib.h tools/xenstore/xs_random.c tools/xenstore/xs_stress.c tools/xenstore/xs_tdb_dump.c tools/xenstore/xs_test.c xen/arch/ia64/asm-offsets.c xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/xen/process.c xen/arch/ia64/xen/vcpu.c xen/arch/x86/mm.c xen/arch/x86/shadow32.c xen/arch/x86/shadow_public.c xen/arch/x86/time.c xen/arch/x86/vmx_platform.c xen/arch/x86/vmx_vmcs.c xen/common/grant_table.c xen/include/asm-ia64/vcpu.h xen/include/asm-x86/e820.h xen/include/asm-x86/mm.h xen/include/asm-x86/vmx_platform.h xen/include/xen/grant_table.h
line diff
     1.1 --- a/.hgignore	Thu Sep 29 16:22:02 2005 -0600
     1.2 +++ b/.hgignore	Thu Sep 29 17:28:44 2005 -0600
     1.3 @@ -158,6 +158,7 @@
     1.4  ^tools/xenstore/xs_dom0_test$
     1.5  ^tools/xenstore/xs_random$
     1.6  ^tools/xenstore/xs_stress$
     1.7 +^tools/xenstore/xs_tdb_dump$
     1.8  ^tools/xenstore/xs_test$
     1.9  ^tools/xenstore/xs_watch_stress$
    1.10  ^tools/xentrace/xenctx$
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32	Thu Sep 29 16:22:02 2005 -0600
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32	Thu Sep 29 17:28:44 2005 -0600
     2.3 @@ -2684,7 +2684,7 @@ CONFIG_ACPI_SYSTEM=y
     2.4  #
     2.5  # File systems
     2.6  #
     2.7 -CONFIG_EXT2_FS=m
     2.8 +CONFIG_EXT2_FS=y
     2.9  CONFIG_EXT2_FS_XATTR=y
    2.10  CONFIG_EXT2_FS_POSIX_ACL=y
    2.11  CONFIG_EXT2_FS_SECURITY=y
    2.12 @@ -2913,7 +2913,7 @@ CONFIG_KEYS=y
    2.13  # CONFIG_KEYS_DEBUG_PROC_KEYS is not set
    2.14  CONFIG_SECURITY=y
    2.15  # CONFIG_SECURITY_NETWORK is not set
    2.16 -CONFIG_SECURITY_CAPABILITIES=m
    2.17 +CONFIG_SECURITY_CAPABILITIES=y
    2.18  CONFIG_SECURITY_ROOTPLUG=m
    2.19  CONFIG_SECURITY_SECLVL=m
    2.20  CONFIG_SECURITY_SELINUX=y
     3.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c	Thu Sep 29 16:22:02 2005 -0600
     3.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c	Thu Sep 29 17:28:44 2005 -0600
     3.3 @@ -112,6 +112,10 @@ void xen_idle(void)
     3.4  
     3.5  #ifdef CONFIG_HOTPLUG_CPU
     3.6  #include <asm/nmi.h>
     3.7 +#ifdef CONFIG_SMP
     3.8 +extern void smp_suspend(void);
     3.9 +extern void smp_resume(void);
    3.10 +#endif
    3.11  /* We don't actually take CPU down, just spin without interrupts. */
    3.12  static inline void play_dead(void)
    3.13  {
    3.14 @@ -120,6 +124,14 @@ static inline void play_dead(void)
    3.15  		HYPERVISOR_yield();
    3.16  
    3.17  	__flush_tlb_all();
    3.18 +   /* 
    3.19 +    * Restore IPI/IRQ mappings before marking online to prevent 
    3.20 +    * race between pending interrupts and restoration of handler. 
    3.21 +    */
    3.22 +#ifdef CONFIG_SMP
    3.23 +	local_irq_enable(); /* XXX Needed for smp_resume(). Clean me up. */
    3.24 +	smp_resume();
    3.25 +#endif
    3.26  	cpu_set(smp_processor_id(), cpu_online_map);
    3.27  }
    3.28  #else
    3.29 @@ -135,10 +147,6 @@ static inline void play_dead(void)
    3.30   * low exit latency (ie sit in a loop waiting for
    3.31   * somebody to say that they'd like to reschedule)
    3.32   */
    3.33 -#ifdef CONFIG_SMP
    3.34 -extern void smp_suspend(void);
    3.35 -extern void smp_resume(void);
    3.36 -#endif
    3.37  void cpu_idle (void)
    3.38  {
    3.39  	int cpu = _smp_processor_id();
    3.40 @@ -166,9 +174,6 @@ void cpu_idle (void)
    3.41  				HYPERVISOR_vcpu_down(cpu);
    3.42  #endif
    3.43  				play_dead();
    3.44 -#ifdef CONFIG_SMP
    3.45 -				smp_resume();
    3.46 -#endif
    3.47  				local_irq_enable();
    3.48  			}
    3.49  
     4.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c	Thu Sep 29 16:22:02 2005 -0600
     4.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c	Thu Sep 29 17:28:44 2005 -0600
     4.3 @@ -131,21 +131,9 @@ DECLARE_PER_CPU(int, ipi_to_evtchn[NR_IP
     4.4  
     4.5  static inline void __send_IPI_one(unsigned int cpu, int vector)
     4.6  {
     4.7 -	unsigned int evtchn;
     4.8 -
     4.9 -	evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
    4.10 -	// printk("send_IPI_mask_bitmask cpu %d vector %d evtchn %d\n", cpu, vector, evtchn);
    4.11 -	if (evtchn) {
    4.12 -#if 0
    4.13 -		shared_info_t *s = HYPERVISOR_shared_info;
    4.14 -		while (synch_test_bit(evtchn, &s->evtchn_pending[0]) ||
    4.15 -		       synch_test_bit(evtchn, &s->evtchn_mask[0]))
    4.16 -			;
    4.17 -#endif
    4.18 -		notify_via_evtchn(evtchn);
    4.19 -	} else
    4.20 -		printk("send_IPI to unbound port %d/%d",
    4.21 -		       cpu, vector);
    4.22 +	int evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
    4.23 +	BUG_ON(evtchn < 0);
    4.24 +	notify_remote_via_evtchn(evtchn);
    4.25  }
    4.26  
    4.27  void __send_IPI_shortcut(unsigned int shortcut, int vector)
     5.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c	Thu Sep 29 16:22:02 2005 -0600
     5.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c	Thu Sep 29 17:28:44 2005 -0600
     5.3 @@ -446,27 +446,6 @@ static void __init smp_callin(void)
     5.4  
     5.5  static int cpucount;
     5.6  
     5.7 -
     5.8 -static irqreturn_t ldebug_interrupt(
     5.9 -	int irq, void *dev_id, struct pt_regs *regs)
    5.10 -{
    5.11 -	return IRQ_HANDLED;
    5.12 -}
    5.13 -
    5.14 -static DEFINE_PER_CPU(int, ldebug_irq);
    5.15 -static char ldebug_name[NR_CPUS][15];
    5.16 -
    5.17 -void ldebug_setup(void)
    5.18 -{
    5.19 -	int cpu = smp_processor_id();
    5.20 -
    5.21 -	per_cpu(ldebug_irq, cpu) = bind_virq_to_irq(VIRQ_DEBUG);
    5.22 -	sprintf(ldebug_name[cpu], "ldebug%d", cpu);
    5.23 -	BUG_ON(request_irq(per_cpu(ldebug_irq, cpu), ldebug_interrupt,
    5.24 -	                   SA_INTERRUPT, ldebug_name[cpu], NULL));
    5.25 -}
    5.26 -
    5.27 -
    5.28  extern void local_setup_timer(void);
    5.29  
    5.30  /*
    5.31 @@ -484,7 +463,6 @@ static void __init start_secondary(void 
    5.32  	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
    5.33  		rep_nop();
    5.34  	local_setup_timer();
    5.35 -	ldebug_setup();
    5.36  	smp_intr_init();
    5.37  	local_irq_enable();
    5.38  	/*
     6.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c	Thu Sep 29 16:22:02 2005 -0600
     6.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c	Thu Sep 29 17:28:44 2005 -0600
     6.3 @@ -186,8 +186,8 @@ static inline u64 scale_delta(u64 delta,
     6.4  		"mov  %4,%%eax ; "
     6.5  		"mov  %%edx,%4 ; "
     6.6  		"mul  %5       ; "
     6.7 +		"xor  %5,%5    ; "
     6.8  		"add  %4,%%eax ; "
     6.9 -		"xor  %5,%5    ; "
    6.10  		"adc  %5,%%edx ; "
    6.11  		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
    6.12  		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
    6.13 @@ -836,13 +836,6 @@ void start_hz_timer(void)
    6.14  	cpu_clear(smp_processor_id(), nohz_cpu_mask);
    6.15  }
    6.16  
    6.17 -void time_suspend(void)
    6.18 -{
    6.19 -	/* nothing */
    6.20 -	teardown_irq(per_cpu(timer_irq, 0), &irq_timer);
    6.21 -	unbind_virq_from_irq(VIRQ_TIMER);
    6.22 -}
    6.23 -
    6.24  /* No locking required. We are only CPU running, and interrupts are off. */
    6.25  void time_resume(void)
    6.26  {
    6.27 @@ -854,9 +847,6 @@ void time_resume(void)
    6.28  	per_cpu(processed_system_time, 0) = processed_system_time;
    6.29  
    6.30  	update_wallclock();
    6.31 -
    6.32 -	per_cpu(timer_irq, 0) = bind_virq_to_irq(VIRQ_TIMER);
    6.33 -	(void)setup_irq(per_cpu(timer_irq, 0), &irq_timer);
    6.34  }
    6.35  
    6.36  #ifdef CONFIG_SMP
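
The scale_delta() change above is a carry-flag fix: on x86, xor clears CF, so with "xor %5,%5" sitting between the add and the adc, the carry out of "add %4,%%eax" was thrown away; moving the xor ahead of the add lets the adc pick the carry up. For reference, what the routine computes, written as portable C rather than inline asm (a sketch, not code from this tree, assuming the usual (delta * mul_frac) >> 32 contract):

	#include <stdint.h>

	/* (delta * mul_frac) >> 32, via two 32x32->64 partial products. */
	static inline uint64_t scale_delta_ref(uint64_t delta, uint32_t mul_frac)
	{
		uint64_t lo = (uint64_t)(uint32_t)delta * mul_frac;	/* low half  */
		uint64_t hi = (delta >> 32) * (uint64_t)mul_frac;	/* high half */

		return hi + (lo >> 32);	/* the carry the old asm dropped lives in this sum */
	}
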
     7.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c	Thu Sep 29 16:22:02 2005 -0600
     7.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c	Thu Sep 29 17:28:44 2005 -0600
     7.3 @@ -209,7 +209,10 @@ static void dump_fault_path(unsigned lon
     7.4  {
     7.5  	unsigned long *p, page;
     7.6  
     7.7 -        page = __pa(per_cpu(cur_pgd, smp_processor_id()));
     7.8 +	preempt_disable();
     7.9 +	page = __pa(per_cpu(cur_pgd, smp_processor_id()));
    7.10 +	preempt_enable();
    7.11 +
    7.12  	p  = (unsigned long *)__va(page);
    7.13  	p += (address >> 30) * 2;
    7.14  	printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
    7.15 @@ -237,8 +240,13 @@ static void dump_fault_path(unsigned lon
    7.16  {
    7.17  	unsigned long page;
    7.18  
    7.19 +	preempt_disable();
    7.20  	page = ((unsigned long *) per_cpu(cur_pgd, smp_processor_id()))
    7.21  	    [address >> 22];
    7.22 +	preempt_enable();
    7.23 +
    7.24 +	page = ((unsigned long *) per_cpu(cur_pgd, get_cpu()))
    7.25 +	    [address >> 22];
    7.26  	printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
    7.27  	       machine_to_phys(page));
    7.28  	/*
    7.29 @@ -567,7 +575,9 @@ vmalloc_fault:
    7.30  		pmd_t *pmd, *pmd_k;
    7.31  		pte_t *pte_k;
    7.32  
    7.33 +		preempt_disable();
    7.34  		pgd = index + per_cpu(cur_pgd, smp_processor_id());
    7.35 +		preempt_enable();
    7.36  		pgd_k = init_mm.pgd + index;
    7.37  
    7.38  		if (!pgd_present(*pgd_k))
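
The fault-path hunks above bracket each per_cpu(cur_pgd, smp_processor_id()) read with preempt_disable()/preempt_enable(), so the task cannot migrate between looking up the CPU id and dereferencing that CPU's slot (get_cpu()/put_cpu() is the usual shorthand for the same thing). The pattern in isolation, as a hypothetical helper rather than code from this file:

	static unsigned long cur_pgd_phys_sketch(void)
	{
		unsigned long pa;

		preempt_disable();	/* stay on this CPU across the per-CPU read */
		pa = __pa(per_cpu(cur_pgd, smp_processor_id()));
		preempt_enable();

		return pa;
	}
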
     8.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/devmem.c	Thu Sep 29 16:22:02 2005 -0600
     8.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/devmem.c	Thu Sep 29 17:28:44 2005 -0600
     8.3 @@ -144,3 +144,13 @@ struct file_operations mem_fops = {
     8.4  	.mmap		= mmap_mem,
     8.5  	.open		= open_mem,
     8.6  };
     8.7 +
     8.8 +/*
     8.9 + * Local variables:
    8.10 + *  c-file-style: "linux"
    8.11 + *  indent-tabs-mode: t
    8.12 + *  c-indent-level: 8
    8.13 + *  c-basic-offset: 8
    8.14 + *  tab-width: 8
    8.15 + * End:
    8.16 + */
     9.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Thu Sep 29 16:22:02 2005 -0600
     9.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Thu Sep 29 17:28:44 2005 -0600
     9.3 @@ -3,7 +3,7 @@
     9.4   * 
     9.5   * Communication via Xen event channels.
     9.6   * 
     9.7 - * Copyright (c) 2002-2004, K A Fraser
     9.8 + * Copyright (c) 2002-2005, K A Fraser
     9.9   * 
    9.10   * This file may be distributed separately from the Linux kernel, or
    9.11   * incorporated into other software packages, subject to the following license:
    9.12 @@ -73,27 +73,33 @@ static unsigned long pirq_needs_unmask_n
    9.13  static u8  cpu_evtchn[NR_EVENT_CHANNELS];
    9.14  static u32 cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/32];
    9.15  
    9.16 -#define active_evtchns(cpu,sh,idx)              \
    9.17 -    ((sh)->evtchn_pending[idx] &                \
    9.18 -     cpu_evtchn_mask[cpu][idx] &                \
    9.19 -     ~(sh)->evtchn_mask[idx])
    9.20 +#define active_evtchns(cpu,sh,idx)		\
    9.21 +	((sh)->evtchn_pending[idx] &		\
    9.22 +	 cpu_evtchn_mask[cpu][idx] &		\
    9.23 +	 ~(sh)->evtchn_mask[idx])
    9.24  
    9.25 -void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
    9.26 +static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
    9.27  {
    9.28 -    clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
    9.29 -    set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
    9.30 -    cpu_evtchn[chn] = cpu;
    9.31 +	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
    9.32 +	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
    9.33 +	cpu_evtchn[chn] = cpu;
    9.34 +}
    9.35 +
    9.36 +static void init_evtchn_cpu_bindings(void)
    9.37 +{
    9.38 +	/* By default all event channels notify CPU#0. */
    9.39 +	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
    9.40 +	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
    9.41  }
    9.42  
    9.43  #else
    9.44  
    9.45 -#define active_evtchns(cpu,sh,idx)              \
    9.46 -    ((sh)->evtchn_pending[idx] &                \
    9.47 -     ~(sh)->evtchn_mask[idx])
    9.48 +#define active_evtchns(cpu,sh,idx)		\
    9.49 +	((sh)->evtchn_pending[idx] &		\
    9.50 +	 ~(sh)->evtchn_mask[idx])
    9.51 +#define bind_evtchn_to_cpu(chn,cpu)	((void)0)
    9.52 +#define init_evtchn_cpu_bindings()	((void)0)
    9.53  
    9.54 -void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
    9.55 -{
    9.56 -}
    9.57  #endif
    9.58  
    9.59  /* Upcall to generic IRQ layer. */
    9.60 @@ -108,9 +114,9 @@ extern asmlinkage unsigned int do_IRQ(st
    9.61  #elif defined (__x86_64__)
    9.62  #define IRQ_REG orig_rax
    9.63  #endif
    9.64 -#define do_IRQ(irq, regs) do {                  \
    9.65 -    (regs)->IRQ_REG = (irq);                    \
    9.66 -    do_IRQ((regs));                             \
    9.67 +#define do_IRQ(irq, regs) do {			\
    9.68 +	(regs)->IRQ_REG = (irq);		\
    9.69 +	do_IRQ((regs));				\
    9.70  } while (0)
    9.71  #endif
    9.72  
    9.73 @@ -123,249 +129,241 @@ extern asmlinkage unsigned int do_IRQ(st
    9.74   */
    9.75  void force_evtchn_callback(void)
    9.76  {
    9.77 -    (void)HYPERVISOR_xen_version(0, NULL);
    9.78 +	(void)HYPERVISOR_xen_version(0, NULL);
    9.79  }
    9.80  EXPORT_SYMBOL(force_evtchn_callback);
    9.81  
    9.82  /* NB. Interrupts are disabled on entry. */
    9.83  asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
    9.84  {
    9.85 -    u32     l1, l2;
    9.86 -    unsigned int   l1i, l2i, port;
    9.87 -    int            irq, cpu = smp_processor_id();
    9.88 -    shared_info_t *s = HYPERVISOR_shared_info;
    9.89 -    vcpu_info_t   *vcpu_info = &s->vcpu_data[cpu];
    9.90 -
    9.91 -    vcpu_info->evtchn_upcall_pending = 0;
    9.92 +	u32     l1, l2;
    9.93 +	unsigned int   l1i, l2i, port;
    9.94 +	int            irq, cpu = smp_processor_id();
    9.95 +	shared_info_t *s = HYPERVISOR_shared_info;
    9.96 +	vcpu_info_t   *vcpu_info = &s->vcpu_data[cpu];
    9.97  
    9.98 -    /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
    9.99 -    l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
   9.100 -    while ( l1 != 0 )
   9.101 -    {
   9.102 -        l1i = __ffs(l1);
   9.103 -        l1 &= ~(1 << l1i);
   9.104 +	vcpu_info->evtchn_upcall_pending = 0;
   9.105 +
   9.106 +	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
   9.107 +	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
   9.108 +	while (l1 != 0) {
   9.109 +		l1i = __ffs(l1);
   9.110 +		l1 &= ~(1 << l1i);
   9.111          
   9.112 -        while ( (l2 = active_evtchns(cpu, s, l1i)) != 0 )
   9.113 -        {
   9.114 -            l2i = __ffs(l2);
   9.115 -            l2 &= ~(1 << l2i);
   9.116 +		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
   9.117 +			l2i = __ffs(l2);
   9.118 +			l2 &= ~(1 << l2i);
   9.119              
   9.120 -            port = (l1i << 5) + l2i;
   9.121 -            if ( (irq = evtchn_to_irq[port]) != -1 ) {
   9.122 -                do_IRQ(irq, regs);
   9.123 -	    } else
   9.124 -                evtchn_device_upcall(port);
   9.125 -        }
   9.126 -    }
   9.127 +			port = (l1i << 5) + l2i;
   9.128 +			if ((irq = evtchn_to_irq[port]) != -1)
   9.129 +				do_IRQ(irq, regs);
   9.130 +			else
   9.131 +				evtchn_device_upcall(port);
   9.132 +		}
   9.133 +	}
   9.134  }
   9.135  EXPORT_SYMBOL(evtchn_do_upcall);
   9.136  
   9.137  static int find_unbound_irq(void)
   9.138  {
   9.139 -    int irq;
   9.140 +	int irq;
   9.141  
   9.142 -    for ( irq = 0; irq < NR_IRQS; irq++ )
   9.143 -        if ( irq_bindcount[irq] == 0 )
   9.144 -            break;
   9.145 +	for (irq = 0; irq < NR_IRQS; irq++)
   9.146 +		if (irq_bindcount[irq] == 0)
   9.147 +			break;
   9.148  
   9.149 -    if ( irq == NR_IRQS )
   9.150 -        panic("No available IRQ to bind to: increase NR_IRQS!\n");
   9.151 +	if (irq == NR_IRQS)
   9.152 +		panic("No available IRQ to bind to: increase NR_IRQS!\n");
   9.153  
   9.154 -    return irq;
   9.155 +	return irq;
   9.156  }
   9.157  
   9.158  int bind_virq_to_irq(int virq)
   9.159  {
   9.160 -    evtchn_op_t op;
   9.161 -    int evtchn, irq;
   9.162 -    int cpu = smp_processor_id();
   9.163 -
   9.164 -    spin_lock(&irq_mapping_update_lock);
   9.165 -
   9.166 -    if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
   9.167 -    {
   9.168 -        op.cmd              = EVTCHNOP_bind_virq;
   9.169 -        op.u.bind_virq.virq = virq;
   9.170 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
   9.171 -            panic("Failed to bind virtual IRQ %d\n", virq);
   9.172 -        evtchn = op.u.bind_virq.port;
   9.173 +	evtchn_op_t op;
   9.174 +	int evtchn, irq;
   9.175 +	int cpu = smp_processor_id();
   9.176  
   9.177 -        irq = find_unbound_irq();
   9.178 -        evtchn_to_irq[evtchn] = irq;
   9.179 -        irq_to_evtchn[irq]    = evtchn;
   9.180 -
   9.181 -        per_cpu(virq_to_irq, cpu)[virq] = irq;
   9.182 +	spin_lock(&irq_mapping_update_lock);
   9.183  
   9.184 -        bind_evtchn_to_cpu(evtchn, cpu);
   9.185 -    }
   9.186 +	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
   9.187 +		op.cmd              = EVTCHNOP_bind_virq;
   9.188 +		op.u.bind_virq.virq = virq;
   9.189 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   9.190 +		evtchn = op.u.bind_virq.port;
   9.191  
   9.192 -    irq_bindcount[irq]++;
   9.193 +		irq = find_unbound_irq();
   9.194 +		evtchn_to_irq[evtchn] = irq;
   9.195 +		irq_to_evtchn[irq]    = evtchn;
   9.196  
   9.197 -    spin_unlock(&irq_mapping_update_lock);
   9.198 +		per_cpu(virq_to_irq, cpu)[virq] = irq;
   9.199 +
   9.200 +		bind_evtchn_to_cpu(evtchn, cpu);
   9.201 +	}
   9.202 +
   9.203 +	irq_bindcount[irq]++;
   9.204 +
   9.205 +	spin_unlock(&irq_mapping_update_lock);
   9.206      
   9.207 -    return irq;
   9.208 +	return irq;
   9.209  }
   9.210  EXPORT_SYMBOL(bind_virq_to_irq);
   9.211  
   9.212  void unbind_virq_from_irq(int virq)
   9.213  {
   9.214 -    evtchn_op_t op;
   9.215 -    int cpu    = smp_processor_id();
   9.216 -    int irq    = per_cpu(virq_to_irq, cpu)[virq];
   9.217 -    int evtchn = irq_to_evtchn[irq];
   9.218 -
   9.219 -    spin_lock(&irq_mapping_update_lock);
   9.220 +	evtchn_op_t op;
   9.221 +	int cpu    = smp_processor_id();
   9.222 +	int irq    = per_cpu(virq_to_irq, cpu)[virq];
   9.223 +	int evtchn = irq_to_evtchn[irq];
   9.224  
   9.225 -    if ( --irq_bindcount[irq] == 0 )
   9.226 -    {
   9.227 -        op.cmd          = EVTCHNOP_close;
   9.228 -        op.u.close.dom  = DOMID_SELF;
   9.229 -        op.u.close.port = evtchn;
   9.230 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
   9.231 -            panic("Failed to unbind virtual IRQ %d\n", virq);
   9.232 +	spin_lock(&irq_mapping_update_lock);
   9.233  
   9.234 -        /*
   9.235 -         * This is a slight hack. Interdomain ports can be allocated directly 
   9.236 -         * by userspace, and at that point they get bound by Xen to vcpu 0. We 
   9.237 -         * therefore need to make sure that if we get an event on an event 
   9.238 -         * channel we don't know about vcpu 0 handles it. Binding channels to 
   9.239 -         * vcpu 0 when closing them achieves this.
   9.240 -         */
   9.241 -        bind_evtchn_to_cpu(evtchn, 0);
   9.242 -        evtchn_to_irq[evtchn] = -1;
   9.243 -        irq_to_evtchn[irq]    = -1;
   9.244 -        per_cpu(virq_to_irq, cpu)[virq]     = -1;
   9.245 -    }
   9.246 +	if (--irq_bindcount[irq] == 0) {
   9.247 +		op.cmd          = EVTCHNOP_close;
   9.248 +		op.u.close.dom  = DOMID_SELF;
   9.249 +		op.u.close.port = evtchn;
   9.250 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   9.251  
   9.252 -    spin_unlock(&irq_mapping_update_lock);
   9.253 +		/*
   9.254 +		 * This is a slight hack. Interdomain ports can be allocated 
   9.255 +		 * directly by userspace, and at that point they get bound by 
   9.256 +		 * Xen to vcpu 0. We therefore need to make sure that if we get
   9.257 +		 * an event on an event channel we don't know about vcpu 0 
   9.258 +		 * handles it. Binding channels to vcpu 0 when closing them
   9.259 +		 * achieves this.
   9.260 +		 */
   9.261 +		bind_evtchn_to_cpu(evtchn, 0);
   9.262 +		evtchn_to_irq[evtchn] = -1;
   9.263 +		irq_to_evtchn[irq]    = -1;
   9.264 +		per_cpu(virq_to_irq, cpu)[virq] = -1;
   9.265 +	}
   9.266 +
   9.267 +	spin_unlock(&irq_mapping_update_lock);
   9.268  }
   9.269  EXPORT_SYMBOL(unbind_virq_from_irq);
   9.270  
   9.271  int bind_ipi_to_irq(int ipi)
   9.272  {
   9.273 -    evtchn_op_t op;
   9.274 -    int evtchn, irq;
   9.275 -    int cpu = smp_processor_id();
   9.276 -
   9.277 -    spin_lock(&irq_mapping_update_lock);
   9.278 -
   9.279 -    if ( (evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0 )
   9.280 -    {
   9.281 -        op.cmd = EVTCHNOP_bind_ipi;
   9.282 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
   9.283 -            panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, cpu);
   9.284 -        evtchn = op.u.bind_ipi.port;
   9.285 +	evtchn_op_t op;
   9.286 +	int evtchn, irq;
   9.287 +	int cpu = smp_processor_id();
   9.288  
   9.289 -        irq = find_unbound_irq();
   9.290 -        evtchn_to_irq[evtchn] = irq;
   9.291 -        irq_to_evtchn[irq]    = evtchn;
   9.292 -
   9.293 -        per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
   9.294 +	spin_lock(&irq_mapping_update_lock);
   9.295  
   9.296 -        bind_evtchn_to_cpu(evtchn, cpu);
   9.297 -    } 
   9.298 -    else
   9.299 -    {
   9.300 -        irq = evtchn_to_irq[evtchn];
   9.301 -    }
   9.302 +	if ((evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == -1) {
   9.303 +		op.cmd = EVTCHNOP_bind_ipi;
   9.304 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   9.305 +		evtchn = op.u.bind_ipi.port;
   9.306  
   9.307 -    irq_bindcount[irq]++;
   9.308 +		irq = find_unbound_irq();
   9.309 +		evtchn_to_irq[evtchn] = irq;
   9.310 +		irq_to_evtchn[irq]    = evtchn;
   9.311  
   9.312 -    spin_unlock(&irq_mapping_update_lock);
   9.313 +		per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
   9.314  
   9.315 -    return irq;
   9.316 +		bind_evtchn_to_cpu(evtchn, cpu);
   9.317 +	} else {
   9.318 +		irq = evtchn_to_irq[evtchn];
   9.319 +	}
   9.320 +
   9.321 +	irq_bindcount[irq]++;
   9.322 +
   9.323 +	spin_unlock(&irq_mapping_update_lock);
   9.324 +
   9.325 +	return irq;
   9.326  }
   9.327  EXPORT_SYMBOL(bind_ipi_to_irq);
   9.328  
   9.329  void unbind_ipi_from_irq(int ipi)
   9.330  {
   9.331 -    evtchn_op_t op;
   9.332 -    int cpu    = smp_processor_id();
   9.333 -    int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
   9.334 -    int irq    = evtchn_to_irq[evtchn];
   9.335 -
   9.336 -    spin_lock(&irq_mapping_update_lock);
   9.337 +	evtchn_op_t op;
   9.338 +	int cpu    = smp_processor_id();
   9.339 +	int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
   9.340 +	int irq    = evtchn_to_irq[evtchn];
   9.341  
   9.342 -    if ( --irq_bindcount[irq] == 0 )
   9.343 -    {
   9.344 -        op.cmd          = EVTCHNOP_close;
   9.345 -        op.u.close.dom  = DOMID_SELF;
   9.346 -        op.u.close.port = evtchn;
   9.347 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
   9.348 -            panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);
   9.349 +	spin_lock(&irq_mapping_update_lock);
   9.350  
   9.351 -        /* See comments in unbind_virq_from_irq */
   9.352 -        bind_evtchn_to_cpu(evtchn, 0);
   9.353 -        evtchn_to_irq[evtchn] = -1;
   9.354 -        irq_to_evtchn[irq]    = -1;
   9.355 -        per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
   9.356 -    }
   9.357 +	if (--irq_bindcount[irq] == 0) {
   9.358 +		op.cmd          = EVTCHNOP_close;
   9.359 +		op.u.close.dom  = DOMID_SELF;
   9.360 +		op.u.close.port = evtchn;
   9.361 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   9.362  
   9.363 -    spin_unlock(&irq_mapping_update_lock);
   9.364 +		/* See comments in unbind_virq_from_irq */
   9.365 +		bind_evtchn_to_cpu(evtchn, 0);
   9.366 +		evtchn_to_irq[evtchn] = -1;
   9.367 +		irq_to_evtchn[irq]    = -1;
   9.368 +		per_cpu(ipi_to_evtchn, cpu)[ipi] = -1;
   9.369 +	}
   9.370 +
   9.371 +	spin_unlock(&irq_mapping_update_lock);
   9.372  }
   9.373  EXPORT_SYMBOL(unbind_ipi_from_irq);
   9.374  
   9.375  int bind_evtchn_to_irq(unsigned int evtchn)
   9.376  {
   9.377 -    int irq;
   9.378 -
   9.379 -    spin_lock(&irq_mapping_update_lock);
   9.380 +	int irq;
   9.381  
   9.382 -    if ( (irq = evtchn_to_irq[evtchn]) == -1 )
   9.383 -    {
   9.384 -        irq = find_unbound_irq();
   9.385 -        evtchn_to_irq[evtchn] = irq;
   9.386 -        irq_to_evtchn[irq]    = evtchn;
   9.387 -    }
   9.388 +	spin_lock(&irq_mapping_update_lock);
   9.389  
   9.390 -    irq_bindcount[irq]++;
   9.391 +	if ((irq = evtchn_to_irq[evtchn]) == -1) {
   9.392 +		irq = find_unbound_irq();
   9.393 +		evtchn_to_irq[evtchn] = irq;
   9.394 +		irq_to_evtchn[irq]    = evtchn;
   9.395 +	}
   9.396  
   9.397 -    spin_unlock(&irq_mapping_update_lock);
   9.398 +	irq_bindcount[irq]++;
   9.399 +
   9.400 +	spin_unlock(&irq_mapping_update_lock);
   9.401      
   9.402 -    return irq;
   9.403 +	return irq;
   9.404  }
   9.405  EXPORT_SYMBOL(bind_evtchn_to_irq);
   9.406  
   9.407 -void unbind_evtchn_from_irq(unsigned int evtchn)
   9.408 +void unbind_evtchn_from_irq(unsigned int irq)
   9.409  {
   9.410 -    int irq = evtchn_to_irq[evtchn];
   9.411 -
   9.412 -    spin_lock(&irq_mapping_update_lock);
   9.413 +	evtchn_op_t op;
   9.414 +	int evtchn = irq_to_evtchn[irq];
   9.415  
   9.416 -    if ( --irq_bindcount[irq] == 0 )
   9.417 -    {
   9.418 -        evtchn_to_irq[evtchn] = -1;
   9.419 -        irq_to_evtchn[irq]    = -1;
   9.420 -    }
   9.421 +	spin_lock(&irq_mapping_update_lock);
   9.422  
   9.423 -    spin_unlock(&irq_mapping_update_lock);
   9.424 +	if ((--irq_bindcount[irq] == 0) && (evtchn != -1)) {
   9.425 +		op.cmd          = EVTCHNOP_close;
   9.426 +		op.u.close.dom  = DOMID_SELF;
   9.427 +		op.u.close.port = evtchn;
   9.428 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   9.429 +
   9.430 +		evtchn_to_irq[evtchn] = -1;
   9.431 +		irq_to_evtchn[irq]    = -1;
   9.432 +	}
   9.433 +
   9.434 +	spin_unlock(&irq_mapping_update_lock);
   9.435  }
   9.436  EXPORT_SYMBOL(unbind_evtchn_from_irq);
   9.437  
   9.438  int bind_evtchn_to_irqhandler(
   9.439 -    unsigned int evtchn,
   9.440 -    irqreturn_t (*handler)(int, void *, struct pt_regs *),
   9.441 -    unsigned long irqflags,
   9.442 -    const char *devname,
   9.443 -    void *dev_id)
   9.444 +	unsigned int evtchn,
   9.445 +	irqreturn_t (*handler)(int, void *, struct pt_regs *),
   9.446 +	unsigned long irqflags,
   9.447 +	const char *devname,
   9.448 +	void *dev_id)
   9.449  {
   9.450 -    unsigned int irq;
   9.451 -    int retval;
   9.452 +	unsigned int irq;
   9.453 +	int retval;
   9.454  
   9.455 -    irq = bind_evtchn_to_irq(evtchn);
   9.456 -    retval = request_irq(irq, handler, irqflags, devname, dev_id);
   9.457 -    if ( retval != 0 )
   9.458 -        unbind_evtchn_from_irq(evtchn);
   9.459 +	irq = bind_evtchn_to_irq(evtchn);
   9.460 +	retval = request_irq(irq, handler, irqflags, devname, dev_id);
   9.461 +	if (retval != 0)
   9.462 +		unbind_evtchn_from_irq(irq);
   9.463  
   9.464 -    return retval;
   9.465 +	return irq;
   9.466  }
   9.467  EXPORT_SYMBOL(bind_evtchn_to_irqhandler);
   9.468  
   9.469 -void unbind_evtchn_from_irqhandler(unsigned int evtchn, void *dev_id)
   9.470 +void unbind_evtchn_from_irqhandler(unsigned int irq, void *dev_id)
   9.471  {
   9.472 -    unsigned int irq = evtchn_to_irq[evtchn];
   9.473 -    free_irq(irq, dev_id);
   9.474 -    unbind_evtchn_from_irq(evtchn);
   9.475 +	free_irq(irq, dev_id);
   9.476 +	unbind_evtchn_from_irq(irq);
   9.477  }
   9.478  EXPORT_SYMBOL(unbind_evtchn_from_irqhandler);
   9.479  
   9.480 @@ -378,50 +376,50 @@ static void do_nothing_function(void *ig
   9.481  /* Rebind an evtchn so that it gets delivered to a specific cpu */
   9.482  static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
   9.483  {
   9.484 -    evtchn_op_t op;
   9.485 -    int evtchn;
   9.486 -
   9.487 -    spin_lock(&irq_mapping_update_lock);
   9.488 -    evtchn = irq_to_evtchn[irq];
   9.489 -    if (!VALID_EVTCHN(evtchn)) {
   9.490 -        spin_unlock(&irq_mapping_update_lock);
   9.491 -        return;
   9.492 -    }
   9.493 -
   9.494 -    /* Tell Xen to send future instances of this interrupt to other vcpu. */
   9.495 -    op.cmd = EVTCHNOP_bind_vcpu;
   9.496 -    op.u.bind_vcpu.port = evtchn;
   9.497 -    op.u.bind_vcpu.vcpu = tcpu;
   9.498 +	evtchn_op_t op;
   9.499 +	int evtchn;
   9.500  
   9.501 -    /*
   9.502 -     * If this fails, it usually just indicates that we're dealing with a virq 
   9.503 -     * or IPI channel, which don't actually need to be rebound. Ignore it, 
   9.504 -     * but don't do the xenlinux-level rebind in that case.
   9.505 -     */
   9.506 -    if (HYPERVISOR_event_channel_op(&op) >= 0)
   9.507 -        bind_evtchn_to_cpu(evtchn, tcpu);
   9.508 -
   9.509 -    spin_unlock(&irq_mapping_update_lock);
   9.510 +	spin_lock(&irq_mapping_update_lock);
   9.511 +	evtchn = irq_to_evtchn[irq];
   9.512 +	if (!VALID_EVTCHN(evtchn)) {
   9.513 +		spin_unlock(&irq_mapping_update_lock);
   9.514 +		return;
   9.515 +	}
   9.516  
   9.517 -    /*
   9.518 -     * Now send the new target processor a NOP IPI. When this returns, it 
   9.519 -     * will check for any pending interrupts, and so service any that got 
   9.520 -     * delivered to the wrong processor by mistake.
   9.521 -     * 
   9.522 -     * XXX: The only time this is called with interrupts disabled is from the 
   9.523 -     * hotplug/hotunplug path. In that case, all cpus are stopped with 
   9.524 -     * interrupts disabled, and the missed interrupts will be picked up when 
   9.525 -     * they start again. This is kind of a hack.
   9.526 -     */
   9.527 -    if (!irqs_disabled())
   9.528 -        smp_call_function(do_nothing_function, NULL, 0, 0);
   9.529 +	/* Send future instances of this interrupt to other vcpu. */
   9.530 +	op.cmd = EVTCHNOP_bind_vcpu;
   9.531 +	op.u.bind_vcpu.port = evtchn;
   9.532 +	op.u.bind_vcpu.vcpu = tcpu;
   9.533 +
   9.534 +	/*
   9.535 +	 * If this fails, it usually just indicates that we're dealing with a 
   9.536 +	 * virq or IPI channel, which don't actually need to be rebound. Ignore
   9.537 +	 * it, but don't do the xenlinux-level rebind in that case.
   9.538 +	 */
   9.539 +	if (HYPERVISOR_event_channel_op(&op) >= 0)
   9.540 +		bind_evtchn_to_cpu(evtchn, tcpu);
   9.541 +
   9.542 +	spin_unlock(&irq_mapping_update_lock);
   9.543 +
   9.544 +	/*
   9.545 +	 * Now send the new target processor a NOP IPI. When this returns, it
   9.546 +	 * will check for any pending interrupts, and so service any that got 
   9.547 +	 * delivered to the wrong processor by mistake.
   9.548 +	 * 
   9.549 +	 * XXX: The only time this is called with interrupts disabled is from
   9.550 +	 * the hotplug/hotunplug path. In that case, all cpus are stopped with 
   9.551 +	 * interrupts disabled, and the missed interrupts will be picked up
   9.552 +	 * when they start again. This is kind of a hack.
   9.553 +	 */
   9.554 +	if (!irqs_disabled())
   9.555 +		smp_call_function(do_nothing_function, NULL, 0, 0);
   9.556  }
   9.557  
   9.558  
   9.559  static void set_affinity_irq(unsigned irq, cpumask_t dest)
   9.560  {
   9.561 -    unsigned tcpu = first_cpu(dest);
   9.562 -    rebind_irq_to_cpu(irq, tcpu);
   9.563 +	unsigned tcpu = first_cpu(dest);
   9.564 +	rebind_irq_to_cpu(irq, tcpu);
   9.565  }
   9.566  
   9.567  /*
   9.568 @@ -430,83 +428,84 @@ static void set_affinity_irq(unsigned ir
   9.569  
   9.570  static unsigned int startup_dynirq(unsigned int irq)
   9.571  {
   9.572 -    int evtchn = irq_to_evtchn[irq];
   9.573 +	int evtchn = irq_to_evtchn[irq];
   9.574  
   9.575 -    if ( !VALID_EVTCHN(evtchn) )
   9.576 -        return 0;
   9.577 -    unmask_evtchn(evtchn);
   9.578 -    return 0;
   9.579 +	if (VALID_EVTCHN(evtchn))
   9.580 +		unmask_evtchn(evtchn);
   9.581 +	return 0;
   9.582  }
   9.583  
   9.584  static void shutdown_dynirq(unsigned int irq)
   9.585  {
   9.586 -    int evtchn = irq_to_evtchn[irq];
   9.587 +	int evtchn = irq_to_evtchn[irq];
   9.588  
   9.589 -    if ( !VALID_EVTCHN(evtchn) )
   9.590 -        return;
   9.591 -    mask_evtchn(evtchn);
   9.592 +	if (VALID_EVTCHN(evtchn))
   9.593 +		mask_evtchn(evtchn);
   9.594  }
   9.595  
   9.596  static void enable_dynirq(unsigned int irq)
   9.597  {
   9.598 -    int evtchn = irq_to_evtchn[irq];
   9.599 +	int evtchn = irq_to_evtchn[irq];
   9.600  
   9.601 -    unmask_evtchn(evtchn);
   9.602 +	if (VALID_EVTCHN(evtchn))
   9.603 +		unmask_evtchn(evtchn);
   9.604  }
   9.605  
   9.606  static void disable_dynirq(unsigned int irq)
   9.607  {
   9.608 -    int evtchn = irq_to_evtchn[irq];
   9.609 +	int evtchn = irq_to_evtchn[irq];
   9.610  
   9.611 -    mask_evtchn(evtchn);
   9.612 +	if (VALID_EVTCHN(evtchn))
   9.613 +		mask_evtchn(evtchn);
   9.614  }
   9.615  
   9.616  static void ack_dynirq(unsigned int irq)
   9.617  {
   9.618 -    int evtchn = irq_to_evtchn[irq];
   9.619 +	int evtchn = irq_to_evtchn[irq];
   9.620  
   9.621 -    mask_evtchn(evtchn);
   9.622 -    clear_evtchn(evtchn);
   9.623 +	if (VALID_EVTCHN(evtchn)) {
   9.624 +		mask_evtchn(evtchn);
   9.625 +		clear_evtchn(evtchn);
   9.626 +	}
   9.627  }
   9.628  
   9.629  static void end_dynirq(unsigned int irq)
   9.630  {
   9.631 -    int evtchn = irq_to_evtchn[irq];
   9.632 +	int evtchn = irq_to_evtchn[irq];
   9.633  
   9.634 -    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
   9.635 -        unmask_evtchn(evtchn);
   9.636 +	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
   9.637 +		unmask_evtchn(evtchn);
   9.638  }
   9.639  
   9.640  static struct hw_interrupt_type dynirq_type = {
   9.641 -    "Dynamic-irq",
   9.642 -    startup_dynirq,
   9.643 -    shutdown_dynirq,
   9.644 -    enable_dynirq,
   9.645 -    disable_dynirq,
   9.646 -    ack_dynirq,
   9.647 -    end_dynirq,
   9.648 -    set_affinity_irq
   9.649 +	"Dynamic-irq",
   9.650 +	startup_dynirq,
   9.651 +	shutdown_dynirq,
   9.652 +	enable_dynirq,
   9.653 +	disable_dynirq,
   9.654 +	ack_dynirq,
   9.655 +	end_dynirq,
   9.656 +	set_affinity_irq
   9.657  };
   9.658  
   9.659  static inline void pirq_unmask_notify(int pirq)
   9.660  {
   9.661 -    physdev_op_t op;
   9.662 -    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
   9.663 -    {
   9.664 -        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
   9.665 -        (void)HYPERVISOR_physdev_op(&op);
   9.666 -    }
   9.667 +	physdev_op_t op;
   9.668 +	if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
   9.669 +		op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
   9.670 +		(void)HYPERVISOR_physdev_op(&op);
   9.671 +	}
   9.672  }
   9.673  
   9.674  static inline void pirq_query_unmask(int pirq)
   9.675  {
   9.676 -    physdev_op_t op;
   9.677 -    op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
   9.678 -    op.u.irq_status_query.irq = pirq;
   9.679 -    (void)HYPERVISOR_physdev_op(&op);
   9.680 -    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
   9.681 -    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
   9.682 -        set_bit(pirq, &pirq_needs_unmask_notify[0]);
   9.683 +	physdev_op_t op;
   9.684 +	op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
   9.685 +	op.u.irq_status_query.irq = pirq;
   9.686 +	(void)HYPERVISOR_physdev_op(&op);
   9.687 +	clear_bit(pirq, &pirq_needs_unmask_notify[0]);
   9.688 +	if (op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
   9.689 +		set_bit(pirq, &pirq_needs_unmask_notify[0]);
   9.690  }
   9.691  
   9.692  /*
   9.693 @@ -517,218 +516,252 @@ static inline void pirq_query_unmask(int
   9.694  
   9.695  static unsigned int startup_pirq(unsigned int irq)
   9.696  {
   9.697 -    evtchn_op_t op;
   9.698 -    int evtchn;
   9.699 +	evtchn_op_t op;
   9.700 +	int evtchn;
   9.701  
   9.702 -    op.cmd               = EVTCHNOP_bind_pirq;
   9.703 -    op.u.bind_pirq.pirq  = irq;
   9.704 -    /* NB. We are happy to share unless we are probing. */
   9.705 -    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
   9.706 -    if ( HYPERVISOR_event_channel_op(&op) != 0 )
   9.707 -    {
   9.708 -        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
   9.709 -            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
   9.710 -        return 0;
   9.711 -    }
   9.712 -    evtchn = op.u.bind_pirq.port;
   9.713 +	op.cmd               = EVTCHNOP_bind_pirq;
   9.714 +	op.u.bind_pirq.pirq  = irq;
   9.715 +	/* NB. We are happy to share unless we are probing. */
   9.716 +	op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
   9.717 +	if (HYPERVISOR_event_channel_op(&op) != 0) {
   9.718 +		if ( !probing_irq(irq) )
   9.719 +			printk(KERN_INFO "Failed to obtain physical "
   9.720 +			       "IRQ %d\n", irq);
   9.721 +		return 0;
   9.722 +	}
   9.723 +	evtchn = op.u.bind_pirq.port;
   9.724  
   9.725 -    pirq_query_unmask(irq_to_pirq(irq));
   9.726 +	pirq_query_unmask(irq_to_pirq(irq));
   9.727  
   9.728 -    bind_evtchn_to_cpu(evtchn, 0);
   9.729 -    evtchn_to_irq[evtchn] = irq;
   9.730 -    irq_to_evtchn[irq]    = evtchn;
   9.731 +	bind_evtchn_to_cpu(evtchn, 0);
   9.732 +	evtchn_to_irq[evtchn] = irq;
   9.733 +	irq_to_evtchn[irq]    = evtchn;
   9.734  
   9.735 -    unmask_evtchn(evtchn);
   9.736 -    pirq_unmask_notify(irq_to_pirq(irq));
   9.737 +	unmask_evtchn(evtchn);
   9.738 +	pirq_unmask_notify(irq_to_pirq(irq));
   9.739  
   9.740 -    return 0;
   9.741 +	return 0;
   9.742  }
   9.743  
   9.744  static void shutdown_pirq(unsigned int irq)
   9.745  {
   9.746 -    evtchn_op_t op;
   9.747 -    int evtchn = irq_to_evtchn[irq];
   9.748 -
   9.749 -    if ( !VALID_EVTCHN(evtchn) )
   9.750 -        return;
   9.751 -
   9.752 -    mask_evtchn(evtchn);
   9.753 +	evtchn_op_t op;
   9.754 +	int evtchn = irq_to_evtchn[irq];
   9.755  
   9.756 -    op.cmd          = EVTCHNOP_close;
   9.757 -    op.u.close.dom  = DOMID_SELF;
   9.758 -    op.u.close.port = evtchn;
   9.759 -    if ( HYPERVISOR_event_channel_op(&op) != 0 )
   9.760 -        panic("Failed to unbind physical IRQ %d\n", irq);
   9.761 +	if (!VALID_EVTCHN(evtchn))
   9.762 +		return;
   9.763  
   9.764 -    bind_evtchn_to_cpu(evtchn, 0);
   9.765 -    evtchn_to_irq[evtchn] = -1;
   9.766 -    irq_to_evtchn[irq]    = -1;
   9.767 +	mask_evtchn(evtchn);
   9.768 +
   9.769 +	op.cmd          = EVTCHNOP_close;
   9.770 +	op.u.close.dom  = DOMID_SELF;
   9.771 +	op.u.close.port = evtchn;
   9.772 +	BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   9.773 +
   9.774 +	bind_evtchn_to_cpu(evtchn, 0);
   9.775 +	evtchn_to_irq[evtchn] = -1;
   9.776 +	irq_to_evtchn[irq]    = -1;
   9.777  }
   9.778  
   9.779  static void enable_pirq(unsigned int irq)
   9.780  {
   9.781 -    int evtchn = irq_to_evtchn[irq];
   9.782 -    if ( !VALID_EVTCHN(evtchn) )
   9.783 -        return;
   9.784 -    unmask_evtchn(evtchn);
   9.785 -    pirq_unmask_notify(irq_to_pirq(irq));
   9.786 +	int evtchn = irq_to_evtchn[irq];
   9.787 +
   9.788 +	if (VALID_EVTCHN(evtchn)) {
   9.789 +		unmask_evtchn(evtchn);
   9.790 +		pirq_unmask_notify(irq_to_pirq(irq));
   9.791 +	}
   9.792  }
   9.793  
   9.794  static void disable_pirq(unsigned int irq)
   9.795  {
   9.796 -    int evtchn = irq_to_evtchn[irq];
   9.797 -    if ( !VALID_EVTCHN(evtchn) )
   9.798 -        return;
   9.799 -    mask_evtchn(evtchn);
   9.800 +	int evtchn = irq_to_evtchn[irq];
   9.801 +
   9.802 +	if (VALID_EVTCHN(evtchn))
   9.803 +		mask_evtchn(evtchn);
   9.804  }
   9.805  
   9.806  static void ack_pirq(unsigned int irq)
   9.807  {
   9.808 -    int evtchn = irq_to_evtchn[irq];
   9.809 -    if ( !VALID_EVTCHN(evtchn) )
   9.810 -        return;
   9.811 -    mask_evtchn(evtchn);
   9.812 -    clear_evtchn(evtchn);
   9.813 +	int evtchn = irq_to_evtchn[irq];
   9.814 +
   9.815 +	if (VALID_EVTCHN(evtchn)) {
   9.816 +		mask_evtchn(evtchn);
   9.817 +		clear_evtchn(evtchn);
   9.818 +	}
   9.819  }
   9.820  
   9.821  static void end_pirq(unsigned int irq)
   9.822  {
   9.823 -    int evtchn = irq_to_evtchn[irq];
   9.824 -    if ( !VALID_EVTCHN(evtchn) )
   9.825 -        return;
   9.826 -    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
   9.827 -    {
   9.828 -        unmask_evtchn(evtchn);
   9.829 -        pirq_unmask_notify(irq_to_pirq(irq));
   9.830 -    }
   9.831 +	int evtchn = irq_to_evtchn[irq];
   9.832 +
   9.833 +	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
   9.834 +		unmask_evtchn(evtchn);
   9.835 +		pirq_unmask_notify(irq_to_pirq(irq));
   9.836 +	}
   9.837  }
   9.838  
   9.839  static struct hw_interrupt_type pirq_type = {
   9.840 -    "Phys-irq",
   9.841 -    startup_pirq,
   9.842 -    shutdown_pirq,
   9.843 -    enable_pirq,
   9.844 -    disable_pirq,
   9.845 -    ack_pirq,
   9.846 -    end_pirq,
   9.847 -    set_affinity_irq
   9.848 +	"Phys-irq",
   9.849 +	startup_pirq,
   9.850 +	shutdown_pirq,
   9.851 +	enable_pirq,
   9.852 +	disable_pirq,
   9.853 +	ack_pirq,
   9.854 +	end_pirq,
   9.855 +	set_affinity_irq
   9.856  };
   9.857  
   9.858  void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
   9.859  {
   9.860 -    int evtchn = irq_to_evtchn[i];
   9.861 -    shared_info_t *s = HYPERVISOR_shared_info;
   9.862 -    if ( !VALID_EVTCHN(evtchn) )
   9.863 -        return;
   9.864 -    BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
   9.865 -    synch_set_bit(evtchn, &s->evtchn_pending[0]);
   9.866 +	int evtchn = irq_to_evtchn[i];
   9.867 +	shared_info_t *s = HYPERVISOR_shared_info;
   9.868 +	if (!VALID_EVTCHN(evtchn))
   9.869 +		return;
   9.870 +	BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
   9.871 +	synch_set_bit(evtchn, &s->evtchn_pending[0]);
   9.872  }
   9.873  
   9.874 -void irq_suspend(void)
   9.875 +void notify_remote_via_irq(int irq)
   9.876  {
   9.877 -    int pirq, virq, irq, evtchn;
   9.878 -    int cpu = smp_processor_id(); /* XXX */
   9.879 +	int evtchn = irq_to_evtchn[irq];
   9.880  
   9.881 -    /* Unbind VIRQs from event channels. */
   9.882 -    for ( virq = 0; virq < NR_VIRQS; virq++ )
   9.883 -    {
   9.884 -        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
   9.885 -            continue;
   9.886 -        evtchn = irq_to_evtchn[irq];
   9.887 -
   9.888 -        /* Mark the event channel as unused in our table. */
   9.889 -        evtchn_to_irq[evtchn] = -1;
   9.890 -        irq_to_evtchn[irq]    = -1;
   9.891 -    }
   9.892 -
   9.893 -    /* Check that no PIRQs are still bound. */
   9.894 -    for ( pirq = 0; pirq < NR_PIRQS; pirq++ )
   9.895 -        if ( (evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1 )
   9.896 -            panic("Suspend attempted while PIRQ %d bound to evtchn %d.\n",
   9.897 -                  pirq, evtchn);
   9.898 +	if (VALID_EVTCHN(evtchn))
   9.899 +		notify_remote_via_evtchn(evtchn);
   9.900  }
   9.901  
   9.902  void irq_resume(void)
   9.903  {
   9.904 -    evtchn_op_t op;
   9.905 -    int         virq, irq, evtchn;
   9.906 -    int cpu = smp_processor_id(); /* XXX */
   9.907 -
   9.908 -    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
   9.909 -        mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
   9.910 -
   9.911 -    for ( virq = 0; virq < NR_VIRQS; virq++ )
   9.912 -    {
   9.913 -        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
   9.914 -            continue;
   9.915 +	evtchn_op_t op;
   9.916 +	int         cpu, pirq, virq, ipi, irq, evtchn;
   9.917  
   9.918 -        /* Get a new binding from Xen. */
   9.919 -        op.cmd              = EVTCHNOP_bind_virq;
   9.920 -        op.u.bind_virq.virq = virq;
   9.921 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
   9.922 -            panic("Failed to bind virtual IRQ %d\n", virq);
   9.923 -        evtchn = op.u.bind_virq.port;
   9.924 +	init_evtchn_cpu_bindings();
   9.925 +
   9.926 +	/* New event-channel space is not 'live' yet. */
   9.927 +	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
   9.928 +		mask_evtchn(evtchn);
   9.929 +
   9.930 +	/* Check that no PIRQs are still bound. */
   9.931 +	for (pirq = 0; pirq < NR_PIRQS; pirq++)
   9.932 +		BUG_ON(irq_to_evtchn[pirq_to_irq(pirq)] != -1);
   9.933 +
   9.934 +	/* Secondary CPUs must have no VIRQ or IPI bindings. */
   9.935 +	for (cpu = 1; cpu < NR_CPUS; cpu++) {
   9.936 +		for (virq = 0; virq < NR_VIRQS; virq++)
   9.937 +			BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
   9.938 +		for (ipi = 0; ipi < NR_IPIS; ipi++)
   9.939 +			BUG_ON(per_cpu(ipi_to_evtchn, cpu)[ipi] != -1);
   9.940 +	}
   9.941 +
   9.942 +	/* No IRQ -> event-channel mappings. */
   9.943 +	for (irq = 0; irq < NR_IRQS; irq++)
   9.944 +		irq_to_evtchn[irq] = -1;
   9.945 +
   9.946 +	/* Primary CPU: rebind VIRQs automatically. */
   9.947 +	for (virq = 0; virq < NR_VIRQS; virq++) {
   9.948 +		if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
   9.949 +			continue;
   9.950 +
   9.951 +		/* Get a new binding from Xen. */
   9.952 +		op.cmd              = EVTCHNOP_bind_virq;
   9.953 +		op.u.bind_virq.virq = virq;
   9.954 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   9.955 +		evtchn = op.u.bind_virq.port;
   9.956          
   9.957 -        /* Record the new mapping. */
   9.958 -        bind_evtchn_to_cpu(evtchn, 0);
   9.959 -        evtchn_to_irq[evtchn] = irq;
   9.960 -        irq_to_evtchn[irq]    = evtchn;
   9.961 +		/* Record the new mapping. */
   9.962 +		evtchn_to_irq[evtchn] = irq;
   9.963 +		irq_to_evtchn[irq]    = evtchn;
   9.964  
   9.965 -        /* Ready for use. */
   9.966 -        unmask_evtchn(evtchn);
   9.967 -    }
   9.968 +		/* Ready for use. */
   9.969 +		unmask_evtchn(evtchn);
   9.970 +	}
   9.971 +
   9.972 +	/* Primary CPU: rebind IPIs automatically. */
   9.973 +	for (ipi = 0; ipi < NR_IPIS; ipi++) {
   9.974 +		if ((evtchn = per_cpu(ipi_to_evtchn, 0)[ipi]) == -1)
   9.975 +			continue;
   9.976 +
   9.977 +		irq = evtchn_to_irq[evtchn];
   9.978 +		evtchn_to_irq[evtchn] = -1;
   9.979 +
   9.980 +		/* Get a new binding from Xen. */
   9.981 +		op.cmd = EVTCHNOP_bind_ipi;
   9.982 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   9.983 +		evtchn = op.u.bind_ipi.port;
   9.984 +        
   9.985 +		/* Record the new mapping. */
   9.986 +		evtchn_to_irq[evtchn] = irq;
   9.987 +		irq_to_evtchn[irq]    = evtchn;
   9.988 +
   9.989 +		/* Ready for use. */
   9.990 +		unmask_evtchn(evtchn);
   9.991 +	}
   9.992 +
   9.993 +	/* Remove defunct event-channel -> IRQ mappings. */
   9.994 +	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) {
   9.995 +		if ((evtchn_to_irq[evtchn] != -1) &&
   9.996 +		    (irq_to_evtchn[evtchn_to_irq[evtchn]] == -1))
   9.997 +			evtchn_to_irq[evtchn] = -1;
   9.998 +	}
   9.999  }
  9.1000  
  9.1001  void __init init_IRQ(void)
  9.1002  {
  9.1003 -    int i;
  9.1004 -    int cpu;
  9.1005 -
  9.1006 -    irq_ctx_init(0);
  9.1007 -
  9.1008 -    spin_lock_init(&irq_mapping_update_lock);
  9.1009 -
  9.1010 -#ifdef CONFIG_SMP
  9.1011 -    /* By default all event channels notify CPU#0. */
  9.1012 -    memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
  9.1013 -#endif
  9.1014 -
  9.1015 -    for ( cpu = 0; cpu < NR_CPUS; cpu++ ) {
  9.1016 -        /* No VIRQ -> IRQ mappings. */
  9.1017 -        for ( i = 0; i < NR_VIRQS; i++ )
  9.1018 -            per_cpu(virq_to_irq, cpu)[i] = -1;
  9.1019 -    }
  9.1020 -
  9.1021 -    /* No event-channel -> IRQ mappings. */
  9.1022 -    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
  9.1023 -    {
  9.1024 -        evtchn_to_irq[i] = -1;
  9.1025 -        mask_evtchn(i); /* No event channels are 'live' right now. */
  9.1026 -    }
  9.1027 +	int i;
  9.1028 +	int cpu;
  9.1029  
  9.1030 -    /* No IRQ -> event-channel mappings. */
  9.1031 -    for ( i = 0; i < NR_IRQS; i++ )
  9.1032 -        irq_to_evtchn[i] = -1;
  9.1033 -
  9.1034 -    for ( i = 0; i < NR_DYNIRQS; i++ )
  9.1035 -    {
  9.1036 -        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
  9.1037 -        irq_bindcount[dynirq_to_irq(i)] = 0;
  9.1038 +	irq_ctx_init(0);
  9.1039  
  9.1040 -        irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
  9.1041 -        irq_desc[dynirq_to_irq(i)].action  = 0;
  9.1042 -        irq_desc[dynirq_to_irq(i)].depth   = 1;
  9.1043 -        irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
  9.1044 -    }
  9.1045 +	spin_lock_init(&irq_mapping_update_lock);
  9.1046  
  9.1047 -    for ( i = 0; i < NR_PIRQS; i++ )
  9.1048 -    {
  9.1049 -        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
  9.1050 -        irq_bindcount[pirq_to_irq(i)] = 1;
  9.1051 +	init_evtchn_cpu_bindings();
  9.1052  
  9.1053 -        irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
  9.1054 -        irq_desc[pirq_to_irq(i)].action  = 0;
  9.1055 -        irq_desc[pirq_to_irq(i)].depth   = 1;
  9.1056 -        irq_desc[pirq_to_irq(i)].handler = &pirq_type;
  9.1057 -    }
  9.1058 +	/* No VIRQ or IPI bindings. */
  9.1059 +	for (cpu = 0; cpu < NR_CPUS; cpu++) {
  9.1060 +		for (i = 0; i < NR_VIRQS; i++)
  9.1061 +			per_cpu(virq_to_irq, cpu)[i] = -1;
  9.1062 +		for (i = 0; i < NR_IPIS; i++)
  9.1063 +			per_cpu(ipi_to_evtchn, cpu)[i] = -1;
  9.1064 +	}
  9.1065 +
  9.1066 +	/* No event-channel -> IRQ mappings. */
  9.1067 +	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
  9.1068 +		evtchn_to_irq[i] = -1;
  9.1069 +		mask_evtchn(i); /* No event channels are 'live' right now. */
  9.1070 +	}
  9.1071 +
  9.1072 +	/* No IRQ -> event-channel mappings. */
  9.1073 +	for (i = 0; i < NR_IRQS; i++)
  9.1074 +		irq_to_evtchn[i] = -1;
  9.1075 +
  9.1076 +	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
  9.1077 +	for (i = 0; i < NR_DYNIRQS; i++) {
  9.1078 +		irq_bindcount[dynirq_to_irq(i)] = 0;
  9.1079 +
  9.1080 +		irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
  9.1081 +		irq_desc[dynirq_to_irq(i)].action  = 0;
  9.1082 +		irq_desc[dynirq_to_irq(i)].depth   = 1;
  9.1083 +		irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
  9.1084 +	}
  9.1085 +
  9.1086 +	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
  9.1087 +	for (i = 0; i < NR_PIRQS; i++)
  9.1088 +	{
  9.1089 +		irq_bindcount[pirq_to_irq(i)] = 1;
  9.1090 +
  9.1091 +		irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
  9.1092 +		irq_desc[pirq_to_irq(i)].action  = 0;
  9.1093 +		irq_desc[pirq_to_irq(i)].depth   = 1;
  9.1094 +		irq_desc[pirq_to_irq(i)].handler = &pirq_type;
  9.1095 +	}
  9.1096  }
  9.1097 +
  9.1098 +/*
  9.1099 + * Local variables:
  9.1100 + *  c-file-style: "linux"
  9.1101 + *  indent-tabs-mode: t
  9.1102 + *  c-indent-level: 8
  9.1103 + *  c-basic-offset: 8
  9.1104 + *  tab-width: 8
  9.1105 + * End:
  9.1106 + */
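
Besides the Linux-style reindentation, the evtchn.c rewrite above changes the handler-binding API: bind_evtchn_to_irqhandler() now returns the bound IRQ rather than the request_irq() status, and unbind_evtchn_from_irq()/unbind_evtchn_from_irqhandler() take that IRQ instead of the event channel, with the channel closed at unbind time. A sketch of a caller written against the new signatures; the example_* names are made up, and only the bind/unbind routines come from this file:

	static int example_irq = -1;

	static irqreturn_t example_interrupt(int irq, void *dev_id,
	                                     struct pt_regs *regs)
	{
		return IRQ_HANDLED;
	}

	static void example_connect(unsigned int evtchn)
	{
		/* Remember the IRQ; teardown is keyed off it, not the evtchn. */
		example_irq = bind_evtchn_to_irqhandler(
			evtchn, example_interrupt, SA_INTERRUPT, "example", NULL);
	}

	static void example_disconnect(void)
	{
		if (example_irq != -1)
			unbind_evtchn_from_irqhandler(example_irq, NULL);
		example_irq = -1;
	}
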
    10.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/fixup.c	Thu Sep 29 16:22:02 2005 -0600
    10.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/fixup.c	Thu Sep 29 17:28:44 2005 -0600
    10.3 @@ -37,51 +37,57 @@
    10.4  
    10.5  #define DP(_f, _args...) printk(KERN_ALERT "  " _f "\n" , ## _args )
    10.6  
    10.7 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
    10.8 -#define __LINKAGE fastcall
    10.9 -#else
   10.10 -#define __LINKAGE asmlinkage
   10.11 -#endif
   10.12 -
   10.13 -__LINKAGE void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
   10.14 +fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
   10.15  {
   10.16 -    static unsigned long printed = 0;
   10.17 -    char info[100];
   10.18 -    int i;
   10.19 -
   10.20 -    if ( !test_and_set_bit(0, &printed) )
   10.21 -    {
   10.22 -        HYPERVISOR_vm_assist(VMASST_CMD_disable,
   10.23 -			     VMASST_TYPE_4gb_segments_notify);
   10.24 -
   10.25 -        sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
   10.26 +	static unsigned long printed = 0;
   10.27 +	char info[100];
   10.28 +	int i;
   10.29  
   10.30 -        DP("");
   10.31 -        DP("***************************************************************");
   10.32 -        DP("***************************************************************");
   10.33 -        DP("** WARNING: Currently emulating unsupported memory accesses  **");
   10.34 -        DP("**          in /lib/tls libraries. The emulation is very     **");
   10.35 -        DP("**          slow. To ensure full performance you should      **");
   10.36 -        DP("**          execute the following as root:                   **");
   10.37 -        DP("**          mv /lib/tls /lib/tls.disabled                    **");
   10.38 -        DP("** Offending process: %-38.38s **", info);
   10.39 -        DP("***************************************************************");
   10.40 -        DP("***************************************************************");
   10.41 -        DP("");
   10.42 +	if (test_and_set_bit(0, &printed))
   10.43 +		return;
   10.44  
   10.45 -        for ( i = 5; i > 0; i-- )
   10.46 -        {
   10.47 -            printk("Pausing... %d", i);
   10.48 -            mdelay(1000);
   10.49 -            printk("\b\b\b\b\b\b\b\b\b\b\b\b");
   10.50 -        }
   10.51 -        printk("Continuing...\n\n");
   10.52 -    }
   10.53 +	HYPERVISOR_vm_assist(
   10.54 +		VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify);
   10.55 +
   10.56 +	sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
   10.57 +
   10.58 +
   10.59 +	DP("");
   10.60 +	DP("***************************************************************");
   10.61 +	DP("***************************************************************");
   10.62 +	DP("** WARNING: Currently emulating unsupported memory accesses  **");
   10.63 +	DP("**          in /lib/tls libraries. The emulation is very     **");
   10.64 +	DP("**          slow. To ensure full performance you should      **");
   10.65 +	DP("**          execute the following as root:                   **");
   10.66 +	DP("**          mv /lib/tls /lib/tls.disabled                    **");
   10.67 +	DP("** Offending process: %-38.38s **", info);
   10.68 +	DP("***************************************************************");
   10.69 +	DP("***************************************************************");
   10.70 +	DP("");
   10.71 +
   10.72 +	for (i = 5; i > 0; i--) {
   10.73 +		printk("Pausing... %d", i);
   10.74 +		mdelay(1000);
   10.75 +		printk("\b\b\b\b\b\b\b\b\b\b\b\b");
   10.76 +	}
   10.77 +
   10.78 +	printk("Continuing...\n\n");
   10.79  }
   10.80  
   10.81  static int __init fixup_init(void)
   10.82  {
   10.83 -    HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
   10.84 -    return 0;
   10.85 +	HYPERVISOR_vm_assist(
   10.86 +		VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
   10.87 +	return 0;
   10.88  }
   10.89  __initcall(fixup_init);
   10.90 +
   10.91 +/*
   10.92 + * Local variables:
   10.93 + *  c-file-style: "linux"
   10.94 + *  indent-tabs-mode: t
   10.95 + *  c-indent-level: 8
   10.96 + *  c-basic-offset: 8
   10.97 + *  tab-width: 8
   10.98 + * End:
   10.99 + */
    11.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/gnttab.c	Thu Sep 29 16:22:02 2005 -0600
    11.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/gnttab.c	Thu Sep 29 17:28:44 2005 -0600
    11.3 @@ -1,13 +1,10 @@
    11.4  /******************************************************************************
    11.5   * gnttab.c
    11.6   * 
    11.7 - * Two sets of functionality:
    11.8 - * 1. Granting foreign access to our memory reservation.
    11.9 - * 2. Accessing others' memory reservations via grant references.
   11.10 - * (i.e., mechanisms for both sender and recipient of grant references)
   11.11 + * Granting foreign access to our memory reservation.
   11.12   * 
   11.13   * Copyright (c) 2005, Christopher Clark
   11.14 - * Copyright (c) 2004, K A Fraser
   11.15 + * Copyright (c) 2004-2005, K A Fraser
   11.16   */
   11.17  
   11.18  #include <linux/config.h>
   11.19 @@ -23,15 +20,15 @@
   11.20  #include <asm/synch_bitops.h>
   11.21  
   11.22  #if 1
   11.23 -#define ASSERT(_p) \
   11.24 -    if ( !(_p) ) { printk(KERN_ALERT"Assertion '%s': line %d, file %s\n", \
   11.25 -    #_p , __LINE__, __FILE__); *(int*)0=0; }
   11.26 +#define ASSERT(_p)							      \
   11.27 +	if ( !(_p) ) { printk(KERN_ALERT"Assertion '%s': line %d, file %s\n", \
   11.28 +	#_p , __LINE__, __FILE__); *(int*)0=0; }
   11.29  #else
   11.30  #define ASSERT(_p) ((void)0)
   11.31  #endif
   11.32  
   11.33 -#define WPRINTK(fmt, args...) \
   11.34 -    printk(KERN_WARNING "xen_grant: " fmt, ##args)
   11.35 +#define WPRINTK(fmt, args...)				\
   11.36 +	printk(KERN_WARNING "xen_grant: " fmt, ##args)
   11.37  
   11.38  
   11.39  EXPORT_SYMBOL(gnttab_grant_foreign_access);
   11.40 @@ -49,11 +46,14 @@ EXPORT_SYMBOL(gnttab_release_grant_refer
   11.41  EXPORT_SYMBOL(gnttab_grant_foreign_access_ref);
   11.42  EXPORT_SYMBOL(gnttab_grant_foreign_transfer_ref);
   11.43  
   11.44 +/* External tools reserve first few grant table entries. */
   11.45 +#define NR_RESERVED_ENTRIES 8
   11.46 +
   11.47  #define NR_GRANT_ENTRIES (NR_GRANT_FRAMES * PAGE_SIZE / sizeof(grant_entry_t))
   11.48  #define GNTTAB_LIST_END (NR_GRANT_ENTRIES + 1)
   11.49  
   11.50  static grant_ref_t gnttab_list[NR_GRANT_ENTRIES];
   11.51 -static int gnttab_free_count = NR_GRANT_ENTRIES;
   11.52 +static int gnttab_free_count;
   11.53  static grant_ref_t gnttab_free_head;
   11.54  static spinlock_t gnttab_list_lock = SPIN_LOCK_UNLOCKED;
   11.55  
   11.56 @@ -64,22 +64,22 @@ static struct gnttab_free_callback *gntt
   11.57  static int
   11.58  get_free_entries(int count)
   11.59  {
   11.60 -    unsigned long flags;
   11.61 -    int ref;
   11.62 -    grant_ref_t head;
   11.63 -    spin_lock_irqsave(&gnttab_list_lock, flags);
   11.64 -    if (gnttab_free_count < count) {
   11.65 +	unsigned long flags;
   11.66 +	int ref;
   11.67 +	grant_ref_t head;
   11.68 +	spin_lock_irqsave(&gnttab_list_lock, flags);
   11.69 +	if (gnttab_free_count < count) {
   11.70 +		spin_unlock_irqrestore(&gnttab_list_lock, flags);
   11.71 +		return -1;
   11.72 +	}
   11.73 +	ref = head = gnttab_free_head;
   11.74 +	gnttab_free_count -= count;
   11.75 +	while (count-- > 1)
   11.76 +		head = gnttab_list[head];
   11.77 +	gnttab_free_head = gnttab_list[head];
   11.78 +	gnttab_list[head] = GNTTAB_LIST_END;
   11.79  	spin_unlock_irqrestore(&gnttab_list_lock, flags);
   11.80 -	return -1;
   11.81 -    }
   11.82 -    ref = head = gnttab_free_head;
   11.83 -    gnttab_free_count -= count;
   11.84 -    while (count-- > 1)
   11.85 -	head = gnttab_list[head];
   11.86 -    gnttab_free_head = gnttab_list[head];
   11.87 -    gnttab_list[head] = GNTTAB_LIST_END;
   11.88 -    spin_unlock_irqrestore(&gnttab_list_lock, flags);
   11.89 -    return ref;
   11.90 +	return ref;
   11.91  }
   11.92  
   11.93  #define get_free_entry() get_free_entries(1)
   11.94 @@ -87,38 +87,41 @@ get_free_entries(int count)
   11.95  static void
   11.96  do_free_callbacks(void)
   11.97  {
   11.98 -    struct gnttab_free_callback *callback = gnttab_free_callback_list, *next;
   11.99 -    gnttab_free_callback_list = NULL;
  11.100 -    while (callback) {
  11.101 -	next = callback->next;
  11.102 -	if (gnttab_free_count >= callback->count) {
  11.103 -	    callback->next = NULL;
  11.104 -	    callback->fn(callback->arg);
  11.105 -	} else {
  11.106 -	    callback->next = gnttab_free_callback_list;
  11.107 -	    gnttab_free_callback_list = callback;
  11.108 +	struct gnttab_free_callback *callback, *next;
  11.109 +
  11.110 +	callback = gnttab_free_callback_list;
  11.111 +	gnttab_free_callback_list = NULL;
  11.112 +
  11.113 +	while (callback != NULL) {
  11.114 +		next = callback->next;
  11.115 +		if (gnttab_free_count >= callback->count) {
  11.116 +			callback->next = NULL;
  11.117 +			callback->fn(callback->arg);
  11.118 +		} else {
  11.119 +			callback->next = gnttab_free_callback_list;
  11.120 +			gnttab_free_callback_list = callback;
  11.121 +		}
  11.122 +		callback = next;
  11.123  	}
  11.124 -	callback = next;
  11.125 -    }
  11.126  }
  11.127  
  11.128  static inline void
  11.129  check_free_callbacks(void)
  11.130  {
  11.131 -    if (unlikely(gnttab_free_callback_list))
  11.132 -	do_free_callbacks();
  11.133 +	if (unlikely(gnttab_free_callback_list))
  11.134 +		do_free_callbacks();
  11.135  }
  11.136  
  11.137  static void
  11.138  put_free_entry(grant_ref_t ref)
  11.139  {
  11.140 -    unsigned long flags;
  11.141 -    spin_lock_irqsave(&gnttab_list_lock, flags);
  11.142 -    gnttab_list[ref] = gnttab_free_head;
  11.143 -    gnttab_free_head = ref;
  11.144 -    gnttab_free_count++;
  11.145 -    check_free_callbacks();
  11.146 -    spin_unlock_irqrestore(&gnttab_list_lock, flags);
  11.147 +	unsigned long flags;
  11.148 +	spin_lock_irqsave(&gnttab_list_lock, flags);
  11.149 +	gnttab_list[ref] = gnttab_free_head;
  11.150 +	gnttab_free_head = ref;
  11.151 +	gnttab_free_count++;
  11.152 +	check_free_callbacks();
  11.153 +	spin_unlock_irqrestore(&gnttab_list_lock, flags);
  11.154  }
  11.155  
  11.156  /*
  11.157 @@ -128,187 +131,189 @@ put_free_entry(grant_ref_t ref)
  11.158  int
  11.159  gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly)
  11.160  {
  11.161 -    int ref;
  11.162 +	int ref;
  11.163      
  11.164 -    if ( unlikely((ref = get_free_entry()) == -1) )
  11.165 -        return -ENOSPC;
  11.166 +	if (unlikely((ref = get_free_entry()) == -1))
  11.167 +		return -ENOSPC;
  11.168  
  11.169 -    shared[ref].frame = frame;
  11.170 -    shared[ref].domid = domid;
  11.171 -    wmb();
  11.172 -    shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
  11.173 +	shared[ref].frame = frame;
  11.174 +	shared[ref].domid = domid;
  11.175 +	wmb();
  11.176 +	shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
  11.177  
  11.178 -    return ref;
  11.179 +	return ref;
  11.180  }
  11.181  
  11.182  void
  11.183  gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
  11.184  				unsigned long frame, int readonly)
  11.185  {
  11.186 -    shared[ref].frame = frame;
  11.187 -    shared[ref].domid = domid;
  11.188 -    wmb();
  11.189 -    shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
  11.190 +	shared[ref].frame = frame;
  11.191 +	shared[ref].domid = domid;
  11.192 +	wmb();
  11.193 +	shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
  11.194  }
  11.195  
  11.196  
  11.197  int
  11.198  gnttab_query_foreign_access(grant_ref_t ref)
  11.199  {
  11.200 -    u16 nflags;
  11.201 +	u16 nflags;
  11.202  
  11.203 -    nflags = shared[ref].flags;
  11.204 +	nflags = shared[ref].flags;
  11.205  
  11.206 -    return ( nflags & (GTF_reading|GTF_writing) );
  11.207 +	return (nflags & (GTF_reading|GTF_writing));
  11.208  }
  11.209  
  11.210  void
  11.211  gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
  11.212  {
  11.213 -    u16 flags, nflags;
  11.214 +	u16 flags, nflags;
  11.215  
  11.216 -    nflags = shared[ref].flags;
  11.217 -    do {
  11.218 -        if ( (flags = nflags) & (GTF_reading|GTF_writing) )
  11.219 -            printk(KERN_ALERT "WARNING: g.e. still in use!\n");
  11.220 -    }
  11.221 -    while ( (nflags = synch_cmpxchg(&shared[ref].flags, flags, 0)) != flags );
  11.222 +	nflags = shared[ref].flags;
  11.223 +	do {
  11.224 +		if ((flags = nflags) & (GTF_reading|GTF_writing))
  11.225 +			printk(KERN_ALERT "WARNING: g.e. still in use!\n");
  11.226 +	}
  11.227 +	while ((nflags = synch_cmpxchg(&shared[ref].flags, flags, 0)) !=
  11.228 +	       flags);
  11.229  }
  11.230  
  11.231  void
  11.232  gnttab_end_foreign_access(grant_ref_t ref, int readonly)
  11.233  {
  11.234 -    gnttab_end_foreign_access_ref(ref, readonly);
  11.235 -    put_free_entry(ref);
  11.236 +	gnttab_end_foreign_access_ref(ref, readonly);
  11.237 +	put_free_entry(ref);
  11.238  }
  11.239  
  11.240  int
  11.241  gnttab_grant_foreign_transfer(domid_t domid)
  11.242  {
  11.243 -    int ref;
  11.244 -
  11.245 -    if ( unlikely((ref = get_free_entry()) == -1) )
  11.246 -        return -ENOSPC;
  11.247 +	int ref;
  11.248  
  11.249 -    shared[ref].frame = 0;
  11.250 -    shared[ref].domid = domid;
  11.251 -    wmb();
  11.252 -    shared[ref].flags = GTF_accept_transfer;
  11.253 +	if (unlikely((ref = get_free_entry()) == -1))
  11.254 +		return -ENOSPC;
  11.255  
  11.256 -    return ref;
  11.257 +	shared[ref].frame = 0;
  11.258 +	shared[ref].domid = domid;
  11.259 +	wmb();
  11.260 +	shared[ref].flags = GTF_accept_transfer;
  11.261 +
  11.262 +	return ref;
  11.263  }
  11.264  
  11.265  void
  11.266  gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid)
  11.267  {
  11.268 -    shared[ref].frame = 0;
  11.269 -    shared[ref].domid = domid;
  11.270 -    wmb();
  11.271 -    shared[ref].flags = GTF_accept_transfer;
  11.272 +	shared[ref].frame = 0;
  11.273 +	shared[ref].domid = domid;
  11.274 +	wmb();
  11.275 +	shared[ref].flags = GTF_accept_transfer;
  11.276  }
  11.277  
  11.278  unsigned long
  11.279  gnttab_end_foreign_transfer_ref(grant_ref_t ref)
  11.280  {
  11.281 -    unsigned long frame = 0;
  11.282 -    u16           flags;
  11.283 -
  11.284 -    flags = shared[ref].flags;
  11.285 +	unsigned long frame = 0;
  11.286 +	u16           flags;
  11.287  
  11.288 -    /*
  11.289 -     * If a transfer is committed then wait for the frame address to appear.
  11.290 -     * Otherwise invalidate the grant entry against future use.
  11.291 -     */
  11.292 -    if ( likely(flags != GTF_accept_transfer) ||
  11.293 -         (synch_cmpxchg(&shared[ref].flags, flags, 0) != GTF_accept_transfer) )
  11.294 -        while ( unlikely((frame = shared[ref].frame) == 0) )
  11.295 -            cpu_relax();
  11.296 +	flags = shared[ref].flags;
  11.297  
  11.298 -    return frame;
  11.299 +	/*
  11.300 +	 * If a transfer is committed then wait for the frame address to
  11.301 +	 * appear. Otherwise invalidate the grant entry against future use.
  11.302 +	 */
  11.303 +	if (likely(flags != GTF_accept_transfer) ||
  11.304 +	    (synch_cmpxchg(&shared[ref].flags, flags, 0) !=
  11.305 +	     GTF_accept_transfer))
  11.306 +		while (unlikely((frame = shared[ref].frame) == 0))
  11.307 +			cpu_relax();
  11.308 +
  11.309 +	return frame;
  11.310  }
  11.311  
  11.312  unsigned long
  11.313  gnttab_end_foreign_transfer(grant_ref_t ref)
  11.314  {
  11.315 -    unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
  11.316 -    put_free_entry(ref);
  11.317 -    return frame;
  11.318 +	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
  11.319 +	put_free_entry(ref);
  11.320 +	return frame;
  11.321  }
  11.322  
  11.323  void
  11.324  gnttab_free_grant_reference(grant_ref_t ref)
  11.325  {
  11.326  
  11.327 -    put_free_entry(ref);
  11.328 +	put_free_entry(ref);
  11.329  }
  11.330  
  11.331  void
  11.332  gnttab_free_grant_references(grant_ref_t head)
  11.333  {
  11.334 -    grant_ref_t ref;
  11.335 -    unsigned long flags;
  11.336 -    int count = 1;
  11.337 -    if (head == GNTTAB_LIST_END)
  11.338 -	return;
  11.339 -    spin_lock_irqsave(&gnttab_list_lock, flags);
  11.340 -    ref = head;
  11.341 -    while (gnttab_list[ref] != GNTTAB_LIST_END) {
  11.342 -	ref = gnttab_list[ref];
  11.343 -	count++;
  11.344 -    }
  11.345 -    gnttab_list[ref] = gnttab_free_head;
  11.346 -    gnttab_free_head = head;
  11.347 -    gnttab_free_count += count;
  11.348 -    check_free_callbacks();
  11.349 -    spin_unlock_irqrestore(&gnttab_list_lock, flags);
  11.350 +	grant_ref_t ref;
  11.351 +	unsigned long flags;
  11.352 +	int count = 1;
  11.353 +	if (head == GNTTAB_LIST_END)
  11.354 +		return;
  11.355 +	spin_lock_irqsave(&gnttab_list_lock, flags);
  11.356 +	ref = head;
  11.357 +	while (gnttab_list[ref] != GNTTAB_LIST_END) {
  11.358 +		ref = gnttab_list[ref];
  11.359 +		count++;
  11.360 +	}
  11.361 +	gnttab_list[ref] = gnttab_free_head;
  11.362 +	gnttab_free_head = head;
  11.363 +	gnttab_free_count += count;
  11.364 +	check_free_callbacks();
  11.365 +	spin_unlock_irqrestore(&gnttab_list_lock, flags);
  11.366  }
  11.367  
  11.368  int
  11.369  gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
  11.370  {
  11.371 -    int h = get_free_entries(count);
  11.372 +	int h = get_free_entries(count);
  11.373  
  11.374 -    if (h == -1)
  11.375 -	return -ENOSPC;
  11.376 +	if (h == -1)
  11.377 +		return -ENOSPC;
  11.378  
  11.379 -    *head = h;
  11.380 +	*head = h;
  11.381  
  11.382 -    return 0;
  11.383 +	return 0;
  11.384  }
  11.385  
  11.386  int
  11.387  gnttab_claim_grant_reference(grant_ref_t *private_head)
  11.388  {
  11.389 -    grant_ref_t g = *private_head;
  11.390 -    if (unlikely(g == GNTTAB_LIST_END))
  11.391 -        return -ENOSPC;
  11.392 -    *private_head = gnttab_list[g];
  11.393 -    return g;
  11.394 +	grant_ref_t g = *private_head;
  11.395 +	if (unlikely(g == GNTTAB_LIST_END))
  11.396 +		return -ENOSPC;
  11.397 +	*private_head = gnttab_list[g];
  11.398 +	return g;
  11.399  }
  11.400  
  11.401  void
  11.402  gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t  release)
  11.403  {
  11.404 -    gnttab_list[release] = *private_head;
  11.405 -    *private_head = release;
  11.406 +	gnttab_list[release] = *private_head;
  11.407 +	*private_head = release;
  11.408  }
  11.409  
  11.410  void
  11.411  gnttab_request_free_callback(struct gnttab_free_callback *callback,
  11.412  			     void (*fn)(void *), void *arg, u16 count)
  11.413  {
  11.414 -    unsigned long flags;
  11.415 -    spin_lock_irqsave(&gnttab_list_lock, flags);
  11.416 -    if (callback->next)
  11.417 -	goto out;
  11.418 -    callback->fn = fn;
  11.419 -    callback->arg = arg;
  11.420 -    callback->count = count;
  11.421 -    callback->next = gnttab_free_callback_list;
  11.422 -    gnttab_free_callback_list = callback;
  11.423 -    check_free_callbacks();
  11.424 +	unsigned long flags;
  11.425 +	spin_lock_irqsave(&gnttab_list_lock, flags);
  11.426 +	if (callback->next)
  11.427 +		goto out;
  11.428 +	callback->fn = fn;
  11.429 +	callback->arg = arg;
  11.430 +	callback->count = count;
  11.431 +	callback->next = gnttab_free_callback_list;
  11.432 +	gnttab_free_callback_list = callback;
  11.433 +	check_free_callbacks();
  11.434   out:
  11.435 -    spin_unlock_irqrestore(&gnttab_list_lock, flags);
  11.436 +	spin_unlock_irqrestore(&gnttab_list_lock, flags);
  11.437  }
  11.438  
  11.439  /*
  11.440 @@ -323,79 +328,83 @@ static int
  11.441  grant_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
  11.442  	    unsigned long data)
  11.443  {
  11.444 -    int                     ret;
  11.445 -    privcmd_hypercall_t     hypercall;
  11.446 -
  11.447 -    /* XXX Need safety checks here if using for anything other
  11.448 -     *     than debugging */
  11.449 -    return -ENOSYS;
  11.450 -
  11.451 -    if ( cmd != IOCTL_PRIVCMD_HYPERCALL )
  11.452 -        return -ENOSYS;
  11.453 -
  11.454 -    if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
  11.455 -        return -EFAULT;
  11.456 +	int                     ret;
  11.457 +	privcmd_hypercall_t     hypercall;
  11.458  
  11.459 -    if ( hypercall.op != __HYPERVISOR_grant_table_op )
  11.460 -        return -ENOSYS;
  11.461 +	/*
  11.462 +	 * XXX Need safety checks here if using for anything other
  11.463 +	 *     than debugging.
  11.464 +	 */
  11.465 +	return -ENOSYS;
  11.466  
  11.467 -    /* hypercall-invoking asm taken from privcmd.c */
  11.468 -    __asm__ __volatile__ (
  11.469 -        "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
  11.470 -        "movl  4(%%eax),%%ebx ;"
  11.471 -        "movl  8(%%eax),%%ecx ;"
  11.472 -        "movl 12(%%eax),%%edx ;"
  11.473 -        "movl 16(%%eax),%%esi ;"
  11.474 -        "movl 20(%%eax),%%edi ;"
  11.475 -        "movl   (%%eax),%%eax ;"
  11.476 -        TRAP_INSTR "; "
  11.477 -        "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
  11.478 -        : "=a" (ret) : "0" (&hypercall) : "memory" );
  11.479 +	if (cmd != IOCTL_PRIVCMD_HYPERCALL)
  11.480 +		return -ENOSYS;
  11.481 +
  11.482 +	if (copy_from_user(&hypercall, (void *)data, sizeof(hypercall)))
  11.483 +		return -EFAULT;
  11.484 +
  11.485 +	if (hypercall.op != __HYPERVISOR_grant_table_op)
  11.486 +		return -ENOSYS;
  11.487 +		return -ENOSYS;
  11.488 +
  11.489 +	/* hypercall-invoking asm taken from privcmd.c */
  11.490 +	__asm__ __volatile__ (
  11.491 +		"pushl %%ebx; pushl %%ecx; pushl %%edx; "
  11.492 +		"pushl %%esi; pushl %%edi; "
  11.493 +		"movl  4(%%eax),%%ebx ;"
  11.494 +		"movl  8(%%eax),%%ecx ;"
  11.495 +		"movl 12(%%eax),%%edx ;"
  11.496 +		"movl 16(%%eax),%%esi ;"
  11.497 +		"movl 20(%%eax),%%edi ;"
  11.498 +		"movl   (%%eax),%%eax ;"
  11.499 +		TRAP_INSTR "; "
  11.500 +		"popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
  11.501 +		: "=a" (ret) : "0" (&hypercall) : "memory" );
  11.502 +
  11.503 +	return ret;
  11.504  }
  11.505  
  11.506  static struct file_operations grant_file_ops = {
  11.507 -    ioctl:  grant_ioctl,
  11.508 +	ioctl:  grant_ioctl,
  11.509  };
  11.510  
  11.511  static int
  11.512  grant_read(char *page, char **start, off_t off, int count, int *eof,
  11.513  	   void *data)
  11.514  {
  11.515 -    int             len;
  11.516 -    unsigned int    i;
  11.517 -    grant_entry_t  *gt;
  11.518 -
  11.519 -    gt = (grant_entry_t *)shared;
  11.520 -    len = 0;
  11.521 +	int             len;
  11.522 +	unsigned int    i;
  11.523 +	grant_entry_t  *gt;
  11.524  
  11.525 -    for ( i = 0; i < NR_GRANT_ENTRIES; i++ )
  11.526 -        /* TODO: safety catch here until this can handle >PAGE_SIZE output */
  11.527 -        if (len > (PAGE_SIZE - 200))
  11.528 -        {
  11.529 -            len += sprintf( page + len, "Truncated.\n");
  11.530 -            break;
  11.531 -        }
  11.532 +	gt = (grant_entry_t *)shared;
  11.533 +	len = 0;
  11.534  
  11.535 -        if ( gt[i].flags )
  11.536 -            len += sprintf( page + len,
  11.537 -                    "Grant: ref (0x%x) flags (0x%hx) dom (0x%hx) frame (0x%x)\n", 
  11.538 -                    i,
  11.539 -                    gt[i].flags,
  11.540 -                    gt[i].domid,
  11.541 -                    gt[i].frame );
  11.542 +	for (i = 0; i < NR_GRANT_ENTRIES; i++) {
  11.543 +		if (len > (PAGE_SIZE - 200)) {
  11.544 +			len += sprintf(page + len, "Truncated.\n");
  11.545 +			break;
  11.546 +		}
  11.547 +
  11.548 +		if (gt[i].flags)
  11.549 +			len += sprintf(page + len,
  11.550 +				       "Grant: ref (0x%x) flags (0x%hx) "
  11.551 +				       "dom (0x%hx) frame (0x%x)\n",
  11.552 +				       i,
  11.553 +				       gt[i].flags,
  11.554 +				       gt[i].domid,
  11.555 +				       gt[i].frame);
  11.556 +	}
  11.557 
  11.558 -    *eof = 1;
  11.559 -    return len;
  11.560 +
  11.561 +	*eof = 1;
  11.562 +	return len;
  11.563  }
  11.564  
  11.565  static int
  11.566  grant_write(struct file *file, const char __user *buffer, unsigned long count,
  11.567  	    void *data)
  11.568  {
  11.569 -    /* TODO: implement this */
  11.570 -    return -ENOSYS;
  11.571 +	/* TODO: implement this */
  11.572 +	return -ENOSYS;
  11.573  }
  11.574  
  11.575  #endif /* CONFIG_PROC_FS */
  11.576 @@ -403,70 +412,81 @@ grant_write(struct file *file, const cha
  11.577  int
  11.578  gnttab_resume(void)
  11.579  {
  11.580 -    gnttab_setup_table_t setup;
  11.581 -    unsigned long        frames[NR_GRANT_FRAMES];
  11.582 -    int                  i;
  11.583 +	gnttab_setup_table_t setup;
  11.584 +	unsigned long        frames[NR_GRANT_FRAMES];
  11.585 +	int                  i;
  11.586  
  11.587 -    setup.dom        = DOMID_SELF;
  11.588 -    setup.nr_frames  = NR_GRANT_FRAMES;
  11.589 -    setup.frame_list = frames;
  11.590 +	setup.dom        = DOMID_SELF;
  11.591 +	setup.nr_frames  = NR_GRANT_FRAMES;
  11.592 +	setup.frame_list = frames;
  11.593  
  11.594 -    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1) != 0);
  11.595 -    BUG_ON(setup.status != 0);
  11.596 +	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1));
  11.597 +	BUG_ON(setup.status != 0);
  11.598  
  11.599 -    for ( i = 0; i < NR_GRANT_FRAMES; i++ )
  11.600 -        set_fixmap(FIX_GNTTAB_END - i, frames[i] << PAGE_SHIFT);
  11.601 +	for (i = 0; i < NR_GRANT_FRAMES; i++)
  11.602 +		set_fixmap(FIX_GNTTAB_END - i, frames[i] << PAGE_SHIFT);
  11.603  
  11.604 -    return 0;
  11.605 +	return 0;
  11.606  }
  11.607  
  11.608  int
  11.609  gnttab_suspend(void)
  11.610  {
  11.611 -    int i;
  11.612 +	int i;
  11.613  
  11.614 -    for ( i = 0; i < NR_GRANT_FRAMES; i++ )
  11.615 -	clear_fixmap(FIX_GNTTAB_END - i);
  11.616 +	for (i = 0; i < NR_GRANT_FRAMES; i++)
  11.617 +		clear_fixmap(FIX_GNTTAB_END - i);
  11.618  
  11.619 -    return 0;
  11.620 +	return 0;
  11.621  }
  11.622  
  11.623  static int __init
  11.624  gnttab_init(void)
  11.625  {
  11.626 -    int i;
  11.627 -
  11.628 -    if (xen_init() < 0)
  11.629 -        return -ENODEV;
  11.630 +	int i;
  11.631  
  11.632 -    BUG_ON(gnttab_resume());
  11.633 +	if (xen_init() < 0)
  11.634 +		return -ENODEV;
  11.635  
  11.636 -    shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END);
  11.637 +	BUG_ON(gnttab_resume());
  11.638  
  11.639 -    for ( i = 0; i < NR_GRANT_ENTRIES; i++ )
  11.640 -        gnttab_list[i] = i + 1;
  11.641 -    
  11.642 +	shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END);
  11.643 +
  11.644 +	for (i = NR_RESERVED_ENTRIES; i < NR_GRANT_ENTRIES; i++)
  11.645 +		gnttab_list[i] = i + 1;
  11.646 +	gnttab_free_count = NR_GRANT_ENTRIES - NR_RESERVED_ENTRIES;
  11.647 +	gnttab_free_head  = NR_RESERVED_ENTRIES;
  11.648 +
  11.649  #ifdef CONFIG_PROC_FS
  11.650 -    /*
  11.651 -     *  /proc/xen/grant : used by libxc to access grant tables
  11.652 -     */
  11.653 -    if ( (grant_pde = create_xen_proc_entry("grant", 0600)) == NULL )
  11.654 -    {
  11.655 -        WPRINTK("Unable to create grant xen proc entry\n");
  11.656 -        return -1;
  11.657 -    }
  11.658 +	/*
  11.659 +	 *  /proc/xen/grant : used by libxc to access grant tables
  11.660 +	 */
  11.661 +	if ((grant_pde = create_xen_proc_entry("grant", 0600)) == NULL) {
  11.662 +		WPRINTK("Unable to create grant xen proc entry\n");
  11.663 +		return -1;
  11.664 +	}
  11.665  
  11.666 -    grant_file_ops.read   = grant_pde->proc_fops->read;
  11.667 -    grant_file_ops.write  = grant_pde->proc_fops->write;
  11.668 +	grant_file_ops.read   = grant_pde->proc_fops->read;
  11.669 +	grant_file_ops.write  = grant_pde->proc_fops->write;
  11.670  
  11.671 -    grant_pde->proc_fops  = &grant_file_ops;
  11.672 +	grant_pde->proc_fops  = &grant_file_ops;
  11.673  
  11.674 -    grant_pde->read_proc  = &grant_read;
  11.675 -    grant_pde->write_proc = &grant_write;
  11.676 +	grant_pde->read_proc  = &grant_read;
  11.677 +	grant_pde->write_proc = &grant_write;
  11.678  #endif
  11.679  
  11.680 -    printk("Grant table initialized\n");
  11.681 -    return 0;
  11.682 +	printk("Grant table initialized\n");
  11.683 +	return 0;
  11.684  }
  11.685  
  11.686  __initcall(gnttab_init);
  11.687 +
  11.688 +/*
  11.689 + * Local variables:
  11.690 + *  c-file-style: "linux"
  11.691 + *  indent-tabs-mode: t
  11.692 + *  c-indent-level: 8
  11.693 + *  c-basic-offset: 8
  11.694 + *  tab-width: 8
  11.695 + * End:
  11.696 + */
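
Usage reference (not part of the changeset): gnttab.c above now reserves the first NR_RESERVED_ENTRIES grant references for external tools and exports the granting API. A minimal sketch of a frontend sharing one page read-only with a backend domain follows; my_share_page, backend_domid and page are placeholders, and the exact header/include details are assumed from this tree's layout.

#include <asm-xen/gnttab.h>

static int my_share_page(domid_t backend_domid, void *page)
{
	/* Returns a grant reference, or -ENOSPC if no entries are free. */
	int ref = gnttab_grant_foreign_access(backend_domid,
					      virt_to_mfn(page), 1);
	if (ref < 0)
		return ref;

	/* ... advertise 'ref' to the backend, e.g. over xenbus ... */

	/* Once the backend has unmapped the page, revoke and recycle. */
	gnttab_end_foreign_access(ref, 1);
	return 0;
}
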
    12.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c	Thu Sep 29 16:22:02 2005 -0600
    12.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c	Thu Sep 29 17:28:44 2005 -0600
    12.3 @@ -12,7 +12,6 @@
    12.4  #include <asm-xen/evtchn.h>
    12.5  #include <asm/hypervisor.h>
    12.6  #include <asm-xen/xen-public/dom0_ops.h>
    12.7 -#include <asm-xen/queues.h>
    12.8  #include <asm-xen/xenbus.h>
    12.9  #include <linux/cpu.h>
   12.10  #include <linux/kthread.h>
   12.11 @@ -43,12 +42,10 @@ void machine_power_off(void)
   12.12  	HYPERVISOR_shutdown();
   12.13  }
   12.14  
   12.15 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
   12.16  int reboot_thru_bios = 0;	/* for dmi_scan.c */
   12.17  EXPORT_SYMBOL(machine_restart);
   12.18  EXPORT_SYMBOL(machine_halt);
   12.19  EXPORT_SYMBOL(machine_power_off);
   12.20 -#endif
   12.21  
   12.22  
   12.23  /******************************************************************************
   12.24 @@ -66,227 +63,194 @@ static int shutting_down = SHUTDOWN_INVA
   12.25  
   12.26  static int __do_suspend(void *ignore)
   12.27  {
   12.28 -    int i, j, k, fpp;
   12.29 -
   12.30 -#ifdef CONFIG_XEN_USB_FRONTEND
   12.31 -    extern void usbif_resume();
   12.32 -#else
   12.33 -#define usbif_resume() do{}while(0)
   12.34 -#endif
   12.35 -
   12.36 -    extern int gnttab_suspend(void);
   12.37 -    extern int gnttab_resume(void);
   12.38 -
   12.39 -    extern void time_suspend(void);
   12.40 -    extern void time_resume(void);
   12.41 -    extern unsigned long max_pfn;
   12.42 -    extern unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[];
   12.43 -
   12.44 -#ifdef CONFIG_SMP
   12.45 -    extern void smp_suspend(void);
   12.46 -    extern void smp_resume(void);
   12.47 -
   12.48 -    static vcpu_guest_context_t suspended_cpu_records[NR_CPUS];
   12.49 -    cpumask_t prev_online_cpus, prev_present_cpus;
   12.50 -
   12.51 -    void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
   12.52 -    int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
   12.53 -#endif
   12.54 -
   12.55 -    extern void xencons_suspend(void);
   12.56 -    extern void xencons_resume(void);
   12.57 -
   12.58 -    int err = 0;
   12.59 -
   12.60 -    BUG_ON(smp_processor_id() != 0);
   12.61 -    BUG_ON(in_interrupt());
   12.62 +	int i, j, k, fpp;
   12.63  
   12.64 -#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
   12.65 -    if (num_online_cpus() > 1) {
   12.66 -	printk(KERN_WARNING 
   12.67 -               "Can't suspend SMP guests without CONFIG_HOTPLUG_CPU\n");
   12.68 -	return -EOPNOTSUPP;
   12.69 -    }
   12.70 -#endif
   12.71 +	extern int gnttab_suspend(void);
   12.72 +	extern int gnttab_resume(void);
   12.73  
   12.74 -    preempt_disable();
   12.75 -#ifdef CONFIG_SMP
   12.76 -    /* Take all of the other cpus offline.  We need to be careful not
   12.77 -       to get preempted between the final test for num_online_cpus()
   12.78 -       == 1 and disabling interrupts, since otherwise userspace could
   12.79 -       bring another cpu online, and then we'd be stuffed.  At the
   12.80 -       same time, cpu_down can reschedule, so we need to enable
   12.81 -       preemption while doing that.  This kind of sucks, but should be
   12.82 -       correct. */
   12.83 -    /* (We don't need to worry about other cpus bringing stuff up,
   12.84 -       since by the time num_online_cpus() == 1, there aren't any
   12.85 -       other cpus) */
   12.86 -    cpus_clear(prev_online_cpus);
   12.87 -    while (num_online_cpus() > 1) {
   12.88 -	preempt_enable();
   12.89 -	for_each_online_cpu(i) {
   12.90 -	    if (i == 0)
   12.91 -		continue;
   12.92 -	    err = cpu_down(i);
   12.93 -	    if (err != 0) {
   12.94 -		printk(KERN_CRIT "Failed to take all CPUs down: %d.\n", err);
   12.95 -		goto out_reenable_cpus;
   12.96 -	    }
   12.97 -	    cpu_set(i, prev_online_cpus);
   12.98 -	}
   12.99 -	preempt_disable();
  12.100 -    }
  12.101 -#endif
  12.102 -
  12.103 -    __cli();
  12.104 -
  12.105 -    preempt_enable();
  12.106 +	extern void time_resume(void);
  12.107 +	extern unsigned long max_pfn;
  12.108 +	extern unsigned long *pfn_to_mfn_frame_list_list;
  12.109 +	extern unsigned long *pfn_to_mfn_frame_list[];
  12.110  
  12.111  #ifdef CONFIG_SMP
  12.112 -    cpus_clear(prev_present_cpus);
  12.113 -    for_each_present_cpu(i) {
  12.114 -	if (i == 0)
  12.115 -	    continue;
  12.116 -	save_vcpu_context(i, &suspended_cpu_records[i]);
  12.117 -	cpu_set(i, prev_present_cpus);
  12.118 -    }
  12.119 -#endif
  12.120 +	static vcpu_guest_context_t suspended_cpu_records[NR_CPUS];
  12.121 +	cpumask_t prev_online_cpus, prev_present_cpus;
  12.122  
  12.123 -#ifdef __i386__
  12.124 -    mm_pin_all();
  12.125 -    kmem_cache_shrink(pgd_cache);
  12.126 -#endif
  12.127 -
  12.128 -    time_suspend();
  12.129 -
  12.130 -#ifdef CONFIG_SMP
  12.131 -    smp_suspend();
  12.132 +	void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
  12.133 +	int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
  12.134  #endif
  12.135  
  12.136 -    xenbus_suspend();
  12.137 -
  12.138 -    xencons_suspend();
  12.139 -
  12.140 -    irq_suspend();
  12.141 -
  12.142 -    gnttab_suspend();
  12.143 -
  12.144 -    HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
  12.145 -    clear_fixmap(FIX_SHARED_INFO);
  12.146 +	extern void xencons_resume(void);
  12.147  
  12.148 -    xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
  12.149 -    xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);
  12.150 -
  12.151 -    /* We'll stop somewhere inside this hypercall.  When it returns,
  12.152 -       we'll start resuming after the restore. */
  12.153 -    HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
  12.154 +	int err = 0;
  12.155  
  12.156 -    shutting_down = SHUTDOWN_INVALID; 
  12.157 -
  12.158 -    set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
  12.159 +	BUG_ON(smp_processor_id() != 0);
  12.160 +	BUG_ON(in_interrupt());
  12.161  
  12.162 -    HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
  12.163 +#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
  12.164 +	if (num_online_cpus() > 1) {
  12.165 +		printk(KERN_WARNING "Can't suspend SMP guests "
  12.166 +		       "without CONFIG_HOTPLUG_CPU\n");
  12.167 +		return -EOPNOTSUPP;
  12.168 +	}
  12.169 +#endif
  12.170  
  12.171 -    memset(empty_zero_page, 0, PAGE_SIZE);
  12.172 +	xenbus_suspend();
  12.173 +
  12.174 +	preempt_disable();
  12.175 +#ifdef CONFIG_SMP
  12.176 +	/* Take all of the other cpus offline.  We need to be careful not
  12.177 +	   to get preempted between the final test for num_online_cpus()
  12.178 +	   == 1 and disabling interrupts, since otherwise userspace could
  12.179 +	   bring another cpu online, and then we'd be stuffed.  At the
  12.180 +	   same time, cpu_down can reschedule, so we need to enable
  12.181 +	   preemption while doing that.  This kind of sucks, but should be
  12.182 +	   correct. */
  12.183 +	/* (We don't need to worry about other cpus bringing stuff up,
  12.184 +	   since by the time num_online_cpus() == 1, there aren't any
  12.185 +	   other cpus) */
  12.186 +	cpus_clear(prev_online_cpus);
  12.187 +	while (num_online_cpus() > 1) {
  12.188 +		preempt_enable();
  12.189 +		for_each_online_cpu(i) {
  12.190 +			if (i == 0)
  12.191 +				continue;
  12.192 +			err = cpu_down(i);
  12.193 +			if (err != 0) {
  12.194 +				printk(KERN_CRIT "Failed to take all CPUs "
  12.195 +				       "down: %d.\n", err);
  12.196 +				goto out_reenable_cpus;
  12.197 +			}
  12.198 +			cpu_set(i, prev_online_cpus);
  12.199 +		}
  12.200 +		preempt_disable();
  12.201 +	}
  12.202 +#endif
  12.203 +
  12.204 +	__cli();
  12.205 +
  12.206 +	preempt_enable();
  12.207 +
  12.208 +#ifdef CONFIG_SMP
  12.209 +	cpus_clear(prev_present_cpus);
  12.210 +	for_each_present_cpu(i) {
  12.211 +		if (i == 0)
  12.212 +			continue;
  12.213 +		save_vcpu_context(i, &suspended_cpu_records[i]);
  12.214 +		cpu_set(i, prev_present_cpus);
  12.215 +	}
  12.216 +#endif
  12.217 +
  12.218 +	gnttab_suspend();
  12.219 +
  12.220 +#ifdef __i386__
  12.221 +	mm_pin_all();
  12.222 +	kmem_cache_shrink(pgd_cache);
  12.223 +#endif
  12.224 +
  12.225 +	HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
  12.226 +	clear_fixmap(FIX_SHARED_INFO);
  12.227 +
  12.228 +	xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
  12.229 +	xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);
  12.230 +
  12.231 +	/* We'll stop somewhere inside this hypercall.  When it returns,
  12.232 +	   we'll start resuming after the restore. */
  12.233 +	HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
  12.234 +
  12.235 +	shutting_down = SHUTDOWN_INVALID; 
  12.236 +
  12.237 +	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
  12.238 +
  12.239 +	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
  12.240 +
  12.241 +	memset(empty_zero_page, 0, PAGE_SIZE);
  12.242  	     
  12.243 -    HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
  12.244 +	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
  12.245  		virt_to_mfn(pfn_to_mfn_frame_list_list);
  12.246    
  12.247 -    fpp = PAGE_SIZE/sizeof(unsigned long);
  12.248 -    for ( i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++ )
  12.249 -    {
  12.250 -	if ( (j % fpp) == 0 )
  12.251 -	{
  12.252 -	    k++;
  12.253 -	    pfn_to_mfn_frame_list_list[k] = 
  12.254 -		    virt_to_mfn(pfn_to_mfn_frame_list[k]);
  12.255 -	    j=0;
  12.256 +	fpp = PAGE_SIZE/sizeof(unsigned long);
  12.257 +	for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
  12.258 +		if ((j % fpp) == 0) {
  12.259 +			k++;
  12.260 +			pfn_to_mfn_frame_list_list[k] = 
  12.261 +				virt_to_mfn(pfn_to_mfn_frame_list[k]);
  12.262 +			j = 0;
  12.263 +		}
  12.264 +		pfn_to_mfn_frame_list[k][j] = 
  12.265 +			virt_to_mfn(&phys_to_machine_mapping[i]);
  12.266  	}
  12.267 -	pfn_to_mfn_frame_list[k][j] = 
  12.268 -		virt_to_mfn(&phys_to_machine_mapping[i]);
  12.269 -    }
  12.270 -    HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
  12.271 +	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
  12.272  
  12.273 -    gnttab_resume();
  12.274 +	gnttab_resume();
  12.275  
  12.276 -    irq_resume();
  12.277 +	irq_resume();
  12.278  
  12.279 -    xencons_resume();
  12.280 -
  12.281 -    xenbus_resume();
  12.282 +	time_resume();
  12.283  
  12.284  #ifdef CONFIG_SMP
  12.285 -    smp_resume();
  12.286 +	for_each_cpu_mask(i, prev_present_cpus)
  12.287 +		restore_vcpu_context(i, &suspended_cpu_records[i]);
  12.288  #endif
  12.289  
  12.290 -    time_resume();
  12.291 -
  12.292 -    usbif_resume();
  12.293 +	__sti();
  12.294  
  12.295 -#ifdef CONFIG_SMP
  12.296 -    for_each_cpu_mask(i, prev_present_cpus)
  12.297 -	restore_vcpu_context(i, &suspended_cpu_records[i]);
  12.298 -#endif
  12.299 +	xencons_resume();
  12.300  
  12.301 -    __sti();
  12.302 +	xenbus_resume();
  12.303  
  12.304  #ifdef CONFIG_SMP
  12.305   out_reenable_cpus:
  12.306 -    for_each_cpu_mask(i, prev_online_cpus) {
  12.307 -	j = cpu_up(i);
  12.308 -	if (j != 0) {
  12.309 -	    printk(KERN_CRIT "Failed to bring cpu %d back up (%d).\n",
  12.310 -		   i, j);
  12.311 -	    err = j;
  12.312 +	for_each_cpu_mask(i, prev_online_cpus) {
  12.313 +		j = cpu_up(i);
  12.314 +		if (j != 0) {
  12.315 +			printk(KERN_CRIT "Failed to bring cpu "
  12.316 +			       "%d back up (%d).\n",
  12.317 +			       i, j);
  12.318 +			err = j;
  12.319 +		}
  12.320  	}
  12.321 -    }
  12.322  #endif
  12.323  
  12.324 -    return err;
  12.325 +	return err;
  12.326  }
  12.327  
  12.328  static int shutdown_process(void *__unused)
  12.329  {
  12.330 -    static char *envp[] = { "HOME=/", "TERM=linux", 
  12.331 -                            "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
  12.332 -    static char *restart_argv[]  = { "/sbin/reboot", NULL };
  12.333 -    static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
  12.334 -
  12.335 -    extern asmlinkage long sys_reboot(int magic1, int magic2,
  12.336 -                                      unsigned int cmd, void *arg);
  12.337 -
  12.338 -    daemonize(
  12.339 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  12.340 -        "shutdown"
  12.341 -#endif
  12.342 -        );
  12.343 +	static char *envp[] = { "HOME=/", "TERM=linux", 
  12.344 +				"PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
  12.345 +	static char *restart_argv[]  = { "/sbin/reboot", NULL };
  12.346 +	static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
  12.347  
  12.348 -    switch ( shutting_down )
  12.349 -    {
  12.350 -    case SHUTDOWN_POWEROFF:
  12.351 -        if ( execve("/sbin/poweroff", poweroff_argv, envp) < 0 )
  12.352 -        {
  12.353 -            sys_reboot(LINUX_REBOOT_MAGIC1,
  12.354 -                       LINUX_REBOOT_MAGIC2,
  12.355 -                       LINUX_REBOOT_CMD_POWER_OFF,
  12.356 -                       NULL);
  12.357 -        }
  12.358 -        break;
  12.359 +	extern asmlinkage long sys_reboot(int magic1, int magic2,
  12.360 +					  unsigned int cmd, void *arg);
  12.361  
  12.362 -    case SHUTDOWN_REBOOT:
  12.363 -        if ( execve("/sbin/reboot", restart_argv, envp) < 0 )
  12.364 -        {
  12.365 -            sys_reboot(LINUX_REBOOT_MAGIC1,
  12.366 -                       LINUX_REBOOT_MAGIC2,
  12.367 -                       LINUX_REBOOT_CMD_RESTART,
  12.368 -                       NULL);
  12.369 -        }
  12.370 -        break;
  12.371 -    }
  12.372 +	daemonize("shutdown");
  12.373  
  12.374 -    shutting_down = SHUTDOWN_INVALID; /* could try again */
  12.375 +	switch (shutting_down) {
  12.376 +	case SHUTDOWN_POWEROFF:
  12.377 +		if (execve("/sbin/poweroff", poweroff_argv, envp) < 0) {
  12.378 +			sys_reboot(LINUX_REBOOT_MAGIC1,
  12.379 +				   LINUX_REBOOT_MAGIC2,
  12.380 +				   LINUX_REBOOT_CMD_POWER_OFF,
  12.381 +				   NULL);
  12.382 +		}
  12.383 +		break;
  12.384  
  12.385 -    return 0;
  12.386 +	case SHUTDOWN_REBOOT:
  12.387 +		if (execve("/sbin/reboot", restart_argv, envp) < 0) {
  12.388 +			sys_reboot(LINUX_REBOOT_MAGIC1,
  12.389 +				   LINUX_REBOOT_MAGIC2,
  12.390 +				   LINUX_REBOOT_CMD_RESTART,
  12.391 +				   NULL);
  12.392 +		}
  12.393 +		break;
  12.394 +	}
  12.395 +
  12.396 +	shutting_down = SHUTDOWN_INVALID; /* could try again */
  12.397 +
  12.398 +	return 0;
  12.399  }
  12.400  
  12.401  static struct task_struct *kthread_create_on_cpu(int (*f)(void *arg),
  12.402 @@ -294,113 +258,109 @@ static struct task_struct *kthread_creat
  12.403  						 const char *name,
  12.404  						 int cpu)
  12.405  {
  12.406 -    struct task_struct *p;
  12.407 -    p = kthread_create(f, arg, name);
  12.408 -    kthread_bind(p, cpu);
  12.409 -    wake_up_process(p);
  12.410 -    return p;
  12.411 +	struct task_struct *p;
  12.412 +	p = kthread_create(f, arg, name);
  12.413 +	kthread_bind(p, cpu);
  12.414 +	wake_up_process(p);
  12.415 +	return p;
  12.416  }
  12.417  
  12.418  static void __shutdown_handler(void *unused)
  12.419  {
  12.420 -    int err;
  12.421 +	int err;
  12.422  
  12.423 -    if ( shutting_down != SHUTDOWN_SUSPEND )
  12.424 -    {
  12.425 -        err = kernel_thread(shutdown_process, NULL, CLONE_FS | CLONE_FILES);
  12.426 -        if ( err < 0 )
  12.427 -            printk(KERN_ALERT "Error creating shutdown process!\n");
  12.428 -    }
  12.429 -    else
  12.430 -    {
  12.431 -	kthread_create_on_cpu(__do_suspend, NULL, "suspender", 0);
  12.432 -    }
  12.433 +	if (shutting_down != SHUTDOWN_SUSPEND) {
  12.434 +		err = kernel_thread(shutdown_process, NULL,
  12.435 +				    CLONE_FS | CLONE_FILES);
  12.436 +		if (err < 0)
  12.437 +			printk(KERN_ALERT "Error creating shutdown "
  12.438 +			       "process!\n");
  12.439 +	} else {
  12.440 +		kthread_create_on_cpu(__do_suspend, NULL, "suspender", 0);
  12.441 +	}
  12.442  }
  12.443  
  12.444  static void shutdown_handler(struct xenbus_watch *watch, const char *node)
  12.445  {
  12.446 -    static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
  12.447 -    char *str;
  12.448 -    int err;
  12.449 +	static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
  12.450 +	char *str;
  12.451 +	int err;
  12.452  
  12.453   again:
  12.454 -    err = xenbus_transaction_start();
  12.455 -    if (err)
  12.456 -	return;
  12.457 -    str = (char *)xenbus_read("control", "shutdown", NULL);
  12.458 -    /* Ignore read errors and empty reads. */
  12.459 -    if (XENBUS_IS_ERR_READ(str)) {
  12.460 -	xenbus_transaction_end(1);
  12.461 -	return;
  12.462 -    }
  12.463 +	err = xenbus_transaction_start();
  12.464 +	if (err)
  12.465 +		return;
  12.466 +	str = (char *)xenbus_read("control", "shutdown", NULL);
  12.467 +	/* Ignore read errors and empty reads. */
  12.468 +	if (XENBUS_IS_ERR_READ(str)) {
  12.469 +		xenbus_transaction_end(1);
  12.470 +		return;
  12.471 +	}
  12.472  
  12.473 -    xenbus_write("control", "shutdown", "");
  12.474 +	xenbus_write("control", "shutdown", "");
  12.475  
  12.476 -    err = xenbus_transaction_end(0);
  12.477 -    if (err == -EAGAIN) {
  12.478 +	err = xenbus_transaction_end(0);
  12.479 +	if (err == -EAGAIN) {
  12.480 +		kfree(str);
  12.481 +		goto again;
  12.482 +	}
  12.483 +
  12.484 +	if (strcmp(str, "poweroff") == 0)
  12.485 +		shutting_down = SHUTDOWN_POWEROFF;
  12.486 +	else if (strcmp(str, "reboot") == 0)
  12.487 +		shutting_down = SHUTDOWN_REBOOT;
  12.488 +	else if (strcmp(str, "suspend") == 0)
  12.489 +		shutting_down = SHUTDOWN_SUSPEND;
  12.490 +	else {
  12.491 +		printk("Ignoring shutdown request: %s\n", str);
  12.492 +		shutting_down = SHUTDOWN_INVALID;
  12.493 +	}
  12.494 +
  12.495  	kfree(str);
  12.496 -	goto again;
  12.497 -    }
  12.498  
  12.499 -    if (strcmp(str, "poweroff") == 0)
  12.500 -        shutting_down = SHUTDOWN_POWEROFF;
  12.501 -    else if (strcmp(str, "reboot") == 0)
  12.502 -        shutting_down = SHUTDOWN_REBOOT;
  12.503 -    else if (strcmp(str, "suspend") == 0)
  12.504 -        shutting_down = SHUTDOWN_SUSPEND;
  12.505 -    else {
  12.506 -        printk("Ignoring shutdown request: %s\n", str);
  12.507 -        shutting_down = SHUTDOWN_INVALID;
  12.508 -    }
  12.509 -
  12.510 -    kfree(str);
  12.511 -
  12.512 -    if (shutting_down != SHUTDOWN_INVALID)
  12.513 -        schedule_work(&shutdown_work);
  12.514 +	if (shutting_down != SHUTDOWN_INVALID)
  12.515 +		schedule_work(&shutdown_work);
  12.516  }
  12.517  
  12.518  #ifdef CONFIG_MAGIC_SYSRQ
  12.519  static void sysrq_handler(struct xenbus_watch *watch, const char *node)
  12.520  {
  12.521 -    char sysrq_key = '\0';
  12.522 -    int err;
  12.523 +	char sysrq_key = '\0';
  12.524 +	int err;
  12.525  
  12.526   again:
  12.527 -    err = xenbus_transaction_start();
  12.528 -    if (err)
  12.529 -	return;
  12.530 -    if (!xenbus_scanf("control", "sysrq", "%c", &sysrq_key)) {
  12.531 -        printk(KERN_ERR "Unable to read sysrq code in control/sysrq\n");
  12.532 -	xenbus_transaction_end(1);
  12.533 -	return;
  12.534 -    }
  12.535 +	err = xenbus_transaction_start();
  12.536 +	if (err)
  12.537 +		return;
  12.538 +	if (!xenbus_scanf("control", "sysrq", "%c", &sysrq_key)) {
  12.539 +		printk(KERN_ERR "Unable to read sysrq code in "
  12.540 +		       "control/sysrq\n");
  12.541 +		xenbus_transaction_end(1);
  12.542 +		return;
  12.543 +	}
  12.544  
  12.545 -    if (sysrq_key != '\0')
  12.546 -	xenbus_printf("control", "sysrq", "%c", '\0');
  12.547 -
  12.548 -    err = xenbus_transaction_end(0);
  12.549 -    if (err == -EAGAIN)
  12.550 -	goto again;
  12.551 +	if (sysrq_key != '\0')
  12.552 +		xenbus_printf("control", "sysrq", "%c", '\0');
  12.553  
  12.554 -    if (sysrq_key != '\0') {
  12.555 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  12.556 -        handle_sysrq(sysrq_key, NULL, NULL);
  12.557 -#else
  12.558 -        handle_sysrq(sysrq_key, NULL, NULL, NULL);
  12.559 -#endif
  12.560 -    }
  12.561 +	err = xenbus_transaction_end(0);
  12.562 +	if (err == -EAGAIN)
  12.563 +		goto again;
  12.564 +
  12.565 +	if (sysrq_key != '\0') {
  12.566 +		handle_sysrq(sysrq_key, NULL, NULL);
  12.567 +	}
  12.568  }
  12.569  #endif
  12.570  
  12.571  static struct xenbus_watch shutdown_watch = {
  12.572 -    .node = "control/shutdown",
  12.573 -    .callback = shutdown_handler
  12.574 +	.node = "control/shutdown",
  12.575 +	.callback = shutdown_handler
  12.576  };
  12.577  
  12.578  #ifdef CONFIG_MAGIC_SYSRQ
  12.579  static struct xenbus_watch sysrq_watch = {
  12.580 -    .node ="control/sysrq",
  12.581 -    .callback = sysrq_handler
  12.582 +	.node ="control/sysrq",
  12.583 +	.callback = sysrq_handler
  12.584  };
  12.585  #endif
  12.586  
  12.587 @@ -413,39 +373,50 @@ static int setup_shutdown_watcher(struct
  12.588                                    unsigned long event,
  12.589                                    void *data)
  12.590  {
  12.591 -    int err1 = 0;
  12.592 +	int err1 = 0;
  12.593  #ifdef CONFIG_MAGIC_SYSRQ
  12.594 -    int err2 = 0;
  12.595 +	int err2 = 0;
  12.596  #endif
  12.597  
  12.598 -    BUG_ON(down_trylock(&xenbus_lock) == 0);
  12.599 +	BUG_ON(down_trylock(&xenbus_lock) == 0);
  12.600  
  12.601 -    err1 = register_xenbus_watch(&shutdown_watch);
  12.602 +	err1 = register_xenbus_watch(&shutdown_watch);
  12.603  #ifdef CONFIG_MAGIC_SYSRQ
  12.604 -    err2 = register_xenbus_watch(&sysrq_watch);
  12.605 +	err2 = register_xenbus_watch(&sysrq_watch);
  12.606  #endif
  12.607  
  12.608 -    if (err1) {
  12.609 -        printk(KERN_ERR "Failed to set shutdown watcher\n");
  12.610 -    }
  12.611 +	if (err1) {
  12.612 +		printk(KERN_ERR "Failed to set shutdown watcher\n");
  12.613 +	}
  12.614      
  12.615  #ifdef CONFIG_MAGIC_SYSRQ
  12.616 -    if (err2) {
  12.617 -        printk(KERN_ERR "Failed to set sysrq watcher\n");
  12.618 -    }
  12.619 +	if (err2) {
  12.620 +		printk(KERN_ERR "Failed to set sysrq watcher\n");
  12.621 +	}
  12.622  #endif
  12.623  
  12.624 -    return NOTIFY_DONE;
  12.625 +	return NOTIFY_DONE;
  12.626  }
  12.627  
  12.628  static int __init setup_shutdown_event(void)
  12.629  {
  12.630      
  12.631 -    xenstore_notifier.notifier_call = setup_shutdown_watcher;
  12.632 +	xenstore_notifier.notifier_call = setup_shutdown_watcher;
  12.633  
  12.634 -    register_xenstore_notifier(&xenstore_notifier);
  12.635 +	register_xenstore_notifier(&xenstore_notifier);
  12.636      
  12.637 -    return 0;
  12.638 +	return 0;
  12.639  }
  12.640  
  12.641  subsys_initcall(setup_shutdown_event);
  12.642 +
  12.643 +/*
  12.644 + * Local variables:
  12.645 + *  c-file-style: "linux"
  12.646 + *  indent-tabs-mode: t
  12.647 + *  c-indent-level: 8
  12.648 + *  c-basic-offset: 8
  12.649 + *  tab-width: 8
  12.650 + * End:
  12.651 + */
  12.652 +
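
Usage reference (not part of the changeset): shutdown_handler() and sysrq_handler() above share one pattern: read the node inside a xenbus transaction, retry on -EAGAIN, then act on the value. A minimal sketch of the same pattern for a hypothetical control/mynode key follows; mynode_handler, mynode_watch and my_setup are placeholders.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm-xen/xenbus.h>

static void mynode_handler(struct xenbus_watch *watch, const char *node)
{
	char *str;
	int err;

 again:
	err = xenbus_transaction_start();
	if (err)
		return;
	str = (char *)xenbus_read("control", "mynode", NULL);
	/* Ignore read errors and empty reads, as the handlers above do. */
	if (XENBUS_IS_ERR_READ(str)) {
		xenbus_transaction_end(1);
		return;
	}
	err = xenbus_transaction_end(0);
	if (err == -EAGAIN) {
		kfree(str);
		goto again;
	}

	printk("control/mynode is now '%s'\n", str);
	kfree(str);
}

static struct xenbus_watch mynode_watch = {
	.node     = "control/mynode",
	.callback = mynode_handler
};

/* Register from a xenstore notifier, as setup_shutdown_watcher() does,
   so the store is known to be up and xenbus_lock is held. */
static int my_setup(void)
{
	return register_xenbus_watch(&mynode_watch);
}
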
    13.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/smp.c	Thu Sep 29 16:22:02 2005 -0600
    13.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/smp.c	Thu Sep 29 17:28:44 2005 -0600
    13.3 @@ -11,6 +11,15 @@
    13.4  int setup_profiling_timer(unsigned int multiplier)
    13.5  {
    13.6  	printk("setup_profiling_timer\n");
    13.7 -
    13.8  	return 0;
    13.9  }
   13.10 +
   13.11 +/*
   13.12 + * Local variables:
   13.13 + *  c-file-style: "linux"
   13.14 + *  indent-tabs-mode: t
   13.15 + *  c-indent-level: 8
   13.16 + *  c-basic-offset: 8
   13.17 + *  tab-width: 8
   13.18 + * End:
   13.19 + */
    14.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c	Thu Sep 29 16:22:02 2005 -0600
    14.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c	Thu Sep 29 17:28:44 2005 -0600
    14.3 @@ -6,13 +6,23 @@ static struct proc_dir_entry *xen_base;
    14.4  
    14.5  struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
    14.6  {
    14.7 -    if ( xen_base == NULL )
    14.8 -        if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
    14.9 -            panic("Couldn't create /proc/xen");
   14.10 -    return create_proc_entry(name, mode, xen_base);
   14.11 +	if (xen_base == NULL)
   14.12 +		if ((xen_base = proc_mkdir("xen", &proc_root)) == NULL)
   14.13 +			panic("Couldn't create /proc/xen");
   14.14 +	return create_proc_entry(name, mode, xen_base);
   14.15  }
   14.16  
   14.17  void remove_xen_proc_entry(const char *name)
   14.18  {
   14.19 -    remove_proc_entry(name, xen_base);
   14.20 +	remove_proc_entry(name, xen_base);
   14.21  }
   14.22 +
   14.23 +/*
   14.24 + * Local variables:
   14.25 + *  c-file-style: "linux"
   14.26 + *  indent-tabs-mode: t
   14.27 + *  c-indent-level: 8
   14.28 + *  c-basic-offset: 8
   14.29 + *  tab-width: 8
   14.30 + * End:
   14.31 + */
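
Usage reference (not part of the changeset): create_xen_proc_entry()/remove_xen_proc_entry() above back nodes such as /proc/xen/grant in gnttab.c earlier in this diff. A minimal sketch of a trivial read-only node follows; the name "hello", hello_read and hello_init are placeholders, and the header paths are assumed from this tree.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <asm-xen/xen_proc.h>

static int hello_read(char *page, char **start, off_t off, int count,
		      int *eof, void *data)
{
	int len = sprintf(page, "hello from /proc/xen\n");
	*eof = 1;
	return len;
}

static int __init hello_init(void)
{
	struct proc_dir_entry *pde = create_xen_proc_entry("hello", 0400);
	if (pde == NULL)
		return -ENOMEM;
	pde->read_proc = hello_read;
	/* Teardown would call remove_xen_proc_entry("hello"). */
	return 0;
}
__initcall(hello_init);
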
    15.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/entry.S	Thu Sep 29 16:22:02 2005 -0600
    15.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/entry.S	Thu Sep 29 17:28:44 2005 -0600
    15.3 @@ -536,7 +536,7 @@ retint_kernel:
    15.4  	CFI_ENDPROC
    15.5  	.endm
    15.6  
    15.7 -#ifdef CONFIG_SMP	
    15.8 +#if 0
    15.9  ENTRY(reschedule_interrupt)
   15.10  	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
   15.11  
    16.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic_xen.c	Thu Sep 29 16:22:02 2005 -0600
    16.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic_xen.c	Thu Sep 29 17:28:44 2005 -0600
    16.3 @@ -31,14 +31,9 @@ DECLARE_PER_CPU(int, ipi_to_evtchn[NR_IP
    16.4  
    16.5  static inline void __send_IPI_one(unsigned int cpu, int vector)
    16.6  {
    16.7 -	unsigned int evtchn;
    16.8 -	Dprintk("%s\n", __FUNCTION__);
    16.9 -
   16.10 -	evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
   16.11 -	if (evtchn)
   16.12 -		notify_via_evtchn(evtchn);
   16.13 -	else
   16.14 -		printk("send_IPI to unbound port %d/%d", cpu, vector);
   16.15 +	int evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
   16.16 +	BUG_ON(evtchn < 0);
   16.17 +	notify_remote_via_evtchn(evtchn);
   16.18  }
   16.19  
   16.20  void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
    17.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c	Thu Sep 29 16:22:02 2005 -0600
    17.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c	Thu Sep 29 17:28:44 2005 -0600
    17.3 @@ -771,11 +771,14 @@ void __init setup_arch(char **cmdline_p)
    17.4  				virt_to_mfn(&phys_to_machine_mapping[i]);
    17.5  		}
    17.6  		HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
    17.7 -		
    17.8 -		
    17.9 +	}
   17.10  
   17.11 -
   17.12 -
   17.13 +	if (!(xen_start_info->flags & SIF_INITDOMAIN))
   17.14 +	{
   17.15 +		acpi_disabled = 1;
   17.16 +#ifdef  CONFIG_ACPI_BOOT
   17.17 +		acpi_ht = 0;
   17.18 +#endif
   17.19  	}
   17.20  #endif
   17.21  
    18.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c	Thu Sep 29 16:22:02 2005 -0600
    18.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c	Thu Sep 29 17:28:44 2005 -0600
    18.3 @@ -445,25 +445,6 @@ void __cpuinit smp_callin(void)
    18.4  }
    18.5  
    18.6  #ifdef CONFIG_XEN
    18.7 -static irqreturn_t ldebug_interrupt(
    18.8 -	int irq, void *dev_id, struct pt_regs *regs)
    18.9 -{
   18.10 -	return IRQ_HANDLED;
   18.11 -}
   18.12 -
   18.13 -static DEFINE_PER_CPU(int, ldebug_irq);
   18.14 -static char ldebug_name[NR_CPUS][15];
   18.15 -
   18.16 -void ldebug_setup(void)
   18.17 -{
   18.18 -	int cpu = smp_processor_id();
   18.19 -
   18.20 -	per_cpu(ldebug_irq, cpu) = bind_virq_to_irq(VIRQ_DEBUG);
   18.21 -	sprintf(ldebug_name[cpu], "ldebug%d", cpu);
   18.22 -	BUG_ON(request_irq(per_cpu(ldebug_irq, cpu), ldebug_interrupt,
   18.23 -	                   SA_INTERRUPT, ldebug_name[cpu], NULL));
   18.24 -}
   18.25 -
   18.26  extern void local_setup_timer(void);
   18.27  #endif
   18.28  
   18.29 @@ -498,7 +479,6 @@ void __cpuinit start_secondary(void)
   18.30  	enable_APIC_timer();
   18.31  #else
   18.32  	local_setup_timer();
   18.33 -	ldebug_setup();
   18.34  	smp_intr_init();
   18.35  	local_irq_enable();
   18.36  #endif
    19.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c	Thu Sep 29 16:22:02 2005 -0600
    19.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c	Thu Sep 29 17:28:44 2005 -0600
    19.3 @@ -149,7 +149,9 @@ void dump_pagetable(unsigned long addres
    19.4  	pmd_t *pmd;
    19.5  	pte_t *pte;
    19.6  
    19.7 +	preempt_disable();
    19.8  	pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
    19.9 +	preempt_enable();
   19.10  	pgd += pgd_index(address);
   19.11  
   19.12  	printk("PGD %lx ", pgd_val(*pgd));
   19.13 @@ -252,7 +254,9 @@ static int vmalloc_fault(unsigned long a
   19.14  
   19.15  	/* On Xen the line below does not always work. Needs investigating! */
   19.16  	/*pgd = pgd_offset(current->mm ?: &init_mm, address);*/
   19.17 +	preempt_disable();
   19.18  	pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
   19.19 +	preempt_enable();
   19.20  	pgd += pgd_index(address);
   19.21  
   19.22  	pgd_ref = pgd_offset_k(address);
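
cur_pgd is a per-CPU variable, so a read keyed by smp_processor_id() is only meaningful while the task cannot migrate; the added preempt_disable()/preempt_enable() pair pins the task to one CPU for the duration of the lookup. A minimal sketch of the pattern (illustrative, not code from the tree):

/* Illustrative: safely snapshot a per-CPU pointer. Without preempt_disable(),
 * the task could migrate between reading the CPU id and indexing the per-CPU
 * area, yielding a stale or mismatched value. */
static pgd_t *example_current_cpu_pgd(void)
{
	pgd_t *pgd;

	preempt_disable();
	pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
	preempt_enable();

	return pgd;
}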
    20.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Thu Sep 29 16:22:02 2005 -0600
    20.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Thu Sep 29 17:28:44 2005 -0600
    20.3 @@ -481,7 +481,7 @@ static void make_response(blkif_t *blkif
    20.4  	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
    20.5  
    20.6  	/* Kick the relevant domain. */
    20.7 -	notify_via_evtchn(blkif->evtchn);
    20.8 +	notify_remote_via_irq(blkif->irq);
    20.9  }
   20.10  
   20.11  void blkif_deschedule(blkif_t *blkif)
    21.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/common.h	Thu Sep 29 16:22:02 2005 -0600
    21.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/common.h	Thu Sep 29 17:28:44 2005 -0600
    21.3 @@ -44,7 +44,7 @@ typedef struct blkif_st {
    21.4  	unsigned int      handle;
    21.5  	/* Physical parameters of the comms window. */
    21.6  	unsigned int      evtchn;
    21.7 -	unsigned int      remote_evtchn;
    21.8 +	unsigned int      irq;
    21.9  	/* Comms information. */
   21.10  	blkif_back_ring_t blk_ring;
   21.11  	struct vm_struct *blk_ring_area;
    22.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c	Thu Sep 29 16:22:02 2005 -0600
    22.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c	Thu Sep 29 17:28:44 2005 -0600
    22.3 @@ -71,8 +71,6 @@ int blkif_map(blkif_t *blkif, unsigned l
    22.4  	evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
    22.5  	int err;
    22.6  
    22.7 -	BUG_ON(blkif->remote_evtchn);
    22.8 -
    22.9  	if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
   22.10  		return -ENOMEM;
   22.11  
   22.12 @@ -94,13 +92,12 @@ int blkif_map(blkif_t *blkif, unsigned l
   22.13  	}
   22.14  
   22.15  	blkif->evtchn = op.u.bind_interdomain.port1;
   22.16 -	blkif->remote_evtchn = evtchn;
   22.17  
   22.18  	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
   22.19  	SHARED_RING_INIT(sring);
   22.20  	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
   22.21  
   22.22 -	bind_evtchn_to_irqhandler(
   22.23 +	blkif->irq = bind_evtchn_to_irqhandler(
   22.24  		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
   22.25  	blkif->status = CONNECTED;
   22.26  
   22.27 @@ -109,21 +106,13 @@ int blkif_map(blkif_t *blkif, unsigned l
   22.28  
   22.29  static void free_blkif(void *arg)
   22.30  {
   22.31 -	evtchn_op_t op = { .cmd = EVTCHNOP_close };
   22.32  	blkif_t *blkif = (blkif_t *)arg;
   22.33  
   22.34 -	op.u.close.port = blkif->evtchn;
   22.35 -	op.u.close.dom = DOMID_SELF;
   22.36 -	HYPERVISOR_event_channel_op(&op);
   22.37 -	op.u.close.port = blkif->remote_evtchn;
   22.38 -	op.u.close.dom = blkif->domid;
   22.39 -	HYPERVISOR_event_channel_op(&op);
   22.40 +	if (blkif->irq)
   22.41 +		unbind_evtchn_from_irqhandler(blkif->irq, blkif);
   22.42  
   22.43  	vbd_free(&blkif->vbd);
   22.44  
   22.45 -	if (blkif->evtchn)
   22.46 -		unbind_evtchn_from_irqhandler(blkif->evtchn, blkif);
   22.47 -
   22.48  	if (blkif->blk_ring.sring) {
   22.49  		unmap_frontend_page(blkif);
   22.50  		free_vm_area(blkif->blk_ring_area);
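
The backend no longer remembers the remote event channel or closes ports by hand: bind_evtchn_to_irqhandler() now returns an IRQ, which is stored in the interface, and unbind_evtchn_from_irqhandler() on that IRQ also closes the underlying event channel (see the evtchn.h hunk later in this changeset). A condensed sketch of the new lifecycle, illustrative only (example_ names are hypothetical; the handler and device name follow the hunk above):

/* Illustrative lifecycle: bind once, remember the irq, notify and tear down
 * through it; closing the event channel is handled by the unbind. */
static int example_connect(blkif_t *blkif)
{
	int irq = bind_evtchn_to_irqhandler(
		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);

	if (irq <= 0)
		return irq ? irq : -EINVAL;	/* bind failed */

	blkif->irq = irq;
	return 0;
}

static void example_disconnect(blkif_t *blkif)
{
	if (blkif->irq)
		unbind_evtchn_from_irqhandler(blkif->irq, blkif);
	blkif->irq = 0;
}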
    23.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c	Thu Sep 29 16:22:02 2005 -0600
    23.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c	Thu Sep 29 17:28:44 2005 -0600
    23.3 @@ -80,6 +80,15 @@ static void frontend_changed(struct xenb
    23.4  		return;
    23.5  	}
    23.6  
    23.7 +	/* Map the shared frame, irq etc. */
    23.8 +	err = blkif_map(be->blkif, ring_ref, evtchn);
    23.9 +	if (err) {
   23.10 +		xenbus_dev_error(be->dev, err, "mapping ring-ref %lu port %u",
   23.11 +				 ring_ref, evtchn);
   23.12 +		return;
   23.13 +	}
   23.14 +	/* XXX From here on should 'blkif_unmap' on error. */
   23.15 +
   23.16  again:
   23.17  	/* Supply the information about the device the frontend needs */
   23.18  	err = xenbus_transaction_start();
   23.19 @@ -112,14 +121,6 @@ again:
   23.20  		goto abort;
   23.21  	}
   23.22  
   23.23 -	/* Map the shared frame, irq etc. */
   23.24 -	err = blkif_map(be->blkif, ring_ref, evtchn);
   23.25 -	if (err) {
   23.26 -		xenbus_dev_error(be->dev, err, "mapping ring-ref %lu port %u",
   23.27 -				 ring_ref, evtchn);
   23.28 -		goto abort;
   23.29 -	}
   23.30 -
   23.31  	err = xenbus_transaction_end(0);
   23.32  	if (err == -EAGAIN)
   23.33  		goto again;
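
Mapping the ring and binding the event channel move out of the transaction because the again: loop re-runs whenever xenbus_transaction_end(0) returns -EAGAIN, and blkif_map() is not safe to repeat; only the idempotent store writes stay inside the retry loop. A skeleton of the pattern, illustrative only (write_details() is a hypothetical stand-in for the xenbus writes elided from the hunk, and the backend_info layout is assumed from the fields used above):

/* Illustrative retry skeleton: one-shot setup first, then a transaction that
 * is retried wholesale on -EAGAIN. */
static void example_frontend_changed(struct backend_info *be,
				     unsigned long ring_ref,
				     unsigned int evtchn)
{
	int err;

	err = blkif_map(be->blkif, ring_ref, evtchn);	/* not idempotent */
	if (err) {
		xenbus_dev_error(be->dev, err, "mapping ring-ref %lu port %u",
				 ring_ref, evtchn);
		return;
	}

again:
	err = xenbus_transaction_start();
	if (err) {
		xenbus_dev_error(be->dev, err, "starting transaction");
		return;
	}

	err = write_details(be);		/* idempotent store writes */
	if (err)
		goto abort;

	err = xenbus_transaction_end(0);
	if (err == -EAGAIN)
		goto again;			/* whole transaction re-runs */
	if (err)
		xenbus_dev_error(be->dev, err, "ending transaction");
	return;

abort:
	xenbus_transaction_end(1);
	xenbus_dev_error(be->dev, err, "writing device details");
}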
    24.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Thu Sep 29 16:22:02 2005 -0600
    24.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Thu Sep 29 17:28:44 2005 -0600
    24.3 @@ -57,10 +57,7 @@ static unsigned int blkif_state = BLKIF_
    24.4  
    24.5  #define MAXIMUM_OUTSTANDING_BLOCK_REQS \
    24.6      (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLKIF_RING_SIZE)
    24.7 -#define GRANTREF_INVALID (1<<15)
    24.8 -#define GRANT_INVALID_REF	(0xFFFF)
    24.9 -
   24.10 -static int recovery = 0; /* Recovery in progress: protected by blkif_io_lock */
   24.11 +#define GRANT_INVALID_REF	0
   24.12  
   24.13  static void kick_pending_request_queues(struct blkfront_info *info);
   24.14  
   24.15 @@ -84,22 +81,10 @@ static inline void ADD_ID_TO_FREELIST(
   24.16  	info->shadow_free = id;
   24.17  }
   24.18  
   24.19 -static inline void pickle_request(struct blk_shadow *s, blkif_request_t *r)
   24.20 -{
   24.21 -
   24.22 -	s->req = *r;
   24.23 -}
   24.24 -
   24.25 -static inline void unpickle_request(blkif_request_t *r, struct blk_shadow *s)
   24.26 -{
   24.27 -
   24.28 -	*r = s->req;
   24.29 -}
   24.30 -
   24.31  static inline void flush_requests(struct blkfront_info *info)
   24.32  {
   24.33  	RING_PUSH_REQUESTS(&info->ring);
   24.34 -	notify_via_evtchn(info->evtchn);
   24.35 +	notify_remote_via_irq(info->irq);
   24.36  }
   24.37  
   24.38  static void kick_pending_request_queues(struct blkfront_info *info)
   24.39 @@ -235,7 +220,7 @@ static int blkif_queue_request(struct re
   24.40  				rq_data_dir(req) );
   24.41  
   24.42  			info->shadow[id].frame[ring_req->nr_segments] =
   24.43 -				buffer_mfn;
   24.44 +				mfn_to_pfn(buffer_mfn);
   24.45  
   24.46  			ring_req->frame_and_sects[ring_req->nr_segments] =
   24.47  				blkif_fas_from_gref(ref, fsect, lsect);
   24.48 @@ -247,7 +232,7 @@ static int blkif_queue_request(struct re
   24.49  	info->ring.req_prod_pvt++;
   24.50  
   24.51  	/* Keep a private copy so we can reissue requests when recovering. */
   24.52 -	pickle_request(&info->shadow[id], ring_req);
   24.53 +	info->shadow[id].req = *ring_req;
   24.54  
   24.55  	gnttab_free_grant_references(gref_head);
   24.56  
   24.57 @@ -312,7 +297,7 @@ static irqreturn_t blkif_int(int irq, vo
   24.58  
   24.59  	spin_lock_irqsave(&blkif_io_lock, flags);
   24.60  
   24.61 -	if (unlikely(info->connected != BLKIF_STATE_CONNECTED || recovery)) {
   24.62 +	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
   24.63  		spin_unlock_irqrestore(&blkif_io_lock, flags);
   24.64  		return IRQ_HANDLED;
   24.65  	}
   24.66 @@ -372,8 +357,9 @@ static void blkif_free(struct blkfront_i
   24.67  	if (info->ring_ref != GRANT_INVALID_REF)
   24.68  		gnttab_end_foreign_access(info->ring_ref, 0);
   24.69  	info->ring_ref = GRANT_INVALID_REF;
   24.70 -	unbind_evtchn_from_irqhandler(info->evtchn, info); 
   24.71 -	info->evtchn = 0;
   24.72 +	if (info->irq)
   24.73 +		unbind_evtchn_from_irqhandler(info->irq, info); 
   24.74 +	info->evtchn = info->irq = 0;
   24.75  }
   24.76  
   24.77  static void blkif_recover(struct blkfront_info *info)
   24.78 @@ -401,28 +387,24 @@ static void blkif_recover(struct blkfron
   24.79  		if (copy[i].request == 0)
   24.80  			continue;
   24.81  
   24.82 -		/* Grab a request slot and unpickle shadow state into it. */
   24.83 +		/* Grab a request slot and copy shadow state into it. */
   24.84  		req = RING_GET_REQUEST(
   24.85  			&info->ring, info->ring.req_prod_pvt);
   24.86 -		unpickle_request(req, &copy[i]);
   24.87 +		*req = copy[i].req;
   24.88  
   24.89  		/* We get a new request id, and must reset the shadow state. */
   24.90  		req->id = GET_ID_FROM_FREELIST(info);
   24.91  		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
   24.92  
   24.93  		/* Rewrite any grant references invalidated by susp/resume. */
   24.94 -		for (j = 0; j < req->nr_segments; j++) {
   24.95 -			if ( req->frame_and_sects[j] & GRANTREF_INVALID )
   24.96 -				gnttab_grant_foreign_access_ref(
   24.97 -					blkif_gref_from_fas(
   24.98 -						req->frame_and_sects[j]),
   24.99 -					info->backend_id,
  24.100 -					info->shadow[req->id].frame[j],
  24.101 -					rq_data_dir(
  24.102 -						(struct request *)
  24.103 -						info->shadow[req->id].request));
  24.104 -			req->frame_and_sects[j] &= ~GRANTREF_INVALID;
  24.105 -		}
  24.106 +		for (j = 0; j < req->nr_segments; j++)
  24.107 +			gnttab_grant_foreign_access_ref(
  24.108 +				blkif_gref_from_fas(req->frame_and_sects[j]),
  24.109 +				info->backend_id,
  24.110 +				pfn_to_mfn(info->shadow[req->id].frame[j]),
  24.111 +				rq_data_dir(
  24.112 +					(struct request *)
  24.113 +					info->shadow[req->id].request));
  24.114  		info->shadow[req->id].req = *req;
  24.115  
  24.116  		info->ring.req_prod_pvt++;
  24.117 @@ -430,15 +412,13 @@ static void blkif_recover(struct blkfron
  24.118  
  24.119  	kfree(copy);
  24.120  
  24.121 -	recovery = 0;
  24.122 -
  24.123  	/* info->ring->req_prod will be set when we flush_requests().*/
  24.124  	wmb();
  24.125  
  24.126  	/* Kicks things back into life. */
  24.127  	flush_requests(info);
  24.128  
  24.129 -	/* Now safe to left other people use the interface. */
  24.130 +	/* Now safe to let other people use the interface. */
  24.131  	info->connected = BLKIF_STATE_CONNECTED;
  24.132  }
  24.133  
  24.134 @@ -450,10 +430,12 @@ static void blkif_connect(struct blkfron
  24.135  
  24.136  	err = bind_evtchn_to_irqhandler(
  24.137  		info->evtchn, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
  24.138 -	if (err != 0) {
  24.139 +	if (err <= 0) {
  24.140  		WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
  24.141  		return;
  24.142  	}
  24.143 +
  24.144 +	info->irq = err;
  24.145  }
  24.146  
  24.147  
  24.148 @@ -487,8 +469,8 @@ static void watch_for_status(struct xenb
  24.149  		return;
  24.150  	}
  24.151  
  24.152 +	info->connected = BLKIF_STATE_CONNECTED;
  24.153  	xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
  24.154 -	info->connected = BLKIF_STATE_CONNECTED;
  24.155  
  24.156  	blkif_state = BLKIF_STATE_CONNECTED;
  24.157  
  24.158 @@ -591,17 +573,6 @@ again:
  24.159  		goto abort_transaction;
  24.160  	}
  24.161  
  24.162 -	info->backend = backend;
  24.163 -	backend = NULL;
  24.164 -
  24.165 -	info->watch.node = info->backend;
  24.166 -	info->watch.callback = watch_for_status;
  24.167 -	err = register_xenbus_watch(&info->watch);
  24.168 -	if (err) {
  24.169 -		message = "registering watch on backend";
  24.170 -		goto abort_transaction;
  24.171 -	}
  24.172 -
  24.173  	err = xenbus_transaction_end(0);
  24.174  	if (err) {
  24.175  		if (err == -EAGAIN)
  24.176 @@ -610,10 +581,17 @@ again:
  24.177  		goto destroy_blkring;
  24.178  	}
  24.179  
  24.180 - out:
  24.181 -	if (backend)
  24.182 -		kfree(backend);
  24.183 -	return err;
  24.184 +	info->watch.node = backend;
  24.185 +	info->watch.callback = watch_for_status;
  24.186 +	err = register_xenbus_watch(&info->watch);
  24.187 +	if (err) {
  24.188 +		message = "registering watch on backend";
  24.189 +		goto destroy_blkring;
  24.190 +	}
  24.191 +
  24.192 +	info->backend = backend;
  24.193 +
  24.194 +	return 0;
  24.195  
  24.196   abort_transaction:
  24.197  	xenbus_transaction_end(1);
  24.198 @@ -621,7 +599,10 @@ again:
  24.199  	xenbus_dev_error(dev, err, "%s", message);
  24.200   destroy_blkring:
  24.201  	blkif_free(info);
  24.202 -	goto out;
  24.203 + out:
  24.204 +	if (backend)
  24.205 +		kfree(backend);
  24.206 +	return err;
  24.207  }
  24.208  
  24.209  /* Setup supplies the backend dir, virtual device.
  24.210 @@ -702,9 +683,6 @@ static int blkfront_suspend(struct xenbu
  24.211  	kfree(info->backend);
  24.212  	info->backend = NULL;
  24.213  
  24.214 -	recovery = 1;
  24.215 -	blkif_free(info);
  24.216 -
  24.217  	return 0;
  24.218  }
  24.219  
  24.220 @@ -713,11 +691,12 @@ static int blkfront_resume(struct xenbus
  24.221  	struct blkfront_info *info = dev->data;
  24.222  	int err;
  24.223  
  24.224 -	/* FIXME: Check geometry hasn't changed here... */
  24.225 +	blkif_free(info);
  24.226 +
  24.227  	err = talk_to_backend(dev, info);
  24.228 -	if (!err) {
  24.229 +	if (!err)
  24.230  		blkif_recover(info);
  24.231 -	}
  24.232 +
  24.233  	return err;
  24.234  }
  24.235  
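
Shadow entries now record frames as pfns (mfn_to_pfn() at queue time) rather than raw mfns, because machine frame numbers are reassigned across save/restore; at recovery time every segment is simply re-granted against pfn_to_mfn() of the stored frame, which is what lets the GRANTREF_INVALID bookkeeping disappear. A hedged sketch of the re-grant step, using the shadow layout visible in the hunks (the example_ wrapper is hypothetical):

/* Illustrative: re-grant one recovered request's segments. The pfn stored at
 * queue time is translated back to the (possibly new) mfn here. */
static void example_regrant(struct blkfront_info *info, blkif_request_t *req)
{
	int j;

	for (j = 0; j < req->nr_segments; j++)
		gnttab_grant_foreign_access_ref(
			blkif_gref_from_fas(req->frame_and_sects[j]),
			info->backend_id,
			pfn_to_mfn(info->shadow[req->id].frame[j]),
			rq_data_dir((struct request *)
				    info->shadow[req->id].request));
}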
    25.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h	Thu Sep 29 16:22:02 2005 -0600
    25.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h	Thu Sep 29 17:28:44 2005 -0600
    25.3 @@ -123,7 +123,7 @@ struct blkfront_info
    25.4  	int backend_id;
    25.5  	int ring_ref;
    25.6  	blkif_front_ring_t ring;
    25.7 -	unsigned int evtchn;
    25.8 +	unsigned int evtchn, irq;
    25.9  	struct xlbd_major_info *mi;
   25.10  	request_queue_t *rq;
   25.11  	struct work_struct work;
    26.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Thu Sep 29 16:22:02 2005 -0600
    26.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Thu Sep 29 17:28:44 2005 -0600
    26.3 @@ -842,7 +842,7 @@ static void make_response(blkif_t *blkif
    26.4  	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
    26.5  
    26.6  	/* Kick the relevant domain. */
    26.7 -	notify_via_evtchn(blkif->evtchn);
    26.8 +	notify_remote_via_irq(blkif->irq);
    26.9  }
   26.10  
   26.11  static struct miscdevice blktap_miscdev = {
    27.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/common.h	Thu Sep 29 16:22:02 2005 -0600
    27.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/common.h	Thu Sep 29 17:28:44 2005 -0600
    27.3 @@ -46,7 +46,7 @@ typedef struct blkif_st {
    27.4  	unsigned int      handle;
    27.5  	/* Physical parameters of the comms window. */
    27.6  	unsigned int      evtchn;
    27.7 -	unsigned int      remote_evtchn;
    27.8 +	unsigned int      irq;
    27.9  	/* Comms information. */
   27.10  	blkif_back_ring_t blk_ring;
   27.11  	struct vm_struct *blk_ring_area;
    28.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c	Thu Sep 29 16:22:02 2005 -0600
    28.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c	Thu Sep 29 17:28:44 2005 -0600
    28.3 @@ -71,8 +71,6 @@ int blkif_map(blkif_t *blkif, unsigned l
    28.4  	evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
    28.5  	int err;
    28.6  
    28.7 -	BUG_ON(blkif->remote_evtchn);
    28.8 -
    28.9  	if ((blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL)
   28.10  		return -ENOMEM;
   28.11  
   28.12 @@ -93,35 +91,26 @@ int blkif_map(blkif_t *blkif, unsigned l
   28.13  		return err;
   28.14  	}
   28.15  
   28.16 -
   28.17  	blkif->evtchn = op.u.bind_interdomain.port1;
   28.18 -	blkif->remote_evtchn = evtchn;
   28.19  
   28.20  	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
   28.21  	SHARED_RING_INIT(sring);
   28.22  	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
   28.23  
   28.24 -	bind_evtchn_to_irqhandler(
   28.25 +	blkif->irq = bind_evtchn_to_irqhandler(
   28.26  		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
   28.27 -	blkif->status        = CONNECTED;
   28.28 +
   28.29 +	blkif->status = CONNECTED;
   28.30  
   28.31  	return 0;
   28.32  }
   28.33  
   28.34  static void free_blkif(void *arg)
   28.35  {
   28.36 -	evtchn_op_t op = { .cmd = EVTCHNOP_close };
   28.37  	blkif_t *blkif = (blkif_t *)arg;
   28.38  
   28.39 -	op.u.close.port = blkif->evtchn;
   28.40 -	op.u.close.dom = DOMID_SELF;
   28.41 -	HYPERVISOR_event_channel_op(&op);
   28.42 -	op.u.close.port = blkif->remote_evtchn;
   28.43 -	op.u.close.dom = blkif->domid;
   28.44 -	HYPERVISOR_event_channel_op(&op);
   28.45 -
   28.46 -	if (blkif->evtchn)
   28.47 -		unbind_evtchn_from_irqhandler(blkif->evtchn, blkif);
   28.48 +	if (blkif->irq)
   28.49 +		unbind_evtchn_from_irqhandler(blkif->irq, blkif);
   28.50  
   28.51  	if (blkif->blk_ring.sring) {
   28.52  		unmap_frontend_page(blkif);
    29.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c	Thu Sep 29 16:22:02 2005 -0600
    29.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c	Thu Sep 29 17:28:44 2005 -0600
    29.3 @@ -21,7 +21,6 @@
    29.4  #include <linux/err.h>
    29.5  #include "xencons_ring.h"
    29.6  
    29.7 -
    29.8  struct ring_head
    29.9  {
   29.10  	u32 cons;
   29.11 @@ -29,6 +28,7 @@ struct ring_head
   29.12  	char buf[0];
   29.13  } __attribute__((packed));
   29.14  
   29.15 +static int xencons_irq;
   29.16  
   29.17  #define XENCONS_RING_SIZE (PAGE_SIZE/2 - sizeof (struct ring_head))
   29.18  #define XENCONS_IDX(cnt) ((cnt) % XENCONS_RING_SIZE)
   29.19 @@ -46,7 +46,8 @@ static inline struct ring_head *inring(v
   29.20  
   29.21  
   29.22  /* don't block -  write as much as possible and return */
   29.23 -static int __xencons_ring_send(struct ring_head *ring, const char *data, unsigned len)
   29.24 +static int __xencons_ring_send(
   29.25 +	struct ring_head *ring, const char *data, unsigned len)
   29.26  {
   29.27  	int copied = 0;
   29.28  
   29.29 @@ -63,13 +64,9 @@ static int __xencons_ring_send(struct ri
   29.30  
   29.31  int xencons_ring_send(const char *data, unsigned len)
   29.32  {
   29.33 -	struct ring_head *out = outring();
   29.34 -	int sent = 0;
   29.35 -	
   29.36 -	sent = __xencons_ring_send(out, data, len);
   29.37 -	notify_via_evtchn(xen_start_info->console_evtchn);
   29.38 +	int sent = __xencons_ring_send(outring(), data, len);
   29.39 +	notify_remote_via_irq(xencons_irq);
   29.40  	return sent;
   29.41 -
   29.42  }	
   29.43  
   29.44  
   29.45 @@ -97,32 +94,28 @@ int xencons_ring_init(void)
   29.46  {
   29.47  	int err;
   29.48  
   29.49 +	if (xencons_irq)
   29.50 +		unbind_evtchn_from_irqhandler(xencons_irq, inring());
   29.51 +	xencons_irq = 0;
   29.52 +
   29.53  	if (!xen_start_info->console_evtchn)
   29.54  		return 0;
   29.55  
   29.56 -	err = bind_evtchn_to_irqhandler(xen_start_info->console_evtchn,
   29.57 -					handle_input, 0, "xencons", inring());
   29.58 -	if (err) {
   29.59 +	err = bind_evtchn_to_irqhandler(
   29.60 +		xen_start_info->console_evtchn,
   29.61 +		handle_input, 0, "xencons", inring());
   29.62 +	if (err <= 0) {
   29.63  		xprintk("XEN console request irq failed %i\n", err);
   29.64  		return err;
   29.65  	}
   29.66  
   29.67 +	xencons_irq = err;
   29.68 +
   29.69  	return 0;
   29.70  }
   29.71  
   29.72 -void xencons_suspend(void)
   29.73 -{
   29.74 -
   29.75 -	if (!xen_start_info->console_evtchn)
   29.76 -		return;
   29.77 -
   29.78 -	unbind_evtchn_from_irqhandler(xen_start_info->console_evtchn,
   29.79 -				      inring());
   29.80 -}
   29.81 -
   29.82  void xencons_resume(void)
   29.83  {
   29.84 -
   29.85  	(void)xencons_ring_init();
   29.86  }
   29.87  
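
xencons_ring_init() is now idempotent: it first unbinds any stale IRQ left from a previous binding, then rebinds the console event channel and records the returned IRQ, which is why xencons_suspend() disappears and xencons_resume() simply re-runs the init path. The xenbus comms code later in this changeset adopts the same shape. A generic sketch of the idempotent-rebind pattern, illustrative only (example_ names are hypothetical):

/* Illustrative: drop any previous binding, then rebind and record the irq. */
static int example_rebind(int *irq_slot, unsigned int evtchn,
			  irqreturn_t (*handler)(int, void *, struct pt_regs *),
			  void *dev_id)
{
	int irq;

	if (*irq_slot)				/* stale binding from before */
		unbind_evtchn_from_irqhandler(*irq_slot, dev_id);
	*irq_slot = 0;

	if (!evtchn)				/* nothing to bind to */
		return 0;

	irq = bind_evtchn_to_irqhandler(evtchn, handler, 0, "example", dev_id);
	if (irq <= 0)
		return irq;

	*irq_slot = irq;
	return 0;
}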
    30.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Thu Sep 29 16:22:02 2005 -0600
    30.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Thu Sep 29 17:28:44 2005 -0600
    30.3 @@ -22,8 +22,6 @@
    30.4  #include <asm-xen/gnttab.h>
    30.5  #include <asm-xen/driver_util.h>
    30.6  
    30.7 -#define GRANT_INVALID_REF (0xFFFF)
    30.8 -
    30.9  #if 0
   30.10  #define ASSERT(_p) \
   30.11      if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
   30.12 @@ -52,7 +50,7 @@ typedef struct netif_st {
   30.13  	u16              rx_shmem_handle;
   30.14  	grant_ref_t      rx_shmem_ref; 
   30.15  	unsigned int     evtchn;
   30.16 -	unsigned int     remote_evtchn;
   30.17 +	unsigned int     irq;
   30.18  
   30.19  	/* The shared rings and indexes. */
   30.20  	netif_tx_interface_t *tx;
    31.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Thu Sep 29 16:22:02 2005 -0600
    31.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Thu Sep 29 17:28:44 2005 -0600
    31.3 @@ -15,18 +15,17 @@ static void __netif_up(netif_t *netif)
    31.4  	spin_lock_bh(&dev->xmit_lock);
    31.5  	netif->active = 1;
    31.6  	spin_unlock_bh(&dev->xmit_lock);
    31.7 -	(void)bind_evtchn_to_irqhandler(
    31.8 -		netif->evtchn, netif_be_int, 0, dev->name, netif);
    31.9 +	enable_irq(netif->irq);
   31.10  	netif_schedule_work(netif);
   31.11  }
   31.12  
   31.13  static void __netif_down(netif_t *netif)
   31.14  {
   31.15  	struct net_device *dev = netif->dev;
   31.16 +	disable_irq(netif->irq);
   31.17  	spin_lock_bh(&dev->xmit_lock);
   31.18  	netif->active = 0;
   31.19  	spin_unlock_bh(&dev->xmit_lock);
   31.20 -	unbind_evtchn_from_irqhandler(netif->evtchn, netif);
   31.21  	netif_deschedule_work(netif);
   31.22  }
   31.23  
   31.24 @@ -203,7 +202,10 @@ int netif_map(netif_t *netif, unsigned l
   31.25  	}
   31.26  
   31.27  	netif->evtchn = op.u.bind_interdomain.port1;
   31.28 -	netif->remote_evtchn = evtchn;
   31.29 +
   31.30 +	netif->irq = bind_evtchn_to_irqhandler(
   31.31 +		netif->evtchn, netif_be_int, 0, netif->dev->name, netif);
   31.32 +	disable_irq(netif->irq);
   31.33  
   31.34  	netif->tx = (netif_tx_interface_t *)netif->comms_area->addr;
   31.35  	netif->rx = (netif_rx_interface_t *)
   31.36 @@ -224,21 +226,15 @@ int netif_map(netif_t *netif, unsigned l
   31.37  
   31.38  static void free_netif_callback(void *arg)
   31.39  {
   31.40 -	evtchn_op_t op = { .cmd = EVTCHNOP_close };
   31.41  	netif_t *netif = (netif_t *)arg;
   31.42  
   31.43  	/*
   31.44 -	 * These can't be done in netif_disconnect() because at that point
   31.45 +	 * This can't be done in netif_disconnect() because at that point
   31.46  	 * there may be outstanding requests in the network stack whose
   31.47  	 * asynchronous responses must still be notified to the remote driver.
   31.48  	 */
   31.49 -
   31.50 -	op.u.close.port = netif->evtchn;
   31.51 -	op.u.close.dom = DOMID_SELF;
   31.52 -	HYPERVISOR_event_channel_op(&op);
   31.53 -	op.u.close.port = netif->remote_evtchn;
   31.54 -	op.u.close.dom = netif->domid;
   31.55 -	HYPERVISOR_event_channel_op(&op);
   31.56 +	if (netif->irq)
   31.57 +		unbind_evtchn_from_irqhandler(netif->irq, netif);
   31.58  
   31.59  	unregister_netdev(netif->dev);
   31.60  
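
The network backend now binds its event channel once, in netif_map(), and immediately disables the IRQ; bringing the interface up or down only toggles delivery with enable_irq()/disable_irq() rather than rebinding and unbinding the handler each time, and the IRQ (and with it the event channel) is released in one place, free_netif_callback(). A compact sketch of the gating, illustrative only (locking around the active flag is omitted for brevity):

/* Illustrative: gate event delivery instead of rebinding the handler. */
static void example_up(netif_t *netif)
{
	netif->active = 1;
	enable_irq(netif->irq);		/* start taking frontend events */
}

static void example_down(netif_t *netif)
{
	disable_irq(netif->irq);	/* also waits for a running handler */
	netif->active = 0;
}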
    32.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Thu Sep 29 16:22:02 2005 -0600
    32.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Thu Sep 29 17:28:44 2005 -0600
    32.3 @@ -42,7 +42,7 @@ static multicall_entry_t rx_mcl[NETIF_RX
    32.4  static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
    32.5  
    32.6  static gnttab_transfer_t grant_rx_op[MAX_PENDING_REQS];
    32.7 -static unsigned char rx_notify[NR_EVENT_CHANNELS];
    32.8 +static unsigned char rx_notify[NR_IRQS];
    32.9  
   32.10  /* Don't currently gate addition of an interface to the tx scheduling list. */
   32.11  #define tx_work_exists(_if) (1)
   32.12 @@ -209,7 +209,7 @@ static void net_rx_action(unsigned long 
   32.13  {
   32.14  	netif_t *netif = NULL; 
   32.15  	s8 status;
   32.16 -	u16 size, id, evtchn;
   32.17 +	u16 size, id, irq;
   32.18  	multicall_entry_t *mcl;
   32.19  	mmu_update_t *mmu;
   32.20  	gnttab_transfer_t *gop;
   32.21 @@ -320,16 +320,16 @@ static void net_rx_action(unsigned long 
   32.22  				gop->status, netif->domid);
   32.23  			/* XXX SMH: should free 'old_mfn' here */
   32.24  			status = NETIF_RSP_ERROR; 
   32.25 -		} 
   32.26 -		evtchn = netif->evtchn;
   32.27 +		}
   32.28 +		irq = netif->irq;
   32.29  		id = netif->rx->ring[
   32.30  			MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
   32.31  		if (make_rx_response(netif, id, status,
   32.32  				     (unsigned long)skb->data & ~PAGE_MASK,
   32.33  				     size, skb->proto_csum_valid) &&
   32.34 -		    (rx_notify[evtchn] == 0)) {
   32.35 -			rx_notify[evtchn] = 1;
   32.36 -			notify_list[notify_nr++] = evtchn;
   32.37 +		    (rx_notify[irq] == 0)) {
   32.38 +			rx_notify[irq] = 1;
   32.39 +			notify_list[notify_nr++] = irq;
   32.40  		}
   32.41  
   32.42  		netif_put(netif);
   32.43 @@ -339,9 +339,9 @@ static void net_rx_action(unsigned long 
   32.44  	}
   32.45  
   32.46  	while (notify_nr != 0) {
   32.47 -		evtchn = notify_list[--notify_nr];
   32.48 -		rx_notify[evtchn] = 0;
   32.49 -		notify_via_evtchn(evtchn);
   32.50 +		irq = notify_list[--notify_nr];
   32.51 +		rx_notify[irq] = 0;
   32.52 +		notify_remote_via_irq(irq);
   32.53  	}
   32.54  
   32.55  	/* More work to do? */
   32.56 @@ -434,7 +434,6 @@ inline static void net_tx_action_dealloc
   32.57  		gop->host_addr    = MMAP_VADDR(pending_idx);
   32.58  		gop->dev_bus_addr = 0;
   32.59  		gop->handle       = grant_tx_ref[pending_idx];
   32.60 -		grant_tx_ref[pending_idx] = GRANT_INVALID_REF;
   32.61  		gop++;
   32.62  	}
   32.63  	BUG_ON(HYPERVISOR_grant_table_op(
   32.64 @@ -718,7 +717,7 @@ static void make_tx_response(netif_t *ne
   32.65  
   32.66  	mb(); /* Update producer before checking event threshold. */
   32.67  	if (i == netif->tx->event)
   32.68 -		notify_via_evtchn(netif->evtchn);
   32.69 +		notify_remote_via_irq(netif->irq);
   32.70  }
   32.71  
   32.72  static int make_rx_response(netif_t *netif, 
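
Completed receive responses are now batched per IRQ rather than per event channel: rx_notify[] (sized NR_IRQS) deduplicates, notify_list[] collects the IRQs to kick, and a single notify_remote_via_irq() per interface is issued after the whole batch. A minimal sketch of the dedup-and-flush pattern, illustrative only (example_ names are hypothetical):

/* Illustrative: mark each irq at most once, then flush all notifications. */
static unsigned char example_pending[NR_IRQS];

static void example_mark(u16 *list, unsigned int *nr, u16 irq)
{
	if (example_pending[irq] == 0) {
		example_pending[irq] = 1;
		list[(*nr)++] = irq;
	}
}

static void example_flush(u16 *list, unsigned int *nr)
{
	while (*nr != 0) {
		u16 irq = list[--(*nr)];
		example_pending[irq] = 0;
		notify_remote_via_irq(irq);
	}
}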
    33.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Thu Sep 29 16:22:02 2005 -0600
    33.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Thu Sep 29 17:28:44 2005 -0600
    33.3 @@ -1,7 +1,7 @@
    33.4  /******************************************************************************
    33.5   * Virtual network driver for conversing with remote driver backends.
    33.6   * 
    33.7 - * Copyright (c) 2002-2004, K A Fraser
    33.8 + * Copyright (c) 2002-2005, K A Fraser
    33.9   * 
   33.10   * This file may be distributed separately from the Linux kernel, or
   33.11   * incorporated into other software packages, subject to the following license:
   33.12 @@ -57,7 +57,7 @@
   33.13  #include <asm-xen/xen-public/grant_table.h>
   33.14  #include <asm-xen/gnttab.h>
   33.15  
   33.16 -#define GRANT_INVALID_REF	(0xFFFF)
   33.17 +#define GRANT_INVALID_REF	0
   33.18  
   33.19  #ifndef __GFP_NOWARN
   33.20  #define __GFP_NOWARN 0
   33.21 @@ -127,7 +127,7 @@ struct net_private
   33.22  	spinlock_t   rx_lock;
   33.23  
   33.24  	unsigned int handle;
   33.25 -	unsigned int evtchn;
   33.26 +	unsigned int evtchn, irq;
   33.27  
   33.28  	/* What is the status of our connection to the remote backend? */
   33.29  #define BEST_CLOSED       0
   33.30 @@ -457,7 +457,7 @@ static int network_start_xmit(struct sk_
   33.31  	/* Only notify Xen if we really have to. */
   33.32  	mb();
   33.33  	if (np->tx->TX_TEST_IDX == i)
   33.34 -		notify_via_evtchn(np->evtchn);
   33.35 +		notify_remote_via_irq(np->irq);
   33.36  
   33.37  	return 0;
   33.38  
   33.39 @@ -700,6 +700,7 @@ static void network_connect(struct net_d
   33.40  	struct net_private *np;
   33.41  	int i, requeue_idx;
   33.42  	netif_tx_request_t *tx;
   33.43 +	struct sk_buff *skb;
   33.44  
   33.45  	np = netdev_priv(dev);
   33.46  	spin_lock_irq(&np->tx_lock);
   33.47 @@ -711,7 +712,8 @@ static void network_connect(struct net_d
   33.48  	np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
   33.49  	np->rx->event = np->tx->event = 1;
   33.50  
   33.51 -	/* Step 2: Rebuild the RX and TX ring contents.
   33.52 +	/*
   33.53 +	 * Step 2: Rebuild the RX and TX ring contents.
   33.54  	 * NB. We could just free the queued TX packets now but we hope
   33.55  	 * that sending them out might do some good.  We have to rebuild
   33.56  	 * the RX ring because some of our pages are currently flipped out
   33.57 @@ -722,56 +724,59 @@ static void network_connect(struct net_d
   33.58  	 * them.
   33.59  	 */
   33.60  
   33.61 -	/* Rebuild the TX buffer freelist and the TX ring itself.
   33.62 +	/*
   33.63 +	 * Rebuild the TX buffer freelist and the TX ring itself.
   33.64  	 * NB. This reorders packets.  We could keep more private state
   33.65  	 * to avoid this but maybe it doesn't matter so much given the
   33.66  	 * interface has been down.
   33.67  	 */
   33.68  	for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
   33.69 -		if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
   33.70 -			struct sk_buff *skb = np->tx_skbs[i];
   33.71 -
   33.72 -			tx = &np->tx->ring[requeue_idx++].req;
   33.73 +		if ((unsigned long)np->tx_skbs[i] < __PAGE_OFFSET)
   33.74 +			continue;
   33.75  
   33.76 -			tx->id   = i;
   33.77 -			gnttab_grant_foreign_access_ref(
   33.78 -				np->grant_tx_ref[i], np->backend_id, 
   33.79 -				virt_to_mfn(np->tx_skbs[i]->data),
   33.80 -				GNTMAP_readonly); 
   33.81 -			tx->gref = np->grant_tx_ref[i];
   33.82 -			tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
   33.83 -			tx->size = skb->len;
   33.84 +		skb = np->tx_skbs[i];
   33.85  
   33.86 -			np->stats.tx_bytes += skb->len;
   33.87 -			np->stats.tx_packets++;
   33.88 -		}
   33.89 +		tx = &np->tx->ring[requeue_idx++].req;
   33.90 +
   33.91 +		tx->id = i;
   33.92 +		gnttab_grant_foreign_access_ref(
   33.93 +			np->grant_tx_ref[i], np->backend_id, 
   33.94 +			virt_to_mfn(np->tx_skbs[i]->data),
   33.95 +			GNTMAP_readonly); 
   33.96 +		tx->gref = np->grant_tx_ref[i];
   33.97 +		tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
   33.98 +		tx->size = skb->len;
   33.99 +		tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
  33.100 +
  33.101 +		np->stats.tx_bytes += skb->len;
  33.102 +		np->stats.tx_packets++;
  33.103  	}
  33.104  	wmb();
  33.105  	np->tx->req_prod = requeue_idx;
  33.106  
  33.107  	/* Rebuild the RX buffer freelist and the RX ring itself. */
  33.108  	for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) { 
  33.109 -		if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET) {
  33.110 -			gnttab_grant_foreign_transfer_ref(
  33.111 -				np->grant_rx_ref[i], np->backend_id);
  33.112 -			np->rx->ring[requeue_idx].req.gref =
  33.113 -				np->grant_rx_ref[i];
  33.114 -			np->rx->ring[requeue_idx].req.id = i;
  33.115 -			requeue_idx++; 
  33.116 -		}
  33.117 +		if ((unsigned long)np->rx_skbs[i] < __PAGE_OFFSET)
  33.118 +			continue;
  33.119 +		gnttab_grant_foreign_transfer_ref(
  33.120 +			np->grant_rx_ref[i], np->backend_id);
  33.121 +		np->rx->ring[requeue_idx].req.gref =
  33.122 +			np->grant_rx_ref[i];
  33.123 +		np->rx->ring[requeue_idx].req.id = i;
  33.124 +		requeue_idx++; 
  33.125  	}
  33.126 -
  33.127  	wmb();                
  33.128  	np->rx->req_prod = requeue_idx;
  33.129  
  33.130 -	/* Step 3: All public and private state should now be sane.  Get
  33.131 +	/*
  33.132 +	 * Step 3: All public and private state should now be sane.  Get
  33.133  	 * ready to start sending and receiving packets and give the driver
  33.134  	 * domain a kick because we've probably just requeued some
  33.135  	 * packets.
  33.136  	 */
  33.137  	np->backend_state = BEST_CONNECTED;
  33.138  	wmb();
  33.139 -	notify_via_evtchn(np->evtchn);  
  33.140 +	notify_remote_via_irq(np->irq);
  33.141  	network_tx_buf_gc(dev);
  33.142  
  33.143  	if (np->user_state == UST_OPEN)
  33.144 @@ -798,7 +803,8 @@ static void show_device(struct net_priva
  33.145  #endif
  33.146  }
  33.147  
  33.148 -/* Move the vif into connected state.
  33.149 +/*
  33.150 + * Move the vif into connected state.
  33.151   * Sets the mac and event channel from the message.
  33.152   * Binds the irq to the event channel.
  33.153   */
  33.154 @@ -809,7 +815,7 @@ connect_device(struct net_private *np, u
  33.155  	memcpy(dev->dev_addr, np->mac, ETH_ALEN);
  33.156  	np->evtchn = evtchn;
  33.157  	network_connect(dev);
  33.158 -	(void)bind_evtchn_to_irqhandler(
  33.159 +	np->irq = bind_evtchn_to_irqhandler(
  33.160  		np->evtchn, netif_int, SA_SAMPLE_RANDOM, dev->name, dev);
  33.161  	(void)send_fake_arp(dev);
  33.162  	show_device(np);
  33.163 @@ -1049,12 +1055,12 @@ static void netif_free(struct netfront_i
  33.164  		gnttab_end_foreign_access(info->rx_ring_ref, 0);
  33.165  	info->rx_ring_ref = GRANT_INVALID_REF;
  33.166  
  33.167 -	unbind_evtchn_from_irqhandler(info->evtchn, info->netdev);
  33.168 -	info->evtchn = 0;
  33.169 +	if (info->irq)
  33.170 +		unbind_evtchn_from_irqhandler(info->irq, info->netdev);
  33.171 +	info->evtchn = info->irq = 0;
  33.172  }
  33.173  
  33.174 -/* Stop network device and free tx/rx queues and irq.
  33.175 - */
  33.176 +/* Stop network device and free tx/rx queues and irq. */
  33.177  static void shutdown_device(struct net_private *np)
  33.178  {
  33.179  	/* Stop old i/f to prevent errors whilst we rebuild the state. */
  33.180 @@ -1148,17 +1154,6 @@ again:
  33.181  		goto abort_transaction;
  33.182  	}
  33.183  
  33.184 -	info->backend = backend;
  33.185 -	backend = NULL;
  33.186 -
  33.187 -	info->watch.node = info->backend;
  33.188 -	info->watch.callback = watch_for_status;
  33.189 -	err = register_xenbus_watch(&info->watch);
  33.190 -	if (err) {
  33.191 -		message = "registering watch on backend";
  33.192 -		goto abort_transaction;
  33.193 -	}
  33.194 -
  33.195  	err = xenbus_transaction_end(0);
  33.196  	if (err) {
  33.197  		if (err == -EAGAIN)
  33.198 @@ -1167,12 +1162,19 @@ again:
  33.199  		goto destroy_ring;
  33.200  	}
  33.201  
  33.202 +	info->watch.node = backend;
  33.203 +	info->watch.callback = watch_for_status;
  33.204 +	err = register_xenbus_watch(&info->watch);
  33.205 +	if (err) {
  33.206 +		message = "registering watch on backend";
  33.207 +		goto destroy_ring;
  33.208 +	}
  33.209 +
  33.210 +	info->backend = backend;
  33.211 +
  33.212  	netif_state = NETIF_STATE_CONNECTED;
  33.213  
  33.214 - out:
  33.215 -	if (backend)
  33.216 -		kfree(backend);
  33.217 -	return err;
  33.218 +	return 0;
  33.219  
  33.220   abort_transaction:
  33.221  	xenbus_transaction_end(1);
  33.222 @@ -1180,13 +1182,17 @@ again:
  33.223  	xenbus_dev_error(dev, err, "%s", message);
  33.224   destroy_ring:
  33.225  	shutdown_device(info);
  33.226 -	goto out;
  33.227 + out:
  33.228 +	if (backend)
  33.229 +		kfree(backend);
  33.230 +	return err;
  33.231  }
  33.232  
  33.233 -/* Setup supplies the backend dir, virtual device.
  33.234 -
  33.235 -   We place an event channel and shared frame entries.
  33.236 -   We watch backend to wait if it's ok. */
  33.237 +/*
  33.238 + * Setup supplies the backend dir, virtual device.
  33.239 + * We place an event channel and shared frame entries.
  33.240 + * We watch backend to wait if it's ok.
  33.241 + */
  33.242  static int netfront_probe(struct xenbus_device *dev,
  33.243  			  const struct xenbus_device_id *id)
  33.244  {
  33.245 @@ -1241,24 +1247,17 @@ static int netfront_remove(struct xenbus
  33.246  static int netfront_suspend(struct xenbus_device *dev)
  33.247  {
  33.248  	struct netfront_info *info = dev->data;
  33.249 -
  33.250  	unregister_xenbus_watch(&info->watch);
  33.251  	kfree(info->backend);
  33.252  	info->backend = NULL;
  33.253 -
  33.254 -	netif_free(info);
  33.255 -
  33.256  	return 0;
  33.257  }
  33.258  
  33.259  static int netfront_resume(struct xenbus_device *dev)
  33.260  {
  33.261 -	struct net_private *np = dev->data;
  33.262 -	int err;
  33.263 -
  33.264 -	err = talk_to_backend(dev, np);
  33.265 -
  33.266 -	return err;
  33.267 +	struct netfront_info *info = dev->data;
  33.268 +	netif_free(info);
  33.269 +	return talk_to_backend(dev, info);
  33.270  }
  33.271  
  33.272  static struct xenbus_driver netfront = {
    34.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h	Thu Sep 29 16:22:02 2005 -0600
    34.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h	Thu Sep 29 17:28:44 2005 -0600
    34.3 @@ -35,7 +35,7 @@ typedef struct tpmif_st {
    34.4  
    34.5  	/* Physical parameters of the comms window. */
    34.6  	unsigned int evtchn;
    34.7 -	unsigned int remote_evtchn;
    34.8 +	unsigned int irq;
    34.9  
   34.10  	/* The shared rings and indexes. */
   34.11  	tpmif_tx_interface_t *tx;
    35.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c	Thu Sep 29 16:22:02 2005 -0600
    35.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c	Thu Sep 29 17:28:44 2005 -0600
    35.3 @@ -120,8 +120,6 @@ tpmif_map(tpmif_t *tpmif, unsigned long 
    35.4  	evtchn_op_t op = {.cmd = EVTCHNOP_bind_interdomain };
    35.5  	int err;
    35.6  
    35.7 -	BUG_ON(tpmif->remote_evtchn);
    35.8 -
    35.9  	if ((tpmif->tx_area = alloc_vm_area(PAGE_SIZE)) == NULL)
   35.10  		return -ENOMEM;
   35.11  
   35.12 @@ -143,12 +141,11 @@ tpmif_map(tpmif_t *tpmif, unsigned long 
   35.13  	}
   35.14  
   35.15  	tpmif->evtchn = op.u.bind_interdomain.port1;
   35.16 -	tpmif->remote_evtchn = evtchn;
   35.17  
   35.18  	tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr;
   35.19  
   35.20 -	bind_evtchn_to_irqhandler(tpmif->evtchn,
   35.21 -				  tpmif_be_int, 0, "tpmif-backend", tpmif);
   35.22 +	tpmif->irq = bind_evtchn_to_irqhandler(
   35.23 +		tpmif->evtchn, tpmif_be_int, 0, "tpmif-backend", tpmif);
   35.24  	tpmif->status = CONNECTED;
   35.25  	tpmif->shmem_ref = shared_page;
   35.26  	tpmif->active = 1;
   35.27 @@ -159,18 +156,10 @@ tpmif_map(tpmif_t *tpmif, unsigned long 
   35.28  static void
   35.29  __tpmif_disconnect_complete(void *arg)
   35.30  {
   35.31 -	evtchn_op_t op = {.cmd = EVTCHNOP_close };
   35.32  	tpmif_t *tpmif = (tpmif_t *) arg;
   35.33  
   35.34 -	op.u.close.port = tpmif->evtchn;
   35.35 -	op.u.close.dom = DOMID_SELF;
   35.36 -	HYPERVISOR_event_channel_op(&op);
   35.37 -	op.u.close.port = tpmif->remote_evtchn;
   35.38 -	op.u.close.dom = tpmif->domid;
   35.39 -	HYPERVISOR_event_channel_op(&op);
   35.40 -
   35.41 -	if (tpmif->evtchn)
   35.42 -		unbind_evtchn_from_irqhandler(tpmif->evtchn, tpmif);
   35.43 +	if (tpmif->irq)
   35.44 +		unbind_evtchn_from_irqhandler(tpmif->irq, tpmif);
   35.45  
   35.46  	if (tpmif->tx) {
   35.47  		unmap_frontend_page(tpmif);
    36.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Thu Sep 29 16:22:02 2005 -0600
    36.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Thu Sep 29 17:28:44 2005 -0600
    36.3 @@ -308,7 +308,7 @@ static int
    36.4  	rc = offset;
    36.5  	DPRINTK("Notifying frontend via event channel %d\n",
    36.6  	        tpmif->evtchn);
    36.7 -	notify_via_evtchn(tpmif->evtchn);
    36.8 +	notify_remote_via_irq(tpmif->irq);
    36.9  
   36.10  	return rc;
   36.11  }
    37.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c	Thu Sep 29 16:22:02 2005 -0600
    37.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c	Thu Sep 29 17:28:44 2005 -0600
    37.3 @@ -88,6 +88,26 @@ static void frontend_changed(struct xenb
    37.4  		return;
    37.5  	}
    37.6  
    37.7 +	err = tpmif_map(be->tpmif, ringref, evtchn);
    37.8 +	if (err) {
    37.9 +		xenbus_dev_error(be->dev, err,
   37.10 +				 "mapping shared-frame %lu port %u",
   37.11 +				 ringref, evtchn);
   37.12 +		return;
   37.13 +	}
   37.14 +
   37.15 +	err = tpmif_vtpm_open(be->tpmif,
   37.16 +	                      be->frontend_id,
   37.17 +	                      be->instance);
   37.18 +	if (err) {
   37.19 +		xenbus_dev_error(be->dev, err,
   37.20 +		                 "queueing vtpm open packet");
   37.21 +		/*
   37.22 +		 * Should close down this device and notify FE
   37.23 +		 * about closure.
   37.24 +		 */
   37.25 +		return;
   37.26 +	}
   37.27  
   37.28  	/*
   37.29  	 * Tell the front-end that we are ready to go -
   37.30 @@ -107,27 +127,6 @@ again:
   37.31  		goto abort;
   37.32  	}
   37.33  
   37.34 -	err = tpmif_map(be->tpmif, ringref, evtchn);
   37.35 -	if (err) {
   37.36 -		xenbus_dev_error(be->dev, err,
   37.37 -				 "mapping shared-frame %lu port %u",
   37.38 -				 ringref, evtchn);
   37.39 -		goto abort;
   37.40 -	}
   37.41 -
   37.42 -	err = tpmif_vtpm_open(be->tpmif,
   37.43 -	                      be->frontend_id,
   37.44 -	                      be->instance);
   37.45 -	if (err) {
   37.46 -		xenbus_dev_error(be->dev, err,
   37.47 -		                 "queueing vtpm open packet");
   37.48 -		/*
   37.49 -		 * Should close down this device and notify FE
   37.50 -		 * about closure.
   37.51 -		 */
   37.52 -		goto abort;
   37.53 -	}
   37.54 -
   37.55  	err = xenbus_transaction_end(0);
   37.56  	if (err == -EAGAIN)
   37.57  		goto again;
    38.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Thu Sep 29 16:22:02 2005 -0600
    38.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Thu Sep 29 17:28:44 2005 -0600
    38.3 @@ -292,8 +292,10 @@ static void destroy_tpmring(struct tpmfr
    38.4  		free_page((unsigned long)tp->tx);
    38.5  		tp->tx = NULL;
    38.6  	}
    38.7 -	unbind_evtchn_from_irqhandler(tp->evtchn, NULL);
    38.8 -	tp->evtchn = 0;
    38.9 +
    38.10 +	if (tp->irq)
    38.11 +		unbind_evtchn_from_irqhandler(tp->irq, NULL);
    38.12 +	tp->evtchn = tp->irq = 0;
   38.13  }
   38.14  
   38.15  
   38.16 @@ -352,17 +354,6 @@ again:
   38.17  		goto abort_transaction;
   38.18  	}
   38.19  
   38.20 -	info->backend = backend;
   38.21 -	backend = NULL;
   38.22 -
   38.23 -	info->watch.node = info->backend;
   38.24 -	info->watch.callback = watch_for_status;
   38.25 -	err = register_xenbus_watch(&info->watch);
   38.26 -	if (err) {
   38.27 -		message = "registering watch on backend";
   38.28 -		goto abort_transaction;
   38.29 -	}
   38.30 -
   38.31  	err = xenbus_transaction_end(0);
   38.32  	if (err == -EAGAIN)
   38.33  		goto again;
   38.34 @@ -371,10 +362,17 @@ again:
   38.35  		goto destroy_tpmring;
   38.36  	}
   38.37  
   38.38 -out:
   38.39 -	if (backend)
   38.40 -		kfree(backend);
   38.41 -	return err;
   38.42 +	info->watch.node = backend;
   38.43 +	info->watch.callback = watch_for_status;
   38.44 +	err = register_xenbus_watch(&info->watch);
   38.45 +	if (err) {
   38.46 +		message = "registering watch on backend";
   38.47 +		goto destroy_tpmring;
   38.48 +	}
   38.49 +
   38.50 +	info->backend = backend;
   38.51 +
   38.52 +	return 0;
   38.53  
   38.54  abort_transaction:
   38.55  	xenbus_transaction_end(1);
   38.56 @@ -382,7 +380,10 @@ abort_transaction:
   38.57  	xenbus_dev_error(dev, err, "%s", message);
   38.58  destroy_tpmring:
   38.59  	destroy_tpmring(info, &my_private);
   38.60 -	goto out;
   38.61 +out:
   38.62 +	if (backend)
   38.63 +		kfree(backend);
   38.64 +	return err;
   38.65  }
   38.66  
   38.67  
   38.68 @@ -502,10 +503,12 @@ static void tpmif_connect(u16 evtchn, do
   38.69  	err = bind_evtchn_to_irqhandler(
   38.70  		tp->evtchn,
   38.71  		tpmif_int, SA_SAMPLE_RANDOM, "tpmif", tp);
   38.72 -	if ( err != 0 ) {
   38.73 +	if ( err <= 0 ) {
   38.74  		WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
   38.75  		return;
   38.76  	}
   38.77 +
   38.78 +	tp->irq = err;
   38.79  }
   38.80  
   38.81  static struct xenbus_device_id tpmfront_ids[] = {
   38.82 @@ -679,7 +682,7 @@ tpm_xmit(struct tpm_private *tp,
   38.83  	DPRINTK("Notifying backend via event channel %d\n",
   38.84  	        tp->evtchn);
   38.85  
   38.86 -	notify_via_evtchn(tp->evtchn);
   38.87 +	notify_remote_via_irq(tp->irq);
   38.88  
   38.89  	spin_unlock_irq(&tp->tx_lock);
   38.90  	return offset;
    39.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h	Thu Sep 29 16:22:02 2005 -0600
    39.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h	Thu Sep 29 17:28:44 2005 -0600
    39.3 @@ -5,7 +5,7 @@
    39.4  struct tpm_private
    39.5  {
    39.6  	tpmif_tx_interface_t *tx;
    39.7 -	unsigned int evtchn;
    39.8 +	unsigned int evtchn, irq;
    39.9  	int connected;
   39.10  
   39.11  	spinlock_t tx_lock;
    40.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c	Thu Sep 29 16:22:02 2005 -0600
    40.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c	Thu Sep 29 17:28:44 2005 -0600
    40.3 @@ -44,6 +44,8 @@ struct ringbuf_head
    40.4  	char buf[0];
    40.5  } __attribute__((packed));
    40.6  
    40.7 +static int xenbus_irq;
    40.8 +
    40.9  DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
   40.10  
   40.11  static inline struct ringbuf_head *outbuf(void)
   40.12 @@ -145,7 +147,7 @@ int xb_write(const void *data, unsigned 
   40.13  		data += avail;
   40.14  		len -= avail;
   40.15  		update_output_chunk(out, avail);
   40.16 -		notify_via_evtchn(xen_start_info->store_evtchn);
   40.17 +		notify_remote_via_irq(xenbus_irq);
   40.18  	} while (len != 0);
   40.19  
   40.20  	return 0;
   40.21 @@ -190,7 +192,7 @@ int xb_read(void *data, unsigned len)
   40.22  		pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
   40.23  		/* If it was full, tell them we've taken some. */
   40.24  		if (was_full)
   40.25 -			notify_via_evtchn(xen_start_info->store_evtchn);
   40.26 +			notify_remote_via_irq(xenbus_irq);
   40.27  	}
   40.28  
   40.29  	/* If we left something, wake watch thread to deal with it. */
   40.30 @@ -205,33 +207,29 @@ int xb_init_comms(void)
   40.31  {
   40.32  	int err;
   40.33  
   40.34 +	if (xenbus_irq)
   40.35 +		unbind_evtchn_from_irqhandler(xenbus_irq, &xb_waitq);
   40.36 +	xenbus_irq = 0;
   40.37 +
   40.38  	if (!xen_start_info->store_evtchn)
   40.39  		return 0;
   40.40  
   40.41  	err = bind_evtchn_to_irqhandler(
   40.42  		xen_start_info->store_evtchn, wake_waiting,
   40.43  		0, "xenbus", &xb_waitq);
   40.44 -	if (err) {
   40.45 +	if (err <= 0) {
   40.46  		printk(KERN_ERR "XENBUS request irq failed %i\n", err);
   40.47 -		unbind_evtchn_from_irq(xen_start_info->store_evtchn);
   40.48  		return err;
   40.49  	}
   40.50  
   40.51 +	xenbus_irq = err;
   40.52 +
   40.53  	/* FIXME zero out page -- domain builder should probably do this*/
   40.54  	memset(mfn_to_virt(xen_start_info->store_mfn), 0, PAGE_SIZE);
   40.55  
   40.56  	return 0;
   40.57  }
   40.58  
   40.59 -void xb_suspend_comms(void)
   40.60 -{
   40.61 -
   40.62 -	if (!xen_start_info->store_evtchn)
   40.63 -		return;
   40.64 -
   40.65 -	unbind_evtchn_from_irqhandler(xen_start_info->store_evtchn, &xb_waitq);
   40.66 -}
   40.67 -
   40.68  /*
   40.69   * Local variables:
   40.70   *  c-file-style: "linux"
    41.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h	Thu Sep 29 16:22:02 2005 -0600
    41.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h	Thu Sep 29 17:28:44 2005 -0600
    41.3 @@ -30,7 +30,6 @@
    41.4  
    41.5  int xs_init(void);
    41.6  int xb_init_comms(void);
    41.7 -void xb_suspend_comms(void);
    41.8  
    41.9  /* Low level routines. */
   41.10  int xb_write(const void *data, unsigned len);
    42.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c	Thu Sep 29 16:22:02 2005 -0600
    42.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c	Thu Sep 29 17:28:44 2005 -0600
    42.3 @@ -607,7 +607,6 @@ void xenbus_suspend(void)
    42.4  	down(&xenbus_lock);
    42.5  	bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
    42.6  	bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, suspend_dev);
    42.7 -	xb_suspend_comms();
    42.8  }
    42.9  
   42.10  void xenbus_resume(void)
   42.11 @@ -651,7 +650,6 @@ int do_xenbus_probe(void *unused)
   42.12  	int err = 0;
   42.13  
   42.14  	/* Initialize xenstore comms unless already done. */
   42.15 -	printk("store_evtchn = %i\n", xen_start_info->store_evtchn);
   42.16  	err = xs_init();
   42.17  	if (err) {
   42.18  		printk("XENBUS: Error initializing xenstore comms:"
    43.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h	Thu Sep 29 16:22:02 2005 -0600
    43.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h	Thu Sep 29 17:28:44 2005 -0600
    43.3 @@ -80,11 +80,9 @@
    43.4   * the usable vector space is 0x20-0xff (224 vectors)
    43.5   */
    43.6  
    43.7 -#define NR_IPIS 8
    43.8 -
    43.9 -#define RESCHEDULE_VECTOR	1
   43.10 -#define INVALIDATE_TLB_VECTOR	2
   43.11 -#define CALL_FUNCTION_VECTOR	3
   43.12 +#define RESCHEDULE_VECTOR	0
   43.13 +#define CALL_FUNCTION_VECTOR	1
   43.14 +#define NR_IPIS			2
   43.15  
   43.16  /*
   43.17   * The maximum number of vectors supported by i386 processors
    44.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h	Thu Sep 29 16:22:02 2005 -0600
    44.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h	Thu Sep 29 17:28:44 2005 -0600
    44.3 @@ -78,11 +78,9 @@
    44.4   * the usable vector space is 0x20-0xff (224 vectors)
    44.5   */
    44.6  
    44.7 -#define NR_IPIS 8
    44.8 -
    44.9 -#define RESCHEDULE_VECTOR	1
   44.10 -#define INVALIDATE_TLB_VECTOR	2
   44.11 -#define CALL_FUNCTION_VECTOR	3
   44.12 +#define RESCHEDULE_VECTOR	0
   44.13 +#define CALL_FUNCTION_VECTOR	1
   44.14 +#define NR_IPIS			2
   44.15  
   44.16  /*
   44.17   * The maximum number of vectors supported by i386 processors
    45.1 --- a/linux-2.6-xen-sparse/include/asm-xen/balloon.h	Thu Sep 29 16:22:02 2005 -0600
    45.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/balloon.h	Thu Sep 29 17:28:44 2005 -0600
    45.3 @@ -58,3 +58,13 @@ extern spinlock_t balloon_lock;
    45.4  #define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
    45.5  
    45.6  #endif /* __ASM_BALLOON_H__ */
    45.7 +
    45.8 +/*
    45.9 + * Local variables:
   45.10 + *  c-file-style: "linux"
   45.11 + *  indent-tabs-mode: t
   45.12 + *  c-indent-level: 8
   45.13 + *  c-basic-offset: 8
   45.14 + *  tab-width: 8
   45.15 + * End:
   45.16 + */
    46.1 --- a/linux-2.6-xen-sparse/include/asm-xen/driver_util.h	Thu Sep 29 16:22:02 2005 -0600
    46.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/driver_util.h	Thu Sep 29 17:28:44 2005 -0600
    46.3 @@ -14,3 +14,13 @@ extern void lock_vm_area(struct vm_struc
    46.4  extern void unlock_vm_area(struct vm_struct *area);
    46.5  
    46.6  #endif /* __ASM_XEN_DRIVER_UTIL_H__ */
    46.7 +
    46.8 +/*
    46.9 + * Local variables:
   46.10 + *  c-file-style: "linux"
   46.11 + *  indent-tabs-mode: t
   46.12 + *  c-indent-level: 8
   46.13 + *  c-basic-offset: 8
   46.14 + *  tab-width: 8
   46.15 + * End:
   46.16 + */
    47.1 --- a/linux-2.6-xen-sparse/include/asm-xen/evtchn.h	Thu Sep 29 16:22:02 2005 -0600
    47.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/evtchn.h	Thu Sep 29 17:28:44 2005 -0600
    47.3 @@ -4,7 +4,7 @@
    47.4   * Communication via Xen event channels.
    47.5   * Also definitions for the device that demuxes notifications to userspace.
    47.6   * 
    47.7 - * Copyright (c) 2004, K A Fraser
    47.8 + * Copyright (c) 2004-2005, K A Fraser
    47.9   * 
   47.10   * This file may be distributed separately from the Linux kernel, or
   47.11   * incorporated into other software packages, subject to the following license:
   47.12 @@ -51,24 +51,36 @@ extern void unbind_virq_from_irq(int vir
   47.13  extern int  bind_ipi_to_irq(int ipi);
   47.14  extern void unbind_ipi_from_irq(int ipi);
   47.15  
   47.16 -/* Dynamically bind an event-channel port to Linux IRQ space. */
   47.17 +/*
   47.18 + * Dynamically bind an event-channel port to Linux IRQ space.
   47.19 + * BIND:   Returns IRQ or error.
   47.20 + * UNBIND: Takes IRQ to unbind from; automatically closes the event channel.
   47.21 + */
   47.22  extern int  bind_evtchn_to_irq(unsigned int evtchn);
   47.23 -extern void unbind_evtchn_from_irq(unsigned int evtchn);
   47.24 +extern void unbind_evtchn_from_irq(unsigned int irq);
   47.25  
   47.26  /*
   47.27   * Dynamically bind an event-channel port to an IRQ-like callback handler.
   47.28   * On some platforms this may not be implemented via the Linux IRQ subsystem.
   47.29 - * You *cannot* trust the irq argument passed to the callback handler.
   47.30 + * The IRQ argument passed to the callback handler is the same as returned
   47.31 + * from the bind call. It may not correspond to a Linux IRQ number.
   47.32 + * BIND:   Returns IRQ or error.
   47.33 + * UNBIND: Takes IRQ to unbind from; automatically closes the event channel.
   47.34   */
   47.35  extern int  bind_evtchn_to_irqhandler(
   47.36 -    unsigned int evtchn,
   47.37 -    irqreturn_t (*handler)(int, void *, struct pt_regs *),
   47.38 -    unsigned long irqflags,
   47.39 -    const char *devname,
   47.40 -    void *dev_id);
   47.41 -extern void unbind_evtchn_from_irqhandler(unsigned int evtchn, void *dev_id);
   47.42 +	unsigned int evtchn,
   47.43 +	irqreturn_t (*handler)(int, void *, struct pt_regs *),
   47.44 +	unsigned long irqflags,
   47.45 +	const char *devname,
   47.46 +	void *dev_id);
   47.47 +extern void unbind_evtchn_from_irqhandler(unsigned int irq, void *dev_id);
   47.48  
   47.49 -extern void irq_suspend(void);
   47.50 +/*
   47.51 + * Unlike notify_remote_via_evtchn(), this is safe to use across
   47.52 + * save/restore. Notifications on a broken connection are silently dropped.
   47.53 + */
   47.54 +void notify_remote_via_irq(int irq);
   47.55 +
   47.56  extern void irq_resume(void);
   47.57  
   47.58  /* Entry point for notifications into Linux subsystems. */
   47.59 @@ -79,42 +91,42 @@ void evtchn_device_upcall(int port);
   47.60  
   47.61  static inline void mask_evtchn(int port)
   47.62  {
   47.63 -    shared_info_t *s = HYPERVISOR_shared_info;
   47.64 -    synch_set_bit(port, &s->evtchn_mask[0]);
   47.65 +	shared_info_t *s = HYPERVISOR_shared_info;
   47.66 +	synch_set_bit(port, &s->evtchn_mask[0]);
   47.67  }
   47.68  
   47.69  static inline void unmask_evtchn(int port)
   47.70  {
   47.71 -    shared_info_t *s = HYPERVISOR_shared_info;
   47.72 -    vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
   47.73 -
   47.74 -    synch_clear_bit(port, &s->evtchn_mask[0]);
   47.75 +	shared_info_t *s = HYPERVISOR_shared_info;
   47.76 +	vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
   47.77  
   47.78 -    /*
   47.79 -     * The following is basically the equivalent of 'hw_resend_irq'. Just like
   47.80 -     * a real IO-APIC we 'lose the interrupt edge' if the channel is masked.
   47.81 -     */
   47.82 -    if (  synch_test_bit        (port,    &s->evtchn_pending[0]) && 
   47.83 -         !synch_test_and_set_bit(port>>5, &vcpu_info->evtchn_pending_sel) )
   47.84 -    {
   47.85 -        vcpu_info->evtchn_upcall_pending = 1;
   47.86 -        if ( !vcpu_info->evtchn_upcall_mask )
   47.87 -            force_evtchn_callback();
   47.88 -    }
   47.89 +	synch_clear_bit(port, &s->evtchn_mask[0]);
   47.90 +
   47.91 +	/*
   47.92 +	 * The following is basically the equivalent of 'hw_resend_irq'. Just
   47.93 +	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
   47.94 +	 * masked.
   47.95 +	 */
   47.96 +	if (synch_test_bit         (port,    &s->evtchn_pending[0]) && 
   47.97 +	    !synch_test_and_set_bit(port>>5, &vcpu_info->evtchn_pending_sel)) {
   47.98 +		vcpu_info->evtchn_upcall_pending = 1;
   47.99 +		if (!vcpu_info->evtchn_upcall_mask)
  47.100 +			force_evtchn_callback();
  47.101 +	}
  47.102  }
  47.103  
  47.104  static inline void clear_evtchn(int port)
  47.105  {
  47.106 -    shared_info_t *s = HYPERVISOR_shared_info;
  47.107 -    synch_clear_bit(port, &s->evtchn_pending[0]);
  47.108 +	shared_info_t *s = HYPERVISOR_shared_info;
  47.109 +	synch_clear_bit(port, &s->evtchn_pending[0]);
  47.110  }
  47.111  
  47.112 -static inline int notify_via_evtchn(int port)
  47.113 +static inline void notify_remote_via_evtchn(int port)
  47.114  {
  47.115 -    evtchn_op_t op;
  47.116 -    op.cmd = EVTCHNOP_send;
  47.117 -    op.u.send.local_port = port;
  47.118 -    return HYPERVISOR_event_channel_op(&op);
  47.119 +	evtchn_op_t op;
  47.120 +	op.cmd = EVTCHNOP_send;
  47.121 +	op.u.send.local_port = port;
  47.122 +	(void)HYPERVISOR_event_channel_op(&op);
  47.123  }
  47.124  
  47.125  /*
  47.126 @@ -133,3 +145,13 @@ static inline int notify_via_evtchn(int 
  47.127  #define EVTCHN_UNBIND _IO('E', 3)
  47.128  
  47.129  #endif /* __ASM_EVTCHN_H__ */
  47.130 +
  47.131 +/*
  47.132 + * Local variables:
  47.133 + *  c-file-style: "linux"
  47.134 + *  indent-tabs-mode: t
  47.135 + *  c-indent-level: 8
  47.136 + *  c-basic-offset: 8
  47.137 + *  tab-width: 8
  47.138 + * End:
  47.139 + */
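A minimal usage sketch (not from this changeset) of the interface above, for a
hypothetical frontend: the event-channel number, device name and dev_id are
placeholders, and the header paths are assumed to match this tree.  Binding
returns a Linux IRQ, notifications go via notify_remote_via_irq() so they are
safe across save/restore, and unbinding takes the IRQ and closes the channel.

	#include <linux/interrupt.h>
	#include <asm-xen/evtchn.h>

	static int my_irq = -1;

	static irqreturn_t my_interrupt(int irq, void *dev_id,
	                                struct pt_regs *regs)
	{
		/* Process work for this virtual device. */
		return IRQ_HANDLED;
	}

	static int my_connect(unsigned int my_evtchn, void *my_dev)
	{
		/* BIND: returns the IRQ now associated with the channel. */
		my_irq = bind_evtchn_to_irqhandler(my_evtchn, my_interrupt,
						   0, "my-frontend", my_dev);
		if (my_irq < 0)
			return my_irq;

		/* Kick the remote end; dropped silently if disconnected. */
		notify_remote_via_irq(my_irq);
		return 0;
	}

	static void my_disconnect(void *my_dev)
	{
		/* UNBIND: takes the IRQ; the event channel is closed too. */
		unbind_evtchn_from_irqhandler(my_irq, my_dev);
	}
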
    48.1 --- a/linux-2.6-xen-sparse/include/asm-xen/foreign_page.h	Thu Sep 29 16:22:02 2005 -0600
    48.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/foreign_page.h	Thu Sep 29 17:28:44 2005 -0600
    48.3 @@ -28,3 +28,13 @@
    48.4  	( (void (*) (struct page *)) (page)->mapping )
    48.5  
    48.6  #endif /* __ASM_XEN_FOREIGN_PAGE_H__ */
    48.7 +
    48.8 +/*
    48.9 + * Local variables:
   48.10 + *  c-file-style: "linux"
   48.11 + *  indent-tabs-mode: t
   48.12 + *  c-indent-level: 8
   48.13 + *  c-basic-offset: 8
   48.14 + *  tab-width: 8
   48.15 + * End:
   48.16 + */
    49.1 --- a/linux-2.6-xen-sparse/include/asm-xen/gnttab.h	Thu Sep 29 16:22:02 2005 -0600
    49.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/gnttab.h	Thu Sep 29 17:28:44 2005 -0600
    49.3 @@ -6,7 +6,7 @@
    49.4   * 2. Accessing others' memory reservations via grant references.
    49.5   * (i.e., mechanisms for both sender and recipient of grant references)
    49.6   * 
    49.7 - * Copyright (c) 2004, K A Fraser
    49.8 + * Copyright (c) 2004-2005, K A Fraser
    49.9   * Copyright (c) 2005, Christopher Clark
   49.10   */
   49.11  
   49.12 @@ -25,10 +25,10 @@
   49.13  #endif
   49.14  
   49.15  struct gnttab_free_callback {
   49.16 -    struct gnttab_free_callback *next;
   49.17 -    void (*fn)(void *);
   49.18 -    void *arg;
   49.19 -    u16 count;
   49.20 +	struct gnttab_free_callback *next;
   49.21 +	void (*fn)(void *);
   49.22 +	void *arg;
   49.23 +	u16 count;
   49.24  };
   49.25  
   49.26  int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
   49.27 @@ -73,3 +73,13 @@ void gnttab_grant_foreign_transfer_ref(g
   49.28  #endif
   49.29  
   49.30  #endif /* __ASM_GNTTAB_H__ */
   49.31 +
   49.32 +/*
   49.33 + * Local variables:
   49.34 + *  c-file-style: "linux"
   49.35 + *  indent-tabs-mode: t
   49.36 + *  c-indent-level: 8
   49.37 + *  c-basic-offset: 8
   49.38 + *  tab-width: 8
   49.39 + * End:
   49.40 + */
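A short sketch (not from this changeset) of granting a backend access to one
frame, using the gnttab_grant_foreign_access() declaration above; the third
argument is assumed to be a read-only flag, and backend_id/ring_mfn are
placeholders.  A driver that can exhaust grant references would also queue a
struct gnttab_free_callback so it is woken once enough references are free.

	#include <asm-xen/gnttab.h>

	static int share_ring_page(domid_t backend_id, unsigned long ring_mfn)
	{
		/* 0 => grant read-write access (assumed readonly flag). */
		int ref = gnttab_grant_foreign_access(backend_id, ring_mfn, 0);

		if (ref < 0)
			return ref;	/* out of grant entries */
		return ref;	/* pass this grant ref to the backend */
	}
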
    50.1 --- a/linux-2.6-xen-sparse/include/asm-xen/queues.h	Thu Sep 29 16:22:02 2005 -0600
    50.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    50.3 @@ -1,81 +0,0 @@
    50.4 -
    50.5 -/*
    50.6 - * Oh dear. Task queues were removed from Linux 2.6 and replaced by work 
    50.7 - * queues. Unfortunately the semantics is not the same. With task queues we 
    50.8 - * can defer work until a particular event occurs -- this is not
    50.9 - * straightforwardly done with work queues (queued work is performed asap, or
   50.10 - * after some fixed timeout). Conversely, work queues are a (slightly) neater
   50.11 - * way of deferring work to a process context than using task queues in 2.4.
   50.12 - * 
   50.13 - * This is a bit of a needless reimplementation -- should have just pulled
   50.14 - * the code from 2.4, but I tried leveraging work queues to simplify things.
   50.15 - * They didn't help. :-(
   50.16 - */
   50.17 -
   50.18 -#ifndef __QUEUES_H__
   50.19 -#define __QUEUES_H__
   50.20 -
   50.21 -#include <linux/version.h>
   50.22 -#include <linux/list.h>
   50.23 -#include <linux/workqueue.h>
   50.24 -
   50.25 -struct tq_struct { 
   50.26 -    void (*fn)(void *);
   50.27 -    void *arg;
   50.28 -    struct list_head list;
   50.29 -    unsigned long pending;
   50.30 -};
   50.31 -#define INIT_TQUEUE(_name, _fn, _arg)               \
   50.32 -    do {                                            \
   50.33 -        INIT_LIST_HEAD(&(_name)->list);             \
   50.34 -        (_name)->pending = 0;                       \
   50.35 -        (_name)->fn = (_fn); (_name)->arg = (_arg); \
   50.36 -    } while ( 0 )
   50.37 -#define DECLARE_TQUEUE(_name, _fn, _arg)            \
   50.38 -    struct tq_struct _name = { (_fn), (_arg), LIST_HEAD_INIT((_name).list), 0 }
   50.39 -
   50.40 -typedef struct {
   50.41 -    struct list_head list;
   50.42 -    spinlock_t       lock;
   50.43 -} task_queue;
   50.44 -#define DECLARE_TASK_QUEUE(_name) \
   50.45 -    task_queue _name = { LIST_HEAD_INIT((_name).list), SPIN_LOCK_UNLOCKED }
   50.46 -
   50.47 -static inline int queue_task(struct tq_struct *tqe, task_queue *tql)
   50.48 -{
   50.49 -    unsigned long flags;
   50.50 -    if ( test_and_set_bit(0, &tqe->pending) )
   50.51 -        return 0;
   50.52 -    spin_lock_irqsave(&tql->lock, flags);
   50.53 -    list_add_tail(&tqe->list, &tql->list);
   50.54 -    spin_unlock_irqrestore(&tql->lock, flags);
   50.55 -    return 1;
   50.56 -}
   50.57 -
   50.58 -static inline void run_task_queue(task_queue *tql)
   50.59 -{
   50.60 -    struct list_head head, *ent;
   50.61 -    struct tq_struct *tqe;
   50.62 -    unsigned long flags;
   50.63 -    void (*fn)(void *);
   50.64 -    void *arg;
   50.65 -
   50.66 -    spin_lock_irqsave(&tql->lock, flags);
   50.67 -    list_add(&head, &tql->list);
   50.68 -    list_del_init(&tql->list);
   50.69 -    spin_unlock_irqrestore(&tql->lock, flags);
   50.70 -
   50.71 -    while ( !list_empty(&head) )
   50.72 -    {
   50.73 -        ent = head.next;
   50.74 -        list_del_init(ent);
   50.75 -        tqe = list_entry(ent, struct tq_struct, list);
   50.76 -        fn  = tqe->fn;
   50.77 -        arg = tqe->arg;
   50.78 -        wmb();
   50.79 -        tqe->pending = 0;
   50.80 -        fn(arg);
   50.81 -    }
   50.82 -}
   50.83 -
   50.84 -#endif /* __QUEUES_H__ */
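With asm-xen/queues.h gone, deferred work falls back on the stock 2.6 work
queue API.  A rough sketch (not from this changeset, names are placeholders)
of the replacement pattern, using the three-argument INIT_WORK of this kernel
generation: the work item is queued from the event itself, and re-queueing
while already pending is a no-op, much like the old 'pending' bit.

	#include <linux/workqueue.h>

	static void my_deferred_fn(void *arg)
	{
		/* Runs later in process context (keventd). */
	}

	static struct work_struct my_work;

	static void my_setup(void *arg)
	{
		INIT_WORK(&my_work, my_deferred_fn, arg);
	}

	static void my_event_handler(void)
	{
		/* Replaces queue_task() + run_task_queue() at the event. */
		schedule_work(&my_work);
	}
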
    51.1 --- a/linux-2.6-xen-sparse/include/asm-xen/xen_proc.h	Thu Sep 29 16:22:02 2005 -0600
    51.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/xen_proc.h	Thu Sep 29 17:28:44 2005 -0600
    51.3 @@ -6,8 +6,18 @@
    51.4  #include <linux/proc_fs.h>
    51.5  
    51.6  extern struct proc_dir_entry *create_xen_proc_entry(
    51.7 -    const char *name, mode_t mode);
    51.8 +	const char *name, mode_t mode);
    51.9  extern void remove_xen_proc_entry(
   51.10 -    const char *name);
   51.11 +	const char *name);
   51.12  
   51.13  #endif /* __ASM_XEN_PROC_H__ */
   51.14 +
   51.15 +/*
   51.16 + * Local variables:
   51.17 + *  c-file-style: "linux"
   51.18 + *  indent-tabs-mode: t
   51.19 + *  c-indent-level: 8
   51.20 + *  c-basic-offset: 8
   51.21 + *  tab-width: 8
   51.22 + * End:
   51.23 + */
    52.1 --- a/linux-2.6-xen-sparse/include/asm-xen/xenbus.h	Thu Sep 29 16:22:02 2005 -0600
    52.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/xenbus.h	Thu Sep 29 17:28:44 2005 -0600
    52.3 @@ -139,3 +139,13 @@ void xenbus_resume(void);
    52.4  #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
    52.5  
    52.6  #endif /* _ASM_XEN_XENBUS_H */
    52.7 +
    52.8 +/*
    52.9 + * Local variables:
   52.10 + *  c-file-style: "linux"
   52.11 + *  indent-tabs-mode: t
   52.12 + *  c-indent-level: 8
   52.13 + *  c-basic-offset: 8
   52.14 + *  tab-width: 8
   52.15 + * End:
   52.16 + */
    53.1 --- a/tools/console/daemon/io.c	Thu Sep 29 16:22:02 2005 -0600
    53.2 +++ b/tools/console/daemon/io.c	Thu Sep 29 17:28:44 2005 -0600
    53.3 @@ -399,7 +399,7 @@ void enum_domains(void)
    53.4  
    53.5  	while (xc_domain_getinfo(xc, domid, 1, &dominfo) == 1) {
    53.6  		dom = lookup_domain(dominfo.domid);
    53.7 -		if (dominfo.dying || dominfo.crashed || dominfo.shutdown) {
    53.8 +		if (dominfo.dying) {
    53.9  			if (dom)
   53.10  				shutdown_domain(dom);
   53.11  		} else {
    54.1 --- a/tools/debugger/gdb/README	Thu Sep 29 16:22:02 2005 -0600
    54.2 +++ b/tools/debugger/gdb/README	Thu Sep 29 17:28:44 2005 -0600
    54.3 @@ -20,10 +20,18 @@ To build a debuggable guest kernel image
    54.4  
    54.5  To debug a running guest:
    54.6   1. Use 'xm list' to discover its domain id ($domid). 
    54.7 - 2. Run 'gdbserver-xen 127.0.0.1:9999 --attach $domid'
    54.8 - 3. Run 'gdb /path/to/vmlinux-syms-2.6.xx-xenU'
    54.9 + 2. Run 'gdbserver-xen 127.0.0.1:9999 --attach $domid'.
   54.10 + 3. Run 'gdb /path/to/vmlinux-syms-2.6.xx-xenU'.
   54.11   4. From within the gdb client session:
   54.12      # directory /path/to/linux-2.6.xx-xenU [*]
   54.13      # target remote 127.0.0.1:9999
   54.14      # bt
   54.15      # disass
   54.16 +
   54.17 +To debug a crashed guest:
   54.18 + 1. Add '(enable-dump yes)' to /etc/xen/xend-config.sxp before
   54.19 +    starting xend.
   54.20 + 2. When the domain crashes, a core file is written to
   54.21 +    '/var/xen/dump/<domain-name>.<domain-id>.core'.
   54.22 + 3. Run 'gdbserver-xen 127.0.0.1:9999 --file <core-file>'.
   54.23 + 4. Connect to the server as for a running guest.
    55.1 --- a/tools/firmware/vmxassist/gen.c	Thu Sep 29 16:22:02 2005 -0600
    55.2 +++ b/tools/firmware/vmxassist/gen.c	Thu Sep 29 17:28:44 2005 -0600
    55.3 @@ -23,7 +23,7 @@
    55.4  #include <vm86.h>
    55.5  
    55.6  int
    55.7 -main()
    55.8 +main(void)
    55.9  {
   55.10  	printf("/* MACHINE GENERATED; DO NOT EDIT */\n");
   55.11  	printf("#define VMX_ASSIST_CTX_GS_SEL	0x%x\n",
    56.1 --- a/tools/firmware/vmxassist/head.S	Thu Sep 29 16:22:02 2005 -0600
    56.2 +++ b/tools/firmware/vmxassist/head.S	Thu Sep 29 17:28:44 2005 -0600
    56.3 @@ -110,6 +110,10 @@ 1:
    56.4  _start:
    56.5  	cli
    56.6  
    56.7 +	/* save register parameters to C land */
    56.8 +	movl	%edx, booting_cpu
    56.9 +	movl	%ebx, booting_vector
   56.10 +
   56.11  	/* clear bss */
   56.12  	cld
   56.13  	xorb	%al, %al
   56.14 @@ -129,7 +133,6 @@ 1:
   56.15  	call    main
   56.16  	jmp	halt
   56.17  
   56.18 -
   56.19  /*
   56.20   * Something bad happened, print invoking %eip and loop forever
   56.21   */
    57.1 --- a/tools/firmware/vmxassist/setup.c	Thu Sep 29 16:22:02 2005 -0600
    57.2 +++ b/tools/firmware/vmxassist/setup.c	Thu Sep 29 17:28:44 2005 -0600
    57.3 @@ -29,6 +29,9 @@
    57.4  
    57.5  #define	min(a, b)	((a) > (b) ? (b) : (a))
    57.6  
    57.7 +/* Which CPU are we booting, and what is the initial CS segment? */
    57.8 +int booting_cpu, booting_vector;
    57.9 +
   57.10  unsigned long long gdt[] __attribute__ ((aligned(32))) = {
   57.11  	0x0000000000000000ULL,		/* 0x00: reserved */
   57.12  	0x0000890000000000ULL,		/* 0x08: 32-bit TSS */
   57.13 @@ -201,12 +204,17 @@ enter_real_mode(struct regs *regs)
   57.14  		initialize_real_mode = 0;
   57.15  		regs->eflags |= EFLAGS_VM | 0x02;
   57.16  		regs->ves = regs->vds = regs->vfs = regs->vgs = 0xF000;
   57.17 -		regs->cs = 0xF000; /* ROM BIOS POST entry point */
   57.18 +		if (booting_cpu == 0) {
   57.19 +			regs->cs = 0xF000; /* ROM BIOS POST entry point */
   57.20  #ifdef TEST
   57.21 -		regs->eip = 0xFFE0;
   57.22 +			regs->eip = 0xFFE0;
   57.23  #else
   57.24 -		regs->eip = 0xFFF0;
   57.25 +			regs->eip = 0xFFF0;
   57.26  #endif
   57.27 +		} else {
   57.28 +			regs->cs = booting_vector << 8; /* AP entry point */
   57.29 +			regs->eip = 0;
   57.30 +		}
   57.31  		regs->uesp = 0;
   57.32  		regs->uss = 0;
   57.33  		printf("Starting emulated 16-bit real-mode: ip=%04x:%04x\n",
   57.34 @@ -215,8 +223,8 @@ enter_real_mode(struct regs *regs)
   57.35  		mode = VM86_REAL; /* becomes previous mode */
   57.36  		set_mode(regs, VM86_REAL);
   57.37  
   57.38 -                /* this should get us into 16-bit mode */
   57.39 -                return;
   57.40 +		/* this should get us into 16-bit mode */
   57.41 +		return;
   57.42  	} else {
   57.43  		/* go from protected to real mode */
   57.44  		regs->eflags |= EFLAGS_VM;
   57.45 @@ -334,7 +342,12 @@ start_bios(void)
   57.46  {
   57.47  	unsigned long cr0;
   57.48  
   57.49 -	printf("Start BIOS ...\n");
   57.50 +	if (booting_cpu == 0)
   57.51 +		printf("Start BIOS ...\n");
   57.52 +	else
   57.53 +		printf("Start AP %d from %08x ...\n",
   57.54 +		       booting_cpu, booting_vector << 12);
   57.55 +
   57.56  	initialize_real_mode = 1;
   57.57  	cr0 = get_cr0();
   57.58  #ifndef TEST
   57.59 @@ -345,20 +358,28 @@ start_bios(void)
   57.60  }
   57.61  
   57.62  int
   57.63 -main()
   57.64 +main(void)
   57.65  {
   57.66 -	banner();
   57.67 +	if (booting_cpu == 0)
   57.68 +		banner();
   57.69 +
   57.70  #ifdef TEST
   57.71  	setup_paging();
   57.72  #endif
   57.73 +
   57.74  	setup_gdt();
   57.75  	setup_idt();
   57.76 +
   57.77  #ifndef	TEST
   57.78 -	set_cr4(get_cr4() | CR4_VME); 
   57.79 +	set_cr4(get_cr4() | CR4_VME);
   57.80  #endif
   57.81 +
   57.82  	setup_ctx();
   57.83 -	setup_pic();
   57.84 +
   57.85 +	if (booting_cpu == 0)
   57.86 +		setup_pic();
   57.87 +
   57.88  	start_bios();
   57.89 +
   57.90  	return 0;
   57.91  }
   57.92 -
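For reference, the AP start address printed by start_bios() follows from
standard real-mode address arithmetic (illustrative helper only, not part of
the patch): enter_real_mode() sets CS = booting_vector << 8 and IP = 0, and a
real-mode physical address is (CS << 4) + IP, i.e. booting_vector << 12.

	static unsigned int ap_entry_paddr(unsigned int booting_vector)
	{
		unsigned int cs = booting_vector << 8;	/* regs->cs */
		unsigned int ip = 0;			/* regs->eip */
		return (cs << 4) + ip;	/* == booting_vector << 12 */
	}
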
    58.1 --- a/tools/firmware/vmxassist/vm86.c	Thu Sep 29 16:22:02 2005 -0600
    58.2 +++ b/tools/firmware/vmxassist/vm86.c	Thu Sep 29 17:28:44 2005 -0600
    58.3 @@ -470,10 +470,21 @@ load_seg(unsigned long sel, u32 *base, u
    58.4  	unsigned long long entry;
    58.5  
    58.6  	/* protected mode: use seg as index into gdt */
    58.7 -	if (sel == 0 || sel > oldctx.gdtr_limit)
    58.8 +	if (sel > oldctx.gdtr_limit)
    58.9  		return 0;
   58.10  
    58.11 +	if (sel == 0) {
    58.12 +		arbytes->fields.null_bit = 1;
    58.13 +		return 1;
    58.14 +	}
   58.15 +
   58.16  	entry =  ((unsigned long long *) oldctx.gdtr_base)[sel >> 3];
   58.17 +
    58.18 +	/* Check the P bit first. */
    58.19 +	if (!((entry >> (15+32)) & 0x1) && sel != 0) {
    58.20 +		return 0;
    58.21 +	}
   58.22 +
   58.23  	*base =  (((entry >> (56-24)) & 0xFF000000) |
   58.24  		  ((entry >> (32-16)) & 0x00FF0000) |
   58.25  		  ((entry >> (   16)) & 0x0000FFFF));
   58.26 @@ -519,22 +530,42 @@ protected_mode(struct regs *regs)
   58.27  	if (load_seg(regs->ves, &oldctx.es_base,
   58.28  				&oldctx.es_limit, &oldctx.es_arbytes))
   58.29  		oldctx.es_sel = regs->ves;
    58.30 +	else {
    58.31 +		load_seg(0, &oldctx.es_base, &oldctx.es_limit, &oldctx.es_arbytes);
    58.32 +		oldctx.es_sel = 0;
    58.33 +	}
   58.34  
   58.35  	if (load_seg(regs->uss, &oldctx.ss_base,
   58.36  				&oldctx.ss_limit, &oldctx.ss_arbytes))
   58.37  		oldctx.ss_sel = regs->uss;
    58.38 +	else {
    58.39 +		load_seg(0, &oldctx.ss_base, &oldctx.ss_limit, &oldctx.ss_arbytes);
    58.40 +		oldctx.ss_sel = 0;
    58.41 +	}
   58.42  
   58.43  	if (load_seg(regs->vds, &oldctx.ds_base,
   58.44  				&oldctx.ds_limit, &oldctx.ds_arbytes))
   58.45  		oldctx.ds_sel = regs->vds;
    58.46 +	else {
    58.47 +		load_seg(0, &oldctx.ds_base, &oldctx.ds_limit, &oldctx.ds_arbytes);
    58.48 +		oldctx.ds_sel = 0;
    58.49 +	}
   58.50  
   58.51  	if (load_seg(regs->vfs, &oldctx.fs_base,
   58.52  				&oldctx.fs_limit, &oldctx.fs_arbytes))
   58.53  		oldctx.fs_sel = regs->vfs;
    58.54 +	else {
    58.55 +		load_seg(0, &oldctx.fs_base, &oldctx.fs_limit, &oldctx.fs_arbytes);
    58.56 +		oldctx.fs_sel = 0;
    58.57 +	}
   58.58  
   58.59  	if (load_seg(regs->vgs, &oldctx.gs_base,
   58.60  				&oldctx.gs_limit, &oldctx.gs_arbytes))
   58.61  		oldctx.gs_sel = regs->vgs;
    58.62 +	else {
    58.63 +		load_seg(0, &oldctx.gs_base, &oldctx.gs_limit, &oldctx.gs_arbytes);
    58.64 +		oldctx.gs_sel = 0;
    58.65 +	}
   58.66  
   58.67  	/* initialize jump environment to warp back to protected mode */
   58.68  	regs->cs = CODE_SELECTOR;
   58.69 @@ -752,6 +783,9 @@ opcode(struct regs *regs)
   58.70  					goto invalid;
   58.71  				}
   58.72  				break;
   58.73 +			case 0x09: /* wbinvd */
   58.74 +				asm volatile ( "wbinvd" );
   58.75 +				return OPC_EMULATED;
   58.76  			case 0x20: /* mov Rd, Cd (1h) */
   58.77  			case 0x22:
   58.78  				if (!movcr(regs, prefix, opc))
    59.1 --- a/tools/firmware/vmxassist/vmxloader.c	Thu Sep 29 16:22:02 2005 -0600
    59.2 +++ b/tools/firmware/vmxassist/vmxloader.c	Thu Sep 29 17:28:44 2005 -0600
    59.3 @@ -132,11 +132,12 @@ main(void)
    59.4  		 memcpy((void *)ACPI_PHYSICAL_ADDRESS, acpi, sizeof(acpi));
    59.5  	}
    59.6  #endif
    59.7 -			
    59.8 +
    59.9  	puts("Loading VMXAssist ...\n");
   59.10  	memcpy((void *)TEXTADDR, vmxassist, sizeof(vmxassist));
   59.11 +
   59.12  	puts("Go ...\n");
   59.13 -	((void (*)())TEXTADDR)();
   59.14 +	asm volatile ( "jmp *%%eax" : : "a" (TEXTADDR), "d" (0) );
   59.15 +
   59.16  	return 0;
   59.17  }
   59.18 -
    60.1 --- a/tools/ioemu/vl.c	Thu Sep 29 16:22:02 2005 -0600
    60.2 +++ b/tools/ioemu/vl.c	Thu Sep 29 17:28:44 2005 -0600
    60.3 @@ -2385,7 +2385,8 @@ int
    60.4  setup_mapping(int xc_handle, u32 dom, unsigned long toptab, unsigned long  *mem_page_array, unsigned long *page_table_array, unsigned long v_start, unsigned long v_end)
    60.5  {
    60.6      l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
    60.7 -    l2_pgentry_t *vl2tab[4], *vl2e=NULL, *vl2_table = NULL;
    60.8 +    l2_pgentry_t *vl2tab[4] = {NULL, NULL, NULL, NULL};
    60.9 +    l2_pgentry_t *vl2e=NULL, *vl2_table = NULL;
   60.10      unsigned long l1tab;
   60.11      unsigned long ppt_alloc = 0;
   60.12      unsigned long count;
    61.1 --- a/tools/python/xen/web/SrvBase.py	Thu Sep 29 16:22:02 2005 -0600
    61.2 +++ b/tools/python/xen/web/SrvBase.py	Thu Sep 29 17:28:44 2005 -0600
    61.3 @@ -81,7 +81,14 @@ class SrvBase(resource.Resource):
    61.4              req.write("Operation not implemented: " + op)
    61.5              return ''
    61.6          else:
    61.7 -            return op_method(op, req)
    61.8 +            try:
    61.9 +                return op_method(op, req)
   61.10 +            except Exception, exn:
   61.11 +                log.exception("Request %s failed.", op)
   61.12 +                if req.useSxp():
   61.13 +                    return ['xend.err', "Exception: " + str(exn)]
   61.14 +                else:
   61.15 +                    return "<p>%s</p>" % str(exn)
   61.16  
   61.17      def print_path(self, req):
   61.18          """Print the path with hyperlinks.
    62.1 --- a/tools/python/xen/web/http.py	Thu Sep 29 16:22:02 2005 -0600
    62.2 +++ b/tools/python/xen/web/http.py	Thu Sep 29 17:28:44 2005 -0600
    62.3 @@ -22,6 +22,7 @@
    62.4  from  mimetools import Message
    62.5  from cStringIO import StringIO
    62.6  import math
    62.7 +import socket
    62.8  import time
    62.9  import cgi
   62.10  
    63.1 --- a/tools/python/xen/xend/PrettyPrint.py	Thu Sep 29 16:22:02 2005 -0600
    63.2 +++ b/tools/python/xen/xend/PrettyPrint.py	Thu Sep 29 17:28:44 2005 -0600
    63.3 @@ -39,9 +39,9 @@ class PrettyItem:
    63.4          print '***PrettyItem>output>', self
    63.5          pass
    63.6  
    63.7 -    def prettyprint(self, _, width):
    63.8 +    def prettyprint(self, _):
    63.9          print '***PrettyItem>prettyprint>', self
   63.10 -        return width
   63.11 +        return self.width
   63.12  
   63.13  class PrettyString(PrettyItem):
   63.14  
   63.15 @@ -52,7 +52,7 @@ class PrettyString(PrettyItem):
   63.16      def output(self, out):
   63.17          out.write(self.value)
   63.18  
   63.19 -    def prettyprint(self, line, _):
   63.20 +    def prettyprint(self, line):
   63.21          line.output(self)
   63.22  
   63.23      def show(self, out):
   63.24 @@ -63,7 +63,7 @@ class PrettySpace(PrettyItem):
   63.25      def output(self, out):
   63.26          out.write(' ' * self.width)
   63.27  
   63.28 -    def prettyprint(self, line, _):
   63.29 +    def prettyprint(self, line):
   63.30          line.output(self)
   63.31  
   63.32      def show(self, out):
   63.33 @@ -80,7 +80,7 @@ class PrettyBreak(PrettyItem):
   63.34      def output(self, out):
   63.35          out.write(' ' * self.width)
   63.36  
   63.37 -    def prettyprint(self, line, _):
   63.38 +    def prettyprint(self, line):
   63.39          if line.breaks(self.space):
   63.40              self.active = 1
   63.41              line.newline(self.indent)
   63.42 @@ -97,7 +97,7 @@ class PrettyNewline(PrettySpace):
   63.43          block.newline()
   63.44          block.addtoline(self)
   63.45  
   63.46 -    def prettyprint(self, line, _):
   63.47 +    def prettyprint(self, line):
   63.48          line.newline(0)
   63.49          line.output(self)
   63.50  
   63.51 @@ -127,7 +127,7 @@ class PrettyLine(PrettyItem):
   63.52              lastbreak.space = (width - lastwidth)
   63.53          self.width = width
   63.54   
   63.55 -    def prettyprint(self, line, _):
   63.56 +    def prettyprint(self, line):
   63.57          for x in self.content:
   63.58              x.prettyprint(line)
   63.59  
   63.60 @@ -168,7 +168,7 @@ class PrettyBlock(PrettyItem):
   63.61      def addtoline(self, x):
   63.62          self.lines[-1].write(x)
   63.63  
   63.64 -    def prettyprint(self, line, _):
   63.65 +    def prettyprint(self, line):
   63.66          self.indent = line.used
   63.67          line.block = self
   63.68          if not line.fits(self.width):
   63.69 @@ -252,7 +252,7 @@ class PrettyPrinter:
   63.70          self.block = self.block.parent
   63.71  
   63.72      def prettyprint(self, out=sys.stdout):
   63.73 -        self.top.prettyprint(Line(out, self.width), self.width)
   63.74 +        self.top.prettyprint(Line(out, self.width))
   63.75  
   63.76  class SXPPrettyPrinter(PrettyPrinter):
   63.77      """An SXP prettyprinter.
    64.1 --- a/tools/python/xen/xend/XendCheckpoint.py	Thu Sep 29 16:22:02 2005 -0600
    64.2 +++ b/tools/python/xen/xend/XendCheckpoint.py	Thu Sep 29 17:28:44 2005 -0600
    64.3 @@ -10,8 +10,12 @@ import select
    64.4  import sxp
    64.5  from string import join
    64.6  from struct import pack, unpack, calcsize
    64.7 +
    64.8  from xen.util.xpopen import xPopen3
    64.9 +
   64.10  import xen.lowlevel.xc
   64.11 +
   64.12 +import XendDomainInfo
   64.13  from xen.xend.xenstore.xsutil import IntroduceDomain
   64.14  
   64.15  from XendError import XendError
   64.16 @@ -74,7 +78,7 @@ def save(xd, fd, dominfo, live):
   64.17                  if l.rstrip() == "suspend":
   64.18                      log.info("suspending %d" % dominfo.domid)
   64.19                      xd.domain_shutdown(dominfo.domid, reason='suspend')
   64.20 -                    dominfo.state_wait("suspended")
   64.21 +                    dominfo.state_wait(XendDomainInfo.STATE_VM_SUSPENDED)
   64.22                      log.info("suspend %d done" % dominfo.domid)
   64.23                      child.tochild.write("done\n")
   64.24                      child.tochild.flush()
    65.1 --- a/tools/python/xen/xend/XendClient.py	Thu Sep 29 16:22:02 2005 -0600
    65.2 +++ b/tools/python/xen/xend/XendClient.py	Thu Sep 29 17:28:44 2005 -0600
    65.3 @@ -302,12 +302,6 @@ class Xend:
    65.4                               {'op'      : 'devices',
    65.5                                'type'    : type })
    65.6  
    65.7 -    def xend_domain_device(self, id, type, idx):
    65.8 -        return self.xendPost(self.domainurl(id),
    65.9 -                             {'op'      : 'device',
   65.10 -                              'type'    : type,
   65.11 -                              'idx'     : idx })
   65.12 -    
   65.13      def xend_domain_device_create(self, id, config):
   65.14          return self.xendPost(self.domainurl(id),
   65.15                               {'op'      : 'device_create',
    66.1 --- a/tools/python/xen/xend/XendDomain.py	Thu Sep 29 16:22:02 2005 -0600
    66.2 +++ b/tools/python/xen/xend/XendDomain.py	Thu Sep 29 17:28:44 2005 -0600
    66.3 @@ -28,16 +28,11 @@ import xen.lowlevel.xc
    66.4  from xen.xend import sxp
    66.5  from xen.xend import XendRoot
    66.6  from xen.xend import XendCheckpoint
    66.7 -from xen.xend.XendDomainInfo import XendDomainInfo, shutdown_reason
    66.8 +from xen.xend.XendDomainInfo import XendDomainInfo
    66.9  from xen.xend import EventServer
   66.10  from xen.xend.XendError import XendError
   66.11  from xen.xend.XendLogging import log
   66.12 -from xen.xend import scheduler
   66.13  from xen.xend.server import relocate
   66.14 -from xen.xend.uuid import getUuid
   66.15 -from xen.xend.xenstore import XenNode, DBMap
   66.16 -from xen.xend.xenstore.xstransact import xstransact
   66.17 -from xen.xend.xenstore.xsutil import GetDomainPath
   66.18  
   66.19  
   66.20  xc = xen.lowlevel.xc.new()
   66.21 @@ -47,14 +42,7 @@ eserver = EventServer.instance()
   66.22  
   66.23  __all__ = [ "XendDomain" ]
   66.24  
   66.25 -SHUTDOWN_TIMEOUT = 30
   66.26 -PRIV_DOMAIN      =  0
   66.27 -
   66.28 -def is_dead(dom):
   66.29 -    return dom['crashed'] or dom['shutdown'] or (
   66.30 -        dom['dying'] and not(dom['running'] or dom['paused'] or
   66.31 -                             dom['blocked']))
   66.32 -
   66.33 +PRIV_DOMAIN = 0
   66.34  
   66.35  class XendDomainDict(dict):
   66.36      def get_by_name(self, name):
   66.37 @@ -77,11 +65,8 @@ class XendDomain:
   66.38          # So we stuff the XendDomain instance (self) into xroot's components.
   66.39          xroot.add_component("xen.xend.XendDomain", self)
   66.40          self.domains = XendDomainDict()
   66.41 -        self.domroot = "/domain"
   66.42 -        self.vmroot = "/domain"
   66.43 -        self.dbmap = DBMap(db=XenNode(self.vmroot))
   66.44          self.watchReleaseDomain()
   66.45 -        self.initial_refresh()
   66.46 +        self.refresh()
   66.47          self.dom0_setup()
   66.48  
   66.49      def list(self):
   66.50 @@ -110,9 +95,7 @@ class XendDomain:
   66.51          return map(lambda x: x.getName(), doms)
   66.52  
   66.53      def onReleaseDomain(self):
   66.54 -        self.reap()
   66.55          self.refresh()
   66.56 -        self.domain_restarts()
   66.57  
   66.58      def watchReleaseDomain(self):
   66.59          from xen.xend.xenstore.xswatch import xswatch
   66.60 @@ -141,43 +124,22 @@ class XendDomain:
   66.61              dominfo = dominfo[0]
   66.62          return dominfo
   66.63  
   66.64 -    def initial_refresh(self):
   66.65 -        """Refresh initial domain info from db.
   66.66 -        """
   66.67 -        doms = self.xen_domains()
   66.68 -        self.dbmap.readDB()             # XXX only needed for "xend"
   66.69 -        for dom in doms.values():
   66.70 -            domid = dom['dom']
   66.71 -            dompath = GetDomainPath(domid)
   66.72 -            if not dompath:
   66.73 -                continue
   66.74 -            vmpath = xstransact.Read(dompath, "vm")
   66.75 -            if not vmpath:
   66.76 -                continue
   66.77 -            uuid = xstransact.Read(vmpath, "uuid")
   66.78 -            if not uuid:
   66.79 -                continue
   66.80 -            log.info("recreating domain %d, uuid %s" % (domid, uuid))
   66.81 -            dompath = "/".join(dompath.split("/")[0:-1])
   66.82 -            try:
   66.83 -                dominfo = XendDomainInfo.recreate(uuid, dompath, domid, dom)
   66.84 -            except Exception, ex:
   66.85 -                log.exception("Error recreating domain info: id=%d", domid)
   66.86 -                continue
   66.87 -            self._add_domain(dominfo)
   66.88 -        self.reap()
   66.89 -        self.refresh()
   66.90 -        self.domain_restarts()
   66.91 +
   66.92 +    def recreate_domain(self, xeninfo):
   66.93 +        """Refresh initial domain info from db."""
   66.94 +
   66.95 +        dominfo = XendDomainInfo.recreate(xeninfo)
   66.96 +        self._add_domain(dominfo)
   66.97 +        return dominfo
   66.98 +
   66.99  
  66.100      def dom0_setup(self):
  66.101          dom0 = self.domain_lookup(PRIV_DOMAIN)
  66.102          if not dom0:
  66.103 -            dom0 = self.dom0_unknown()
  66.104 -        dom0.dom0_init_store()    
  66.105 +            dom0 = self.recreate_domain(self.xen_domain(PRIV_DOMAIN))
  66.106 +        dom0.dom0_init_store()
  66.107          dom0.dom0_enforce_vcpus()
  66.108  
  66.109 -    def close(self):
  66.110 -        pass
  66.111  
  66.112      def _add_domain(self, info, notify=True):
  66.113          """Add a domain entry to the tables.
  66.114 @@ -193,70 +155,45 @@ class XendDomain:
  66.115              eserver.inject('xend.domain.create', [info.getName(),
  66.116                                                    info.getDomid()])
  66.117  
  66.118 -    def _delete_domain(self, id, notify=True):
  66.119 +    def _delete_domain(self, domid, notify=True):
  66.120          """Remove a domain from the tables.
  66.121  
  66.122          @param id:     domain id
  66.123          @param notify: send a domain died event if true
  66.124          """
  66.125 -        info = self.domains.get(id)
  66.126 +        info = self.domains.get(domid)
  66.127          if info:
  66.128 -            del self.domains[id]
  66.129 +            del self.domains[domid]
  66.130              info.cleanup()
  66.131              info.delete()
  66.132              if notify:
  66.133                  eserver.inject('xend.domain.died', [info.getName(),
  66.134                                                      info.getDomid()])
  66.135 -        # XXX this should not be needed
  66.136 -        for domdb in self.dbmap.values():
  66.137 -            if not domdb.has_key("xend"):
  66.138 -                continue
  66.139 -            db = domdb.addChild("xend")
  66.140 -            try:
  66.141 -                domid = int(domdb["domid"].getData())
  66.142 -            except:
  66.143 -                domid = None
  66.144 -            if (domid is None) or (domid == id):
  66.145 -                domdb.delete()
  66.146  
  66.147 -    def reap(self):
  66.148 -        """Look for domains that have crashed or stopped.
  66.149 -        Tidy them up.
  66.150 -        """
  66.151 -        doms = self.xen_domains()
  66.152 -        for d in doms.values():
  66.153 -            if not is_dead(d):
  66.154 -                continue
  66.155 -            domid = d['dom']
  66.156 -            dominfo = self.domains.get(domid)
  66.157 -            if not dominfo or dominfo.is_terminated():
  66.158 -                continue
  66.159 -            log.debug('domain died name=%s domid=%d', dominfo.getName(), domid)
  66.160 -            if d['crashed'] and xroot.get_enable_dump():
  66.161 -                self.domain_dumpcore(domid)
  66.162 -            if d['shutdown']:
  66.163 -                reason = shutdown_reason(d['shutdown_reason'])
  66.164 -                log.debug('shutdown name=%s id=%d reason=%s',
  66.165 -                          dominfo.getName(), domid, reason)
  66.166 -                if reason == 'suspend':
  66.167 -                    dominfo.state_set("suspended")
  66.168 -                    continue
  66.169 -                if reason in ['poweroff', 'reboot']:
  66.170 -                    self.domain_restart_schedule(domid, reason)
  66.171 -            dominfo.destroy()
  66.172  
  66.173      def refresh(self):
  66.174          """Refresh domain list from Xen.
  66.175          """
  66.176          doms = self.xen_domains()
  66.177 -        # Remove entries for domains that no longer exist.
  66.178 -        # Update entries for existing domains.
  66.179          for d in self.domains.values():
  66.180              info = doms.get(d.getDomid())
  66.181              if info:
  66.182                  d.update(info)
  66.183 -            elif not d.restart_pending():
  66.184 +            else:
  66.185                  self._delete_domain(d.getDomid())
  66.186 +        for d in doms:
  66.187 +            if d not in self.domains:
  66.188 +                try:
  66.189 +                    self.recreate_domain(doms[d])
  66.190 +                except:
  66.191 +                    log.exception(
  66.192 +                        "Failed to recreate information for domain %d.  "
  66.193 +                        "Destroying it in the hope of recovery.", d)
  66.194 +                    try:
  66.195 +                        xc.domain_destroy(dom = d)
  66.196 +                    except:
  66.197 +                        log.exception('Destruction of %d failed.', d)
  66.198 +
  66.199  
  66.200      def update_domain(self, id):
  66.201          """Update information for a single domain.
  66.202 @@ -277,34 +214,10 @@ class XendDomain:
  66.203          @param config: configuration
  66.204          @return: domain
  66.205          """
  66.206 -        dominfo = XendDomainInfo.create(self.dbmap.getPath(), config)
  66.207 +        dominfo = XendDomainInfo.create(config)
  66.208          self._add_domain(dominfo)
  66.209          return dominfo
  66.210  
  66.211 -    def domain_restart(self, dominfo):
  66.212 -        """Restart a domain.
  66.213 -
  66.214 -        @param dominfo: domain object
  66.215 -        """
  66.216 -        log.info("Restarting domain: name=%s id=%s", dominfo.getName(),
  66.217 -                 dominfo.getDomid())
  66.218 -        eserver.inject("xend.domain.restart",
  66.219 -                       [dominfo.getName(), dominfo.getDomid(), "begin"])
  66.220 -        try:
  66.221 -            dominfo.restart()
  66.222 -            log.info('Restarted domain name=%s id=%s', dominfo.getName(),
  66.223 -                     dominfo.getDomid())
  66.224 -            eserver.inject("xend.domain.restart",
  66.225 -                           [dominfo.getName(), dominfo.getDomid(),
  66.226 -                            "success"])
  66.227 -            self.domain_unpause(dominfo.getDomid())
  66.228 -        except Exception, ex:
  66.229 -            log.exception("Exception restarting domain: name=%s id=%s",
  66.230 -                          dominfo.getName(), dominfo.getDomid())
  66.231 -            eserver.inject("xend.domain.restart",
  66.232 -                           [dominfo.getName(), dominfo.getDomid(), "fail"])
  66.233 -        return dominfo
  66.234 -
  66.235      def domain_configure(self, config):
  66.236          """Configure an existing domain. This is intended for internal
  66.237          use by domain restore and migrate.
  66.238 @@ -318,13 +231,12 @@ class XendDomain:
  66.239          nested = sxp.child_value(config, 'config')
  66.240          if nested:
  66.241              config = nested
  66.242 -        return XendDomainInfo.restore(self.dbmap.getPath(), config)
  66.243 +        return XendDomainInfo.restore(config)
  66.244  
  66.245 -    def domain_restore(self, src, progress=False):
  66.246 +    def domain_restore(self, src):
  66.247          """Restore a domain from file.
  66.248  
  66.249          @param src:      source file
  66.250 -        @param progress: output progress if true
  66.251          """
  66.252  
  66.253          try:
  66.254 @@ -345,33 +257,7 @@ class XendDomain:
  66.255          self.update_domain(id)
  66.256          return self.domains.get(id)
  66.257  
  66.258 -    def dom0_unknown(self):
  66.259 -        dom0 = PRIV_DOMAIN
  66.260 -        uuid = None
  66.261 -        info = self.xen_domain(dom0)
  66.262 -        dompath = GetDomainPath(dom0)
  66.263 -        if dompath:
  66.264 -            vmpath = xstransact.Read(dompath, "vm")
  66.265 -            if vmpath:
  66.266 -                uuid = xstransact.Read(vmpath, "uuid")
  66.267 -            if not uuid:
  66.268 -                uuid = dompath.split("/")[-1]
  66.269 -            dompath = "/".join(dompath.split("/")[0:-1])
  66.270 -        if not uuid:
  66.271 -            uuid = getUuid()
  66.272 -            dompath = self.domroot
  66.273 -        log.info("Creating entry for unknown xend domain: id=%d uuid=%s",
  66.274 -                 dom0, uuid)
  66.275 -        try:
  66.276 -            dominfo = XendDomainInfo.recreate(uuid, dompath, dom0, info)
  66.277 -            self._add_domain(dominfo)
  66.278 -            return dominfo
  66.279 -        except Exception, exn:
  66.280 -            log.exception(exn)
  66.281 -            raise XendError("Error recreating xend domain info: id=%d: %s" %
  66.282 -                            (dom0, str(exn)))
  66.283  
  66.284 -        
  66.285      def domain_lookup(self, id):
  66.286          return self.domains.get(id)
  66.287  
  66.288 @@ -410,8 +296,9 @@ class XendDomain:
  66.289              return xc.domain_pause(dom=dominfo.getDomid())
  66.290          except Exception, ex:
  66.291              raise XendError(str(ex))
  66.292 -    
  66.293 -    def domain_shutdown(self, id, reason='poweroff'):
  66.294 +
  66.295 +
  66.296 +    def domain_shutdown(self, domid, reason='poweroff'):
  66.297          """Shutdown domain (nicely).
  66.298           - poweroff: restart according to exit code and restart mode
  66.299           - reboot:   restart on exit
  66.300 @@ -422,89 +309,13 @@ class XendDomain:
  66.301          @param id:     domain id
  66.302          @param reason: shutdown type: poweroff, reboot, suspend, halt
  66.303          """
  66.304 -        dominfo = self.domain_lookup(id)
  66.305 -        self.domain_restart_schedule(dominfo.getDomid(), reason, force=True)
  66.306 -        eserver.inject('xend.domain.shutdown', [dominfo.getName(),
  66.307 -                                                dominfo.getDomid(), reason])
  66.308 -        if reason == 'halt':
  66.309 -            reason = 'poweroff'
  66.310 -        val = dominfo.shutdown(reason)
  66.311 -        if not reason in ['suspend']:
  66.312 -            self.domain_shutdowns()
  66.313 -        return val
  66.314 -
  66.315 -
  66.316 -    def domain_sysrq(self, id, key):
  66.317 -        """Send a SysRq to the specified domain."""
  66.318 -        return self.callInfo(id, XendDomainInfo.send_sysrq, key)
  66.319 +        self.callInfo(domid, XendDomainInfo.shutdown, reason)
  66.320  
  66.321  
  66.322 -    def domain_shutdowns(self):
  66.323 -        """Process pending domain shutdowns.
  66.324 -        Destroys domains whose shutdowns have timed out.
  66.325 -        """
  66.326 -        timeout = SHUTDOWN_TIMEOUT + 1
  66.327 -        for dominfo in self.domains.values():
  66.328 -            if not dominfo.shutdown_pending:
  66.329 -                # domain doesn't need shutdown
  66.330 -                continue
  66.331 -            id = dominfo.getDomid()
  66.332 -            left = dominfo.shutdown_time_left(SHUTDOWN_TIMEOUT)
  66.333 -            if left <= 0:
  66.334 -                # Shutdown expired - destroy domain.
  66.335 -                try:
  66.336 -                    log.info("Domain shutdown timeout expired: name=%s id=%s",
  66.337 -                             dominfo.getName(), id)
  66.338 -                    self.domain_destroy(id, reason=
  66.339 -                                        dominfo.shutdown_pending['reason'])
  66.340 -                except Exception:
  66.341 -                    pass
  66.342 -            else:
  66.343 -                # Shutdown still pending.
  66.344 -                timeout = min(timeout, left)
  66.345 -        if timeout <= SHUTDOWN_TIMEOUT:
  66.346 -            # Pending shutdowns remain - reschedule.
  66.347 -            scheduler.later(timeout, self.domain_shutdowns)
  66.348 -
  66.349 -    def domain_restart_schedule(self, id, reason, force=False):
  66.350 -        """Schedule a restart for a domain if it needs one.
  66.351 +    def domain_sysrq(self, domid, key):
  66.352 +        """Send a SysRq to the specified domain."""
  66.353 +        return self.callInfo(domid, XendDomainInfo.send_sysrq, key)
  66.354  
  66.355 -        @param id:     domain id
  66.356 -        @param reason: shutdown reason
  66.357 -        """
  66.358 -        log.debug('domain_restart_schedule> %d %s %d', id, reason, force)
  66.359 -        dominfo = self.domain_lookup(id)
  66.360 -        if not dominfo:
  66.361 -            return
  66.362 -        restart = (force and reason == 'reboot') or dominfo.restart_needed(reason)
  66.363 -        if restart:
  66.364 -            log.info('Scheduling restart for domain: name=%s id=%s',
  66.365 -                     dominfo.getName(), dominfo.getDomid())
  66.366 -            eserver.inject("xend.domain.restart",
  66.367 -                           [dominfo.getName(), dominfo.getDomid(),
  66.368 -                            "schedule"])
  66.369 -            dominfo.restarting()
  66.370 -        else:
  66.371 -            log.info('Cancelling restart for domain: name=%s id=%s',
  66.372 -                     dominfo.getName(), dominfo.getDomid())
  66.373 -            eserver.inject("xend.domain.restart",
  66.374 -                           [dominfo.getName(), dominfo.getDomid(), "cancel"])
  66.375 -            dominfo.restart_cancel()
  66.376 -
  66.377 -    def domain_restarts(self):
  66.378 -        """Execute any scheduled domain restarts for domains that have gone.
  66.379 -        """
  66.380 -        doms = self.xen_domains()
  66.381 -        for dominfo in self.domains.values():
  66.382 -            if not dominfo.restart_pending():
  66.383 -                continue
  66.384 -            info = doms.get(dominfo.getDomid())
  66.385 -            if info:
  66.386 -                # Don't execute restart for domains still running.
  66.387 -                continue
  66.388 -            # Remove it from the restarts.
  66.389 -            log.info('restarting: %s' % dominfo.getName())
  66.390 -            self.domain_restart(dominfo)
  66.391  
  66.392      def domain_destroy(self, domid, reason='halt'):
  66.393          """Terminate domain immediately.
  66.394 @@ -517,7 +328,6 @@ class XendDomain:
  66.395          if domid == PRIV_DOMAIN:
  66.396              raise XendError("Cannot destroy privileged domain %i" % domid)
  66.397          
  66.398 -        self.domain_restart_schedule(domid, reason, force=True)
  66.399          dominfo = self.domain_lookup(domid)
  66.400          if dominfo:
  66.401              val = dominfo.destroy()
  66.402 @@ -554,12 +364,11 @@ class XendDomain:
  66.403          
  66.404          return None
  66.405  
  66.406 -    def domain_save(self, id, dst, progress=False):
  66.407 +    def domain_save(self, id, dst):
  66.408          """Start saving a domain to file.
  66.409  
  66.410          @param id:       domain id
  66.411          @param dst:      destination file
  66.412 -        @param progress: output progress if true
  66.413          """
  66.414  
  66.415          try:
  66.416 @@ -661,14 +470,6 @@ class XendDomain:
  66.417          return self.callInfo(domid, XendDomainInfo.getDeviceSxprs, devtype)
  66.418  
  66.419  
  66.420 -    def domain_devtype_get(self, domid, devtype, devid):
  66.421 -        """Get a device from a domain.
  66.422 -        
  66.423 -        @return: device object (or None)
  66.424 -        """
  66.425 -        return self.callInfo(domid, XendDomainInfo.getDevice, devtype, devid)
  66.426 -
  66.427 -
  66.428      def domain_vif_limit_set(self, id, vif, credit, period):
  66.429          """Limit the vif's transmission rate
  66.430          """
  66.431 @@ -730,10 +531,16 @@ class XendDomain:
  66.432      ## private:
  66.433  
  66.434      def callInfo(self, domid, fn, *args, **kwargs):
  66.435 -        self.refresh()
  66.436 -        dominfo = self.domains.get(domid)
  66.437 -        if dominfo:
  66.438 -            return fn(dominfo, *args, **kwargs)
  66.439 +        try:
  66.440 +            self.refresh()
  66.441 +            dominfo = self.domains.get(domid)
  66.442 +            if dominfo:
  66.443 +                return fn(dominfo, *args, **kwargs)
  66.444 +        except XendError:
  66.445 +            raise
  66.446 +        except Exception, exn:
  66.447 +            log.exception("")
  66.448 +            raise XendError(str(exn))
  66.449  
  66.450  
  66.451  def instance():
    67.1 --- a/tools/python/xen/xend/XendDomainInfo.py	Thu Sep 29 16:22:02 2005 -0600
    67.2 +++ b/tools/python/xen/xend/XendDomainInfo.py	Thu Sep 29 17:28:44 2005 -0600
    67.3 @@ -35,7 +35,9 @@ from xen.util.blkif import blkdev_uname_
    67.4  from xen.xend.server.channel import EventChannel
    67.5  
    67.6  from xen.xend import image
    67.7 +from xen.xend import scheduler
    67.8  from xen.xend import sxp
    67.9 +from xen.xend import XendRoot
   67.10  from xen.xend.XendBootloader import bootloader
   67.11  from xen.xend.XendLogging import log
   67.12  from xen.xend.XendError import XendError, VmError
   67.13 @@ -43,7 +45,7 @@ from xen.xend.XendRoot import get_compon
   67.14  
   67.15  from xen.xend.uuid import getUuid
   67.16  from xen.xend.xenstore.xstransact import xstransact
   67.17 -from xen.xend.xenstore.xsutil import IntroduceDomain
   67.18 +from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain
   67.19  
   67.20  """Shutdown code for poweroff."""
   67.21  DOMAIN_POWEROFF = 0
   67.22 @@ -75,9 +77,6 @@ restart_modes = [
   67.23      RESTART_NEVER,
   67.24      ]
   67.25  
   67.26 -STATE_RESTART_PENDING = 'pending'
   67.27 -STATE_RESTART_BOOTING = 'booting'
   67.28 -
   67.29  STATE_VM_OK         = "ok"
   67.30  STATE_VM_TERMINATED = "terminated"
   67.31  STATE_VM_SUSPENDED  = "suspended"
   67.32 @@ -92,7 +91,29 @@ SIF_NET_BE_DOMAIN = (1<<5)
   67.33  SIF_TPM_BE_DOMAIN = (1<<7)
   67.34  
   67.35  
   67.36 +SHUTDOWN_TIMEOUT = 30
   67.37 +
   67.38 +
   67.39 +DOMROOT = '/domain'
   67.40 +VMROOT  = '/domain'
   67.41 +
   67.42 +
   67.43  xc = xen.lowlevel.xc.new()
   67.44 +xroot = XendRoot.instance()
   67.45 +
   67.46 +
   67.47 +## Configuration entries that we expect to round-trip -- be read from the
   67.48 +# config file or xc, written to save-files (i.e. through sxpr), and reused as
   67.49 +# config on restart or restore, all without munging.  Some configuration
   67.50 +# entries are munged for backwards compatibility reasons, or because they
   67.51 +# don't come out of xc in the same form as they are specified in the config
   67.52 +# file, so those are handled separately.
   67.53 +ROUNDTRIPPING_CONFIG_ENTRIES = [
   67.54 +        ('name',         str),
   67.55 +        ('ssidref',      int),
   67.56 +        ('cpu_weight',   float),
   67.57 +        ('bootloader',   str)
   67.58 +    ]
   67.59  
   67.60  
   67.61  def domain_exists(name):
   67.62 @@ -133,47 +154,64 @@ class XendDomainInfo:
   67.63      MINIMUM_RESTART_TIME = 20
   67.64  
   67.65  
   67.66 -    def create(cls, dompath, config):
   67.67 +    def create(cls, config):
   67.68          """Create a VM from a configuration.
   67.69  
   67.70 -        @param dompath:   The path to all domain information
   67.71          @param config    configuration
   67.72          @raise: VmError for invalid configuration
   67.73          """
   67.74  
   67.75 -        log.debug("XendDomainInfo.create(%s, ...)", dompath)
   67.76 +        log.debug("XendDomainInfo.create(...)")
   67.77          
   67.78 -        vm = cls(getUuid(), dompath, cls.parseConfig(config))
   67.79 +        vm = cls(getUuid(), cls.parseConfig(config))
   67.80          vm.construct()
   67.81 +        vm.refreshShutdown()
   67.82          return vm
   67.83  
   67.84      create = classmethod(create)
   67.85  
   67.86  
   67.87 -    def recreate(cls, uuid, dompath, domid, info):
   67.88 -        """Create the VM object for an existing domain.
   67.89 +    def recreate(cls, xeninfo):
   67.90 +        """Create the VM object for an existing domain."""
   67.91  
   67.92 -        @param dompath:   The path to all domain information
   67.93 -        @param info:      domain info from xc
   67.94 -        """
   67.95 +        log.debug("XendDomainInfo.recreate(%s)", xeninfo)
   67.96  
   67.97 -        log.debug("XendDomainInfo.recreate(%s, %s, %s, %s)", uuid, dompath,
   67.98 -                  domid, info)
   67.99 +        domid = xeninfo['dom']
  67.100 +        try:
  67.101 +            dompath = GetDomainPath(domid)
  67.102 +            if not dompath:
  67.103 +                raise XendError(
  67.104 +                    'No domain path in store for existing domain %d' % domid)
  67.105 +            vmpath = xstransact.Read(dompath, "vm")
  67.106 +            if not vmpath:
  67.107 +                raise XendError(
  67.108 +                    'No vm path in store for existing domain %d' % domid)
  67.109 +            uuid = xstransact.Read(vmpath, "uuid")
  67.110 +            if not uuid:
  67.111 +                raise XendError(
  67.112 +                    'No vm/uuid path in store for existing domain %d' % domid)
  67.113  
  67.114 -        return cls(uuid, dompath, info, domid, True)
  67.115 +        except Exception, exn:
  67.116 +            log.warn(str(exn))
  67.117 +            uuid = getUuid()
  67.118 +
  67.119 +        log.info("Recreating domain %d, uuid %s", domid, uuid)
  67.120 +
  67.121 +        vm = cls(uuid, xeninfo, domid, True)
  67.122 +        vm.refreshShutdown(xeninfo)
  67.123 +        return vm
  67.124  
  67.125      recreate = classmethod(recreate)
  67.126  
  67.127  
  67.128 -    def restore(cls, dompath, config, uuid = None):
  67.129 +    def restore(cls, config, uuid = None):
  67.130          """Create a domain and a VM object to do a restore.
  67.131  
  67.132 -        @param dompath:   The path to all domain information
  67.133          @param config:    domain configuration
  67.134          @param uuid:      uuid to use
  67.135          """
  67.136          
  67.137 -        log.debug("XendDomainInfo.restore(%s, %s, %s)", dompath, config, uuid)
  67.138 +        log.debug("XendDomainInfo.restore(%s, %s)", config, uuid)
  67.139  
  67.140          if not uuid:
  67.141              uuid = getUuid()
  67.142 @@ -183,14 +221,12 @@ class XendDomainInfo:
  67.143          except TypeError, exn:
  67.144              raise VmError('Invalid ssidref in config: %s' % exn)
  67.145  
  67.146 -        log.debug('restoring with ssidref = %d' % ssidref)
  67.147 -
  67.148 -        vm = cls(uuid, dompath, cls.parseConfig(config),
  67.149 +        vm = cls(uuid, cls.parseConfig(config),
  67.150                   xc.domain_create(ssidref = ssidref))
  67.151 -        vm.clear_shutdown()
  67.152          vm.create_channel()
  67.153          vm.configure()
  67.154          vm.exportToDB()
  67.155 +        vm.refreshShutdown()
  67.156          return vm
  67.157  
  67.158      restore = classmethod(restore)
  67.159 @@ -214,33 +250,28 @@ class XendDomainInfo:
  67.160          log.debug("parseConfig: config is %s" % str(config))
  67.161  
  67.162          result = {}
  67.163 -        imagecfg = "()"
  67.164  
  67.165 -        result['name']         = get_cfg('name')
  67.166 -        result['ssidref']      = get_cfg('ssidref',    int)
  67.167 +        for e in ROUNDTRIPPING_CONFIG_ENTRIES:
  67.168 +            result[e[0]] = get_cfg(e[0], e[1])
  67.169 +
  67.170          result['memory']       = get_cfg('memory',     int)
  67.171          result['mem_kb']       = get_cfg('mem_kb',     int)
  67.172          result['maxmem']       = get_cfg('maxmem',     int)
  67.173          result['maxmem_kb']    = get_cfg('maxmem_kb',  int)
  67.174          result['cpu']          = get_cfg('cpu',        int)
  67.175 -        result['cpu_weight']   = get_cfg('cpu_weight', float)
  67.176 -        result['bootloader']   = get_cfg('bootloader')
  67.177          result['restart_mode'] = get_cfg('restart')
  67.178 +        result['image']        = get_cfg('image')
  67.179  
  67.180          try:
  67.181 -            imagecfg = get_cfg('image')
  67.182 -
  67.183 -            if imagecfg:
  67.184 -                result['image'] = imagecfg
  67.185 -                result['vcpus'] = int(sxp.child_value(imagecfg, 'vcpus',
  67.186 -                                                      1))
  67.187 +            if result['image']:
  67.188 +                result['vcpus'] = int(sxp.child_value(result['image'],
  67.189 +                                                      'vcpus', 1))
  67.190              else:
  67.191                  result['vcpus'] = 1
  67.192          except TypeError, exn:
  67.193              raise VmError(
  67.194                  'Invalid configuration setting: vcpus = %s: %s' %
  67.195 -                (sxp.child_value(imagecfg, 'vcpus', 1),
  67.196 -                 str(exn)))
  67.197 +                (sxp.child_value(result['image'], 'vcpus', 1), str(exn)))
  67.198  
  67.199          result['backend'] = []
  67.200          for c in sxp.children(config, 'backend'):
  67.201 @@ -258,12 +289,12 @@ class XendDomainInfo:
  67.202      parseConfig = classmethod(parseConfig)
  67.203  
  67.204      
  67.205 -    def __init__(self, uuid, parentpath, info, domid = None, augment = False):
  67.206 +    def __init__(self, uuid, info, domid = None, augment = False):
  67.207  
  67.208          self.uuid = uuid
  67.209          self.info = info
  67.210  
  67.211 -        self.path = parentpath + "/" + uuid
  67.212 +        self.path = DOMROOT + "/" + uuid
  67.213  
  67.214          if domid:
  67.215              self.domid = domid
  67.216 @@ -283,26 +314,26 @@ class XendDomainInfo:
  67.217          self.store_mfn = None
  67.218          self.console_channel = None
  67.219          self.console_mfn = None
  67.220 -        
  67.221 -        #todo: state: running, suspended
  67.222 +
  67.223          self.state = STATE_VM_OK
  67.224          self.state_updated = threading.Condition()
  67.225 -        self.shutdown_pending = None
  67.226  
  67.227 -        self.restart_state = None
  67.228 -        self.restart_time = None
  67.229 -        self.restart_count = 0
  67.230 -        
  67.231          self.writeVm("uuid", self.uuid)
  67.232          self.storeDom("vm", self.path)
  67.233  
  67.234  
  67.235      def augmentInfo(self):
  67.236 +        """Augment self.info, as given to us through {@link #recreate}, with
  67.237 +        values taken from the store.  This recovers those values known to xend
  67.238 +        but not to the hypervisor.
  67.239 +        """
  67.240          def useIfNeeded(name, val):
  67.241              if not self.infoIsSet(name) and val is not None:
  67.242                  self.info[name] = val
  67.243  
  67.244          params = (("name", str),
  67.245 +                  ("restart-mode", str),
  67.246 +                  ("image",        str),
  67.247                    ("start-time", float))
  67.248  
  67.249          from_store = self.gatherVm(*params)
  67.250 @@ -322,13 +353,18 @@ class XendDomainInfo:
  67.251              defaultInfo('name',         lambda: "Domain-%d" % self.domid)
  67.252              defaultInfo('ssidref',      lambda: 0)
  67.253              defaultInfo('restart_mode', lambda: RESTART_ONREBOOT)
  67.254 +            defaultInfo('cpu',          lambda: None)
  67.255              defaultInfo('cpu_weight',   lambda: 1.0)
  67.256              defaultInfo('bootloader',   lambda: None)
  67.257              defaultInfo('backend',      lambda: [])
  67.258              defaultInfo('device',       lambda: [])
  67.259 +            defaultInfo('image',        lambda: None)
  67.260  
  67.261              self.check_name(self.info['name'])
  67.262  
  67.263 +            if isinstance(self.info['image'], str):
  67.264 +                self.info['image'] = sxp.from_string(self.info['image'])
  67.265 +
  67.266              # Internally, we keep only maxmem_KiB, and not maxmem or maxmem_kb
  67.267              # (which come from outside, and are in MiB and KiB respectively).
  67.268              # This means that any maxmem or maxmem_kb settings here have come
  67.269 @@ -451,17 +487,16 @@ class XendDomainInfo:
  67.270              'domid':              str(self.domid),
  67.271              'uuid':               self.uuid,
  67.272  
  67.273 -            'restart_time':       str(self.restart_time),
  67.274 -
  67.275 -            'xend/state':         self.state,
  67.276 -            'xend/restart_count': str(self.restart_count),
  67.277              'xend/restart_mode':  str(self.info['restart_mode']),
  67.278  
  67.279              'memory/target':      str(self.info['memory_KiB'])
  67.280              }
  67.281  
  67.282          for (k, v) in self.info.items():
  67.283 -            to_store[k] = str(v)
  67.284 +            if v:
  67.285 +                to_store[k] = str(v)
  67.286 +
  67.287 +        to_store['image'] = sxp.to_string(self.info['image'])
  67.288  
  67.289          log.debug("Storing %s" % str(to_store))
  67.290  
  67.291 @@ -513,6 +548,88 @@ class XendDomainInfo:
  67.292                        self.info['backend'], 0)
  67.293  
  67.294  
  67.295 +    def refreshShutdown(self, xeninfo = None):
  67.296 +        if xeninfo is None:
  67.297 +            xeninfo = dom_get(self.domid)
  67.298 +            if xeninfo is None:
  67.299 +                # The domain no longer exists.  This will occur if we have
  67.300 +                # scheduled a timer to check for shutdown timeouts and the
  67.301 +                # shutdown succeeded.
  67.302 +                return
  67.303 +
  67.304 +        if xeninfo['dying']:
  67.305 +            # Dying means that a domain has been destroyed, but has not yet
  67.306 +            # been cleaned up by Xen.  This could persist indefinitely if,
  67.307 +            # for example, another domain has some of its pages mapped.
  67.308 +            # We might like to diagnose this problem in the future, but for
  67.309 +            # now all we can sensibly do is ignore it.
  67.310 +            pass
  67.311 +
  67.312 +        elif xeninfo['crashed']:
  67.313 +            log.warn('Domain has crashed: name=%s id=%d.',
  67.314 +                     self.info['name'], self.domid)
  67.315 +
  67.316 +            if xroot.get_enable_dump():
  67.317 +                self.dumpCore()
  67.318 +
  67.319 +            self.maybeRestart('crashed')
  67.320 +
  67.321 +        elif xeninfo['shutdown']:
  67.322 +            reason = shutdown_reason(xeninfo['shutdown_reason'])
  67.323 +
   67.324 +            log.info('Domain has shut down: name=%s id=%d reason=%s.',
  67.325 +                     self.info['name'], self.domid, reason)
  67.326 +
  67.327 +            self.clearRestart()
  67.328 +
  67.329 +            if reason == 'suspend':
  67.330 +                self.state_set(STATE_VM_SUSPENDED)
  67.331 +                # Don't destroy the domain.  XendCheckpoint will do this once
  67.332 +                # it has finished.
  67.333 +            elif reason in ['poweroff', 'reboot']:
  67.334 +                self.maybeRestart(reason)
  67.335 +            else:
  67.336 +                self.destroy()
  67.337 +
  67.338 +        else:
  67.339 +            # Domain is alive.  If we are shutting it down, then check
  67.340 +            # the timeout on that, and destroy it if necessary.
  67.341 +
  67.342 +            sst = self.readVm('xend/shutdown_start_time')
  67.343 +            if sst:
  67.344 +                sst = float(sst)
  67.345 +                timeout = SHUTDOWN_TIMEOUT - time.time() + sst
  67.346 +                if timeout < 0:
  67.347 +                    log.info(
  67.348 +                        "Domain shutdown timeout expired: name=%s id=%s",
  67.349 +                        self.info['name'], self.domid)
  67.350 +                    self.destroy()
  67.351 +                else:
  67.352 +                    log.debug(
  67.353 +                        "Scheduling refreshShutdown on domain %d in %ds.",
  67.354 +                        self.domid, timeout)
  67.355 +                    scheduler.later(timeout, self.refreshShutdown)
  67.356 +
  67.357 +
  67.358 +    def shutdown(self, reason):
  67.359 +        if not reason in shutdown_reasons.values():
   67.360 +            raise XendError('invalid reason: ' + reason)
  67.361 +        self.storeVm("control/shutdown", reason)
  67.362 +        if not reason == 'suspend':
  67.363 +            self.storeVm('xend/shutdown_start_time', time.time())
  67.364 +
  67.365 +
  67.366 +    def clearRestart(self):
  67.367 +        self.removeVm("xend/shutdown_start_time")
  67.368 +
  67.369 +
  67.370 +    def maybeRestart(self, reason):
  67.371 +        if self.restart_needed(reason):
  67.372 +            self.restart()
  67.373 +        else:
  67.374 +            self.destroy()
  67.375 +
  67.376 +
  67.377      def dumpCore(self):
  67.378          """Create a core dump for this domain.  Nothrow guarantee."""
  67.379          
  67.380 @@ -526,18 +643,32 @@ class XendDomainInfo:
  67.381                        self.domid, self.info['name'], str(exn))
  67.382  
  67.383  
  67.384 -    def closeStoreChannel(self):
  67.385 -        """Close the store channel, if any.  Nothrow guarantee."""
  67.386 +    def closeChannel(self, channel, entry):
   67.387 +        """Close the given channel, if set, and remove the given entry from the
  67.388 +        store.  Nothrow guarantee."""
  67.389          
  67.390          try:
  67.391 -            if self.store_channel:
  67.392 -                try:
  67.393 -                    self.store_channel.close()
  67.394 -                    self.removeDom("store/port")
  67.395 -                finally:
  67.396 -                    self.store_channel = None
  67.397 +            try:
  67.398 +                if channel:
  67.399 +                    channel.close()
  67.400 +            finally:
  67.401 +                self.removeDom(entry)
  67.402          except Exception, exn:
  67.403              log.exception(exn)
  67.404 +        
  67.405 +
  67.406 +    def closeStoreChannel(self):
  67.407 +        """Close the store channel, if any.  Nothrow guarantee."""
  67.408 +
  67.409 +        self.closeChannel(self.store_channel, "store/port")
  67.410 +        self.store_channel = None
  67.411 +
  67.412 +
  67.413 +    def closeConsoleChannel(self):
  67.414 +        """Close the console channel, if any.  Nothrow guarantee."""
  67.415 +
  67.416 +        self.closeChannel(self.console_channel, "console/port")
  67.417 +        self.console_channel = None
  67.418  
  67.419  
  67.420      def setConsoleRef(self, ref):
  67.421 @@ -566,18 +697,23 @@ class XendDomainInfo:
  67.422              
  67.423          self.info.update(info)
  67.424          self.validateInfo()
  67.425 +        self.refreshShutdown(info)
  67.426  
  67.427          log.debug("XendDomainInfo.update done on domain %d: %s", self.domid,
  67.428                    self.info)
  67.429  
  67.430  
  67.431 +    ## private:
  67.432 +
  67.433      def state_set(self, state):
  67.434          self.state_updated.acquire()
  67.435          if self.state != state:
  67.436              self.state = state
  67.437              self.state_updated.notifyAll()
  67.438          self.state_updated.release()
  67.439 -        self.exportToDB()
  67.440 +
  67.441 +
  67.442 +    ## public:
  67.443  
  67.444      def state_wait(self, state):
  67.445          self.state_updated.acquire()
  67.446 @@ -585,6 +721,7 @@ class XendDomainInfo:
  67.447              self.state_updated.wait()
  67.448          self.state_updated.release()
  67.449  
  67.450 +
  67.451      def __str__(self):
  67.452          s = "<domain"
  67.453          s += " id=" + str(self.domid)
  67.454 @@ -597,13 +734,6 @@ class XendDomainInfo:
  67.455      __repr__ = __str__
  67.456  
  67.457  
  67.458 -    def getDeviceController(self, name):
  67.459 -        if name not in controllerClasses:
  67.460 -            raise XendError("unknown device type: " + str(name))
  67.461 -
  67.462 -        return controllerClasses[name](self)
  67.463 -
  67.464 -
  67.465      def createDevice(self, deviceClass, devconfig):
  67.466          return self.getDeviceController(deviceClass).createDevice(devconfig)
  67.467  
  67.468 @@ -617,49 +747,66 @@ class XendDomainInfo:
  67.469          return self.getDeviceController(deviceClass).destroyDevice(devid)
  67.470  
  67.471  
  67.472 +    def getDeviceSxprs(self, deviceClass):
  67.473 +        return self.getDeviceController(deviceClass).sxprs()
  67.474 +
  67.475 +
  67.476 +    ## private:
  67.477 +
  67.478 +    def getDeviceController(self, name):
  67.479 +        if name not in controllerClasses:
  67.480 +            raise XendError("unknown device type: " + str(name))
  67.481 +
  67.482 +        return controllerClasses[name](self)
  67.483 +
  67.484 +
  67.485 +    ## public:
  67.486 +
  67.487      def sxpr(self):
  67.488          sxpr = ['domain',
  67.489 -                ['domid', self.domid],
  67.490 -                ['name', self.info['name']],
  67.491 -                ['memory', self.info['memory_KiB'] / 1024],
  67.492 -                ['ssidref', self.info['ssidref']]]
  67.493 -        if self.uuid:
  67.494 -            sxpr.append(['uuid', self.uuid])
  67.495 -        if self.info:
  67.496 -            sxpr.append(['maxmem', self.info['maxmem_KiB'] / 1024])
  67.497 +                ['domid',   self.domid],
  67.498 +                ['uuid',    self.uuid],
  67.499 +                ['memory',  self.info['memory_KiB'] / 1024]]
  67.500  
  67.501 -            if self.infoIsSet('device'):
  67.502 -                for (_, c) in self.info['device']:
  67.503 -                    sxpr.append(['device', c])
  67.504 +        for e in ROUNDTRIPPING_CONFIG_ENTRIES:
  67.505 +            if self.infoIsSet(e[0]):
  67.506 +                sxpr.append([e[0], self.info[e[0]]])
  67.507 +        
  67.508 +        sxpr.append(['maxmem', self.info['maxmem_KiB'] / 1024])
  67.509  
  67.510 -            def stateChar(name):
  67.511 -                if name in self.info:
  67.512 -                    if self.info[name]:
  67.513 -                        return name[0]
  67.514 -                    else:
  67.515 -                        return '-'
  67.516 +        if self.infoIsSet('image'):
  67.517 +            sxpr.append(['image', self.info['image']])
  67.518 +
  67.519 +        if self.infoIsSet('device'):
  67.520 +            for (_, c) in self.info['device']:
  67.521 +                sxpr.append(['device', c])
  67.522 +
  67.523 +        def stateChar(name):
  67.524 +            if name in self.info:
  67.525 +                if self.info[name]:
  67.526 +                    return name[0]
  67.527                  else:
  67.528 -                    return '?'
  67.529 -
  67.530 -            state = reduce(
  67.531 -                lambda x, y: x + y,
  67.532 -                map(stateChar,
  67.533 -                    ['running', 'blocked', 'paused', 'shutdown', 'crashed']))
  67.534 +                    return '-'
  67.535 +            else:
  67.536 +                return '?'
  67.537  
  67.538 -            sxpr.append(['state', state])
  67.539 -            if self.infoIsSet('shutdown'):
  67.540 -                reason = shutdown_reason(self.info['shutdown_reason'])
  67.541 -                sxpr.append(['shutdown_reason', reason])
  67.542 -            if self.infoIsSet('cpu_time'):
  67.543 -                sxpr.append(['cpu_time', self.info['cpu_time']/1e9])    
  67.544 -            sxpr.append(['vcpus', self.info['vcpus']])
  67.545 -            sxpr.append(['cpumap', self.info['cpumap']])
  67.546 -            if self.infoIsSet('vcpu_to_cpu'):
  67.547 -                sxpr.append(['cpu', self.info['vcpu_to_cpu'][0]])
  67.548 -                # build a string, using '|' to separate items, show only up
  67.549 -                # to number of vcpus in domain, and trim the trailing '|'
  67.550 -                sxpr.append(['vcpu_to_cpu', ''.join(map(lambda x: str(x)+'|',
  67.551 -                            self.info['vcpu_to_cpu'][0:self.info['vcpus']]))[:-1]])
  67.552 +        state = reduce(
  67.553 +            lambda x, y: x + y,
  67.554 +            map(stateChar,
  67.555 +                ['running', 'blocked', 'paused', 'shutdown', 'crashed',
  67.556 +                 'dying']))
  67.557 +
  67.558 +        sxpr.append(['state', state])
  67.559 +        if self.infoIsSet('shutdown'):
  67.560 +            reason = shutdown_reason(self.info['shutdown_reason'])
  67.561 +            sxpr.append(['shutdown_reason', reason])
  67.562 +        if self.infoIsSet('cpu_time'):
  67.563 +            sxpr.append(['cpu_time', self.info['cpu_time']/1e9])
  67.564 +        sxpr.append(['vcpus', self.info['vcpus']])
  67.565 +        sxpr.append(['cpumap', self.info['cpumap']])
  67.566 +        if self.infoIsSet('vcpu_to_cpu'):
  67.567 +            sxpr.append(['cpu', self.info['vcpu_to_cpu'][0]])
  67.568 +            sxpr.append(['vcpu_to_cpu', self.prettyVCpuMap()])
  67.569              
  67.570          if self.infoIsSet('start_time'):
  67.571              up_time =  time.time() - self.info['start_time']
  67.572 @@ -674,14 +821,17 @@ class XendDomainInfo:
  67.573              sxpr.append(['console_channel', self.console_channel.sxpr()])
  67.574          if self.console_mfn:
  67.575              sxpr.append(['console_mfn', self.console_mfn])
  67.576 -        if self.restart_count:
  67.577 -            sxpr.append(['restart_count', self.restart_count])
  67.578 -        if self.restart_state:
  67.579 -            sxpr.append(['restart_state', self.restart_state])
  67.580 -        if self.restart_time:
  67.581 -            sxpr.append(['restart_time', str(self.restart_time)])
  67.582 +
  67.583          return sxpr
  67.584  
  67.585 +
  67.586 +    ## private:
  67.587 +
  67.588 +    def prettyVCpuMap(self):
  67.589 +        return '|'.join(map(str,
  67.590 +                            self.info['vcpu_to_cpu'][0:self.info['vcpus']]))
  67.591 +
  67.592 +
  67.593      def check_name(self, name):
  67.594          """Check if a vm name is valid. Valid names contain alphabetic characters,
  67.595          digits, or characters in '_-.:/+'.
  67.596 @@ -719,11 +869,19 @@ class XendDomainInfo:
  67.597          @param config: configuration
  67.598          @raise: VmError on error
  67.599          """
  67.600 -        # todo - add support for scheduling params?
  67.601 +
  67.602 +        log.debug('XendDomainInfo.construct: %s %s',
  67.603 +                  str(self.domid),
  67.604 +                  str(self.info['ssidref']))
  67.605 +
  67.606 +        self.domid = xc.domain_create(dom = 0, ssidref = self.info['ssidref'])
  67.607 +
  67.608 +        if self.domid <= 0:
  67.609 +            raise VmError('Creating domain failed: name=%s' %
  67.610 +                          self.info['name'])
  67.611 +
  67.612          try:
  67.613              self.initDomain()
  67.614 -
  67.615 -            # Create domain devices.
  67.616              self.construct_image()
  67.617              self.configure()
  67.618              self.exportToDB()
  67.619 @@ -737,30 +895,23 @@ class XendDomainInfo:
  67.620  
  67.621  
  67.622      def initDomain(self):
  67.623 -        log.debug('XendDomainInfo.initDomain: %s %s %s %s)',
  67.624 +        log.debug('XendDomainInfo.initDomain: %s %s %s',
  67.625                    str(self.domid),
  67.626                    str(self.info['memory_KiB']),
  67.627 -                  str(self.info['ssidref']),
  67.628                    str(self.info['cpu_weight']))
  67.629  
  67.630 -        self.domid = xc.domain_create(dom = self.domid or 0,
  67.631 -                                      ssidref = self.info['ssidref'])
  67.632 -
  67.633 -        if 'image' not in self.info:
  67.634 +        if not self.infoIsSet('image'):
  67.635              raise VmError('Missing image in configuration')
  67.636  
  67.637          self.image = image.create(self,
  67.638                                    self.info['image'],
  67.639                                    self.info['device'])
  67.640  
  67.641 -        if self.domid <= 0:
  67.642 -            raise VmError('Creating domain failed: name=%s' %
  67.643 -                          self.info['name'])
  67.644 -
  67.645          if self.info['bootloader']:
  67.646              self.image.handleBootloading()
  67.647  
  67.648          xc.domain_setcpuweight(self.domid, self.info['cpu_weight'])
  67.649 +        # XXX Merge with configure_maxmem?
  67.650          m = self.image.getDomainMemory(self.info['memory_KiB'])
  67.651          xc.domain_setmaxmem(self.domid, m)
  67.652          xc.domain_memory_increase_reservation(self.domid, m, 0, 0)
  67.653 @@ -794,6 +945,8 @@ class XendDomainInfo:
  67.654          self.configure_vcpus(self.info['vcpus'])
  67.655  
  67.656  
  67.657 +    ## public:
  67.658 +
  67.659      def delete(self):
  67.660          """Delete the vm's db.
  67.661          """
  67.662 @@ -803,48 +956,46 @@ class XendDomainInfo:
  67.663              log.warning("error in domain db delete: %s", ex)
  67.664  
  67.665  
  67.666 -    def destroy_domain(self):
  67.667 -        """Destroy the vm's domain.
  67.668 -        The domain will not finally go away unless all vm
  67.669 -        devices have been released.
  67.670 -        """
  67.671 -        if self.domid is None:
  67.672 -            return
  67.673 -        try:
  67.674 -            xc.domain_destroy(dom=self.domid)
  67.675 -        except Exception, err:
  67.676 -            log.exception("Domain destroy failed: %s", self.info['name'])
  67.677 +    def cleanup(self):
  67.678 +        """Cleanup vm resources: release devices.  Nothrow guarantee."""
  67.679  
  67.680 -    def cleanup(self):
  67.681 -        """Cleanup vm resources: release devices.
  67.682 -        """
  67.683 -        self.state = STATE_VM_TERMINATED
  67.684 +        self.state_set(STATE_VM_TERMINATED)
  67.685          self.release_devices()
  67.686          self.closeStoreChannel()
  67.687 -        if self.console_channel:
  67.688 -            # notify processes using this console?
  67.689 -            try:
  67.690 -                self.console_channel.close()
  67.691 -                self.console_channel = None
  67.692 -            except:
  67.693 -                pass
  67.694 +        self.closeConsoleChannel()
  67.695 +
  67.696          if self.image:
  67.697              try:
  67.698                  self.image.destroy()
  67.699 -                self.image = None
  67.700              except:
  67.701 -                pass
  67.702 +                log.exception(
  67.703 +                    "XendDomainInfo.cleanup: image.destroy() failed.")
  67.704 +            self.image = None
  67.705 +
  67.706  
  67.707      def destroy(self):
  67.708 -        """Cleanup vm and destroy domain.
  67.709 -        """
  67.710 +        """Cleanup vm and destroy domain.  Nothrow guarantee."""
  67.711  
  67.712 -        log.debug("XendDomainInfo.destroy")
  67.713 +        log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))
  67.714  
  67.715 -        self.destroy_domain()
  67.716          self.cleanup()
  67.717 -        self.exportToDB()
  67.718 -        return 0
  67.719 +
  67.720 +        try:
  67.721 +            self.removeVm()
  67.722 +        except Exception:
  67.723 +            log.exception("Removing VM path failed.")
  67.724 +
  67.725 +        try:
  67.726 +            self.removeDom()
  67.727 +        except Exception:
  67.728 +            log.exception("Removing domain path failed.")
  67.729 +
  67.730 +        try:
  67.731 +            if self.domid is not None:
  67.732 +                xc.domain_destroy(dom=self.domid)
  67.733 +        except Exception:
  67.734 +            log.exception("XendDomainInfo.destroy: xc.domain_destroy failed.")
  67.735 +
  67.736  
  67.737      def is_terminated(self):
  67.738          """Check if a domain has been terminated.
  67.739 @@ -852,8 +1003,7 @@ class XendDomainInfo:
  67.740          return self.state == STATE_VM_TERMINATED
  67.741  
  67.742      def release_devices(self):
  67.743 -        """Release all vm devices.
  67.744 -        """
  67.745 +        """Release all vm devices.  Nothrow guarantee."""
  67.746  
  67.747          while True:
  67.748              t = xstransact("%s/device" % self.path)
  67.749 @@ -865,8 +1015,8 @@ class XendDomainInfo:
  67.750                          # Log and swallow any exceptions in removal --
  67.751                          # there's nothing more we can do.
  67.752                          log.exception(
  67.753 -                           "Device release failed: %s; %s; %s; %s" %
  67.754 -                            (self.info['name'], n, d, str(ex)))
  67.755 +                           "Device release failed: %s; %s; %s",
  67.756 +                           self.info['name'], n, d)
  67.757              if t.commit():
  67.758                  break
  67.759  
  67.760 @@ -902,8 +1052,7 @@ class XendDomainInfo:
  67.761  
  67.762          @raise: VmError for invalid devices
  67.763          """
  67.764 -        if not self.rebooting():
  67.765 -            self.create_configured_devices()
  67.766 +        self.create_configured_devices()
  67.767          if self.image:
  67.768              self.image.createDeviceModel()
  67.769  
  67.770 @@ -942,23 +1091,6 @@ class XendDomainInfo:
  67.771              return reason == 'reboot'
  67.772          return False
  67.773  
  67.774 -    def restart_cancel(self):
  67.775 -        """Cancel a vm restart.
  67.776 -        """
  67.777 -        self.restart_state = None
  67.778 -
  67.779 -    def restarting(self):
  67.780 -        """Put the vm into restart mode.
  67.781 -        """
  67.782 -        self.restart_state = STATE_RESTART_PENDING
  67.783 -
  67.784 -    def restart_pending(self):
  67.785 -        """Test if the vm has a pending restart.
  67.786 -        """
  67.787 -        return self.restart_state == STATE_RESTART_PENDING
  67.788 -
  67.789 -    def rebooting(self):
  67.790 -        return self.restart_state == STATE_RESTART_BOOTING
  67.791  
  67.792      def restart_check(self):
  67.793          """Check if domain restart is OK.
  67.794 @@ -976,23 +1108,37 @@ class XendDomainInfo:
  67.795          self.restart_time = tnow
  67.796          self.restart_count += 1
  67.797  
  67.798 +
  67.799      def restart(self):
  67.800 -        """Restart the domain after it has exited.
  67.801 -        Reuses the domain id
  67.802 +        """Restart the domain after it has exited. """
  67.803  
  67.804 -        """
  67.805 +        #            self.restart_check()
  67.806 +        self.cleanup()
  67.807 +
  67.808 +        config = self.sxpr()
  67.809 +
  67.810 +        if self.readVm('xend/restart_in_progress'):
  67.811 +            log.error('Xend failed during restart of domain %d.  '
  67.812 +                      'Refusing to restart to avoid loops.',
  67.813 +                      self.domid)
  67.814 +            self.destroy()
  67.815 +            return
  67.816 +
  67.817 +        self.writeVm('xend/restart_in_progress', 'True')
  67.818 +
  67.819          try:
  67.820 -            self.clear_shutdown()
  67.821 -            self.state = STATE_VM_OK
  67.822 -            self.shutdown_pending = None
  67.823 -            self.restart_check()
  67.824 -            self.exportToDB()
  67.825 -            self.restart_state = STATE_RESTART_BOOTING
  67.826 -            self.configure_bootloader()
  67.827 -            self.construct()
  67.828 -            self.exportToDB()
  67.829 +            self.destroy()
  67.830 +            try:
  67.831 +                xd = get_component('xen.xend.XendDomain')
  67.832 +                xd.domain_unpause(xd.domain_create(config).getDomid())
  67.833 +            except Exception, exn:
  67.834 +                log.exception('Failed to restart domain %d.', self.domid)
  67.835          finally:
  67.836 -            self.restart_state = None
  67.837 +            self.removeVm('xend/restart_in_progress')
  67.838 +            
  67.839 +        # self.configure_bootloader()
  67.840 +        #        self.exportToDB()
  67.841 +
  67.842  
  67.843      def configure_bootloader(self):
  67.844          if not self.info['bootloader']:
  67.845 @@ -1006,7 +1152,8 @@ class XendDomainInfo:
  67.846          if dev:
  67.847              disk = sxp.child_value(dev, "uname")
  67.848              fn = blkdev_uname_to_file(disk)
  67.849 -            blcfg = bootloader(self.info['bootloader'], fn, 1, self.info['vcpus'])
  67.850 +            blcfg = bootloader(self.info['bootloader'], fn, 1,
  67.851 +                               self.info['vcpus'])
  67.852          if blcfg is None:
  67.853              msg = "Had a bootloader specified, but can't find disk"
  67.854              log.error(msg)
  67.855 @@ -1023,7 +1170,9 @@ class XendDomainInfo:
  67.856  
  67.857  
  67.858      def configure_maxmem(self):
  67.859 -        xc.domain_setmaxmem(self.domid, maxmem_kb = self.info['maxmem_KiB'])
  67.860 +        if self.image:
  67.861 +            m = self.image.getDomainMemory(self.info['memory_KiB'])
  67.862 +            xc.domain_setmaxmem(self.domid, maxmem_kb = m)
  67.863  
  67.864  
  67.865      def vcpu_hotplug(self, vcpu, state):
  67.866 @@ -1038,24 +1187,9 @@ class XendDomainInfo:
  67.867              availability = "online"
  67.868          self.storeVm("cpu/%d/availability" % vcpu, availability)
  67.869  
  67.870 -    def shutdown(self, reason):
  67.871 -        if not reason in shutdown_reasons.values():
  67.872 -            raise XendError('invalid reason:' + reason)
  67.873 -        self.storeVm("control/shutdown", reason)
  67.874 -        if not reason in ['suspend']:
  67.875 -            self.shutdown_pending = {'start':time.time(), 'reason':reason}
  67.876 -
  67.877 -    def clear_shutdown(self):
  67.878 -        self.removeVm("control/shutdown")
  67.879 -
  67.880      def send_sysrq(self, key=0):
  67.881          self.storeVm("control/sysrq", '%c' % key)
  67.882  
  67.883 -    def shutdown_time_left(self, timeout):
  67.884 -        if not self.shutdown_pending:
  67.885 -            return 0
  67.886 -        return timeout - (time.time() - self.shutdown_pending['start'])
  67.887 -
  67.888      def dom0_init_store(self):
  67.889          if not self.store_channel:
  67.890              self.store_channel = self.eventChannel("store/port")
  67.891 @@ -1078,8 +1212,6 @@ class XendDomainInfo:
  67.892      def dom0_enforce_vcpus(self):
  67.893          dom = 0
  67.894          # get max number of vcpus to use for dom0 from config
  67.895 -        from xen.xend import XendRoot
  67.896 -        xroot = XendRoot.instance()
  67.897          target = int(xroot.get_dom0_vcpus())
  67.898          log.debug("number of vcpus to use is %d" % (target))
  67.899     
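The refreshShutdown() hunk above is the heart of the new shutdown handling in XendDomainInfo: once a guest has been asked to shut down, xend compares the stored xend/shutdown_start_time against SHUTDOWN_TIMEOUT, destroys the domain when the deadline has passed, and otherwise schedules another check for the moment the deadline could expire. A minimal stand-alone sketch of that pattern, using threading.Timer in place of xend's scheduler.later() and a plain dict in place of the xenstore-backed readVm()/writeVm() helpers (both substitutions are illustrative assumptions, not the real xend API):

    import threading
    import time

    SHUTDOWN_TIMEOUT = 30.0   # seconds; illustrative value only

    class ShutdownWatcher:
        """Re-check a guest until it is gone or its shutdown deadline expires."""

        def __init__(self, destroy):
            self.vm = {}              # stand-in for the xenstore-backed VM record
            self.destroy = destroy    # called once the deadline has passed

        def request_shutdown(self, reason):
            self.vm["control/shutdown"] = reason
            if reason != "suspend":
                self.vm["xend/shutdown_start_time"] = time.time()
            self.refresh()

        def refresh(self):
            sst = self.vm.get("xend/shutdown_start_time")
            if sst is None:
                return                               # no shutdown in progress
            timeout = SHUTDOWN_TIMEOUT - (time.time() - sst)
            if timeout < 0:
                self.destroy()                       # deadline expired
            else:
                # Look again once the deadline can have been reached.
                threading.Timer(timeout, self.refresh).start()

In the real code the check is also re-entered from update() whenever fresh domain information arrives from the hypervisor, so the scheduled timer is only a back-stop.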
    68.1 --- a/tools/python/xen/xend/server/DevController.py	Thu Sep 29 16:22:02 2005 -0600
    68.2 +++ b/tools/python/xen/xend/server/DevController.py	Thu Sep 29 17:28:44 2005 -0600
    68.3 @@ -81,6 +81,13 @@ class DevController:
    68.4          xstransact.Remove(backpath)
    68.5  
    68.6  
    68.7 +    def sxprs(self):
    68.8 +        """@return an s-expression describing all the devices of this
    68.9 +        controller's device-class.
   68.10 +        """
   68.11 +        return xstransact.ListRecursive(self.frontendRoot())
   68.12 +
   68.13 +
   68.14      def sxpr(self, devid):
   68.15          """@return an s-expression describing the specified device.
   68.16          """
   68.17 @@ -126,8 +133,8 @@ class DevController:
   68.18          compulsory to use it; subclasses may prefer to allocate IDs based upon
   68.19          the device configuration instead.
   68.20          """
   68.21 +        path = self.frontendMiscPath()
   68.22          while True:
   68.23 -            path = self.frontendMiscPath()
   68.24              t = xstransact(path)
   68.25              try:
   68.26                  result = t.read("nextDeviceID")
   68.27 @@ -196,8 +203,11 @@ class DevController:
   68.28  
   68.29  
   68.30      def frontendPath(self, devid):
   68.31 -        return "%s/device/%s/%d" % (self.vm.getPath(), self.deviceClass,
   68.32 -                                    devid)
   68.33 +        return "%s/%d" % (self.frontendRoot(), devid)
   68.34 +
   68.35 +
   68.36 +    def frontendRoot(self):
   68.37 +        return "%s/device/%s" % (self.vm.getPath(), self.deviceClass)
   68.38  
   68.39  
   68.40      def frontendMiscPath(self):
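The frontendPath()/frontendRoot() split above derives every per-class device path from one helper, which is also what the new sxprs() walks via xstransact.ListRecursive(). The layout can be written down as two pure functions (the VM path and device class below are made-up example values):

    def frontend_root(vm_path, device_class):
        """Mirror of DevController.frontendRoot(): all devices of one class."""
        return "%s/device/%s" % (vm_path, device_class)

    def frontend_path(vm_path, device_class, devid):
        """Mirror of DevController.frontendPath(): one specific device."""
        return "%s/%d" % (frontend_root(vm_path, device_class), devid)

    assert frontend_root("/vm/1234", "vbd") == "/vm/1234/device/vbd"
    assert frontend_path("/vm/1234", "vbd", 769) == "/vm/1234/device/vbd/769"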
    69.1 --- a/tools/python/xen/xend/server/SrvDaemon.py	Thu Sep 29 16:22:02 2005 -0600
    69.2 +++ b/tools/python/xen/xend/server/SrvDaemon.py	Thu Sep 29 17:28:44 2005 -0600
    69.3 @@ -9,26 +9,24 @@ import signal
    69.4  import sys
    69.5  import threading
    69.6  import linecache
    69.7 -import socket
    69.8  import pwd
    69.9  import re
   69.10 -import StringIO
   69.11  import traceback
   69.12 -import time
   69.13 -import glob
   69.14  
   69.15 -from xen.xend import sxp
   69.16 -from xen.xend import PrettyPrint
   69.17 -from xen.xend import EventServer; eserver = EventServer.instance()
   69.18 -from xen.xend.XendError import XendError
   69.19 +from xen.xend import EventServer
   69.20  from xen.xend.server import SrvServer
   69.21  from xen.xend.XendLogging import log
   69.22 -from xen.xend import XendRoot; xroot = XendRoot.instance()
   69.23 +from xen.xend import XendRoot
   69.24  
   69.25  import event
   69.26  import relocate
   69.27  from params import *
   69.28  
   69.29 +
   69.30 +eserver = EventServer.instance()
   69.31 +xroot = XendRoot.instance()
   69.32 +
   69.33 +
   69.34  class Daemon:
   69.35      """The xend daemon.
   69.36      """
   69.37 @@ -233,7 +231,7 @@ class Daemon:
   69.38              except:
   69.39                  pass
   69.40  
   69.41 -    def print_trace(self, str):
   69.42 +    def print_trace(self, string):
   69.43          for i in range(self.traceindent):
   69.44              ch = " "
   69.45              if (i % 5):
   69.46 @@ -241,7 +239,7 @@ class Daemon:
   69.47              else:
   69.48                  ch = '|'
   69.49              self.tracefile.write(ch)
   69.50 -        self.tracefile.write(str)
   69.51 +        self.tracefile.write(string)
   69.52              
   69.53      def trace(self, frame, event, arg):
   69.54          if not self.traceon:
   69.55 @@ -289,7 +287,7 @@ class Daemon:
   69.56          try:
   69.57              os.setuid(pwd.getpwnam(XEND_USER)[2])
   69.58              return 0
   69.59 -        except KeyError, error:
   69.60 +        except KeyError:
   69.61              print >>sys.stderr, "Error: no such user '%s'" % XEND_USER
   69.62              return 1
   69.63  
    70.1 --- a/tools/python/xen/xend/server/SrvDmesg.py	Thu Sep 29 16:22:02 2005 -0600
    70.2 +++ b/tools/python/xen/xend/server/SrvDmesg.py	Thu Sep 29 17:28:44 2005 -0600
    70.3 @@ -13,15 +13,15 @@
    70.4  # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    70.5  #============================================================================
    70.6  # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
    70.7 +# Copyright (C) 2005 XenSource Ltd
    70.8  #============================================================================
    70.9  
   70.10 -import os
   70.11  
   70.12 -from xen.xend import sxp
   70.13  from xen.xend import XendDmesg
   70.14  
   70.15  from xen.web.SrvDir import SrvDir
   70.16  
   70.17 +
   70.18  class SrvDmesg(SrvDir):
   70.19      """Xen Dmesg output.
   70.20      """
   70.21 @@ -47,6 +47,6 @@ class SrvDmesg(SrvDir):
   70.22      def info(self):
   70.23          return self.xd.info()
   70.24  
   70.25 -    def op_clear(self, op, req):
   70.26 +    def op_clear(self, _1, _2):
   70.27          self.xd.clear()
   70.28          return 0
    71.1 --- a/tools/python/xen/xend/server/SrvDomain.py	Thu Sep 29 16:22:02 2005 -0600
    71.2 +++ b/tools/python/xen/xend/server/SrvDomain.py	Thu Sep 29 17:28:44 2005 -0600
    71.3 @@ -150,17 +150,6 @@ class SrvDomain(SrvDir):
    71.4          val = fn(req.args, {'dom': self.dom.domid})
    71.5          return val
    71.6  
    71.7 -    def op_device(self, op, req):
    71.8 -        fn = FormFn(self.xd.domain_devtype_get,
    71.9 -                    [['dom',    'int'],
   71.10 -                     ['type',   'str'],
   71.11 -                     ['idx',    'int']])
   71.12 -        val = fn(req.args, {'dom': self.dom.domid})
   71.13 -        if val:
   71.14 -            return val.sxpr()
   71.15 -        else:
   71.16 -            raise XendError("invalid device")
   71.17 -
   71.18      def op_device_create(self, op, req):
   71.19          fn = FormFn(self.xd.domain_device_create,
   71.20                      [['dom',    'int'],
   71.21 @@ -172,7 +161,7 @@ class SrvDomain(SrvDir):
   71.22          fn = FormFn(self.xd.domain_device_refresh,
   71.23                      [['dom',  'int'],
   71.24                       ['type', 'str'],
   71.25 -                     ['idx',  'str']])
   71.26 +                     ['idx',  'int']])
   71.27          val = fn(req.args, {'dom': self.dom.domid})
   71.28          return val
   71.29  
   71.30 @@ -180,7 +169,7 @@ class SrvDomain(SrvDir):
   71.31          fn = FormFn(self.xd.domain_device_destroy,
   71.32                      [['dom',  'int'],
   71.33                       ['type', 'str'],
   71.34 -                     ['idx',  'str']])
   71.35 +                     ['idx',  'int']])
   71.36          val = fn(req.args, {'dom': self.dom.domid})
   71.37          return val
   71.38                  
   71.39 @@ -188,7 +177,7 @@ class SrvDomain(SrvDir):
   71.40          fn = FormFn(self.xd.domain_device_configure,
   71.41                      [['dom',    'int'],
   71.42                       ['config', 'sxpr'],
   71.43 -                     ['idx',    'str']])
   71.44 +                     ['idx',    'int']])
   71.45          val = fn(req.args, {'dom': self.dom.domid})
   71.46          return val
   71.47  
   71.48 @@ -230,10 +219,6 @@ class SrvDomain(SrvDir):
   71.49              self.print_path(req)
   71.50              #self.ls()
   71.51              req.write('<p>%s</p>' % self.dom)
   71.52 -            if self.dom.config:
   71.53 -                req.write("<code><pre>")
   71.54 -                PrettyPrint.prettyprint(self.dom.config, out=req)
   71.55 -                req.write("</pre></code>")
   71.56              self.form(req)
   71.57              req.write('</body></html>')
   71.58          return ''
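The SrvDomain hunks only change the declared argument types: the device 'idx' values are now coerced to 'int' before being handed to XendDomain. The FormFn pattern they rely on is, in essence, a declarative converter from HTTP form arguments to keyword arguments; the stand-in below mimics only that behaviour and is an assumption, not the real FormFn class:

    CONVERTERS = {"int": int, "str": str}

    def apply_form(fn, spec, req_args, fixed):
        """Coerce one value per spec entry from a request-args style mapping
        (name -> list of strings) and call fn with the results."""
        kwargs = dict(fixed)
        for name, kind in spec:
            if name in kwargs:
                continue                  # already supplied, e.g. 'dom'
            kwargs[name] = CONVERTERS[kind](req_args[name][0])
        return fn(**kwargs)

    # e.g. apply_form(xd.domain_device_destroy,
    #                 [["dom", "int"], ["type", "str"], ["idx", "int"]],
    #                 {"type": ["vbd"], "idx": ["769"]}, {"dom": 3})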
    72.1 --- a/tools/python/xen/xend/server/SrvDomainDir.py	Thu Sep 29 16:22:02 2005 -0600
    72.2 +++ b/tools/python/xen/xend/server/SrvDomainDir.py	Thu Sep 29 17:28:44 2005 -0600
    72.3 @@ -38,19 +38,17 @@ class SrvDomainDir(SrvDir):
    72.4          self.xd = XendDomain.instance()
    72.5  
    72.6      def domain(self, x):
    72.7 -        val = None
    72.8          dom = self.xd.domain_lookup_by_name(x)
    72.9          if not dom:
   72.10              raise XendError('No such domain ' + str(x))
   72.11 -        val = SrvDomain(dom)
   72.12 -        return val
   72.13 +        return SrvDomain(dom)
   72.14  
   72.15      def get(self, x):
   72.16          v = SrvDir.get(self, x)
   72.17          if v is not None:
   72.18              return v
   72.19 -        v = self.domain(x)
   72.20 -        return v
   72.21 +        else:
   72.22 +            return self.domain(x)
   72.23  
   72.24      def op_create(self, op, req):
   72.25          """Create a domain.
   72.26 @@ -152,11 +150,11 @@ class SrvDomainDir(SrvDir):
   72.27              domains = self.xd.list_sorted()
   72.28              req.write('<ul>')
   72.29              for d in domains:
   72.30 -                req.write('<li><a href="%s%s"> Domain %s</a>'
   72.31 -                          % (url, d.getName(), d.getName()))
   72.32 -                req.write('id=%s' % d.getDomain())
   72.33 -                req.write('memory=%d'% d.getMemoryTarget())
   72.34 -                req.write('ssidref=%d'% d.getSsidref())
   72.35 +                req.write(
   72.36 +                    '<li><a href="%s%s">Domain %s</a>: id = %s, memory = %d, '
   72.37 +                    'ssidref = %d.'
   72.38 +                    % (url, d.getName(), d.getName(), d.getDomid(),
   72.39 +                       d.getMemoryTarget(), d.getSsidref()))
   72.40                  req.write('</li>')
   72.41              req.write('</ul>')
   72.42  
    73.1 --- a/tools/python/xen/xend/server/SrvNode.py	Thu Sep 29 16:22:02 2005 -0600
    73.2 +++ b/tools/python/xen/xend/server/SrvNode.py	Thu Sep 29 17:28:44 2005 -0600
    73.3 @@ -62,7 +62,7 @@ class SrvNode(SrvDir):
    73.4              for d in self.info():
    73.5                  req.write('<li> %10s: %s' % (d[0], str(d[1])))
    73.6              req.write('<li><a href="%sdmesg">Xen dmesg output</a>' % url)
    73.7 -            req.write('<li><a href="%slog>Xend log</a>' % url)
    73.8 +            req.write('<li><a href="%slog">Xend log</a>' % url)
    73.9              req.write('</ul>')
   73.10              req.write('</body></html>')
   73.11              
    74.1 --- a/tools/python/xen/xend/xenstore/xstransact.py	Thu Sep 29 16:22:02 2005 -0600
    74.2 +++ b/tools/python/xen/xend/xenstore/xstransact.py	Thu Sep 29 17:28:44 2005 -0600
    74.3 @@ -1,4 +1,5 @@
    74.4  # Copyright (C) 2005 Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
    74.5 +# Copyright (C) 2005 XenSource Ltd
    74.6  
    74.7  # This file is subject to the terms and conditions of the GNU General
    74.8  # Public License.  See the file "COPYING" in the main directory of
    74.9 @@ -9,6 +10,7 @@ import threading
   74.10  from xen.lowlevel import xs
   74.11  from xen.xend.xenstore.xsutil import xshandle
   74.12  
   74.13 +
   74.14  class xstransact:
   74.15  
   74.16      def __init__(self, path):
   74.17 @@ -40,8 +42,15 @@ class xstransact:
   74.18                                 '%s, while reading %s' % (ex.args[1], path))
   74.19  
   74.20      def read(self, *args):
   74.21 +        """If no arguments are given, return the value at this transaction's
   74.22 +        path.  If one argument is given, treat that argument as a subpath to
   74.23 +        this transaction's path, and return the value at that path.
   74.24 +        Otherwise, treat each argument as a subpath to this transaction's
   74.25 +        path, and return a list composed of the values at each of those
   74.26 +        instead.
   74.27 +        """
   74.28          if len(args) == 0:
   74.29 -            raise TypeError
   74.30 +            return xshandle().read(self.path)
   74.31          if len(args) == 1:
   74.32              return self._read(args[0])
   74.33          ret = []
   74.34 @@ -105,13 +114,50 @@ class xstransact:
   74.35          return []
   74.36  
   74.37      def list(self, *args):
   74.38 +        """If no arguments are given, list this transaction's path, returning
   74.39 +        the entries therein, or the empty list if no entries are found.
   74.40 +        Otherwise, treat each argument as a subpath to this transaction's
   74.41 +        path, and return the cumulative listing of each of those instead.
   74.42 +        """
   74.43          if len(args) == 0:
   74.44 -            raise TypeError
   74.45 +            ret = xshandle().ls(self.path)
   74.46 +            if ret is None:
   74.47 +                return []
   74.48 +            else:
   74.49 +                return ret
   74.50 +        else:
   74.51 +            ret = []
   74.52 +            for key in args:
   74.53 +                ret.extend(self._list(key))
   74.54 +            return ret
   74.55 +
   74.56 +
   74.57 +    def list_recursive_(self, subdir, keys):
   74.58          ret = []
   74.59 -        for key in args:
   74.60 -            ret.extend(self._list(key))
   74.61 +        for key in keys:
   74.62 +            new_subdir = subdir + "/" + key
   74.63 +            l = xshandle().ls(new_subdir)
   74.64 +            if l:
   74.65 +                ret.append([key, self.list_recursive_(new_subdir, l)])
   74.66 +            else:
   74.67 +                ret.append([key, xshandle().read(new_subdir)])
   74.68          return ret
   74.69  
   74.70 +
   74.71 +    def list_recursive(self, *args):
    74.72 +        """If no arguments are given, recursively list this transaction's
    74.73 +        path, returning the entries therein, or the empty list if no entries
    74.74 +        are found.  Otherwise, treat each argument as a subpath to this
    74.75 +        transaction's path, and return the cumulative recursive listing of those.
   74.76 +        """
   74.77 +        if len(args) == 0:
   74.78 +            args = self.list()
   74.79 +            if args is None or len(args) == 0:
   74.80 +                return []
   74.81 +
   74.82 +        return self.list_recursive_(self.path, args)
   74.83 +
   74.84 +
   74.85      def gather(self, *args):
   74.86          if len(args) and type(args[0]) != tuple:
   74.87              args = args,
   74.88 @@ -163,6 +209,13 @@ class xstransact:
   74.89  
   74.90  
   74.91      def Read(cls, path, *args):
   74.92 +        """If only one argument is given (path), return the value stored at
   74.93 +        that path.  If two arguments are given, treat the second argument as a
   74.94 +        subpath within the first, and return the value at the composed path.
   74.95 +        Otherwise, treat each argument after the first as a subpath to the
   74.96 +        given path, and return a list composed of the values at each of those
   74.97 +        instead.  This operation is performed inside a transaction.
   74.98 +        """
   74.99          while True:
  74.100              t = cls(path)
  74.101              try:
  74.102 @@ -206,6 +259,12 @@ class xstransact:
  74.103      Remove = classmethod(Remove)
  74.104  
  74.105      def List(cls, path, *args):
  74.106 +        """If only one argument is given (path), list its contents, returning
  74.107 +        the entries therein, or the empty list if no entries are found.
  74.108 +        Otherwise, treat each further argument as a subpath to the given path,
  74.109 +        and return the cumulative listing of each of those instead.  This
  74.110 +        operation is performed inside a transaction.
  74.111 +        """
  74.112          while True:
  74.113              t = cls(path)
  74.114              try:
  74.115 @@ -218,6 +277,25 @@ class xstransact:
  74.116  
  74.117      List = classmethod(List)
  74.118  
  74.119 +    def ListRecursive(cls, path, *args):
  74.120 +        """If only one argument is given (path), list its contents
  74.121 +        recursively, returning the entries therein, or the empty list if no
  74.122 +        entries are found.  Otherwise, treat each further argument as a
  74.123 +        subpath to the given path, and return the cumulative listing of each
  74.124 +        of those instead.  This operation is performed inside a transaction.
  74.125 +        """
  74.126 +        while True:
  74.127 +            t = cls(path)
  74.128 +            try:
  74.129 +                v = t.list_recursive(*args)
  74.130 +                if t.commit():
  74.131 +                    return v
  74.132 +            except:
  74.133 +                t.abort()
  74.134 +                raise
  74.135 +
  74.136 +    ListRecursive = classmethod(ListRecursive)
  74.137 +
  74.138      def Gather(cls, path, *args):
  74.139          while True:
  74.140              t = cls(path)
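list_recursive_() above is a plain depth-first walk: for each key it lists the child directory, recursing where entries exist and reading the leaf value where they do not, and returns nested [key, value-or-children] pairs. The same walk over a flat path-to-value dict (a stand-in for the real store with made-up entries; the real code goes through xen.lowlevel.xs):

    store = {
        "/vm/1234/device/vbd/769/backend": "/local/domain/0/backend/vbd/1/769",
        "/vm/1234/device/vbd/769/dev":     "xvda",
        "/vm/1234/device/vif/0/mac":       "aa:00:00:11:22:33",
    }

    def ls(path):
        """Immediate children of path, or None if it is a leaf or missing."""
        prefix = path.rstrip("/") + "/"
        kids = {key[len(prefix):].split("/", 1)[0]
                for key in store if key.startswith(prefix)}
        return sorted(kids) or None

    def read(path):
        return store.get(path)

    def list_recursive(subdir, keys):
        """Nested [key, value-or-sublist] pairs, shaped like list_recursive_()."""
        ret = []
        for key in keys:
            new_subdir = subdir + "/" + key
            children = ls(new_subdir)
            if children:
                ret.append([key, list_recursive(new_subdir, children)])
            else:
                ret.append([key, read(new_subdir)])
        return ret

    print(list_recursive("/vm/1234/device", ls("/vm/1234/device")))
    # [['vbd', [['769', [['backend', '/local/domain/0/backend/vbd/1/769'],
    #                    ['dev', 'xvda']]]]],
    #  ['vif', [['0', [['mac', 'aa:00:00:11:22:33']]]]]]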
    75.1 --- a/tools/python/xen/xm/main.py	Thu Sep 29 16:22:02 2005 -0600
    75.2 +++ b/tools/python/xen/xm/main.py	Thu Sep 29 17:28:44 2005 -0600
    75.3 @@ -175,6 +175,10 @@ def xm_save(args):
    75.4  
    75.5      dom = args[0] # TODO: should check if this exists
    75.6      savefile = os.path.abspath(args[1])
    75.7 +
    75.8 +    if not os.access(os.path.dirname(savefile), os.W_OK):
    75.9 +        err("xm save: Unable to create file %s" % savefile)
   75.10 +        sys.exit(1)
   75.11      
   75.12      from xen.xend.XendClient import server
   75.13      server.xend_domain_save(dom, savefile)
   75.14 @@ -184,6 +188,10 @@ def xm_restore(args):
   75.15  
   75.16      savefile = os.path.abspath(args[0])
   75.17  
   75.18 +    if not os.access(savefile, os.R_OK):
   75.19 +        err("xm restore: Unable to read file %s" % savefile)
   75.20 +        sys.exit(1)
   75.21 +
   75.22      from xen.xend.XendClient import server
   75.23      info = server.xend_domain_restore(savefile)
   75.24      PrettyPrint.prettyprint(info)
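The two xm hunks above fail fast with a readable error instead of letting xend discover an unwritable or unreadable save file later. The same pre-flight checks as stand-alone helpers (hypothetical names, not part of xm):

    import os
    import sys

    def check_save_target(savefile):
        """True if the directory the save file would land in is writable."""
        target_dir = os.path.dirname(os.path.abspath(savefile))
        if not os.access(target_dir, os.W_OK):
            print("unable to create file %s" % savefile, file=sys.stderr)
            return False
        return True

    def check_restore_source(savefile):
        """True if the save file is readable."""
        if not os.access(savefile, os.R_OK):
            print("unable to read file %s" % savefile, file=sys.stderr)
            return False
        return True

Note that os.access() tests the real uid/gid, and the W_OK check on the directory only establishes that a new file could be created there, not that an existing save file can be overwritten.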
    76.1 --- a/tools/xenstore/xenstored_domain.c	Thu Sep 29 16:22:02 2005 -0600
    76.2 +++ b/tools/xenstore/xenstored_domain.c	Thu Sep 29 17:28:44 2005 -0600
    76.3 @@ -63,6 +63,8 @@ struct domain
    76.4  	/* The connection associated with this. */
    76.5  	struct connection *conn;
    76.6  
    76.7 +	/* Have we noticed that this domain is shutdown? */
    76.8 +	int shutdown;
    76.9  };
   76.10  
   76.11  static LIST_HEAD(domains);
   76.12 @@ -222,19 +224,25 @@ static void domain_cleanup(void)
   76.13  {
   76.14  	xc_dominfo_t dominfo;
   76.15  	struct domain *domain, *tmp;
   76.16 -	int released = 0;
   76.17 +	int notify = 0;
   76.18  
   76.19  	list_for_each_entry_safe(domain, tmp, &domains, list) {
   76.20  		if (xc_domain_getinfo(*xc_handle, domain->domid, 1,
   76.21  				      &dominfo) == 1 &&
   76.22 -		    dominfo.domid == domain->domid &&
   76.23 -		    !dominfo.dying && !dominfo.crashed && !dominfo.shutdown)
   76.24 -			continue;
   76.25 +		    dominfo.domid == domain->domid) {
   76.26 +			if ((dominfo.crashed || dominfo.shutdown)
   76.27 +			    && !domain->shutdown) {
   76.28 +				domain->shutdown = 1;
   76.29 +				notify = 1;
   76.30 +			}
   76.31 +			if (!dominfo.dying)
   76.32 +				continue;
   76.33 +		}
   76.34  		talloc_free(domain->conn);
   76.35 -		released++;
   76.36 +		notify = 1;
   76.37  	}
   76.38  
   76.39 -	if (released)
   76.40 +	if (notify)
   76.41  		fire_watches(NULL, "@releaseDomain", false);
   76.42  }
   76.43  
   76.44 @@ -272,6 +280,7 @@ static struct domain *new_domain(void *c
   76.45  	struct domain *domain;
   76.46  	domain = talloc(context, struct domain);
   76.47  	domain->port = 0;
   76.48 +	domain->shutdown = 0;
   76.49  	domain->domid = domid;
   76.50  	domain->path = talloc_strdup(domain, path);
   76.51  	domain->page = xc_map_foreign_range(*xc_handle, domain->domid,
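The domain_cleanup() rework above adds edge-detection: xenstored now remembers, per domain, whether it has already seen that domain crash or shut down, and fires the @releaseDomain watch the first time that happens as well as when the domain is finally released. A schematic rendering of the same logic in Python (the real code is C inside xenstored; the per-domain dicts and the get_info/fire_watches callables are stand-ins):

    def domain_cleanup(domains, get_info, fire_watches):
        """domains: list of dicts with a 'domid' and a 'shutdown' flag that this
        function maintains.  get_info(domid) returns a dict with 'dying',
        'crashed' and 'shutdown' booleans, or None once the domain is gone."""
        notify = False
        for domain in list(domains):
            info = get_info(domain["domid"])
            if info is not None:
                if (info["crashed"] or info["shutdown"]) and not domain["shutdown"]:
                    domain["shutdown"] = True        # first time we noticed
                    notify = True
                if not info["dying"]:
                    continue                         # still running: keep it
            domains.remove(domain)                   # gone or dying: release it
            notify = True
        if notify:
            fire_watches("@releaseDomain")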
    77.1 --- a/xen/arch/x86/shadow32.c	Thu Sep 29 16:22:02 2005 -0600
    77.2 +++ b/xen/arch/x86/shadow32.c	Thu Sep 29 17:28:44 2005 -0600
    77.3 @@ -755,9 +755,13 @@ void free_monitor_pagetable(struct vcpu 
    77.4  
    77.5      /*
    77.6       * Then free monitor_table.
     77.7 +     * Note: for a VMX guest, only the BSP needs to do this free.
    77.8       */
    77.9 -    mfn = pagetable_get_pfn(v->arch.monitor_table);
   77.10 -    free_domheap_page(&frame_table[mfn]);
   77.11 +    if (!(VMX_DOMAIN(v) && v->vcpu_id)) {
   77.12 +        mfn = pagetable_get_pfn(v->arch.monitor_table);
   77.13 +        unmap_domain_page(v->arch.monitor_vtable);
   77.14 +        free_domheap_page(&frame_table[mfn]);
   77.15 +    }
   77.16  
   77.17      v->arch.monitor_table = mk_pagetable(0);
   77.18      v->arch.monitor_vtable = 0;
   77.19 @@ -1832,7 +1836,7 @@ shadow_mark_mfn_out_of_sync(struct vcpu 
   77.20      }
   77.21  #endif
   77.22  
   77.23 -    FSH_LOG("%s(gpfn=%lx, mfn=%lx) c=%08x t=%08x", __func__,
   77.24 +    FSH_LOG("%s(gpfn=%lx, mfn=%lx) c=%08x t=%08lx", __func__,
   77.25              gpfn, mfn, page->count_info, page->u.inuse.type_info);
   77.26  
   77.27      // XXX this will require some more thought...  Cross-domain sharing and
   77.28 @@ -3013,7 +3017,7 @@ static int check_pte(
   77.29      l1_pgentry_t guest_pte = *p_guest_pte;
   77.30      l1_pgentry_t shadow_pte = *p_shadow_pte;
   77.31      l1_pgentry_t snapshot_pte = p_snapshot_pte ? *p_snapshot_pte : l1e_empty();
   77.32 -    l1_pgentry_t eff_guest_pte;
   77.33 +    l1_pgentry_t eff_guest_pte = l1e_empty();
   77.34      unsigned long mask, eff_guest_pfn, eff_guest_mfn, shadow_mfn;
   77.35      int errors = 0, guest_writable;
   77.36      int page_table_page;
   77.37 @@ -3070,7 +3074,7 @@ static int check_pte(
   77.38  
   77.39      if ( (l1e_get_flags(shadow_pte) & _PAGE_RW ) && !guest_writable )
   77.40      {
   77.41 -        printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08x page_table_page=%d\n",
   77.42 +        printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=%lx page_table_page=%d\n",
   77.43                 eff_guest_pfn, eff_guest_mfn, shadow_mfn,
   77.44                 frame_table[eff_guest_mfn].u.inuse.type_info,
   77.45                 page_table_page);
   77.46 @@ -3081,7 +3085,7 @@ static int check_pte(
   77.47           (l1e_get_flags(shadow_pte) & _PAGE_RW ) &&
   77.48           !(guest_writable && (l1e_get_flags(eff_guest_pte) & _PAGE_DIRTY)) )
   77.49      {
   77.50 -        printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08x page_table_page=%d\n",
   77.51 +        printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=%lx page_table_page=%d\n",
   77.52                 eff_guest_pfn, eff_guest_mfn, shadow_mfn,
   77.53                 frame_table[eff_guest_mfn].u.inuse.type_info,
   77.54                 page_table_page);
    78.1 --- a/xen/arch/x86/shadow_public.c	Thu Sep 29 16:22:02 2005 -0600
    78.2 +++ b/xen/arch/x86/shadow_public.c	Thu Sep 29 17:28:44 2005 -0600
    78.3 @@ -256,14 +256,15 @@ void free_monitor_pagetable(struct vcpu 
    78.4  {
    78.5      unsigned long mfn;
    78.6  
    78.7 -//    ASSERT( pagetable_val(v->arch.monitor_table) );
    78.8      /*
    78.9       * free monitor_table.
    78.10 +     * Note: for a VMX guest, only the BSP needs to do this free.
   78.11       */
   78.12 -    //mfn = (pagetable_val(v->arch.monitor_table)) >> PAGE_SHIFT;
   78.13 -    mfn = pagetable_get_pfn(v->arch.monitor_table);
   78.14 -    unmap_domain_page(v->arch.monitor_vtable);
   78.15 -    free_domheap_page(&frame_table[mfn]);
   78.16 +    if (!(VMX_DOMAIN(v) && v->vcpu_id)) {
   78.17 +        mfn = pagetable_get_pfn(v->arch.monitor_table);
   78.18 +        unmap_domain_page(v->arch.monitor_vtable);
   78.19 +        free_domheap_page(&frame_table[mfn]);
   78.20 +    }
   78.21      v->arch.monitor_table = mk_pagetable(0);
   78.22      v->arch.monitor_vtable = 0;
   78.23  }
   78.24 @@ -358,9 +359,13 @@ void free_monitor_pagetable(struct vcpu 
   78.25  
   78.26      /*
   78.27       * Then free monitor_table.
    78.28 +     * Note: for a VMX guest, only the BSP needs to do this free.
   78.29       */
   78.30 -    mfn = pagetable_get_pfn(v->arch.monitor_table);
   78.31 -    free_domheap_page(&frame_table[mfn]);
   78.32 +    if (!(VMX_DOMAIN(v) && v->vcpu_id)) {
   78.33 +        mfn = pagetable_get_pfn(v->arch.monitor_table);
   78.34 +        unmap_domain_page(v->arch.monitor_vtable);
   78.35 +        free_domheap_page(&frame_table[mfn]);
   78.36 +    }
   78.37  
   78.38      v->arch.monitor_table = mk_pagetable(0);
   78.39      v->arch.monitor_vtable = 0;
    79.1 --- a/xen/arch/x86/time.c	Thu Sep 29 16:22:02 2005 -0600
    79.2 +++ b/xen/arch/x86/time.c	Thu Sep 29 17:28:44 2005 -0600
    79.3 @@ -119,8 +119,8 @@ static inline u64 scale_delta(u64 delta,
    79.4          "mov  %4,%%eax ; "
    79.5          "mov  %%edx,%4 ; "
    79.6          "mul  %5       ; "
    79.7 +        "xor  %5,%5    ; "
    79.8          "add  %4,%%eax ; "
    79.9 -        "xor  %5,%5    ; "
   79.10          "adc  %5,%%edx ; "
   79.11          : "=A" (product), "=r" (tmp1), "=r" (tmp2)
   79.12          : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (scale->mul_frac) );
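The scale_delta() fix above is about x86 flags: xor clears the carry flag, so in the old ordering the carry produced by the following add was discarded before the adc could fold it into the high word; zeroing the register before the add preserves it. The scaled-multiply identity this sequence relies on, ((delta * mul_frac) >> 32) assembled from 32x32->64 multiplies, can be checked with a small reference model of the arithmetic (not of the register allocation):

    def scale_delta(delta, mul_frac):
        """Reference model: ((delta * mul_frac) >> 32) truncated to 64 bits."""
        lo = delta & 0xFFFFFFFF
        hi = delta >> 32
        low_part = (lo * mul_frac) >> 32    # only the upper half survives >> 32
        high_part = hi * mul_frac           # already aligned after the shift
        return (high_part + low_part) & 0xFFFFFFFFFFFFFFFF

    delta, mul_frac = 0x123456789ABCDEF0, 0x89ABCDEF
    assert scale_delta(delta, mul_frac) == ((delta * mul_frac) >> 32) & (2**64 - 1)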
    80.1 --- a/xen/arch/x86/vmx_platform.c	Thu Sep 29 16:22:02 2005 -0600
    80.2 +++ b/xen/arch/x86/vmx_platform.c	Thu Sep 29 17:28:44 2005 -0600
    80.3 @@ -667,6 +667,7 @@ static void mmio_operands(int type, unsi
    80.4      mpcip->instr = inst->instr;
    80.5      mpcip->operand[0] = inst->operand[0]; /* source */
    80.6      mpcip->operand[1] = inst->operand[1]; /* destination */
    80.7 +    mpcip->immediate = inst->immediate;
    80.8  
    80.9      if (inst->operand[0] & REGISTER) { /* dest is memory */
   80.10          index = operand_index(inst->operand[0]);
   80.11 @@ -833,12 +834,16 @@ void handle_mmio(unsigned long va, unsig
   80.12          mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mpcip, regs);
   80.13          break;
   80.14  
   80.15 -    case INSTR_CMP:
   80.16 -        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
   80.17 -        break;
   80.18 +    case INSTR_CMP:        /* Pass through */
   80.19 +    case INSTR_TEST:
   80.20 +        mpcip->flags = mmio_inst.flags;
   80.21 +        mpcip->instr = mmio_inst.instr;
   80.22 +        mpcip->operand[0] = mmio_inst.operand[0]; /* source */
   80.23 +        mpcip->operand[1] = mmio_inst.operand[1]; /* destination */
   80.24 +        mpcip->immediate = mmio_inst.immediate;
   80.25  
   80.26 -    case INSTR_TEST:
   80.27 -        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
   80.28 +        /* send the request and wait for the value */
   80.29 +        send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, mmio_inst.op_size, 0, IOREQ_READ, 0);
   80.30          break;
   80.31  
   80.32      default: