direct-io.hg

changeset 7317:61b3b357d827

Merge xen-unstable into xen-ia64-unstable
author djm@kirby.fc.hp.com
date Thu Sep 29 17:28:44 2005 -0600 (2005-09-29)
parents 93e27f7ca8a8 d88e98fd4f7a
children c22741d000a5
files .hgignore Makefile buildconfigs/Rules.mk docs/src/user/installation.tex linux-2.6-xen-sparse/arch/ia64/Kconfig linux-2.6-xen-sparse/arch/ia64/xen-mkbuildtree-pre linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_ia64 linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32 linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c linux-2.6-xen-sparse/arch/xen/kernel/devmem.c linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c linux-2.6-xen-sparse/arch/xen/kernel/fixup.c linux-2.6-xen-sparse/arch/xen/kernel/gnttab.c linux-2.6-xen-sparse/arch/xen/kernel/reboot.c linux-2.6-xen-sparse/arch/xen/kernel/smp.c linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/entry.S linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic_xen.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c linux-2.6-xen-sparse/drivers/xen/blkback/common.h linux-2.6-xen-sparse/drivers/xen/blkback/interface.c linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c linux-2.6-xen-sparse/drivers/xen/blkfront/block.h linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c linux-2.6-xen-sparse/drivers/xen/blktap/common.h linux-2.6-xen-sparse/drivers/xen/blktap/interface.c linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c linux-2.6-xen-sparse/drivers/xen/netback/common.h linux-2.6-xen-sparse/drivers/xen/netback/interface.c linux-2.6-xen-sparse/drivers/xen/netback/netback.c linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c linux-2.6-xen-sparse/drivers/xen/tpmback/common.h linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypervisor.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h linux-2.6-xen-sparse/include/asm-xen/balloon.h linux-2.6-xen-sparse/include/asm-xen/driver_util.h linux-2.6-xen-sparse/include/asm-xen/evtchn.h linux-2.6-xen-sparse/include/asm-xen/foreign_page.h linux-2.6-xen-sparse/include/asm-xen/gnttab.h linux-2.6-xen-sparse/include/asm-xen/xen_proc.h linux-2.6-xen-sparse/include/asm-xen/xenbus.h tools/check/check_hotplug tools/console/daemon/io.c tools/debugger/gdb/README tools/examples/Makefile tools/examples/xmexample.vmx tools/firmware/vmxassist/Makefile tools/firmware/vmxassist/gen.c tools/firmware/vmxassist/head.S tools/firmware/vmxassist/setup.c tools/firmware/vmxassist/vm86.c tools/firmware/vmxassist/vmxloader.c tools/ioemu/hw/cirrus_vga.c tools/ioemu/hw/pc.c tools/ioemu/hw/vga.c tools/ioemu/target-i386-dm/helper2.c tools/ioemu/vl.c tools/libxc/xc_vmx_build.c tools/libxc/xenguest.h tools/libxc/xg_private.h tools/python/xen/lowlevel/xc/xc.c tools/python/xen/lowlevel/xs/xs.c tools/python/xen/web/SrvBase.py tools/python/xen/web/http.py 
tools/python/xen/xend/PrettyPrint.py tools/python/xen/xend/XendCheckpoint.py tools/python/xen/xend/XendClient.py tools/python/xen/xend/XendDomain.py tools/python/xen/xend/XendDomainInfo.py tools/python/xen/xend/image.py tools/python/xen/xend/server/DevController.py tools/python/xen/xend/server/SrvDaemon.py tools/python/xen/xend/server/SrvDmesg.py tools/python/xen/xend/server/SrvDomain.py tools/python/xen/xend/server/SrvDomainDir.py tools/python/xen/xend/server/SrvNode.py tools/python/xen/xend/xenstore/xsnode.py tools/python/xen/xend/xenstore/xstransact.py tools/python/xen/xm/main.py tools/xenstore/Makefile tools/xenstore/speedtest.c tools/xenstore/tdb.c tools/xenstore/tdb.h tools/xenstore/testsuite/04rm.test tools/xenstore/testsuite/08transaction.slowtest tools/xenstore/testsuite/08transaction.test tools/xenstore/testsuite/12readonly.test tools/xenstore/testsuite/14complexperms.test tools/xenstore/testsuite/16block-watch-crash.test tools/xenstore/xenstore_client.c tools/xenstore/xenstored.h tools/xenstore/xenstored_core.c tools/xenstore/xenstored_core.h tools/xenstore/xenstored_domain.c tools/xenstore/xenstored_transaction.c tools/xenstore/xenstored_transaction.h tools/xenstore/xenstored_watch.c tools/xenstore/xenstored_watch.h tools/xenstore/xs.c tools/xenstore/xs.h tools/xenstore/xs_lib.c tools/xenstore/xs_lib.h tools/xenstore/xs_random.c tools/xenstore/xs_stress.c tools/xenstore/xs_tdb_dump.c tools/xenstore/xs_test.c xen/arch/ia64/asm-offsets.c xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/xen/process.c xen/arch/ia64/xen/vcpu.c xen/arch/x86/mm.c xen/arch/x86/shadow32.c xen/arch/x86/shadow_public.c xen/arch/x86/time.c xen/arch/x86/vmx_platform.c xen/arch/x86/vmx_vmcs.c xen/common/grant_table.c xen/include/asm-ia64/vcpu.h xen/include/asm-x86/e820.h xen/include/asm-x86/mm.h xen/include/asm-x86/vmx_platform.h xen/include/xen/grant_table.h
     1.1 --- a/.hgignore	Thu Sep 29 16:22:02 2005 -0600
     1.2 +++ b/.hgignore	Thu Sep 29 17:28:44 2005 -0600
     1.3 @@ -158,6 +158,7 @@
     1.4  ^tools/xenstore/xs_dom0_test$
     1.5  ^tools/xenstore/xs_random$
     1.6  ^tools/xenstore/xs_stress$
     1.7 +^tools/xenstore/xs_tdb_dump$
     1.8  ^tools/xenstore/xs_test$
     1.9  ^tools/xenstore/xs_watch_stress$
    1.10  ^tools/xentrace/xenctx$
     8.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32	Thu Sep 29 16:22:02 2005 -0600
     8.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32	Thu Sep 29 17:28:44 2005 -0600
     8.3 @@ -2684,7 +2684,7 @@ CONFIG_ACPI_SYSTEM=y
     8.4  #
     8.5  # File systems
     8.6  #
     8.7 -CONFIG_EXT2_FS=m
     8.8 +CONFIG_EXT2_FS=y
     8.9  CONFIG_EXT2_FS_XATTR=y
    8.10  CONFIG_EXT2_FS_POSIX_ACL=y
    8.11  CONFIG_EXT2_FS_SECURITY=y
    8.12 @@ -2913,7 +2913,7 @@ CONFIG_KEYS=y
    8.13  # CONFIG_KEYS_DEBUG_PROC_KEYS is not set
    8.14  CONFIG_SECURITY=y
    8.15  # CONFIG_SECURITY_NETWORK is not set
    8.16 -CONFIG_SECURITY_CAPABILITIES=m
    8.17 +CONFIG_SECURITY_CAPABILITIES=y
    8.18  CONFIG_SECURITY_ROOTPLUG=m
    8.19  CONFIG_SECURITY_SECLVL=m
    8.20  CONFIG_SECURITY_SELINUX=y
     9.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c	Thu Sep 29 16:22:02 2005 -0600
     9.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c	Thu Sep 29 17:28:44 2005 -0600
     9.3 @@ -112,6 +112,10 @@ void xen_idle(void)
     9.4  
     9.5  #ifdef CONFIG_HOTPLUG_CPU
     9.6  #include <asm/nmi.h>
     9.7 +#ifdef CONFIG_SMP
     9.8 +extern void smp_suspend(void);
     9.9 +extern void smp_resume(void);
    9.10 +#endif
    9.11  /* We don't actually take CPU down, just spin without interrupts. */
    9.12  static inline void play_dead(void)
    9.13  {
    9.14 @@ -120,6 +124,14 @@ static inline void play_dead(void)
    9.15  		HYPERVISOR_yield();
    9.16  
    9.17  	__flush_tlb_all();
    9.18 +   /* 
    9.19 +    * Restore IPI/IRQ mappings before marking online to prevent 
    9.20 +    * race between pending interrupts and restoration of handler. 
    9.21 +    */
    9.22 +#ifdef CONFIG_SMP
    9.23 +	local_irq_enable(); /* XXX Needed for smp_resume(). Clean me up. */
    9.24 +	smp_resume();
    9.25 +#endif
    9.26  	cpu_set(smp_processor_id(), cpu_online_map);
    9.27  }
    9.28  #else
    9.29 @@ -135,10 +147,6 @@ static inline void play_dead(void)
    9.30   * low exit latency (ie sit in a loop waiting for
    9.31   * somebody to say that they'd like to reschedule)
    9.32   */
    9.33 -#ifdef CONFIG_SMP
    9.34 -extern void smp_suspend(void);
    9.35 -extern void smp_resume(void);
    9.36 -#endif
    9.37  void cpu_idle (void)
    9.38  {
    9.39  	int cpu = _smp_processor_id();
    9.40 @@ -166,9 +174,6 @@ void cpu_idle (void)
    9.41  				HYPERVISOR_vcpu_down(cpu);
    9.42  #endif
    9.43  				play_dead();
    9.44 -#ifdef CONFIG_SMP
    9.45 -				smp_resume();
    9.46 -#endif
    9.47  				local_irq_enable();
    9.48  			}
    9.49  
    10.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c	Thu Sep 29 16:22:02 2005 -0600
    10.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c	Thu Sep 29 17:28:44 2005 -0600
    10.3 @@ -131,21 +131,9 @@ DECLARE_PER_CPU(int, ipi_to_evtchn[NR_IP
    10.4  
    10.5  static inline void __send_IPI_one(unsigned int cpu, int vector)
    10.6  {
    10.7 -	unsigned int evtchn;
    10.8 -
    10.9 -	evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
   10.10 -	// printk("send_IPI_mask_bitmask cpu %d vector %d evtchn %d\n", cpu, vector, evtchn);
   10.11 -	if (evtchn) {
   10.12 -#if 0
   10.13 -		shared_info_t *s = HYPERVISOR_shared_info;
   10.14 -		while (synch_test_bit(evtchn, &s->evtchn_pending[0]) ||
   10.15 -		       synch_test_bit(evtchn, &s->evtchn_mask[0]))
   10.16 -			;
   10.17 -#endif
   10.18 -		notify_via_evtchn(evtchn);
   10.19 -	} else
   10.20 -		printk("send_IPI to unbound port %d/%d",
   10.21 -		       cpu, vector);
   10.22 +	int evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
   10.23 +	BUG_ON(evtchn < 0);
   10.24 +	notify_remote_via_evtchn(evtchn);
   10.25  }
   10.26  
   10.27  void __send_IPI_shortcut(unsigned int shortcut, int vector)
    11.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c	Thu Sep 29 16:22:02 2005 -0600
    11.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c	Thu Sep 29 17:28:44 2005 -0600
    11.3 @@ -446,27 +446,6 @@ static void __init smp_callin(void)
    11.4  
    11.5  static int cpucount;
    11.6  
    11.7 -
    11.8 -static irqreturn_t ldebug_interrupt(
    11.9 -	int irq, void *dev_id, struct pt_regs *regs)
   11.10 -{
   11.11 -	return IRQ_HANDLED;
   11.12 -}
   11.13 -
   11.14 -static DEFINE_PER_CPU(int, ldebug_irq);
   11.15 -static char ldebug_name[NR_CPUS][15];
   11.16 -
   11.17 -void ldebug_setup(void)
   11.18 -{
   11.19 -	int cpu = smp_processor_id();
   11.20 -
   11.21 -	per_cpu(ldebug_irq, cpu) = bind_virq_to_irq(VIRQ_DEBUG);
   11.22 -	sprintf(ldebug_name[cpu], "ldebug%d", cpu);
   11.23 -	BUG_ON(request_irq(per_cpu(ldebug_irq, cpu), ldebug_interrupt,
   11.24 -	                   SA_INTERRUPT, ldebug_name[cpu], NULL));
   11.25 -}
   11.26 -
   11.27 -
   11.28  extern void local_setup_timer(void);
   11.29  
   11.30  /*
   11.31 @@ -484,7 +463,6 @@ static void __init start_secondary(void 
   11.32  	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
   11.33  		rep_nop();
   11.34  	local_setup_timer();
   11.35 -	ldebug_setup();
   11.36  	smp_intr_init();
   11.37  	local_irq_enable();
   11.38  	/*
    12.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c	Thu Sep 29 16:22:02 2005 -0600
    12.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c	Thu Sep 29 17:28:44 2005 -0600
    12.3 @@ -186,8 +186,8 @@ static inline u64 scale_delta(u64 delta,
    12.4  		"mov  %4,%%eax ; "
    12.5  		"mov  %%edx,%4 ; "
    12.6  		"mul  %5       ; "
    12.7 +		"xor  %5,%5    ; "
    12.8  		"add  %4,%%eax ; "
    12.9 -		"xor  %5,%5    ; "
   12.10  		"adc  %5,%%edx ; "
   12.11  		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
   12.12  		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
   12.13 @@ -836,13 +836,6 @@ void start_hz_timer(void)
   12.14  	cpu_clear(smp_processor_id(), nohz_cpu_mask);
   12.15  }
   12.16  
   12.17 -void time_suspend(void)
   12.18 -{
   12.19 -	/* nothing */
   12.20 -	teardown_irq(per_cpu(timer_irq, 0), &irq_timer);
   12.21 -	unbind_virq_from_irq(VIRQ_TIMER);
   12.22 -}
   12.23 -
   12.24  /* No locking required. We are only CPU running, and interrupts are off. */
   12.25  void time_resume(void)
   12.26  {
   12.27 @@ -854,9 +847,6 @@ void time_resume(void)
   12.28  	per_cpu(processed_system_time, 0) = processed_system_time;
   12.29  
   12.30  	update_wallclock();
   12.31 -
   12.32 -	per_cpu(timer_irq, 0) = bind_virq_to_irq(VIRQ_TIMER);
   12.33 -	(void)setup_irq(per_cpu(timer_irq, 0), &irq_timer);
   12.34  }
   12.35  
   12.36  #ifdef CONFIG_SMP
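The scale_delta() hunk above fixes a carry bug in the 32-bit inline assembly: "xor %5,%5" zeroes the register that the following "adc" uses, but XOR also clears the carry flag, so with the xor placed between the "add" and the "adc" the carry out of the add was discarded; hoisting the xor above the add preserves it. The routine computes (delta * mul_frac) >> 32 without a 128-bit type; a portable C sketch of the same arithmetic (illustrative, not the kernel's implementation):

	static inline u64 scale_delta_sketch(u64 delta, u32 mul_frac)
	{
		u32 d_lo = (u32)delta;
		u32 d_hi = (u32)(delta >> 32);
		u64 lo = (u64)d_lo * mul_frac;   /* bits 0..63 of the full product  */
		u64 hi = (u64)d_hi * mul_frac;   /* bits 32..95 of the full product */

		/* (delta * mul_frac) >> 32: fold the high half of 'lo' into 'hi'.
		 * In the asm this is the add/adc pair whose carry the misplaced
		 * xor used to destroy. */
		return hi + (lo >> 32);
	}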
    13.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c	Thu Sep 29 16:22:02 2005 -0600
    13.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c	Thu Sep 29 17:28:44 2005 -0600
    13.3 @@ -209,7 +209,10 @@ static void dump_fault_path(unsigned lon
    13.4  {
    13.5  	unsigned long *p, page;
    13.6  
    13.7 -        page = __pa(per_cpu(cur_pgd, smp_processor_id()));
    13.8 +	preempt_disable();
    13.9 +	page = __pa(per_cpu(cur_pgd, smp_processor_id()));
   13.10 +	preempt_enable();
   13.11 +
   13.12  	p  = (unsigned long *)__va(page);
   13.13  	p += (address >> 30) * 2;
   13.14  	printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
   13.15 @@ -237,8 +240,13 @@ static void dump_fault_path(unsigned lon
   13.16  {
   13.17  	unsigned long page;
   13.18  
   13.19 +	preempt_disable();
   13.20  	page = ((unsigned long *) per_cpu(cur_pgd, smp_processor_id()))
   13.21  	    [address >> 22];
   13.22 +	preempt_enable();
   13.23 +
   13.24 +	page = ((unsigned long *) per_cpu(cur_pgd, get_cpu()))
   13.25 +	    [address >> 22];
   13.26  	printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
   13.27  	       machine_to_phys(page));
   13.28  	/*
   13.29 @@ -567,7 +575,9 @@ vmalloc_fault:
   13.30  		pmd_t *pmd, *pmd_k;
   13.31  		pte_t *pte_k;
   13.32  
   13.33 +		preempt_disable();
   13.34  		pgd = index + per_cpu(cur_pgd, smp_processor_id());
   13.35 +		preempt_enable();
   13.36  		pgd_k = init_mm.pgd + index;
   13.37  
   13.38  		if (!pgd_present(*pgd_k))
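The fault.c hunks wrap each per_cpu(cur_pgd, smp_processor_id()) access in preempt_disable()/preempt_enable(), so on a preemptible kernel the CPU number and the per-CPU slot it indexes cannot change between the two operations. The same guarantee can be written with the get_cpu()/put_cpu() idiom; a sketch of the pattern only, not a drop-in for the code above:

	/* Sketch of the preempt-safe per-CPU read pattern (illustrative). */
	static unsigned long read_cur_pgd_pa(void)
	{
		unsigned long pa;
		int cpu = get_cpu();               /* disables preemption */
		pa = __pa(per_cpu(cur_pgd, cpu));  /* 'cpu' is stable here */
		put_cpu();                         /* re-enables preemption */
		return pa;
	}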
    14.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/devmem.c	Thu Sep 29 16:22:02 2005 -0600
    14.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/devmem.c	Thu Sep 29 17:28:44 2005 -0600
    14.3 @@ -144,3 +144,13 @@ struct file_operations mem_fops = {
    14.4  	.mmap		= mmap_mem,
    14.5  	.open		= open_mem,
    14.6  };
    14.7 +
    14.8 +/*
    14.9 + * Local variables:
   14.10 + *  c-file-style: "linux"
   14.11 + *  indent-tabs-mode: t
   14.12 + *  c-indent-level: 8
   14.13 + *  c-basic-offset: 8
   14.14 + *  tab-width: 8
   14.15 + * End:
   14.16 + */
    15.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Thu Sep 29 16:22:02 2005 -0600
    15.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Thu Sep 29 17:28:44 2005 -0600
    15.3 @@ -3,7 +3,7 @@
    15.4   * 
    15.5   * Communication via Xen event channels.
    15.6   * 
    15.7 - * Copyright (c) 2002-2004, K A Fraser
    15.8 + * Copyright (c) 2002-2005, K A Fraser
    15.9   * 
   15.10   * This file may be distributed separately from the Linux kernel, or
   15.11   * incorporated into other software packages, subject to the following license:
   15.12 @@ -73,27 +73,33 @@ static unsigned long pirq_needs_unmask_n
   15.13  static u8  cpu_evtchn[NR_EVENT_CHANNELS];
   15.14  static u32 cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/32];
   15.15  
   15.16 -#define active_evtchns(cpu,sh,idx)              \
   15.17 -    ((sh)->evtchn_pending[idx] &                \
   15.18 -     cpu_evtchn_mask[cpu][idx] &                \
   15.19 -     ~(sh)->evtchn_mask[idx])
   15.20 +#define active_evtchns(cpu,sh,idx)		\
   15.21 +	((sh)->evtchn_pending[idx] &		\
   15.22 +	 cpu_evtchn_mask[cpu][idx] &		\
   15.23 +	 ~(sh)->evtchn_mask[idx])
   15.24  
   15.25 -void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
   15.26 +static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
   15.27  {
   15.28 -    clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
   15.29 -    set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
   15.30 -    cpu_evtchn[chn] = cpu;
   15.31 +	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
   15.32 +	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
   15.33 +	cpu_evtchn[chn] = cpu;
   15.34 +}
   15.35 +
   15.36 +static void init_evtchn_cpu_bindings(void)
   15.37 +{
   15.38 +	/* By default all event channels notify CPU#0. */
   15.39 +	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
   15.40 +	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
   15.41  }
   15.42  
   15.43  #else
   15.44  
   15.45 -#define active_evtchns(cpu,sh,idx)              \
   15.46 -    ((sh)->evtchn_pending[idx] &                \
   15.47 -     ~(sh)->evtchn_mask[idx])
   15.48 +#define active_evtchns(cpu,sh,idx)		\
   15.49 +	((sh)->evtchn_pending[idx] &		\
   15.50 +	 ~(sh)->evtchn_mask[idx])
   15.51 +#define bind_evtchn_to_cpu(chn,cpu)	((void)0)
   15.52 +#define init_evtchn_cpu_bindings()	((void)0)
   15.53  
   15.54 -void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
   15.55 -{
   15.56 -}
   15.57  #endif
   15.58  
   15.59  /* Upcall to generic IRQ layer. */
   15.60 @@ -108,9 +114,9 @@ extern asmlinkage unsigned int do_IRQ(st
   15.61  #elif defined (__x86_64__)
   15.62  #define IRQ_REG orig_rax
   15.63  #endif
   15.64 -#define do_IRQ(irq, regs) do {                  \
   15.65 -    (regs)->IRQ_REG = (irq);                    \
   15.66 -    do_IRQ((regs));                             \
   15.67 +#define do_IRQ(irq, regs) do {			\
   15.68 +	(regs)->IRQ_REG = (irq);		\
   15.69 +	do_IRQ((regs));				\
   15.70  } while (0)
   15.71  #endif
   15.72  
   15.73 @@ -123,249 +129,241 @@ extern asmlinkage unsigned int do_IRQ(st
   15.74   */
   15.75  void force_evtchn_callback(void)
   15.76  {
   15.77 -    (void)HYPERVISOR_xen_version(0, NULL);
   15.78 +	(void)HYPERVISOR_xen_version(0, NULL);
   15.79  }
   15.80  EXPORT_SYMBOL(force_evtchn_callback);
   15.81  
   15.82  /* NB. Interrupts are disabled on entry. */
   15.83  asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
   15.84  {
   15.85 -    u32     l1, l2;
   15.86 -    unsigned int   l1i, l2i, port;
   15.87 -    int            irq, cpu = smp_processor_id();
   15.88 -    shared_info_t *s = HYPERVISOR_shared_info;
   15.89 -    vcpu_info_t   *vcpu_info = &s->vcpu_data[cpu];
   15.90 +	u32     l1, l2;
   15.91 +	unsigned int   l1i, l2i, port;
   15.92 +	int            irq, cpu = smp_processor_id();
   15.93 +	shared_info_t *s = HYPERVISOR_shared_info;
   15.94 +	vcpu_info_t   *vcpu_info = &s->vcpu_data[cpu];
   15.95  
   15.96 -    vcpu_info->evtchn_upcall_pending = 0;
   15.97 +	vcpu_info->evtchn_upcall_pending = 0;
   15.98  
   15.99 -    /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
  15.100 -    l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
  15.101 -    while ( l1 != 0 )
  15.102 -    {
  15.103 -        l1i = __ffs(l1);
  15.104 -        l1 &= ~(1 << l1i);
  15.105 +	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
  15.106 +	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
  15.107 +	while (l1 != 0) {
  15.108 +		l1i = __ffs(l1);
  15.109 +		l1 &= ~(1 << l1i);
  15.110          
  15.111 -        while ( (l2 = active_evtchns(cpu, s, l1i)) != 0 )
  15.112 -        {
  15.113 -            l2i = __ffs(l2);
  15.114 -            l2 &= ~(1 << l2i);
  15.115 +		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
  15.116 +			l2i = __ffs(l2);
  15.117 +			l2 &= ~(1 << l2i);
  15.118              
  15.119 -            port = (l1i << 5) + l2i;
  15.120 -            if ( (irq = evtchn_to_irq[port]) != -1 ) {
  15.121 -                do_IRQ(irq, regs);
  15.122 -	    } else
  15.123 -                evtchn_device_upcall(port);
  15.124 -        }
  15.125 -    }
  15.126 +			port = (l1i << 5) + l2i;
  15.127 +			if ((irq = evtchn_to_irq[port]) != -1)
  15.128 +				do_IRQ(irq, regs);
  15.129 +			else
  15.130 +				evtchn_device_upcall(port);
  15.131 +		}
  15.132 +	}
  15.133  }
  15.134  EXPORT_SYMBOL(evtchn_do_upcall);
  15.135  
  15.136  static int find_unbound_irq(void)
  15.137  {
  15.138 -    int irq;
  15.139 +	int irq;
  15.140  
  15.141 -    for ( irq = 0; irq < NR_IRQS; irq++ )
  15.142 -        if ( irq_bindcount[irq] == 0 )
  15.143 -            break;
  15.144 +	for (irq = 0; irq < NR_IRQS; irq++)
  15.145 +		if (irq_bindcount[irq] == 0)
  15.146 +			break;
  15.147  
  15.148 -    if ( irq == NR_IRQS )
  15.149 -        panic("No available IRQ to bind to: increase NR_IRQS!\n");
  15.150 +	if (irq == NR_IRQS)
  15.151 +		panic("No available IRQ to bind to: increase NR_IRQS!\n");
  15.152  
  15.153 -    return irq;
  15.154 +	return irq;
  15.155  }
  15.156  
  15.157  int bind_virq_to_irq(int virq)
  15.158  {
  15.159 -    evtchn_op_t op;
  15.160 -    int evtchn, irq;
  15.161 -    int cpu = smp_processor_id();
  15.162 +	evtchn_op_t op;
  15.163 +	int evtchn, irq;
  15.164 +	int cpu = smp_processor_id();
  15.165  
  15.166 -    spin_lock(&irq_mapping_update_lock);
  15.167 +	spin_lock(&irq_mapping_update_lock);
  15.168  
  15.169 -    if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
  15.170 -    {
  15.171 -        op.cmd              = EVTCHNOP_bind_virq;
  15.172 -        op.u.bind_virq.virq = virq;
  15.173 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
  15.174 -            panic("Failed to bind virtual IRQ %d\n", virq);
  15.175 -        evtchn = op.u.bind_virq.port;
  15.176 +	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
  15.177 +		op.cmd              = EVTCHNOP_bind_virq;
  15.178 +		op.u.bind_virq.virq = virq;
  15.179 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
  15.180 +		evtchn = op.u.bind_virq.port;
  15.181  
  15.182 -        irq = find_unbound_irq();
  15.183 -        evtchn_to_irq[evtchn] = irq;
  15.184 -        irq_to_evtchn[irq]    = evtchn;
  15.185 +		irq = find_unbound_irq();
  15.186 +		evtchn_to_irq[evtchn] = irq;
  15.187 +		irq_to_evtchn[irq]    = evtchn;
  15.188  
  15.189 -        per_cpu(virq_to_irq, cpu)[virq] = irq;
  15.190 +		per_cpu(virq_to_irq, cpu)[virq] = irq;
  15.191  
  15.192 -        bind_evtchn_to_cpu(evtchn, cpu);
  15.193 -    }
  15.194 +		bind_evtchn_to_cpu(evtchn, cpu);
  15.195 +	}
  15.196  
  15.197 -    irq_bindcount[irq]++;
  15.198 +	irq_bindcount[irq]++;
  15.199  
  15.200 -    spin_unlock(&irq_mapping_update_lock);
  15.201 +	spin_unlock(&irq_mapping_update_lock);
  15.202      
  15.203 -    return irq;
  15.204 +	return irq;
  15.205  }
  15.206  EXPORT_SYMBOL(bind_virq_to_irq);
  15.207  
  15.208  void unbind_virq_from_irq(int virq)
  15.209  {
  15.210 -    evtchn_op_t op;
  15.211 -    int cpu    = smp_processor_id();
  15.212 -    int irq    = per_cpu(virq_to_irq, cpu)[virq];
  15.213 -    int evtchn = irq_to_evtchn[irq];
  15.214 -
  15.215 -    spin_lock(&irq_mapping_update_lock);
  15.216 +	evtchn_op_t op;
  15.217 +	int cpu    = smp_processor_id();
  15.218 +	int irq    = per_cpu(virq_to_irq, cpu)[virq];
  15.219 +	int evtchn = irq_to_evtchn[irq];
  15.220  
  15.221 -    if ( --irq_bindcount[irq] == 0 )
  15.222 -    {
  15.223 -        op.cmd          = EVTCHNOP_close;
  15.224 -        op.u.close.dom  = DOMID_SELF;
  15.225 -        op.u.close.port = evtchn;
  15.226 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
  15.227 -            panic("Failed to unbind virtual IRQ %d\n", virq);
  15.228 +	spin_lock(&irq_mapping_update_lock);
  15.229 +
  15.230 +	if (--irq_bindcount[irq] == 0) {
  15.231 +		op.cmd          = EVTCHNOP_close;
  15.232 +		op.u.close.dom  = DOMID_SELF;
  15.233 +		op.u.close.port = evtchn;
  15.234 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
  15.235  
  15.236 -        /*
  15.237 -         * This is a slight hack. Interdomain ports can be allocated directly 
  15.238 -         * by userspace, and at that point they get bound by Xen to vcpu 0. We 
  15.239 -         * therefore need to make sure that if we get an event on an event 
  15.240 -         * channel we don't know about vcpu 0 handles it. Binding channels to 
  15.241 -         * vcpu 0 when closing them achieves this.
  15.242 -         */
  15.243 -        bind_evtchn_to_cpu(evtchn, 0);
  15.244 -        evtchn_to_irq[evtchn] = -1;
  15.245 -        irq_to_evtchn[irq]    = -1;
  15.246 -        per_cpu(virq_to_irq, cpu)[virq]     = -1;
  15.247 -    }
  15.248 +		/*
  15.249 +		 * This is a slight hack. Interdomain ports can be allocated 
  15.250 +		 * directly by userspace, and at that point they get bound by 
  15.251 +		 * Xen to vcpu 0. We therefore need to make sure that if we get
  15.252 +		 * an event on an event channel we don't know about vcpu 0 
  15.253 +		 * handles it. Binding channels to vcpu 0 when closing them
  15.254 +		 * achieves this.
  15.255 +		 */
  15.256 +		bind_evtchn_to_cpu(evtchn, 0);
  15.257 +		evtchn_to_irq[evtchn] = -1;
  15.258 +		irq_to_evtchn[irq]    = -1;
  15.259 +		per_cpu(virq_to_irq, cpu)[virq] = -1;
  15.260 +	}
  15.261  
  15.262 -    spin_unlock(&irq_mapping_update_lock);
  15.263 +	spin_unlock(&irq_mapping_update_lock);
  15.264  }
  15.265  EXPORT_SYMBOL(unbind_virq_from_irq);
  15.266  
  15.267  int bind_ipi_to_irq(int ipi)
  15.268  {
  15.269 -    evtchn_op_t op;
  15.270 -    int evtchn, irq;
  15.271 -    int cpu = smp_processor_id();
  15.272 -
  15.273 -    spin_lock(&irq_mapping_update_lock);
  15.274 +	evtchn_op_t op;
  15.275 +	int evtchn, irq;
  15.276 +	int cpu = smp_processor_id();
  15.277  
  15.278 -    if ( (evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0 )
  15.279 -    {
  15.280 -        op.cmd = EVTCHNOP_bind_ipi;
  15.281 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
  15.282 -            panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, cpu);
  15.283 -        evtchn = op.u.bind_ipi.port;
  15.284 +	spin_lock(&irq_mapping_update_lock);
  15.285 +
  15.286 +	if ((evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == -1) {
  15.287 +		op.cmd = EVTCHNOP_bind_ipi;
  15.288 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
  15.289 +		evtchn = op.u.bind_ipi.port;
  15.290  
  15.291 -        irq = find_unbound_irq();
  15.292 -        evtchn_to_irq[evtchn] = irq;
  15.293 -        irq_to_evtchn[irq]    = evtchn;
  15.294 +		irq = find_unbound_irq();
  15.295 +		evtchn_to_irq[evtchn] = irq;
  15.296 +		irq_to_evtchn[irq]    = evtchn;
  15.297  
  15.298 -        per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
  15.299 +		per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
  15.300  
  15.301 -        bind_evtchn_to_cpu(evtchn, cpu);
  15.302 -    } 
  15.303 -    else
  15.304 -    {
  15.305 -        irq = evtchn_to_irq[evtchn];
  15.306 -    }
  15.307 +		bind_evtchn_to_cpu(evtchn, cpu);
  15.308 +	} else {
  15.309 +		irq = evtchn_to_irq[evtchn];
  15.310 +	}
  15.311  
  15.312 -    irq_bindcount[irq]++;
  15.313 +	irq_bindcount[irq]++;
  15.314  
  15.315 -    spin_unlock(&irq_mapping_update_lock);
  15.316 +	spin_unlock(&irq_mapping_update_lock);
  15.317  
  15.318 -    return irq;
  15.319 +	return irq;
  15.320  }
  15.321  EXPORT_SYMBOL(bind_ipi_to_irq);
  15.322  
  15.323  void unbind_ipi_from_irq(int ipi)
  15.324  {
  15.325 -    evtchn_op_t op;
  15.326 -    int cpu    = smp_processor_id();
  15.327 -    int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
  15.328 -    int irq    = evtchn_to_irq[evtchn];
  15.329 +	evtchn_op_t op;
  15.330 +	int cpu    = smp_processor_id();
  15.331 +	int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
  15.332 +	int irq    = evtchn_to_irq[evtchn];
  15.333  
  15.334 -    spin_lock(&irq_mapping_update_lock);
  15.335 +	spin_lock(&irq_mapping_update_lock);
  15.336  
  15.337 -    if ( --irq_bindcount[irq] == 0 )
  15.338 -    {
  15.339 -        op.cmd          = EVTCHNOP_close;
  15.340 -        op.u.close.dom  = DOMID_SELF;
  15.341 -        op.u.close.port = evtchn;
  15.342 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
  15.343 -            panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);
  15.344 +	if (--irq_bindcount[irq] == 0) {
  15.345 +		op.cmd          = EVTCHNOP_close;
  15.346 +		op.u.close.dom  = DOMID_SELF;
  15.347 +		op.u.close.port = evtchn;
  15.348 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
  15.349  
  15.350 -        /* See comments in unbind_virq_from_irq */
  15.351 -        bind_evtchn_to_cpu(evtchn, 0);
  15.352 -        evtchn_to_irq[evtchn] = -1;
  15.353 -        irq_to_evtchn[irq]    = -1;
  15.354 -        per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
  15.355 -    }
  15.356 +		/* See comments in unbind_virq_from_irq */
  15.357 +		bind_evtchn_to_cpu(evtchn, 0);
  15.358 +		evtchn_to_irq[evtchn] = -1;
  15.359 +		irq_to_evtchn[irq]    = -1;
  15.360 +		per_cpu(ipi_to_evtchn, cpu)[ipi] = -1;
  15.361 +	}
  15.362  
  15.363 -    spin_unlock(&irq_mapping_update_lock);
  15.364 +	spin_unlock(&irq_mapping_update_lock);
  15.365  }
  15.366  EXPORT_SYMBOL(unbind_ipi_from_irq);
  15.367  
  15.368  int bind_evtchn_to_irq(unsigned int evtchn)
  15.369  {
  15.370 -    int irq;
  15.371 +	int irq;
  15.372  
  15.373 -    spin_lock(&irq_mapping_update_lock);
  15.374 +	spin_lock(&irq_mapping_update_lock);
  15.375  
  15.376 -    if ( (irq = evtchn_to_irq[evtchn]) == -1 )
  15.377 -    {
  15.378 -        irq = find_unbound_irq();
  15.379 -        evtchn_to_irq[evtchn] = irq;
  15.380 -        irq_to_evtchn[irq]    = evtchn;
  15.381 -    }
  15.382 +	if ((irq = evtchn_to_irq[evtchn]) == -1) {
  15.383 +		irq = find_unbound_irq();
  15.384 +		evtchn_to_irq[evtchn] = irq;
  15.385 +		irq_to_evtchn[irq]    = evtchn;
  15.386 +	}
  15.387  
  15.388 -    irq_bindcount[irq]++;
  15.389 +	irq_bindcount[irq]++;
  15.390  
  15.391 -    spin_unlock(&irq_mapping_update_lock);
  15.392 +	spin_unlock(&irq_mapping_update_lock);
  15.393      
  15.394 -    return irq;
  15.395 +	return irq;
  15.396  }
  15.397  EXPORT_SYMBOL(bind_evtchn_to_irq);
  15.398  
  15.399 -void unbind_evtchn_from_irq(unsigned int evtchn)
  15.400 +void unbind_evtchn_from_irq(unsigned int irq)
  15.401  {
  15.402 -    int irq = evtchn_to_irq[evtchn];
  15.403 +	evtchn_op_t op;
  15.404 +	int evtchn = irq_to_evtchn[irq];
  15.405  
  15.406 -    spin_lock(&irq_mapping_update_lock);
  15.407 +	spin_lock(&irq_mapping_update_lock);
  15.408  
  15.409 -    if ( --irq_bindcount[irq] == 0 )
  15.410 -    {
  15.411 -        evtchn_to_irq[evtchn] = -1;
  15.412 -        irq_to_evtchn[irq]    = -1;
  15.413 -    }
  15.414 +	if ((--irq_bindcount[irq] == 0) && (evtchn != -1)) {
  15.415 +		op.cmd          = EVTCHNOP_close;
  15.416 +		op.u.close.dom  = DOMID_SELF;
  15.417 +		op.u.close.port = evtchn;
  15.418 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
  15.419  
  15.420 -    spin_unlock(&irq_mapping_update_lock);
  15.421 +		evtchn_to_irq[evtchn] = -1;
  15.422 +		irq_to_evtchn[irq]    = -1;
  15.423 +	}
  15.424 +
  15.425 +	spin_unlock(&irq_mapping_update_lock);
  15.426  }
  15.427  EXPORT_SYMBOL(unbind_evtchn_from_irq);
  15.428  
  15.429  int bind_evtchn_to_irqhandler(
  15.430 -    unsigned int evtchn,
  15.431 -    irqreturn_t (*handler)(int, void *, struct pt_regs *),
  15.432 -    unsigned long irqflags,
  15.433 -    const char *devname,
  15.434 -    void *dev_id)
  15.435 +	unsigned int evtchn,
  15.436 +	irqreturn_t (*handler)(int, void *, struct pt_regs *),
  15.437 +	unsigned long irqflags,
  15.438 +	const char *devname,
  15.439 +	void *dev_id)
  15.440  {
  15.441 -    unsigned int irq;
  15.442 -    int retval;
  15.443 +	unsigned int irq;
  15.444 +	int retval;
  15.445  
  15.446 -    irq = bind_evtchn_to_irq(evtchn);
  15.447 -    retval = request_irq(irq, handler, irqflags, devname, dev_id);
  15.448 -    if ( retval != 0 )
  15.449 -        unbind_evtchn_from_irq(evtchn);
  15.450 +	irq = bind_evtchn_to_irq(evtchn);
  15.451 +	retval = request_irq(irq, handler, irqflags, devname, dev_id);
  15.452 +	if (retval != 0)
  15.453 +		unbind_evtchn_from_irq(irq);
  15.454  
  15.455 -    return retval;
  15.456 +	return irq;
  15.457  }
  15.458  EXPORT_SYMBOL(bind_evtchn_to_irqhandler);
  15.459  
  15.460 -void unbind_evtchn_from_irqhandler(unsigned int evtchn, void *dev_id)
  15.461 +void unbind_evtchn_from_irqhandler(unsigned int irq, void *dev_id)
  15.462  {
  15.463 -    unsigned int irq = evtchn_to_irq[evtchn];
  15.464 -    free_irq(irq, dev_id);
  15.465 -    unbind_evtchn_from_irq(evtchn);
  15.466 +	free_irq(irq, dev_id);
  15.467 +	unbind_evtchn_from_irq(irq);
  15.468  }
  15.469  EXPORT_SYMBOL(unbind_evtchn_from_irqhandler);
  15.470  
  15.471 @@ -378,50 +376,50 @@ static void do_nothing_function(void *ig
  15.472  /* Rebind an evtchn so that it gets delivered to a specific cpu */
  15.473  static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
  15.474  {
  15.475 -    evtchn_op_t op;
  15.476 -    int evtchn;
  15.477 +	evtchn_op_t op;
  15.478 +	int evtchn;
  15.479  
  15.480 -    spin_lock(&irq_mapping_update_lock);
  15.481 -    evtchn = irq_to_evtchn[irq];
  15.482 -    if (!VALID_EVTCHN(evtchn)) {
  15.483 -        spin_unlock(&irq_mapping_update_lock);
  15.484 -        return;
  15.485 -    }
  15.486 +	spin_lock(&irq_mapping_update_lock);
  15.487 +	evtchn = irq_to_evtchn[irq];
  15.488 +	if (!VALID_EVTCHN(evtchn)) {
  15.489 +		spin_unlock(&irq_mapping_update_lock);
  15.490 +		return;
  15.491 +	}
  15.492  
  15.493 -    /* Tell Xen to send future instances of this interrupt to other vcpu. */
  15.494 -    op.cmd = EVTCHNOP_bind_vcpu;
  15.495 -    op.u.bind_vcpu.port = evtchn;
  15.496 -    op.u.bind_vcpu.vcpu = tcpu;
  15.497 +	/* Send future instances of this interrupt to other vcpu. */
  15.498 +	op.cmd = EVTCHNOP_bind_vcpu;
  15.499 +	op.u.bind_vcpu.port = evtchn;
  15.500 +	op.u.bind_vcpu.vcpu = tcpu;
  15.501  
  15.502 -    /*
  15.503 -     * If this fails, it usually just indicates that we're dealing with a virq 
  15.504 -     * or IPI channel, which don't actually need to be rebound. Ignore it, 
  15.505 -     * but don't do the xenlinux-level rebind in that case.
  15.506 -     */
  15.507 -    if (HYPERVISOR_event_channel_op(&op) >= 0)
  15.508 -        bind_evtchn_to_cpu(evtchn, tcpu);
  15.509 +	/*
  15.510 +	 * If this fails, it usually just indicates that we're dealing with a 
  15.511 +	 * virq or IPI channel, which don't actually need to be rebound. Ignore
  15.512 +	 * it, but don't do the xenlinux-level rebind in that case.
  15.513 +	 */
  15.514 +	if (HYPERVISOR_event_channel_op(&op) >= 0)
  15.515 +		bind_evtchn_to_cpu(evtchn, tcpu);
  15.516  
  15.517 -    spin_unlock(&irq_mapping_update_lock);
  15.518 +	spin_unlock(&irq_mapping_update_lock);
  15.519  
  15.520 -    /*
  15.521 -     * Now send the new target processor a NOP IPI. When this returns, it 
  15.522 -     * will check for any pending interrupts, and so service any that got 
  15.523 -     * delivered to the wrong processor by mistake.
  15.524 -     * 
  15.525 -     * XXX: The only time this is called with interrupts disabled is from the 
  15.526 -     * hotplug/hotunplug path. In that case, all cpus are stopped with 
  15.527 -     * interrupts disabled, and the missed interrupts will be picked up when 
  15.528 -     * they start again. This is kind of a hack.
  15.529 -     */
  15.530 -    if (!irqs_disabled())
  15.531 -        smp_call_function(do_nothing_function, NULL, 0, 0);
  15.532 +	/*
  15.533 +	 * Now send the new target processor a NOP IPI. When this returns, it
  15.534 +	 * will check for any pending interrupts, and so service any that got 
  15.535 +	 * delivered to the wrong processor by mistake.
  15.536 +	 * 
  15.537 +	 * XXX: The only time this is called with interrupts disabled is from
  15.538 +	 * the hotplug/hotunplug path. In that case, all cpus are stopped with 
  15.539 +	 * interrupts disabled, and the missed interrupts will be picked up
  15.540 +	 * when they start again. This is kind of a hack.
  15.541 +	 */
  15.542 +	if (!irqs_disabled())
  15.543 +		smp_call_function(do_nothing_function, NULL, 0, 0);
  15.544  }
  15.545  
  15.546  
  15.547  static void set_affinity_irq(unsigned irq, cpumask_t dest)
  15.548  {
  15.549 -    unsigned tcpu = first_cpu(dest);
  15.550 -    rebind_irq_to_cpu(irq, tcpu);
  15.551 +	unsigned tcpu = first_cpu(dest);
  15.552 +	rebind_irq_to_cpu(irq, tcpu);
  15.553  }
  15.554  
  15.555  /*
  15.556 @@ -430,83 +428,84 @@ static void set_affinity_irq(unsigned ir
  15.557  
  15.558  static unsigned int startup_dynirq(unsigned int irq)
  15.559  {
  15.560 -    int evtchn = irq_to_evtchn[irq];
  15.561 +	int evtchn = irq_to_evtchn[irq];
  15.562  
  15.563 -    if ( !VALID_EVTCHN(evtchn) )
  15.564 -        return 0;
  15.565 -    unmask_evtchn(evtchn);
  15.566 -    return 0;
  15.567 +	if (VALID_EVTCHN(evtchn))
  15.568 +		unmask_evtchn(evtchn);
  15.569 +	return 0;
  15.570  }
  15.571  
  15.572  static void shutdown_dynirq(unsigned int irq)
  15.573  {
  15.574 -    int evtchn = irq_to_evtchn[irq];
  15.575 +	int evtchn = irq_to_evtchn[irq];
  15.576  
  15.577 -    if ( !VALID_EVTCHN(evtchn) )
  15.578 -        return;
  15.579 -    mask_evtchn(evtchn);
  15.580 +	if (VALID_EVTCHN(evtchn))
  15.581 +		mask_evtchn(evtchn);
  15.582  }
  15.583  
  15.584  static void enable_dynirq(unsigned int irq)
  15.585  {
  15.586 -    int evtchn = irq_to_evtchn[irq];
  15.587 +	int evtchn = irq_to_evtchn[irq];
  15.588  
  15.589 -    unmask_evtchn(evtchn);
  15.590 +	if (VALID_EVTCHN(evtchn))
  15.591 +		unmask_evtchn(evtchn);
  15.592  }
  15.593  
  15.594  static void disable_dynirq(unsigned int irq)
  15.595  {
  15.596 -    int evtchn = irq_to_evtchn[irq];
  15.597 +	int evtchn = irq_to_evtchn[irq];
  15.598  
  15.599 -    mask_evtchn(evtchn);
  15.600 +	if (VALID_EVTCHN(evtchn))
  15.601 +		mask_evtchn(evtchn);
  15.602  }
  15.603  
  15.604  static void ack_dynirq(unsigned int irq)
  15.605  {
  15.606 -    int evtchn = irq_to_evtchn[irq];
  15.607 +	int evtchn = irq_to_evtchn[irq];
  15.608  
  15.609 -    mask_evtchn(evtchn);
  15.610 -    clear_evtchn(evtchn);
  15.611 +	if (VALID_EVTCHN(evtchn)) {
  15.612 +		mask_evtchn(evtchn);
  15.613 +		clear_evtchn(evtchn);
  15.614 +	}
  15.615  }
  15.616  
  15.617  static void end_dynirq(unsigned int irq)
  15.618  {
  15.619 -    int evtchn = irq_to_evtchn[irq];
  15.620 +	int evtchn = irq_to_evtchn[irq];
  15.621  
  15.622 -    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
  15.623 -        unmask_evtchn(evtchn);
  15.624 +	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
  15.625 +		unmask_evtchn(evtchn);
  15.626  }
  15.627  
  15.628  static struct hw_interrupt_type dynirq_type = {
  15.629 -    "Dynamic-irq",
  15.630 -    startup_dynirq,
  15.631 -    shutdown_dynirq,
  15.632 -    enable_dynirq,
  15.633 -    disable_dynirq,
  15.634 -    ack_dynirq,
  15.635 -    end_dynirq,
  15.636 -    set_affinity_irq
  15.637 +	"Dynamic-irq",
  15.638 +	startup_dynirq,
  15.639 +	shutdown_dynirq,
  15.640 +	enable_dynirq,
  15.641 +	disable_dynirq,
  15.642 +	ack_dynirq,
  15.643 +	end_dynirq,
  15.644 +	set_affinity_irq
  15.645  };
  15.646  
  15.647  static inline void pirq_unmask_notify(int pirq)
  15.648  {
  15.649 -    physdev_op_t op;
  15.650 -    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
  15.651 -    {
  15.652 -        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
  15.653 -        (void)HYPERVISOR_physdev_op(&op);
  15.654 -    }
  15.655 +	physdev_op_t op;
  15.656 +	if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
  15.657 +		op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
  15.658 +		(void)HYPERVISOR_physdev_op(&op);
  15.659 +	}
  15.660  }
  15.661  
  15.662  static inline void pirq_query_unmask(int pirq)
  15.663  {
  15.664 -    physdev_op_t op;
  15.665 -    op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
  15.666 -    op.u.irq_status_query.irq = pirq;
  15.667 -    (void)HYPERVISOR_physdev_op(&op);
  15.668 -    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
  15.669 -    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
  15.670 -        set_bit(pirq, &pirq_needs_unmask_notify[0]);
  15.671 +	physdev_op_t op;
  15.672 +	op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
  15.673 +	op.u.irq_status_query.irq = pirq;
  15.674 +	(void)HYPERVISOR_physdev_op(&op);
  15.675 +	clear_bit(pirq, &pirq_needs_unmask_notify[0]);
  15.676 +	if (op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
  15.677 +		set_bit(pirq, &pirq_needs_unmask_notify[0]);
  15.678  }
  15.679  
  15.680  /*
  15.681 @@ -517,218 +516,252 @@ static inline void pirq_query_unmask(int
  15.682  
  15.683  static unsigned int startup_pirq(unsigned int irq)
  15.684  {
  15.685 -    evtchn_op_t op;
  15.686 -    int evtchn;
  15.687 +	evtchn_op_t op;
  15.688 +	int evtchn;
  15.689  
  15.690 -    op.cmd               = EVTCHNOP_bind_pirq;
  15.691 -    op.u.bind_pirq.pirq  = irq;
  15.692 -    /* NB. We are happy to share unless we are probing. */
  15.693 -    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
  15.694 -    if ( HYPERVISOR_event_channel_op(&op) != 0 )
  15.695 -    {
  15.696 -        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
  15.697 -            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
  15.698 -        return 0;
  15.699 -    }
  15.700 -    evtchn = op.u.bind_pirq.port;
  15.701 +	op.cmd               = EVTCHNOP_bind_pirq;
  15.702 +	op.u.bind_pirq.pirq  = irq;
  15.703 +	/* NB. We are happy to share unless we are probing. */
  15.704 +	op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
  15.705 +	if (HYPERVISOR_event_channel_op(&op) != 0) {
  15.706 +		if ( !probing_irq(irq) )
  15.707 +			printk(KERN_INFO "Failed to obtain physical "
  15.708 +			       "IRQ %d\n", irq);
  15.709 +		return 0;
  15.710 +	}
  15.711 +	evtchn = op.u.bind_pirq.port;
  15.712  
  15.713 -    pirq_query_unmask(irq_to_pirq(irq));
  15.714 +	pirq_query_unmask(irq_to_pirq(irq));
  15.715  
  15.716 -    bind_evtchn_to_cpu(evtchn, 0);
  15.717 -    evtchn_to_irq[evtchn] = irq;
  15.718 -    irq_to_evtchn[irq]    = evtchn;
  15.719 +	bind_evtchn_to_cpu(evtchn, 0);
  15.720 +	evtchn_to_irq[evtchn] = irq;
  15.721 +	irq_to_evtchn[irq]    = evtchn;
  15.722  
  15.723 -    unmask_evtchn(evtchn);
  15.724 -    pirq_unmask_notify(irq_to_pirq(irq));
  15.725 +	unmask_evtchn(evtchn);
  15.726 +	pirq_unmask_notify(irq_to_pirq(irq));
  15.727  
  15.728 -    return 0;
  15.729 +	return 0;
  15.730  }
  15.731  
  15.732  static void shutdown_pirq(unsigned int irq)
  15.733  {
  15.734 -    evtchn_op_t op;
  15.735 -    int evtchn = irq_to_evtchn[irq];
  15.736 +	evtchn_op_t op;
  15.737 +	int evtchn = irq_to_evtchn[irq];
  15.738  
  15.739 -    if ( !VALID_EVTCHN(evtchn) )
  15.740 -        return;
  15.741 +	if (!VALID_EVTCHN(evtchn))
  15.742 +		return;
  15.743  
  15.744 -    mask_evtchn(evtchn);
  15.745 +	mask_evtchn(evtchn);
  15.746  
  15.747 -    op.cmd          = EVTCHNOP_close;
  15.748 -    op.u.close.dom  = DOMID_SELF;
  15.749 -    op.u.close.port = evtchn;
  15.750 -    if ( HYPERVISOR_event_channel_op(&op) != 0 )
  15.751 -        panic("Failed to unbind physical IRQ %d\n", irq);
  15.752 +	op.cmd          = EVTCHNOP_close;
  15.753 +	op.u.close.dom  = DOMID_SELF;
  15.754 +	op.u.close.port = evtchn;
  15.755 +	BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
  15.756  
  15.757 -    bind_evtchn_to_cpu(evtchn, 0);
  15.758 -    evtchn_to_irq[evtchn] = -1;
  15.759 -    irq_to_evtchn[irq]    = -1;
  15.760 +	bind_evtchn_to_cpu(evtchn, 0);
  15.761 +	evtchn_to_irq[evtchn] = -1;
  15.762 +	irq_to_evtchn[irq]    = -1;
  15.763  }
  15.764  
  15.765  static void enable_pirq(unsigned int irq)
  15.766  {
  15.767 -    int evtchn = irq_to_evtchn[irq];
  15.768 -    if ( !VALID_EVTCHN(evtchn) )
  15.769 -        return;
  15.770 -    unmask_evtchn(evtchn);
  15.771 -    pirq_unmask_notify(irq_to_pirq(irq));
  15.772 +	int evtchn = irq_to_evtchn[irq];
  15.773 +
  15.774 +	if (VALID_EVTCHN(evtchn)) {
  15.775 +		unmask_evtchn(evtchn);
  15.776 +		pirq_unmask_notify(irq_to_pirq(irq));
  15.777 +	}
  15.778  }
  15.779  
  15.780  static void disable_pirq(unsigned int irq)
  15.781  {
  15.782 -    int evtchn = irq_to_evtchn[irq];
  15.783 -    if ( !VALID_EVTCHN(evtchn) )
  15.784 -        return;
  15.785 -    mask_evtchn(evtchn);
  15.786 +	int evtchn = irq_to_evtchn[irq];
  15.787 +
  15.788 +	if (VALID_EVTCHN(evtchn))
  15.789 +		mask_evtchn(evtchn);
  15.790  }
  15.791  
  15.792  static void ack_pirq(unsigned int irq)
  15.793  {
  15.794 -    int evtchn = irq_to_evtchn[irq];
  15.795 -    if ( !VALID_EVTCHN(evtchn) )
  15.796 -        return;
  15.797 -    mask_evtchn(evtchn);
  15.798 -    clear_evtchn(evtchn);
  15.799 +	int evtchn = irq_to_evtchn[irq];
  15.800 +
  15.801 +	if (VALID_EVTCHN(evtchn)) {
  15.802 +		mask_evtchn(evtchn);
  15.803 +		clear_evtchn(evtchn);
  15.804 +	}
  15.805  }
  15.806  
  15.807  static void end_pirq(unsigned int irq)
  15.808  {
  15.809 -    int evtchn = irq_to_evtchn[irq];
  15.810 -    if ( !VALID_EVTCHN(evtchn) )
  15.811 -        return;
  15.812 -    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
  15.813 -    {
  15.814 -        unmask_evtchn(evtchn);
  15.815 -        pirq_unmask_notify(irq_to_pirq(irq));
  15.816 -    }
  15.817 +	int evtchn = irq_to_evtchn[irq];
  15.818 +
  15.819 +	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
  15.820 +		unmask_evtchn(evtchn);
  15.821 +		pirq_unmask_notify(irq_to_pirq(irq));
  15.822 +	}
  15.823  }
  15.824  
  15.825  static struct hw_interrupt_type pirq_type = {
  15.826 -    "Phys-irq",
  15.827 -    startup_pirq,
  15.828 -    shutdown_pirq,
  15.829 -    enable_pirq,
  15.830 -    disable_pirq,
  15.831 -    ack_pirq,
  15.832 -    end_pirq,
  15.833 -    set_affinity_irq
  15.834 +	"Phys-irq",
  15.835 +	startup_pirq,
  15.836 +	shutdown_pirq,
  15.837 +	enable_pirq,
  15.838 +	disable_pirq,
  15.839 +	ack_pirq,
  15.840 +	end_pirq,
  15.841 +	set_affinity_irq
  15.842  };
  15.843  
  15.844  void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
  15.845  {
  15.846 -    int evtchn = irq_to_evtchn[i];
  15.847 -    shared_info_t *s = HYPERVISOR_shared_info;
  15.848 -    if ( !VALID_EVTCHN(evtchn) )
  15.849 -        return;
  15.850 -    BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
  15.851 -    synch_set_bit(evtchn, &s->evtchn_pending[0]);
  15.852 +	int evtchn = irq_to_evtchn[i];
  15.853 +	shared_info_t *s = HYPERVISOR_shared_info;
  15.854 +	if (!VALID_EVTCHN(evtchn))
  15.855 +		return;
  15.856 +	BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
  15.857 +	synch_set_bit(evtchn, &s->evtchn_pending[0]);
  15.858  }
  15.859  
  15.860 -void irq_suspend(void)
  15.861 +void notify_remote_via_irq(int irq)
  15.862  {
  15.863 -    int pirq, virq, irq, evtchn;
  15.864 -    int cpu = smp_processor_id(); /* XXX */
  15.865 +	int evtchn = irq_to_evtchn[irq];
  15.866  
  15.867 -    /* Unbind VIRQs from event channels. */
  15.868 -    for ( virq = 0; virq < NR_VIRQS; virq++ )
  15.869 -    {
  15.870 -        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
  15.871 -            continue;
  15.872 -        evtchn = irq_to_evtchn[irq];
  15.873 -
  15.874 -        /* Mark the event channel as unused in our table. */
  15.875 -        evtchn_to_irq[evtchn] = -1;
  15.876 -        irq_to_evtchn[irq]    = -1;
  15.877 -    }
  15.878 -
  15.879 -    /* Check that no PIRQs are still bound. */
  15.880 -    for ( pirq = 0; pirq < NR_PIRQS; pirq++ )
  15.881 -        if ( (evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1 )
  15.882 -            panic("Suspend attempted while PIRQ %d bound to evtchn %d.\n",
  15.883 -                  pirq, evtchn);
  15.884 +	if (VALID_EVTCHN(evtchn))
  15.885 +		notify_remote_via_evtchn(evtchn);
  15.886  }
  15.887  
  15.888  void irq_resume(void)
  15.889  {
  15.890 -    evtchn_op_t op;
  15.891 -    int         virq, irq, evtchn;
  15.892 -    int cpu = smp_processor_id(); /* XXX */
  15.893 +	evtchn_op_t op;
  15.894 +	int         cpu, pirq, virq, ipi, irq, evtchn;
  15.895 +
  15.896 +	init_evtchn_cpu_bindings();
  15.897 +
  15.898 +	/* New event-channel space is not 'live' yet. */
  15.899 +	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
  15.900 +		mask_evtchn(evtchn);
  15.901 +
  15.902 +	/* Check that no PIRQs are still bound. */
  15.903 +	for (pirq = 0; pirq < NR_PIRQS; pirq++)
  15.904 +		BUG_ON(irq_to_evtchn[pirq_to_irq(pirq)] != -1);
  15.905  
  15.906 -    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
  15.907 -        mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
  15.908 +	/* Secondary CPUs must have no VIRQ or IPI bindings. */
  15.909 +	for (cpu = 1; cpu < NR_CPUS; cpu++) {
  15.910 +		for (virq = 0; virq < NR_VIRQS; virq++)
  15.911 +			BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
  15.912 +		for (ipi = 0; ipi < NR_IPIS; ipi++)
  15.913 +			BUG_ON(per_cpu(ipi_to_evtchn, cpu)[ipi] != -1);
  15.914 +	}
  15.915  
  15.916 -    for ( virq = 0; virq < NR_VIRQS; virq++ )
  15.917 -    {
  15.918 -        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
  15.919 -            continue;
  15.920 +	/* No IRQ -> event-channel mappings. */
  15.921 +	for (irq = 0; irq < NR_IRQS; irq++)
  15.922 +		irq_to_evtchn[irq] = -1;
  15.923 +
  15.924 +	/* Primary CPU: rebind VIRQs automatically. */
  15.925 +	for (virq = 0; virq < NR_VIRQS; virq++) {
  15.926 +		if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
  15.927 +			continue;
  15.928  
  15.929 -        /* Get a new binding from Xen. */
  15.930 -        op.cmd              = EVTCHNOP_bind_virq;
  15.931 -        op.u.bind_virq.virq = virq;
  15.932 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
  15.933 -            panic("Failed to bind virtual IRQ %d\n", virq);
  15.934 -        evtchn = op.u.bind_virq.port;
  15.935 +		/* Get a new binding from Xen. */
  15.936 +		op.cmd              = EVTCHNOP_bind_virq;
  15.937 +		op.u.bind_virq.virq = virq;
  15.938 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
  15.939 +		evtchn = op.u.bind_virq.port;
  15.940          
  15.941 -        /* Record the new mapping. */
  15.942 -        bind_evtchn_to_cpu(evtchn, 0);
  15.943 -        evtchn_to_irq[evtchn] = irq;
  15.944 -        irq_to_evtchn[irq]    = evtchn;
  15.945 +		/* Record the new mapping. */
  15.946 +		evtchn_to_irq[evtchn] = irq;
  15.947 +		irq_to_evtchn[irq]    = evtchn;
  15.948 +
  15.949 +		/* Ready for use. */
  15.950 +		unmask_evtchn(evtchn);
  15.951 +	}
  15.952 +
  15.953 +	/* Primary CPU: rebind IPIs automatically. */
  15.954 +	for (ipi = 0; ipi < NR_IPIS; ipi++) {
  15.955 +		if ((evtchn = per_cpu(ipi_to_evtchn, 0)[ipi]) == -1)
  15.956 +			continue;
  15.957 +
  15.958 +		irq = evtchn_to_irq[evtchn];
  15.959 +		evtchn_to_irq[evtchn] = -1;
  15.960  
  15.961 -        /* Ready for use. */
  15.962 -        unmask_evtchn(evtchn);
  15.963 -    }
  15.964 +		/* Get a new binding from Xen. */
  15.965 +		op.cmd = EVTCHNOP_bind_ipi;
  15.966 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
  15.967 +		evtchn = op.u.bind_ipi.port;
  15.968 +        
  15.969 +		/* Record the new mapping. */
  15.970 +		evtchn_to_irq[evtchn] = irq;
  15.971 +		irq_to_evtchn[irq]    = evtchn;
  15.972 +
  15.973 +		/* Ready for use. */
  15.974 +		unmask_evtchn(evtchn);
  15.975 +	}
  15.976 +
  15.977 +	/* Remove defunct event-channel -> IRQ mappings. */
  15.978 +	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) {
  15.979 +		if ((evtchn_to_irq[evtchn] != -1) &&
  15.980 +		    (irq_to_evtchn[evtchn_to_irq[evtchn]] == -1))
  15.981 +			evtchn_to_irq[evtchn] = -1;
  15.982 +	}
  15.983  }
  15.984  
  15.985  void __init init_IRQ(void)
  15.986  {
  15.987 -    int i;
  15.988 -    int cpu;
  15.989 -
  15.990 -    irq_ctx_init(0);
  15.991 +	int i;
  15.992 +	int cpu;
  15.993  
  15.994 -    spin_lock_init(&irq_mapping_update_lock);
  15.995 +	irq_ctx_init(0);
  15.996  
  15.997 -#ifdef CONFIG_SMP
  15.998 -    /* By default all event channels notify CPU#0. */
  15.999 -    memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
 15.1000 -#endif
 15.1001 +	spin_lock_init(&irq_mapping_update_lock);
 15.1002 +
 15.1003 +	init_evtchn_cpu_bindings();
 15.1004  
 15.1005 -    for ( cpu = 0; cpu < NR_CPUS; cpu++ ) {
 15.1006 -        /* No VIRQ -> IRQ mappings. */
 15.1007 -        for ( i = 0; i < NR_VIRQS; i++ )
 15.1008 -            per_cpu(virq_to_irq, cpu)[i] = -1;
 15.1009 -    }
 15.1010 +	/* No VIRQ or IPI bindings. */
 15.1011 +	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 15.1012 +		for (i = 0; i < NR_VIRQS; i++)
 15.1013 +			per_cpu(virq_to_irq, cpu)[i] = -1;
 15.1014 +		for (i = 0; i < NR_IPIS; i++)
 15.1015 +			per_cpu(ipi_to_evtchn, cpu)[i] = -1;
 15.1016 +	}
 15.1017  
 15.1018 -    /* No event-channel -> IRQ mappings. */
 15.1019 -    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
 15.1020 -    {
 15.1021 -        evtchn_to_irq[i] = -1;
 15.1022 -        mask_evtchn(i); /* No event channels are 'live' right now. */
 15.1023 -    }
 15.1024 +	/* No event-channel -> IRQ mappings. */
 15.1025 +	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
 15.1026 +		evtchn_to_irq[i] = -1;
 15.1027 +		mask_evtchn(i); /* No event channels are 'live' right now. */
 15.1028 +	}
 15.1029 +
 15.1030 +	/* No IRQ -> event-channel mappings. */
 15.1031 +	for (i = 0; i < NR_IRQS; i++)
 15.1032 +		irq_to_evtchn[i] = -1;
 15.1033  
 15.1034 -    /* No IRQ -> event-channel mappings. */
 15.1035 -    for ( i = 0; i < NR_IRQS; i++ )
 15.1036 -        irq_to_evtchn[i] = -1;
 15.1037 +	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
 15.1038 +	for (i = 0; i < NR_DYNIRQS; i++) {
 15.1039 +		irq_bindcount[dynirq_to_irq(i)] = 0;
 15.1040  
 15.1041 -    for ( i = 0; i < NR_DYNIRQS; i++ )
 15.1042 -    {
 15.1043 -        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
 15.1044 -        irq_bindcount[dynirq_to_irq(i)] = 0;
 15.1045 +		irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
 15.1046 +		irq_desc[dynirq_to_irq(i)].action  = 0;
 15.1047 +		irq_desc[dynirq_to_irq(i)].depth   = 1;
 15.1048 +		irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
 15.1049 +	}
 15.1050 +
 15.1051 +	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
 15.1052 +	for (i = 0; i < NR_PIRQS; i++)
 15.1053 +	{
 15.1054 +		irq_bindcount[pirq_to_irq(i)] = 1;
 15.1055  
 15.1056 -        irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
 15.1057 -        irq_desc[dynirq_to_irq(i)].action  = 0;
 15.1058 -        irq_desc[dynirq_to_irq(i)].depth   = 1;
 15.1059 -        irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
 15.1060 -    }
 15.1061 +		irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
 15.1062 +		irq_desc[pirq_to_irq(i)].action  = 0;
 15.1063 +		irq_desc[pirq_to_irq(i)].depth   = 1;
 15.1064 +		irq_desc[pirq_to_irq(i)].handler = &pirq_type;
 15.1065 +	}
 15.1066 +}
 15.1067  
 15.1068 -    for ( i = 0; i < NR_PIRQS; i++ )
 15.1069 -    {
 15.1070 -        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
 15.1071 -        irq_bindcount[pirq_to_irq(i)] = 1;
 15.1072 -
 15.1073 -        irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
 15.1074 -        irq_desc[pirq_to_irq(i)].action  = 0;
 15.1075 -        irq_desc[pirq_to_irq(i)].depth   = 1;
 15.1076 -        irq_desc[pirq_to_irq(i)].handler = &pirq_type;
 15.1077 -    }
 15.1078 -}
 15.1079 +/*
 15.1080 + * Local variables:
 15.1081 + *  c-file-style: "linux"
 15.1082 + *  indent-tabs-mode: t
 15.1083 + *  c-indent-level: 8
 15.1084 + *  c-basic-offset: 8
 15.1085 + *  tab-width: 8
 15.1086 + * End:
 15.1087 + */
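Beyond the reindentation, the evtchn.c hunks change the driver-facing API: bind_evtchn_to_irqhandler() now returns the IRQ number, and unbind_evtchn_from_irq()/unbind_evtchn_from_irqhandler() take that IRQ rather than the event channel, closing the channel once its bind count reaches zero. A sketch of how a caller uses the updated pair — my_handler, my_connect, my_disconnect and my_dev are placeholder names, not code from this tree:

	/* Illustrative usage of the reworked binding API. */
	static irqreturn_t my_handler(int irq, void *dev_id, struct pt_regs *regs)
	{
		/* service the event for dev_id */
		return IRQ_HANDLED;
	}

	static int my_connect(unsigned int evtchn, void *my_dev)
	{
		/* returns the Linux IRQ now bound to 'evtchn' */
		return bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
		                                 "my-dev", my_dev);
	}

	static void my_disconnect(int irq, void *my_dev)
	{
		/* tear down by IRQ, not by event channel, under the new API */
		unbind_evtchn_from_irqhandler(irq, my_dev);
	}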
    16.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/fixup.c	Thu Sep 29 16:22:02 2005 -0600
    16.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/fixup.c	Thu Sep 29 17:28:44 2005 -0600
    16.3 @@ -37,51 +37,57 @@
    16.4  
    16.5  #define DP(_f, _args...) printk(KERN_ALERT "  " _f "\n" , ## _args )
    16.6  
    16.7 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
    16.8 -#define __LINKAGE fastcall
    16.9 -#else
   16.10 -#define __LINKAGE asmlinkage
   16.11 -#endif
   16.12 -
   16.13 -__LINKAGE void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
   16.14 +fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
   16.15  {
   16.16 -    static unsigned long printed = 0;
   16.17 -    char info[100];
   16.18 -    int i;
   16.19 +	static unsigned long printed = 0;
   16.20 +	char info[100];
   16.21 +	int i;
   16.22  
   16.23 -    if ( !test_and_set_bit(0, &printed) )
   16.24 -    {
   16.25 -        HYPERVISOR_vm_assist(VMASST_CMD_disable,
   16.26 -			     VMASST_TYPE_4gb_segments_notify);
   16.27 +	if (test_and_set_bit(0, &printed))
   16.28 +		return;
   16.29  
   16.30 -        sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
   16.31 +	HYPERVISOR_vm_assist(
   16.32 +		VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify);
   16.33 +
   16.34 +	sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
   16.36  
   16.37 -        DP("");
   16.38 -        DP("***************************************************************");
   16.39 -        DP("***************************************************************");
   16.40 -        DP("** WARNING: Currently emulating unsupported memory accesses  **");
   16.41 -        DP("**          in /lib/tls libraries. The emulation is very     **");
   16.42 -        DP("**          slow. To ensure full performance you should      **");
   16.43 -        DP("**          execute the following as root:                   **");
   16.44 -        DP("**          mv /lib/tls /lib/tls.disabled                    **");
   16.45 -        DP("** Offending process: %-38.38s **", info);
   16.46 -        DP("***************************************************************");
   16.47 -        DP("***************************************************************");
   16.48 -        DP("");
   16.49 +	DP("");
   16.50 +	DP("***************************************************************");
   16.51 +	DP("***************************************************************");
   16.52 +	DP("** WARNING: Currently emulating unsupported memory accesses  **");
   16.53 +	DP("**          in /lib/tls libraries. The emulation is very     **");
   16.54 +	DP("**          slow. To ensure full performance you should      **");
   16.55 +	DP("**          execute the following as root:                   **");
   16.56 +	DP("**          mv /lib/tls /lib/tls.disabled                    **");
   16.57 +	DP("** Offending process: %-38.38s **", info);
   16.58 +	DP("***************************************************************");
   16.59 +	DP("***************************************************************");
   16.60 +	DP("");
   16.61  
   16.62 -        for ( i = 5; i > 0; i-- )
   16.63 -        {
   16.64 -            printk("Pausing... %d", i);
   16.65 -            mdelay(1000);
   16.66 -            printk("\b\b\b\b\b\b\b\b\b\b\b\b");
   16.67 -        }
   16.68 -        printk("Continuing...\n\n");
   16.69 -    }
   16.70 +	for (i = 5; i > 0; i--) {
   16.71 +		printk("Pausing... %d", i);
   16.72 +		mdelay(1000);
   16.73 +		printk("\b\b\b\b\b\b\b\b\b\b\b\b");
   16.74 +	}
   16.75 +
   16.76 +	printk("Continuing...\n\n");
   16.77  }
   16.78  
   16.79  static int __init fixup_init(void)
   16.80  {
   16.81 -    HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
   16.82 -    return 0;
   16.83 +	HYPERVISOR_vm_assist(
   16.84 +		VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
   16.85 +	return 0;
   16.86  }
   16.87  __initcall(fixup_init);
   16.88 +
   16.89 +/*
   16.90 + * Local variables:
   16.91 + *  c-file-style: "linux"
   16.92 + *  indent-tabs-mode: t
   16.93 + *  c-indent-level: 8
   16.94 + *  c-basic-offset: 8
   16.95 + *  tab-width: 8
   16.96 + * End:
   16.97 + */
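Behaviourally the rewrite is unchanged: the first fault through a /lib/tls segment turns off further 4GB-segment notifications, prints the banner once and stalls for a few seconds so the warning is seen. The print-once guard is the usual test_and_set_bit idiom; in outline:

	static unsigned long printed;

	if (test_and_set_bit(0, &printed))
		return;		/* every later caller bails out here */
	/* ...one-time warning and slow-path work... */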
    17.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/gnttab.c	Thu Sep 29 16:22:02 2005 -0600
    17.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/gnttab.c	Thu Sep 29 17:28:44 2005 -0600
    17.3 @@ -1,13 +1,10 @@
    17.4  /******************************************************************************
    17.5   * gnttab.c
    17.6   * 
    17.7 - * Two sets of functionality:
    17.8 - * 1. Granting foreign access to our memory reservation.
    17.9 - * 2. Accessing others' memory reservations via grant references.
   17.10 - * (i.e., mechanisms for both sender and recipient of grant references)
   17.11 + * Granting foreign access to our memory reservation.
   17.12   * 
   17.13   * Copyright (c) 2005, Christopher Clark
   17.14 - * Copyright (c) 2004, K A Fraser
   17.15 + * Copyright (c) 2004-2005, K A Fraser
   17.16   */
   17.17  
   17.18  #include <linux/config.h>
   17.19 @@ -23,15 +20,15 @@
   17.20  #include <asm/synch_bitops.h>
   17.21  
   17.22  #if 1
   17.23 -#define ASSERT(_p) \
   17.24 -    if ( !(_p) ) { printk(KERN_ALERT"Assertion '%s': line %d, file %s\n", \
   17.25 -    #_p , __LINE__, __FILE__); *(int*)0=0; }
   17.26 +#define ASSERT(_p)							      \
   17.27 +	if ( !(_p) ) { printk(KERN_ALERT"Assertion '%s': line %d, file %s\n", \
   17.28 +	#_p , __LINE__, __FILE__); *(int*)0=0; }
   17.29  #else
   17.30  #define ASSERT(_p) ((void)0)
   17.31  #endif
   17.32  
   17.33 -#define WPRINTK(fmt, args...) \
   17.34 -    printk(KERN_WARNING "xen_grant: " fmt, ##args)
   17.35 +#define WPRINTK(fmt, args...)				\
   17.36 +	printk(KERN_WARNING "xen_grant: " fmt, ##args)
   17.37  
   17.38  
   17.39  EXPORT_SYMBOL(gnttab_grant_foreign_access);
   17.40 @@ -49,11 +46,14 @@ EXPORT_SYMBOL(gnttab_release_grant_refer
   17.41  EXPORT_SYMBOL(gnttab_grant_foreign_access_ref);
   17.42  EXPORT_SYMBOL(gnttab_grant_foreign_transfer_ref);
   17.43  
   17.44 +/* External tools reserve first few grant table entries. */
   17.45 +#define NR_RESERVED_ENTRIES 8
   17.46 +
   17.47  #define NR_GRANT_ENTRIES (NR_GRANT_FRAMES * PAGE_SIZE / sizeof(grant_entry_t))
   17.48  #define GNTTAB_LIST_END (NR_GRANT_ENTRIES + 1)
   17.49  
   17.50  static grant_ref_t gnttab_list[NR_GRANT_ENTRIES];
   17.51 -static int gnttab_free_count = NR_GRANT_ENTRIES;
   17.52 +static int gnttab_free_count;
   17.53  static grant_ref_t gnttab_free_head;
   17.54  static spinlock_t gnttab_list_lock = SPIN_LOCK_UNLOCKED;
   17.55  
   17.56 @@ -64,22 +64,22 @@ static struct gnttab_free_callback *gntt
   17.57  static int
   17.58  get_free_entries(int count)
   17.59  {
   17.60 -    unsigned long flags;
   17.61 -    int ref;
   17.62 -    grant_ref_t head;
   17.63 -    spin_lock_irqsave(&gnttab_list_lock, flags);
   17.64 -    if (gnttab_free_count < count) {
   17.65 +	unsigned long flags;
   17.66 +	int ref;
   17.67 +	grant_ref_t head;
   17.68 +	spin_lock_irqsave(&gnttab_list_lock, flags);
   17.69 +	if (gnttab_free_count < count) {
   17.70 +		spin_unlock_irqrestore(&gnttab_list_lock, flags);
   17.71 +		return -1;
   17.72 +	}
   17.73 +	ref = head = gnttab_free_head;
   17.74 +	gnttab_free_count -= count;
   17.75 +	while (count-- > 1)
   17.76 +		head = gnttab_list[head];
   17.77 +	gnttab_free_head = gnttab_list[head];
   17.78 +	gnttab_list[head] = GNTTAB_LIST_END;
   17.79  	spin_unlock_irqrestore(&gnttab_list_lock, flags);
   17.80 -	return -1;
   17.81 -    }
   17.82 -    ref = head = gnttab_free_head;
   17.83 -    gnttab_free_count -= count;
   17.84 -    while (count-- > 1)
   17.85 -	head = gnttab_list[head];
   17.86 -    gnttab_free_head = gnttab_list[head];
   17.87 -    gnttab_list[head] = GNTTAB_LIST_END;
   17.88 -    spin_unlock_irqrestore(&gnttab_list_lock, flags);
   17.89 -    return ref;
   17.90 +	return ref;
   17.91  }
   17.92  
   17.93  #define get_free_entry() get_free_entries(1)
   17.94 @@ -87,38 +87,41 @@ get_free_entries(int count)
   17.95  static void
   17.96  do_free_callbacks(void)
   17.97  {
   17.98 -    struct gnttab_free_callback *callback = gnttab_free_callback_list, *next;
   17.99 -    gnttab_free_callback_list = NULL;
  17.100 -    while (callback) {
  17.101 -	next = callback->next;
  17.102 -	if (gnttab_free_count >= callback->count) {
  17.103 -	    callback->next = NULL;
  17.104 -	    callback->fn(callback->arg);
  17.105 -	} else {
  17.106 -	    callback->next = gnttab_free_callback_list;
  17.107 -	    gnttab_free_callback_list = callback;
  17.108 +	struct gnttab_free_callback *callback, *next;
  17.109 +
  17.110 +	callback = gnttab_free_callback_list;
  17.111 +	gnttab_free_callback_list = NULL;
  17.112 +
  17.113 +	while (callback != NULL) {
  17.114 +		next = callback->next;
  17.115 +		if (gnttab_free_count >= callback->count) {
  17.116 +			callback->next = NULL;
  17.117 +			callback->fn(callback->arg);
  17.118 +		} else {
  17.119 +			callback->next = gnttab_free_callback_list;
  17.120 +			gnttab_free_callback_list = callback;
  17.121 +		}
  17.122 +		callback = next;
  17.123  	}
  17.124 -	callback = next;
  17.125 -    }
  17.126  }
  17.127  
  17.128  static inline void
  17.129  check_free_callbacks(void)
  17.130  {
  17.131 -    if (unlikely(gnttab_free_callback_list))
  17.132 -	do_free_callbacks();
  17.133 +	if (unlikely(gnttab_free_callback_list))
  17.134 +		do_free_callbacks();
  17.135  }
  17.136  
  17.137  static void
  17.138  put_free_entry(grant_ref_t ref)
  17.139  {
  17.140 -    unsigned long flags;
  17.141 -    spin_lock_irqsave(&gnttab_list_lock, flags);
  17.142 -    gnttab_list[ref] = gnttab_free_head;
  17.143 -    gnttab_free_head = ref;
  17.144 -    gnttab_free_count++;
  17.145 -    check_free_callbacks();
  17.146 -    spin_unlock_irqrestore(&gnttab_list_lock, flags);
  17.147 +	unsigned long flags;
  17.148 +	spin_lock_irqsave(&gnttab_list_lock, flags);
  17.149 +	gnttab_list[ref] = gnttab_free_head;
  17.150 +	gnttab_free_head = ref;
  17.151 +	gnttab_free_count++;
  17.152 +	check_free_callbacks();
  17.153 +	spin_unlock_irqrestore(&gnttab_list_lock, flags);
  17.154  }
  17.155  
  17.156  /*
  17.157 @@ -128,187 +131,189 @@ put_free_entry(grant_ref_t ref)
  17.158  int
  17.159  gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly)
  17.160  {
  17.161 -    int ref;
  17.162 +	int ref;
  17.163      
  17.164 -    if ( unlikely((ref = get_free_entry()) == -1) )
  17.165 -        return -ENOSPC;
  17.166 +	if (unlikely((ref = get_free_entry()) == -1))
  17.167 +		return -ENOSPC;
  17.168  
  17.169 -    shared[ref].frame = frame;
  17.170 -    shared[ref].domid = domid;
  17.171 -    wmb();
  17.172 -    shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
  17.173 +	shared[ref].frame = frame;
  17.174 +	shared[ref].domid = domid;
  17.175 +	wmb();
  17.176 +	shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
  17.177  
  17.178 -    return ref;
  17.179 +	return ref;
  17.180  }
  17.181  
  17.182  void
  17.183  gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
  17.184  				unsigned long frame, int readonly)
  17.185  {
  17.186 -    shared[ref].frame = frame;
  17.187 -    shared[ref].domid = domid;
  17.188 -    wmb();
  17.189 -    shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
  17.190 +	shared[ref].frame = frame;
  17.191 +	shared[ref].domid = domid;
  17.192 +	wmb();
  17.193 +	shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
  17.194  }
  17.195  
  17.196  
  17.197  int
  17.198  gnttab_query_foreign_access(grant_ref_t ref)
  17.199  {
  17.200 -    u16 nflags;
  17.201 +	u16 nflags;
  17.202  
  17.203 -    nflags = shared[ref].flags;
  17.204 +	nflags = shared[ref].flags;
  17.205  
  17.206 -    return ( nflags & (GTF_reading|GTF_writing) );
  17.207 +	return (nflags & (GTF_reading|GTF_writing));
  17.208  }
  17.209  
  17.210  void
  17.211  gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
  17.212  {
  17.213 -    u16 flags, nflags;
  17.214 +	u16 flags, nflags;
  17.215  
  17.216 -    nflags = shared[ref].flags;
  17.217 -    do {
  17.218 -        if ( (flags = nflags) & (GTF_reading|GTF_writing) )
  17.219 -            printk(KERN_ALERT "WARNING: g.e. still in use!\n");
  17.220 -    }
  17.221 -    while ( (nflags = synch_cmpxchg(&shared[ref].flags, flags, 0)) != flags );
  17.222 +	nflags = shared[ref].flags;
  17.223 +	do {
  17.224 +		if ((flags = nflags) & (GTF_reading|GTF_writing))
  17.225 +			printk(KERN_ALERT "WARNING: g.e. still in use!\n");
  17.226 +	}
  17.227 +	while ((nflags = synch_cmpxchg(&shared[ref].flags, flags, 0)) !=
  17.228 +	       flags);
  17.229  }
  17.230  
  17.231  void
  17.232  gnttab_end_foreign_access(grant_ref_t ref, int readonly)
  17.233  {
  17.234 -    gnttab_end_foreign_access_ref(ref, readonly);
  17.235 -    put_free_entry(ref);
  17.236 +	gnttab_end_foreign_access_ref(ref, readonly);
  17.237 +	put_free_entry(ref);
  17.238  }
  17.239  
  17.240  int
  17.241  gnttab_grant_foreign_transfer(domid_t domid)
  17.242  {
  17.243 -    int ref;
  17.244 +	int ref;
  17.245  
  17.246 -    if ( unlikely((ref = get_free_entry()) == -1) )
  17.247 -        return -ENOSPC;
  17.248 +	if (unlikely((ref = get_free_entry()) == -1))
  17.249 +		return -ENOSPC;
  17.250  
  17.251 -    shared[ref].frame = 0;
  17.252 -    shared[ref].domid = domid;
  17.253 -    wmb();
  17.254 -    shared[ref].flags = GTF_accept_transfer;
  17.255 +	shared[ref].frame = 0;
  17.256 +	shared[ref].domid = domid;
  17.257 +	wmb();
  17.258 +	shared[ref].flags = GTF_accept_transfer;
  17.259  
  17.260 -    return ref;
  17.261 +	return ref;
  17.262  }
  17.263  
  17.264  void
  17.265  gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid)
  17.266  {
  17.267 -    shared[ref].frame = 0;
  17.268 -    shared[ref].domid = domid;
  17.269 -    wmb();
  17.270 -    shared[ref].flags = GTF_accept_transfer;
  17.271 +	shared[ref].frame = 0;
  17.272 +	shared[ref].domid = domid;
  17.273 +	wmb();
  17.274 +	shared[ref].flags = GTF_accept_transfer;
  17.275  }
  17.276  
  17.277  unsigned long
  17.278  gnttab_end_foreign_transfer_ref(grant_ref_t ref)
  17.279  {
  17.280 -    unsigned long frame = 0;
  17.281 -    u16           flags;
  17.282 +	unsigned long frame = 0;
  17.283 +	u16           flags;
  17.284  
  17.285 -    flags = shared[ref].flags;
  17.286 +	flags = shared[ref].flags;
  17.287  
  17.288 -    /*
  17.289 -     * If a transfer is committed then wait for the frame address to appear.
  17.290 -     * Otherwise invalidate the grant entry against future use.
  17.291 -     */
  17.292 -    if ( likely(flags != GTF_accept_transfer) ||
  17.293 -         (synch_cmpxchg(&shared[ref].flags, flags, 0) != GTF_accept_transfer) )
  17.294 -        while ( unlikely((frame = shared[ref].frame) == 0) )
  17.295 -            cpu_relax();
  17.296 +	/*
  17.297 +	 * If a transfer is committed then wait for the frame address to
  17.298 +	 * appear. Otherwise invalidate the grant entry against future use.
  17.299 +	 */
  17.300 +	if (likely(flags != GTF_accept_transfer) ||
  17.301 +	    (synch_cmpxchg(&shared[ref].flags, flags, 0) !=
  17.302 +	     GTF_accept_transfer))
  17.303 +		while (unlikely((frame = shared[ref].frame) == 0))
  17.304 +			cpu_relax();
  17.305  
  17.306 -    return frame;
  17.307 +	return frame;
  17.308  }
  17.309  
  17.310  unsigned long
  17.311  gnttab_end_foreign_transfer(grant_ref_t ref)
  17.312  {
  17.313 -    unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
  17.314 -    put_free_entry(ref);
  17.315 -    return frame;
  17.316 +	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
  17.317 +	put_free_entry(ref);
  17.318 +	return frame;
  17.319  }
  17.320  
  17.321  void
  17.322  gnttab_free_grant_reference(grant_ref_t ref)
  17.323  {
  17.324  
  17.325 -    put_free_entry(ref);
  17.326 +	put_free_entry(ref);
  17.327  }
  17.328  
  17.329  void
  17.330  gnttab_free_grant_references(grant_ref_t head)
  17.331  {
  17.332 -    grant_ref_t ref;
  17.333 -    unsigned long flags;
  17.334 -    int count = 1;
  17.335 -    if (head == GNTTAB_LIST_END)
  17.336 -	return;
  17.337 -    spin_lock_irqsave(&gnttab_list_lock, flags);
  17.338 -    ref = head;
  17.339 -    while (gnttab_list[ref] != GNTTAB_LIST_END) {
  17.340 -	ref = gnttab_list[ref];
  17.341 -	count++;
  17.342 -    }
  17.343 -    gnttab_list[ref] = gnttab_free_head;
  17.344 -    gnttab_free_head = head;
  17.345 -    gnttab_free_count += count;
  17.346 -    check_free_callbacks();
  17.347 -    spin_unlock_irqrestore(&gnttab_list_lock, flags);
  17.348 +	grant_ref_t ref;
  17.349 +	unsigned long flags;
  17.350 +	int count = 1;
  17.351 +	if (head == GNTTAB_LIST_END)
  17.352 +		return;
  17.353 +	spin_lock_irqsave(&gnttab_list_lock, flags);
  17.354 +	ref = head;
  17.355 +	while (gnttab_list[ref] != GNTTAB_LIST_END) {
  17.356 +		ref = gnttab_list[ref];
  17.357 +		count++;
  17.358 +	}
  17.359 +	gnttab_list[ref] = gnttab_free_head;
  17.360 +	gnttab_free_head = head;
  17.361 +	gnttab_free_count += count;
  17.362 +	check_free_callbacks();
  17.363 +	spin_unlock_irqrestore(&gnttab_list_lock, flags);
  17.364  }
  17.365  
  17.366  int
  17.367  gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
  17.368  {
  17.369 -    int h = get_free_entries(count);
  17.370 +	int h = get_free_entries(count);
  17.371  
  17.372 -    if (h == -1)
  17.373 -	return -ENOSPC;
  17.374 +	if (h == -1)
  17.375 +		return -ENOSPC;
  17.376  
  17.377 -    *head = h;
  17.378 +	*head = h;
  17.379  
  17.380 -    return 0;
  17.381 +	return 0;
  17.382  }
  17.383  
  17.384  int
  17.385  gnttab_claim_grant_reference(grant_ref_t *private_head)
  17.386  {
  17.387 -    grant_ref_t g = *private_head;
  17.388 -    if (unlikely(g == GNTTAB_LIST_END))
  17.389 -        return -ENOSPC;
  17.390 -    *private_head = gnttab_list[g];
  17.391 -    return g;
  17.392 +	grant_ref_t g = *private_head;
  17.393 +	if (unlikely(g == GNTTAB_LIST_END))
  17.394 +		return -ENOSPC;
  17.395 +	*private_head = gnttab_list[g];
  17.396 +	return g;
  17.397  }
  17.398  
  17.399  void
  17.400  gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t  release)
  17.401  {
  17.402 -    gnttab_list[release] = *private_head;
  17.403 -    *private_head = release;
  17.404 +	gnttab_list[release] = *private_head;
  17.405 +	*private_head = release;
  17.406  }
  17.407  
  17.408  void
  17.409  gnttab_request_free_callback(struct gnttab_free_callback *callback,
  17.410  			     void (*fn)(void *), void *arg, u16 count)
  17.411  {
  17.412 -    unsigned long flags;
  17.413 -    spin_lock_irqsave(&gnttab_list_lock, flags);
  17.414 -    if (callback->next)
  17.415 -	goto out;
  17.416 -    callback->fn = fn;
  17.417 -    callback->arg = arg;
  17.418 -    callback->count = count;
  17.419 -    callback->next = gnttab_free_callback_list;
  17.420 -    gnttab_free_callback_list = callback;
  17.421 -    check_free_callbacks();
  17.422 +	unsigned long flags;
  17.423 +	spin_lock_irqsave(&gnttab_list_lock, flags);
  17.424 +	if (callback->next)
  17.425 +		goto out;
  17.426 +	callback->fn = fn;
  17.427 +	callback->arg = arg;
  17.428 +	callback->count = count;
  17.429 +	callback->next = gnttab_free_callback_list;
  17.430 +	gnttab_free_callback_list = callback;
  17.431 +	check_free_callbacks();
  17.432   out:
  17.433 -    spin_unlock_irqrestore(&gnttab_list_lock, flags);
  17.434 +	spin_unlock_irqrestore(&gnttab_list_lock, flags);
  17.435  }
  17.436  
  17.437  /*
  17.438 @@ -323,79 +328,83 @@ static int
  17.439  grant_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
  17.440  	    unsigned long data)
  17.441  {
  17.442 -    int                     ret;
  17.443 -    privcmd_hypercall_t     hypercall;
  17.444 +	int                     ret;
  17.445 +	privcmd_hypercall_t     hypercall;
  17.446  
  17.447 -    /* XXX Need safety checks here if using for anything other
  17.448 -     *     than debugging */
  17.449 -    return -ENOSYS;
  17.450 +	/*
  17.451 +	 * XXX Need safety checks here if using for anything other
  17.452 +	 *     than debugging.
  17.453 +	 */
  17.454 +	return -ENOSYS;
  17.455  
  17.456 -    if ( cmd != IOCTL_PRIVCMD_HYPERCALL )
  17.457 -        return -ENOSYS;
  17.458 +	if (cmd != IOCTL_PRIVCMD_HYPERCALL)
  17.459 +		return -ENOSYS;
  17.460  
  17.461 -    if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
  17.462 -        return -EFAULT;
  17.463 +	if (copy_from_user(&hypercall, (void *)data, sizeof(hypercall)))
  17.464 +		return -EFAULT;
  17.465  
  17.466 -    if ( hypercall.op != __HYPERVISOR_grant_table_op )
  17.467 -        return -ENOSYS;
  17.468 +	if (hypercall.op != __HYPERVISOR_grant_table_op)
  17.469 +		return -ENOSYS;
  17.470  
  17.471 -    /* hypercall-invoking asm taken from privcmd.c */
  17.472 -    __asm__ __volatile__ (
  17.473 -        "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
  17.474 -        "movl  4(%%eax),%%ebx ;"
  17.475 -        "movl  8(%%eax),%%ecx ;"
  17.476 -        "movl 12(%%eax),%%edx ;"
  17.477 -        "movl 16(%%eax),%%esi ;"
  17.478 -        "movl 20(%%eax),%%edi ;"
  17.479 -        "movl   (%%eax),%%eax ;"
  17.480 -        TRAP_INSTR "; "
  17.481 -        "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
  17.482 -        : "=a" (ret) : "0" (&hypercall) : "memory" );
  17.483 +	/* hypercall-invoking asm taken from privcmd.c */
  17.484 +	__asm__ __volatile__ (
  17.485 +		"pushl %%ebx; pushl %%ecx; pushl %%edx; "
  17.486 +		"pushl %%esi; pushl %%edi; "
  17.487 +		"movl  4(%%eax),%%ebx ;"
  17.488 +		"movl  8(%%eax),%%ecx ;"
  17.489 +		"movl 12(%%eax),%%edx ;"
  17.490 +		"movl 16(%%eax),%%esi ;"
  17.491 +		"movl 20(%%eax),%%edi ;"
  17.492 +		"movl   (%%eax),%%eax ;"
  17.493 +		TRAP_INSTR "; "
  17.494 +		"popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
  17.495 +		: "=a" (ret) : "0" (&hypercall) : "memory" );
  17.496  
  17.497 -    return ret;
  17.498 +	return ret;
  17.499  }
  17.500  
  17.501  static struct file_operations grant_file_ops = {
  17.502 -    ioctl:  grant_ioctl,
  17.503 +	ioctl:  grant_ioctl,
  17.504  };
  17.505  
  17.506  static int
  17.507  grant_read(char *page, char **start, off_t off, int count, int *eof,
  17.508  	   void *data)
  17.509  {
  17.510 -    int             len;
  17.511 -    unsigned int    i;
  17.512 -    grant_entry_t  *gt;
  17.513 +	int             len;
  17.514 +	unsigned int    i;
  17.515 +	grant_entry_t  *gt;
  17.516  
  17.517 -    gt = (grant_entry_t *)shared;
  17.518 -    len = 0;
  17.519 +	gt = (grant_entry_t *)shared;
  17.520 +	len = 0;
  17.521  
  17.522 -    for ( i = 0; i < NR_GRANT_ENTRIES; i++ )
  17.523 -        /* TODO: safety catch here until this can handle >PAGE_SIZE output */
  17.524 -        if (len > (PAGE_SIZE - 200))
  17.525 -        {
  17.526 -            len += sprintf( page + len, "Truncated.\n");
  17.527 -            break;
  17.528 -        }
  17.529 +	for (i = 0; i < NR_GRANT_ENTRIES; i++) {
  17.530 +		if (len > (PAGE_SIZE - 200)) {
  17.531 +			len += sprintf(page + len, "Truncated.\n");
  17.532 +			break;
  17.533 +		}
  17.534  
  17.535 -        if ( gt[i].flags )
  17.536 -            len += sprintf( page + len,
  17.537 -                    "Grant: ref (0x%x) flags (0x%hx) dom (0x%hx) frame (0x%x)\n", 
  17.538 -                    i,
  17.539 -                    gt[i].flags,
  17.540 -                    gt[i].domid,
  17.541 -                    gt[i].frame );
  17.542 +		if (gt[i].flags) {
  17.543 +			len += sprintf(page + len,
  17.544 +				       "Grant: ref (0x%x) flags (0x%hx) "
  17.545 +				       "dom (0x%hx) frame (0x%x)\n",
  17.546 +				       i,
  17.547 +				       gt[i].flags,
  17.548 +				       gt[i].domid,
  17.549 +				       gt[i].frame);
  17.550 +		}
  17.551 +	}
  17.552  
  17.553 -    *eof = 1;
  17.554 -    return len;
  17.555 +	*eof = 1;
  17.556 +	return len;
  17.557  }
  17.558  
  17.559  static int
  17.560  grant_write(struct file *file, const char __user *buffer, unsigned long count,
  17.561  	    void *data)
  17.562  {
  17.563 -    /* TODO: implement this */
  17.564 -    return -ENOSYS;
  17.565 +	/* TODO: implement this */
  17.566 +	return -ENOSYS;
  17.567  }
  17.568  
  17.569  #endif /* CONFIG_PROC_FS */
  17.570 @@ -403,70 +412,81 @@ grant_write(struct file *file, const cha
  17.571  int
  17.572  gnttab_resume(void)
  17.573  {
  17.574 -    gnttab_setup_table_t setup;
  17.575 -    unsigned long        frames[NR_GRANT_FRAMES];
  17.576 -    int                  i;
  17.577 +	gnttab_setup_table_t setup;
  17.578 +	unsigned long        frames[NR_GRANT_FRAMES];
  17.579 +	int                  i;
  17.580  
  17.581 -    setup.dom        = DOMID_SELF;
  17.582 -    setup.nr_frames  = NR_GRANT_FRAMES;
  17.583 -    setup.frame_list = frames;
  17.584 +	setup.dom        = DOMID_SELF;
  17.585 +	setup.nr_frames  = NR_GRANT_FRAMES;
  17.586 +	setup.frame_list = frames;
  17.587  
  17.588 -    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1) != 0);
  17.589 -    BUG_ON(setup.status != 0);
  17.590 +	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1));
  17.591 +	BUG_ON(setup.status != 0);
  17.592  
  17.593 -    for ( i = 0; i < NR_GRANT_FRAMES; i++ )
  17.594 -        set_fixmap(FIX_GNTTAB_END - i, frames[i] << PAGE_SHIFT);
  17.595 +	for (i = 0; i < NR_GRANT_FRAMES; i++)
  17.596 +		set_fixmap(FIX_GNTTAB_END - i, frames[i] << PAGE_SHIFT);
  17.597  
  17.598 -    return 0;
  17.599 +	return 0;
  17.600  }
  17.601  
  17.602  int
  17.603  gnttab_suspend(void)
  17.604  {
  17.605 -    int i;
  17.606 +	int i;
  17.607  
  17.608 -    for ( i = 0; i < NR_GRANT_FRAMES; i++ )
  17.609 -	clear_fixmap(FIX_GNTTAB_END - i);
  17.610 +	for (i = 0; i < NR_GRANT_FRAMES; i++)
  17.611 +		clear_fixmap(FIX_GNTTAB_END - i);
  17.612  
  17.613 -    return 0;
  17.614 +	return 0;
  17.615  }
  17.616  
  17.617  static int __init
  17.618  gnttab_init(void)
  17.619  {
  17.620 -    int i;
  17.621 +	int i;
  17.622  
  17.623 -    if (xen_init() < 0)
  17.624 -        return -ENODEV;
  17.625 +	if (xen_init() < 0)
  17.626 +		return -ENODEV;
  17.627 +
  17.628 +	BUG_ON(gnttab_resume());
  17.629  
  17.630 -    BUG_ON(gnttab_resume());
  17.631 -
  17.632 -    shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END);
  17.633 +	shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END);
  17.634  
  17.635 -    for ( i = 0; i < NR_GRANT_ENTRIES; i++ )
  17.636 -        gnttab_list[i] = i + 1;
  17.637 -    
  17.638 +	for (i = NR_RESERVED_ENTRIES; i < NR_GRANT_ENTRIES; i++)
  17.639 +		gnttab_list[i] = i + 1;
  17.640 +	gnttab_free_count = NR_GRANT_ENTRIES - NR_RESERVED_ENTRIES;
  17.641 +	gnttab_free_head  = NR_RESERVED_ENTRIES;
  17.642 +
  17.643  #ifdef CONFIG_PROC_FS
  17.644 -    /*
  17.645 -     *  /proc/xen/grant : used by libxc to access grant tables
  17.646 -     */
  17.647 -    if ( (grant_pde = create_xen_proc_entry("grant", 0600)) == NULL )
  17.648 -    {
  17.649 -        WPRINTK("Unable to create grant xen proc entry\n");
  17.650 -        return -1;
  17.651 -    }
  17.652 +	/*
  17.653 +	 *  /proc/xen/grant : used by libxc to access grant tables
  17.654 +	 */
  17.655 +	if ((grant_pde = create_xen_proc_entry("grant", 0600)) == NULL) {
  17.656 +		WPRINTK("Unable to create grant xen proc entry\n");
  17.657 +		return -1;
  17.658 +	}
  17.659  
  17.660 -    grant_file_ops.read   = grant_pde->proc_fops->read;
  17.661 -    grant_file_ops.write  = grant_pde->proc_fops->write;
  17.662 +	grant_file_ops.read   = grant_pde->proc_fops->read;
  17.663 +	grant_file_ops.write  = grant_pde->proc_fops->write;
  17.664  
  17.665 -    grant_pde->proc_fops  = &grant_file_ops;
  17.666 +	grant_pde->proc_fops  = &grant_file_ops;
  17.667  
  17.668 -    grant_pde->read_proc  = &grant_read;
  17.669 -    grant_pde->write_proc = &grant_write;
  17.670 +	grant_pde->read_proc  = &grant_read;
  17.671 +	grant_pde->write_proc = &grant_write;
  17.672  #endif
  17.673  
  17.674 -    printk("Grant table initialized\n");
  17.675 -    return 0;
  17.676 +	printk("Grant table initialized\n");
  17.677 +	return 0;
  17.678  }
  17.679  
  17.680  __initcall(gnttab_init);
  17.681 +
  17.682 +/*
  17.683 + * Local variables:
  17.684 + *  c-file-style: "linux"
  17.685 + *  indent-tabs-mode: t
  17.686 + *  c-indent-level: 8
  17.687 + *  c-basic-offset: 8
  17.688 + *  tab-width: 8
  17.689 + * End:
  17.690 + */
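Taken together, the reworked gnttab.c keeps a spin-locked free list of grant references, now starting at NR_RESERVED_ENTRIES so external tools keep the first eight entries, and lets callers queue a callback for when the list runs dry. A rough sketch of how a driver uses the exported interface (peer_domid, frame_mfn, cb and retry_fn are illustrative placeholders, not from this changeset):

	int ref = gnttab_grant_foreign_access(peer_domid, frame_mfn, 0 /*rw*/);
	if (ref == -ENOSPC) {
		/* Free list exhausted: ask to be notified when references
		 * are returned, then retry the grant. */
		gnttab_request_free_callback(&cb, retry_fn, retry_arg, 1);
	} else {
		/* ...remote domain maps the frame via 'ref'... */
		gnttab_end_foreign_access(ref, 0);	/* revoke and free ref */
	}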
    18.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c	Thu Sep 29 16:22:02 2005 -0600
    18.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c	Thu Sep 29 17:28:44 2005 -0600
    18.3 @@ -12,7 +12,6 @@
    18.4  #include <asm-xen/evtchn.h>
    18.5  #include <asm/hypervisor.h>
    18.6  #include <asm-xen/xen-public/dom0_ops.h>
    18.7 -#include <asm-xen/queues.h>
    18.8  #include <asm-xen/xenbus.h>
    18.9  #include <linux/cpu.h>
   18.10  #include <linux/kthread.h>
   18.11 @@ -43,12 +42,10 @@ void machine_power_off(void)
   18.12  	HYPERVISOR_shutdown();
   18.13  }
   18.14  
   18.15 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
   18.16  int reboot_thru_bios = 0;	/* for dmi_scan.c */
   18.17  EXPORT_SYMBOL(machine_restart);
   18.18  EXPORT_SYMBOL(machine_halt);
   18.19  EXPORT_SYMBOL(machine_power_off);
   18.20 -#endif
   18.21  
   18.22  
   18.23  /******************************************************************************
   18.24 @@ -66,227 +63,194 @@ static int shutting_down = SHUTDOWN_INVA
   18.25  
   18.26  static int __do_suspend(void *ignore)
   18.27  {
   18.28 -    int i, j, k, fpp;
   18.29 -
   18.30 -#ifdef CONFIG_XEN_USB_FRONTEND
   18.31 -    extern void usbif_resume();
   18.32 -#else
   18.33 -#define usbif_resume() do{}while(0)
   18.34 -#endif
   18.35 -
   18.36 -    extern int gnttab_suspend(void);
   18.37 -    extern int gnttab_resume(void);
   18.38 -
   18.39 -    extern void time_suspend(void);
   18.40 -    extern void time_resume(void);
   18.41 -    extern unsigned long max_pfn;
   18.42 -    extern unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[];
   18.43 +	int i, j, k, fpp;
   18.44  
   18.45 -#ifdef CONFIG_SMP
   18.46 -    extern void smp_suspend(void);
   18.47 -    extern void smp_resume(void);
   18.48 -
   18.49 -    static vcpu_guest_context_t suspended_cpu_records[NR_CPUS];
   18.50 -    cpumask_t prev_online_cpus, prev_present_cpus;
   18.51 -
   18.52 -    void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
   18.53 -    int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
   18.54 -#endif
   18.55 -
   18.56 -    extern void xencons_suspend(void);
   18.57 -    extern void xencons_resume(void);
   18.58 -
   18.59 -    int err = 0;
   18.60 -
   18.61 -    BUG_ON(smp_processor_id() != 0);
   18.62 -    BUG_ON(in_interrupt());
   18.63 +	extern int gnttab_suspend(void);
   18.64 +	extern int gnttab_resume(void);
   18.65  
   18.66 -#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
   18.67 -    if (num_online_cpus() > 1) {
   18.68 -	printk(KERN_WARNING 
   18.69 -               "Can't suspend SMP guests without CONFIG_HOTPLUG_CPU\n");
   18.70 -	return -EOPNOTSUPP;
   18.71 -    }
   18.72 -#endif
   18.73 -
   18.74 -    preempt_disable();
   18.75 -#ifdef CONFIG_SMP
   18.76 -    /* Take all of the other cpus offline.  We need to be careful not
   18.77 -       to get preempted between the final test for num_online_cpus()
   18.78 -       == 1 and disabling interrupts, since otherwise userspace could
   18.79 -       bring another cpu online, and then we'd be stuffed.  At the
   18.80 -       same time, cpu_down can reschedule, so we need to enable
   18.81 -       preemption while doing that.  This kind of sucks, but should be
   18.82 -       correct. */
   18.83 -    /* (We don't need to worry about other cpus bringing stuff up,
   18.84 -       since by the time num_online_cpus() == 1, there aren't any
   18.85 -       other cpus) */
   18.86 -    cpus_clear(prev_online_cpus);
   18.87 -    while (num_online_cpus() > 1) {
   18.88 -	preempt_enable();
   18.89 -	for_each_online_cpu(i) {
   18.90 -	    if (i == 0)
   18.91 -		continue;
   18.92 -	    err = cpu_down(i);
   18.93 -	    if (err != 0) {
   18.94 -		printk(KERN_CRIT "Failed to take all CPUs down: %d.\n", err);
   18.95 -		goto out_reenable_cpus;
   18.96 -	    }
   18.97 -	    cpu_set(i, prev_online_cpus);
   18.98 -	}
   18.99 -	preempt_disable();
  18.100 -    }
  18.101 -#endif
  18.102 -
  18.103 -    __cli();
  18.104 -
  18.105 -    preempt_enable();
  18.106 +	extern void time_resume(void);
  18.107 +	extern unsigned long max_pfn;
  18.108 +	extern unsigned long *pfn_to_mfn_frame_list_list;
  18.109 +	extern unsigned long *pfn_to_mfn_frame_list[];
  18.110  
  18.111  #ifdef CONFIG_SMP
  18.112 -    cpus_clear(prev_present_cpus);
  18.113 -    for_each_present_cpu(i) {
  18.114 -	if (i == 0)
  18.115 -	    continue;
  18.116 -	save_vcpu_context(i, &suspended_cpu_records[i]);
  18.117 -	cpu_set(i, prev_present_cpus);
  18.118 -    }
  18.119 +	static vcpu_guest_context_t suspended_cpu_records[NR_CPUS];
  18.120 +	cpumask_t prev_online_cpus, prev_present_cpus;
  18.121 +
  18.122 +	void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
  18.123 +	int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
  18.124 +#endif
  18.125 +
  18.126 +	extern void xencons_resume(void);
  18.127 +
  18.128 +	int err = 0;
  18.129 +
  18.130 +	BUG_ON(smp_processor_id() != 0);
  18.131 +	BUG_ON(in_interrupt());
  18.132 +
  18.133 +#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
  18.134 +	if (num_online_cpus() > 1) {
  18.135 +		printk(KERN_WARNING "Can't suspend SMP guests "
  18.136 +		       "without CONFIG_HOTPLUG_CPU\n");
  18.137 +		return -EOPNOTSUPP;
  18.138 +	}
  18.139  #endif
  18.140  
  18.141 -#ifdef __i386__
  18.142 -    mm_pin_all();
  18.143 -    kmem_cache_shrink(pgd_cache);
  18.144 -#endif
  18.145 +	xenbus_suspend();
  18.146  
  18.147 -    time_suspend();
  18.148 -
  18.149 +	preempt_disable();
  18.150  #ifdef CONFIG_SMP
  18.151 -    smp_suspend();
  18.152 +	/* Take all of the other cpus offline.  We need to be careful not
  18.153 +	   to get preempted between the final test for num_online_cpus()
  18.154 +	   == 1 and disabling interrupts, since otherwise userspace could
  18.155 +	   bring another cpu online, and then we'd be stuffed.  At the
  18.156 +	   same time, cpu_down can reschedule, so we need to enable
  18.157 +	   preemption while doing that.  This kind of sucks, but should be
  18.158 +	   correct. */
  18.159 +	/* (We don't need to worry about other cpus bringing stuff up,
  18.160 +	   since by the time num_online_cpus() == 1, there aren't any
  18.161 +	   other cpus) */
  18.162 +	cpus_clear(prev_online_cpus);
  18.163 +	while (num_online_cpus() > 1) {
  18.164 +		preempt_enable();
  18.165 +		for_each_online_cpu(i) {
  18.166 +			if (i == 0)
  18.167 +				continue;
  18.168 +			err = cpu_down(i);
  18.169 +			if (err != 0) {
  18.170 +				printk(KERN_CRIT "Failed to take all CPUs "
  18.171 +				       "down: %d.\n", err);
  18.172 +				goto out_reenable_cpus;
  18.173 +			}
  18.174 +			cpu_set(i, prev_online_cpus);
  18.175 +		}
  18.176 +		preempt_disable();
  18.177 +	}
  18.178  #endif
  18.179  
  18.180 -    xenbus_suspend();
  18.181 +	__cli();
  18.182  
  18.183 -    xencons_suspend();
  18.184 -
  18.185 -    irq_suspend();
  18.186 +	preempt_enable();
  18.187  
  18.188 -    gnttab_suspend();
  18.189 +#ifdef CONFIG_SMP
  18.190 +	cpus_clear(prev_present_cpus);
  18.191 +	for_each_present_cpu(i) {
  18.192 +		if (i == 0)
  18.193 +			continue;
  18.194 +		save_vcpu_context(i, &suspended_cpu_records[i]);
  18.195 +		cpu_set(i, prev_present_cpus);
  18.196 +	}
  18.197 +#endif
  18.198  
  18.199 -    HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
  18.200 -    clear_fixmap(FIX_SHARED_INFO);
  18.201 -
  18.202 -    xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
  18.203 -    xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);
  18.204 +	gnttab_suspend();
  18.205  
  18.206 -    /* We'll stop somewhere inside this hypercall.  When it returns,
  18.207 -       we'll start resuming after the restore. */
  18.208 -    HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
  18.209 +#ifdef __i386__
  18.210 +	mm_pin_all();
  18.211 +	kmem_cache_shrink(pgd_cache);
  18.212 +#endif
  18.213  
  18.214 -    shutting_down = SHUTDOWN_INVALID; 
  18.215 +	HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
  18.216 +	clear_fixmap(FIX_SHARED_INFO);
  18.217 +
  18.218 +	xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
  18.219 +	xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);
  18.220  
  18.221 -    set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
  18.222 +	/* We'll stop somewhere inside this hypercall.  When it returns,
  18.223 +	   we'll start resuming after the restore. */
  18.224 +	HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
  18.225  
  18.226 -    HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
  18.227 +	shutting_down = SHUTDOWN_INVALID; 
  18.228  
  18.229 -    memset(empty_zero_page, 0, PAGE_SIZE);
  18.230 +	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
  18.231 +
  18.232 +	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
  18.233 +
  18.234 +	memset(empty_zero_page, 0, PAGE_SIZE);
  18.235  	     
  18.236 -    HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
  18.237 +	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
  18.238  		virt_to_mfn(pfn_to_mfn_frame_list_list);
  18.239    
  18.240 -    fpp = PAGE_SIZE/sizeof(unsigned long);
  18.241 -    for ( i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++ )
  18.242 -    {
  18.243 -	if ( (j % fpp) == 0 )
  18.244 -	{
  18.245 -	    k++;
  18.246 -	    pfn_to_mfn_frame_list_list[k] = 
  18.247 -		    virt_to_mfn(pfn_to_mfn_frame_list[k]);
  18.248 -	    j=0;
  18.249 +	fpp = PAGE_SIZE/sizeof(unsigned long);
  18.250 +	for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
  18.251 +		if ((j % fpp) == 0) {
  18.252 +			k++;
  18.253 +			pfn_to_mfn_frame_list_list[k] = 
  18.254 +				virt_to_mfn(pfn_to_mfn_frame_list[k]);
  18.255 +			j = 0;
  18.256 +		}
  18.257 +		pfn_to_mfn_frame_list[k][j] = 
  18.258 +			virt_to_mfn(&phys_to_machine_mapping[i]);
  18.259  	}
  18.260 -	pfn_to_mfn_frame_list[k][j] = 
  18.261 -		virt_to_mfn(&phys_to_machine_mapping[i]);
  18.262 -    }
  18.263 -    HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
  18.264 +	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
  18.265 +
  18.266 +	gnttab_resume();
  18.267  
  18.268 -    gnttab_resume();
  18.269 -
  18.270 -    irq_resume();
  18.271 +	irq_resume();
  18.272  
  18.273 -    xencons_resume();
  18.274 -
  18.275 -    xenbus_resume();
  18.276 +	time_resume();
  18.277  
  18.278  #ifdef CONFIG_SMP
  18.279 -    smp_resume();
  18.280 +	for_each_cpu_mask(i, prev_present_cpus)
  18.281 +		restore_vcpu_context(i, &suspended_cpu_records[i]);
  18.282  #endif
  18.283  
  18.284 -    time_resume();
  18.285 -
  18.286 -    usbif_resume();
  18.287 +	__sti();
  18.288  
  18.289 -#ifdef CONFIG_SMP
  18.290 -    for_each_cpu_mask(i, prev_present_cpus)
  18.291 -	restore_vcpu_context(i, &suspended_cpu_records[i]);
  18.292 -#endif
  18.293 +	xencons_resume();
  18.294  
  18.295 -    __sti();
  18.296 +	xenbus_resume();
  18.297  
  18.298  #ifdef CONFIG_SMP
  18.299   out_reenable_cpus:
  18.300 -    for_each_cpu_mask(i, prev_online_cpus) {
  18.301 -	j = cpu_up(i);
  18.302 -	if (j != 0) {
  18.303 -	    printk(KERN_CRIT "Failed to bring cpu %d back up (%d).\n",
  18.304 -		   i, j);
  18.305 -	    err = j;
  18.306 +	for_each_cpu_mask(i, prev_online_cpus) {
  18.307 +		j = cpu_up(i);
  18.308 +		if (j != 0) {
  18.309 +			printk(KERN_CRIT "Failed to bring cpu "
  18.310 +			       "%d back up (%d).\n",
  18.311 +			       i, j);
  18.312 +			err = j;
  18.313 +		}
  18.314  	}
  18.315 -    }
  18.316  #endif
  18.317  
  18.318 -    return err;
  18.319 +	return err;
  18.320  }
  18.321  
  18.322  static int shutdown_process(void *__unused)
  18.323  {
  18.324 -    static char *envp[] = { "HOME=/", "TERM=linux", 
  18.325 -                            "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
  18.326 -    static char *restart_argv[]  = { "/sbin/reboot", NULL };
  18.327 -    static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
  18.328 +	static char *envp[] = { "HOME=/", "TERM=linux", 
  18.329 +				"PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
  18.330 +	static char *restart_argv[]  = { "/sbin/reboot", NULL };
  18.331 +	static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
  18.332  
  18.333 -    extern asmlinkage long sys_reboot(int magic1, int magic2,
  18.334 -                                      unsigned int cmd, void *arg);
  18.335 +	extern asmlinkage long sys_reboot(int magic1, int magic2,
  18.336 +					  unsigned int cmd, void *arg);
  18.337  
  18.338 -    daemonize(
  18.339 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  18.340 -        "shutdown"
  18.341 -#endif
  18.342 -        );
  18.343 +	daemonize("shutdown");
  18.344  
  18.345 -    switch ( shutting_down )
  18.346 -    {
  18.347 -    case SHUTDOWN_POWEROFF:
  18.348 -        if ( execve("/sbin/poweroff", poweroff_argv, envp) < 0 )
  18.349 -        {
  18.350 -            sys_reboot(LINUX_REBOOT_MAGIC1,
  18.351 -                       LINUX_REBOOT_MAGIC2,
  18.352 -                       LINUX_REBOOT_CMD_POWER_OFF,
  18.353 -                       NULL);
  18.354 -        }
  18.355 -        break;
  18.356 +	switch (shutting_down) {
  18.357 +	case SHUTDOWN_POWEROFF:
  18.358 +		if (execve("/sbin/poweroff", poweroff_argv, envp) < 0) {
  18.359 +			sys_reboot(LINUX_REBOOT_MAGIC1,
  18.360 +				   LINUX_REBOOT_MAGIC2,
  18.361 +				   LINUX_REBOOT_CMD_POWER_OFF,
  18.362 +				   NULL);
  18.363 +		}
  18.364 +		break;
  18.365  
  18.366 -    case SHUTDOWN_REBOOT:
  18.367 -        if ( execve("/sbin/reboot", restart_argv, envp) < 0 )
  18.368 -        {
  18.369 -            sys_reboot(LINUX_REBOOT_MAGIC1,
  18.370 -                       LINUX_REBOOT_MAGIC2,
  18.371 -                       LINUX_REBOOT_CMD_RESTART,
  18.372 -                       NULL);
  18.373 -        }
  18.374 -        break;
  18.375 -    }
  18.376 +	case SHUTDOWN_REBOOT:
  18.377 +		if (execve("/sbin/reboot", restart_argv, envp) < 0) {
  18.378 +			sys_reboot(LINUX_REBOOT_MAGIC1,
  18.379 +				   LINUX_REBOOT_MAGIC2,
  18.380 +				   LINUX_REBOOT_CMD_RESTART,
  18.381 +				   NULL);
  18.382 +		}
  18.383 +		break;
  18.384 +	}
  18.385  
  18.386 -    shutting_down = SHUTDOWN_INVALID; /* could try again */
  18.387 +	shutting_down = SHUTDOWN_INVALID; /* could try again */
  18.388  
  18.389 -    return 0;
  18.390 +	return 0;
  18.391  }
  18.392  
  18.393  static struct task_struct *kthread_create_on_cpu(int (*f)(void *arg),
  18.394 @@ -294,113 +258,109 @@ static struct task_struct *kthread_creat
  18.395  						 const char *name,
  18.396  						 int cpu)
  18.397  {
  18.398 -    struct task_struct *p;
  18.399 -    p = kthread_create(f, arg, name);
  18.400 -    kthread_bind(p, cpu);
  18.401 -    wake_up_process(p);
  18.402 -    return p;
  18.403 +	struct task_struct *p;
  18.404 +	p = kthread_create(f, arg, name);
  18.405 +	kthread_bind(p, cpu);
  18.406 +	wake_up_process(p);
  18.407 +	return p;
  18.408  }
  18.409  
  18.410  static void __shutdown_handler(void *unused)
  18.411  {
  18.412 -    int err;
  18.413 +	int err;
  18.414  
  18.415 -    if ( shutting_down != SHUTDOWN_SUSPEND )
  18.416 -    {
  18.417 -        err = kernel_thread(shutdown_process, NULL, CLONE_FS | CLONE_FILES);
  18.418 -        if ( err < 0 )
  18.419 -            printk(KERN_ALERT "Error creating shutdown process!\n");
  18.420 -    }
  18.421 -    else
  18.422 -    {
  18.423 -	kthread_create_on_cpu(__do_suspend, NULL, "suspender", 0);
  18.424 -    }
  18.425 +	if (shutting_down != SHUTDOWN_SUSPEND) {
  18.426 +		err = kernel_thread(shutdown_process, NULL,
  18.427 +				    CLONE_FS | CLONE_FILES);
  18.428 +		if (err < 0)
  18.429 +			printk(KERN_ALERT "Error creating shutdown "
  18.430 +			       "process!\n");
  18.431 +	} else {
  18.432 +		kthread_create_on_cpu(__do_suspend, NULL, "suspender", 0);
  18.433 +	}
  18.434  }
  18.435  
  18.436  static void shutdown_handler(struct xenbus_watch *watch, const char *node)
  18.437  {
  18.438 -    static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
  18.439 -    char *str;
  18.440 -    int err;
  18.441 +	static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
  18.442 +	char *str;
  18.443 +	int err;
  18.444  
  18.445   again:
  18.446 -    err = xenbus_transaction_start();
  18.447 -    if (err)
  18.448 -	return;
  18.449 -    str = (char *)xenbus_read("control", "shutdown", NULL);
  18.450 -    /* Ignore read errors and empty reads. */
  18.451 -    if (XENBUS_IS_ERR_READ(str)) {
  18.452 -	xenbus_transaction_end(1);
  18.453 -	return;
  18.454 -    }
  18.455 +	err = xenbus_transaction_start();
  18.456 +	if (err)
  18.457 +		return;
  18.458 +	str = (char *)xenbus_read("control", "shutdown", NULL);
  18.459 +	/* Ignore read errors and empty reads. */
  18.460 +	if (XENBUS_IS_ERR_READ(str)) {
  18.461 +		xenbus_transaction_end(1);
  18.462 +		return;
  18.463 +	}
  18.464 +
  18.465 +	xenbus_write("control", "shutdown", "");
  18.466  
  18.467 -    xenbus_write("control", "shutdown", "");
  18.468 +	err = xenbus_transaction_end(0);
  18.469 +	if (err == -EAGAIN) {
  18.470 +		kfree(str);
  18.471 +		goto again;
  18.472 +	}
  18.473  
  18.474 -    err = xenbus_transaction_end(0);
  18.475 -    if (err == -EAGAIN) {
  18.476 +	if (strcmp(str, "poweroff") == 0)
  18.477 +		shutting_down = SHUTDOWN_POWEROFF;
  18.478 +	else if (strcmp(str, "reboot") == 0)
  18.479 +		shutting_down = SHUTDOWN_REBOOT;
  18.480 +	else if (strcmp(str, "suspend") == 0)
  18.481 +		shutting_down = SHUTDOWN_SUSPEND;
  18.482 +	else {
  18.483 +		printk("Ignoring shutdown request: %s\n", str);
  18.484 +		shutting_down = SHUTDOWN_INVALID;
  18.485 +	}
  18.486 +
  18.487  	kfree(str);
  18.488 -	goto again;
  18.489 -    }
  18.490  
  18.491 -    if (strcmp(str, "poweroff") == 0)
  18.492 -        shutting_down = SHUTDOWN_POWEROFF;
  18.493 -    else if (strcmp(str, "reboot") == 0)
  18.494 -        shutting_down = SHUTDOWN_REBOOT;
  18.495 -    else if (strcmp(str, "suspend") == 0)
  18.496 -        shutting_down = SHUTDOWN_SUSPEND;
  18.497 -    else {
  18.498 -        printk("Ignoring shutdown request: %s\n", str);
  18.499 -        shutting_down = SHUTDOWN_INVALID;
  18.500 -    }
  18.501 -
  18.502 -    kfree(str);
  18.503 -
  18.504 -    if (shutting_down != SHUTDOWN_INVALID)
  18.505 -        schedule_work(&shutdown_work);
  18.506 +	if (shutting_down != SHUTDOWN_INVALID)
  18.507 +		schedule_work(&shutdown_work);
  18.508  }
  18.509  
  18.510  #ifdef CONFIG_MAGIC_SYSRQ
  18.511  static void sysrq_handler(struct xenbus_watch *watch, const char *node)
  18.512  {
  18.513 -    char sysrq_key = '\0';
  18.514 -    int err;
  18.515 +	char sysrq_key = '\0';
  18.516 +	int err;
  18.517  
  18.518   again:
  18.519 -    err = xenbus_transaction_start();
  18.520 -    if (err)
  18.521 -	return;
  18.522 -    if (!xenbus_scanf("control", "sysrq", "%c", &sysrq_key)) {
  18.523 -        printk(KERN_ERR "Unable to read sysrq code in control/sysrq\n");
  18.524 -	xenbus_transaction_end(1);
  18.525 -	return;
  18.526 -    }
  18.527 -
  18.528 -    if (sysrq_key != '\0')
  18.529 -	xenbus_printf("control", "sysrq", "%c", '\0');
  18.530 +	err = xenbus_transaction_start();
  18.531 +	if (err)
  18.532 +		return;
  18.533 +	if (!xenbus_scanf("control", "sysrq", "%c", &sysrq_key)) {
  18.534 +		printk(KERN_ERR "Unable to read sysrq code in "
  18.535 +		       "control/sysrq\n");
  18.536 +		xenbus_transaction_end(1);
  18.537 +		return;
  18.538 +	}
  18.539  
  18.540 -    err = xenbus_transaction_end(0);
  18.541 -    if (err == -EAGAIN)
  18.542 -	goto again;
  18.543 +	if (sysrq_key != '\0')
  18.544 +		xenbus_printf("control", "sysrq", "%c", '\0');
  18.545  
  18.546 -    if (sysrq_key != '\0') {
  18.547 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
  18.548 -        handle_sysrq(sysrq_key, NULL, NULL);
  18.549 -#else
  18.550 -        handle_sysrq(sysrq_key, NULL, NULL, NULL);
  18.551 -#endif
  18.552 -    }
  18.553 +	err = xenbus_transaction_end(0);
  18.554 +	if (err == -EAGAIN)
  18.555 +		goto again;
  18.556 +
  18.557 +	if (sysrq_key != '\0') {
  18.558 +		handle_sysrq(sysrq_key, NULL, NULL);
  18.559 +	}
  18.560  }
  18.561  #endif
  18.562  
  18.563  static struct xenbus_watch shutdown_watch = {
  18.564 -    .node = "control/shutdown",
  18.565 -    .callback = shutdown_handler
  18.566 +	.node = "control/shutdown",
  18.567 +	.callback = shutdown_handler
  18.568  };
  18.569  
  18.570  #ifdef CONFIG_MAGIC_SYSRQ
  18.571  static struct xenbus_watch sysrq_watch = {
  18.572 -    .node ="control/sysrq",
  18.573 -    .callback = sysrq_handler
  18.574 +	.node ="control/sysrq",
  18.575 +	.callback = sysrq_handler
  18.576  };
  18.577  #endif
  18.578  
  18.579 @@ -413,39 +373,50 @@ static int setup_shutdown_watcher(struct
  18.580                                    unsigned long event,
  18.581                                    void *data)
  18.582  {
  18.583 -    int err1 = 0;
  18.584 +	int err1 = 0;
  18.585  #ifdef CONFIG_MAGIC_SYSRQ
  18.586 -    int err2 = 0;
  18.587 +	int err2 = 0;
  18.588  #endif
  18.589  
  18.590 -    BUG_ON(down_trylock(&xenbus_lock) == 0);
  18.591 +	BUG_ON(down_trylock(&xenbus_lock) == 0);
  18.592  
  18.593 -    err1 = register_xenbus_watch(&shutdown_watch);
  18.594 +	err1 = register_xenbus_watch(&shutdown_watch);
  18.595  #ifdef CONFIG_MAGIC_SYSRQ
  18.596 -    err2 = register_xenbus_watch(&sysrq_watch);
  18.597 +	err2 = register_xenbus_watch(&sysrq_watch);
  18.598  #endif
  18.599  
  18.600 -    if (err1) {
  18.601 -        printk(KERN_ERR "Failed to set shutdown watcher\n");
  18.602 -    }
  18.603 +	if (err1) {
  18.604 +		printk(KERN_ERR "Failed to set shutdown watcher\n");
  18.605 +	}
  18.606      
  18.607  #ifdef CONFIG_MAGIC_SYSRQ
  18.608 -    if (err2) {
  18.609 -        printk(KERN_ERR "Failed to set sysrq watcher\n");
  18.610 -    }
  18.611 +	if (err2) {
  18.612 +		printk(KERN_ERR "Failed to set sysrq watcher\n");
  18.613 +	}
  18.614  #endif
  18.615  
  18.616 -    return NOTIFY_DONE;
  18.617 +	return NOTIFY_DONE;
  18.618  }
  18.619  
  18.620  static int __init setup_shutdown_event(void)
  18.621  {
  18.622      
  18.623 -    xenstore_notifier.notifier_call = setup_shutdown_watcher;
  18.624 +	xenstore_notifier.notifier_call = setup_shutdown_watcher;
  18.625  
  18.626 -    register_xenstore_notifier(&xenstore_notifier);
  18.627 +	register_xenstore_notifier(&xenstore_notifier);
  18.628      
  18.629 -    return 0;
  18.630 +	return 0;
  18.631  }
  18.632  
  18.633  subsys_initcall(setup_shutdown_event);
  18.634 +
  18.635 +/*
  18.636 + * Local variables:
  18.637 + *  c-file-style: "linux"
  18.638 + *  indent-tabs-mode: t
  18.639 + *  c-indent-level: 8
  18.640 + *  c-basic-offset: 8
  18.641 + *  tab-width: 8
  18.642 + * End:
  18.643 + */
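Both control paths in this file follow the same shape: register a xenbus watch, and in the callback read and clear the key (in the real handlers this happens inside a transaction that is retried on -EAGAIN). A bare-bones sketch of the watch half, with a placeholder node name and handler and without the transaction:

	static void example_handler(struct xenbus_watch *watch,
	                            const char *node)
	{
		char *val = (char *)xenbus_read("control", "example", NULL);
		if (XENBUS_IS_ERR_READ(val))
			return;
		/* ...act on 'val', e.g. set a flag and schedule_work()... */
		kfree(val);
	}

	static struct xenbus_watch example_watch = {
		.node     = "control/example",
		.callback = example_handler
	};

	/* Registered from the xenstore notifier, with xenbus_lock held,
	 * just as setup_shutdown_watcher() does above. */
	register_xenbus_watch(&example_watch);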
    19.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/smp.c	Thu Sep 29 16:22:02 2005 -0600
    19.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/smp.c	Thu Sep 29 17:28:44 2005 -0600
    19.3 @@ -11,6 +11,15 @@
    19.4  int setup_profiling_timer(unsigned int multiplier)
    19.5  {
    19.6  	printk("setup_profiling_timer\n");
    19.7 -
    19.8  	return 0;
    19.9  }
   19.10 +
   19.11 +/*
   19.12 + * Local variables:
   19.13 + *  c-file-style: "linux"
   19.14 + *  indent-tabs-mode: t
   19.15 + *  c-indent-level: 8
   19.16 + *  c-basic-offset: 8
   19.17 + *  tab-width: 8
   19.18 + * End:
   19.19 + */
    20.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c	Thu Sep 29 16:22:02 2005 -0600
    20.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c	Thu Sep 29 17:28:44 2005 -0600
    20.3 @@ -6,13 +6,23 @@ static struct proc_dir_entry *xen_base;
    20.4  
    20.5  struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
    20.6  {
    20.7 -    if ( xen_base == NULL )
    20.8 -        if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
    20.9 -            panic("Couldn't create /proc/xen");
   20.10 -    return create_proc_entry(name, mode, xen_base);
   20.11 +	if (xen_base == NULL)
   20.12 +		if ((xen_base = proc_mkdir("xen", &proc_root)) == NULL)
   20.13 +			panic("Couldn't create /proc/xen");
   20.14 +	return create_proc_entry(name, mode, xen_base);
   20.15  }
   20.16  
   20.17  void remove_xen_proc_entry(const char *name)
   20.18  {
   20.19 -    remove_proc_entry(name, xen_base);
   20.20 +	remove_proc_entry(name, xen_base);
   20.21  }
   20.22 +
   20.23 +/*
   20.24 + * Local variables:
   20.25 + *  c-file-style: "linux"
   20.26 + *  indent-tabs-mode: t
   20.27 + *  c-indent-level: 8
   20.28 + *  c-basic-offset: 8
   20.29 + *  tab-width: 8
   20.30 + * End:
   20.31 + */
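create_xen_proc_entry() lazily creates the /proc/xen directory and returns an ordinary proc_dir_entry, exactly as gnttab.c uses it for /proc/xen/grant. In outline, with placeholder names:

	struct proc_dir_entry *pde = create_xen_proc_entry("example", 0400);
	if (pde == NULL)
		return -ENOMEM;
	pde->read_proc  = example_read;		/* hypothetical handlers */
	pde->write_proc = example_write;
	...
	remove_xen_proc_entry("example");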
    21.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/entry.S	Thu Sep 29 16:22:02 2005 -0600
    21.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/entry.S	Thu Sep 29 17:28:44 2005 -0600
    21.3 @@ -536,7 +536,7 @@ retint_kernel:
    21.4  	CFI_ENDPROC
    21.5  	.endm
    21.6  
    21.7 -#ifdef CONFIG_SMP	
    21.8 +#if 0
    21.9  ENTRY(reschedule_interrupt)
   21.10  	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
   21.11  
    22.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic_xen.c	Thu Sep 29 16:22:02 2005 -0600
    22.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic_xen.c	Thu Sep 29 17:28:44 2005 -0600
    22.3 @@ -31,14 +31,9 @@ DECLARE_PER_CPU(int, ipi_to_evtchn[NR_IP
    22.4  
    22.5  static inline void __send_IPI_one(unsigned int cpu, int vector)
    22.6  {
    22.7 -	unsigned int evtchn;
    22.8 -	Dprintk("%s\n", __FUNCTION__);
    22.9 -
   22.10 -	evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
   22.11 -	if (evtchn)
   22.12 -		notify_via_evtchn(evtchn);
   22.13 -	else
   22.14 -		printk("send_IPI to unbound port %d/%d", cpu, vector);
   22.15 +	int evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
   22.16 +	BUG_ON(evtchn < 0);
   22.17 +	notify_remote_via_evtchn(evtchn);
   22.18  }
   22.19  
   22.20  void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
    23.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c	Thu Sep 29 16:22:02 2005 -0600
    23.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c	Thu Sep 29 17:28:44 2005 -0600
    23.3 @@ -771,11 +771,14 @@ void __init setup_arch(char **cmdline_p)
    23.4  				virt_to_mfn(&phys_to_machine_mapping[i]);
    23.5  		}
    23.6  		HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
    23.7 -		
    23.8 -		
    23.9 +	}
   23.10  
   23.11 -
   23.12 -
   23.13 +	if (!(xen_start_info->flags & SIF_INITDOMAIN))
   23.14 +	{
   23.15 +		acpi_disabled = 1;
   23.16 +#ifdef CONFIG_ACPI_BOOT
   23.17 +		acpi_ht = 0;
   23.18 +#endif
   23.19  	}
   23.20  #endif
   23.21  
    24.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c	Thu Sep 29 16:22:02 2005 -0600
    24.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/smpboot.c	Thu Sep 29 17:28:44 2005 -0600
    24.3 @@ -445,25 +445,6 @@ void __cpuinit smp_callin(void)
    24.4  }
    24.5  
    24.6  #ifdef CONFIG_XEN
    24.7 -static irqreturn_t ldebug_interrupt(
    24.8 -	int irq, void *dev_id, struct pt_regs *regs)
    24.9 -{
   24.10 -	return IRQ_HANDLED;
   24.11 -}
   24.12 -
   24.13 -static DEFINE_PER_CPU(int, ldebug_irq);
   24.14 -static char ldebug_name[NR_CPUS][15];
   24.15 -
   24.16 -void ldebug_setup(void)
   24.17 -{
   24.18 -	int cpu = smp_processor_id();
   24.19 -
   24.20 -	per_cpu(ldebug_irq, cpu) = bind_virq_to_irq(VIRQ_DEBUG);
   24.21 -	sprintf(ldebug_name[cpu], "ldebug%d", cpu);
   24.22 -	BUG_ON(request_irq(per_cpu(ldebug_irq, cpu), ldebug_interrupt,
   24.23 -	                   SA_INTERRUPT, ldebug_name[cpu], NULL));
   24.24 -}
   24.25 -
   24.26  extern void local_setup_timer(void);
   24.27  #endif
   24.28  
   24.29 @@ -498,7 +479,6 @@ void __cpuinit start_secondary(void)
   24.30  	enable_APIC_timer();
   24.31  #else
   24.32  	local_setup_timer();
   24.33 -	ldebug_setup();
   24.34  	smp_intr_init();
   24.35  	local_irq_enable();
   24.36  #endif
    25.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c	Thu Sep 29 16:22:02 2005 -0600
    25.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c	Thu Sep 29 17:28:44 2005 -0600
    25.3 @@ -149,7 +149,9 @@ void dump_pagetable(unsigned long addres
    25.4  	pmd_t *pmd;
    25.5  	pte_t *pte;
    25.6  
    25.7 +	preempt_disable();
    25.8  	pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
    25.9 +	preempt_enable();
   25.10  	pgd += pgd_index(address);
   25.11  
   25.12  	printk("PGD %lx ", pgd_val(*pgd));
   25.13 @@ -252,7 +254,9 @@ static int vmalloc_fault(unsigned long a
   25.14  
   25.15  	/* On Xen the line below does not always work. Needs investigating! */
   25.16  	/*pgd = pgd_offset(current->mm ?: &init_mm, address);*/
   25.17 +	preempt_disable();
   25.18  	pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
   25.19 +	preempt_enable();
   25.20  	pgd += pgd_index(address);
   25.21  
   25.22  	pgd_ref = pgd_offset_k(address);
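
The preempt_disable()/preempt_enable() pair added above pins the task to one CPU only for the per-CPU lookup itself, so the pgd read and the smp_processor_id() call cannot be split by a migration. A standalone sketch of the idiom; read_cur_pgd is a hypothetical helper, while cur_pgd, per_cpu() and pgd_index() are as used in the hunks:

/* Illustrative helper: fetch this CPU's page-directory entry without
 * racing a migration between smp_processor_id() and the per_cpu read. */
static pgd_t *read_cur_pgd(unsigned long address)
{
	pgd_t *pgd;

	preempt_disable();
	pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
	preempt_enable();

	return pgd + pgd_index(address);
}
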
    26.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Thu Sep 29 16:22:02 2005 -0600
    26.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Thu Sep 29 17:28:44 2005 -0600
    26.3 @@ -481,7 +481,7 @@ static void make_response(blkif_t *blkif
    26.4  	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
    26.5  
    26.6  	/* Kick the relevant domain. */
    26.7 -	notify_via_evtchn(blkif->evtchn);
    26.8 +	notify_remote_via_irq(blkif->irq);
    26.9  }
   26.10  
   26.11  void blkif_deschedule(blkif_t *blkif)
    27.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/common.h	Thu Sep 29 16:22:02 2005 -0600
    27.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/common.h	Thu Sep 29 17:28:44 2005 -0600
    27.3 @@ -44,7 +44,7 @@ typedef struct blkif_st {
    27.4  	unsigned int      handle;
    27.5  	/* Physical parameters of the comms window. */
    27.6  	unsigned int      evtchn;
    27.7 -	unsigned int      remote_evtchn;
    27.8 +	unsigned int      irq;
    27.9  	/* Comms information. */
   27.10  	blkif_back_ring_t blk_ring;
   27.11  	struct vm_struct *blk_ring_area;
    28.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c	Thu Sep 29 16:22:02 2005 -0600
    28.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c	Thu Sep 29 17:28:44 2005 -0600
    28.3 @@ -71,8 +71,6 @@ int blkif_map(blkif_t *blkif, unsigned l
    28.4  	evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
    28.5  	int err;
    28.6  
    28.7 -	BUG_ON(blkif->remote_evtchn);
    28.8 -
    28.9  	if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
   28.10  		return -ENOMEM;
   28.11  
   28.12 @@ -94,13 +92,12 @@ int blkif_map(blkif_t *blkif, unsigned l
   28.13  	}
   28.14  
   28.15  	blkif->evtchn = op.u.bind_interdomain.port1;
   28.16 -	blkif->remote_evtchn = evtchn;
   28.17  
   28.18  	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
   28.19  	SHARED_RING_INIT(sring);
   28.20  	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
   28.21  
   28.22 -	bind_evtchn_to_irqhandler(
   28.23 +	blkif->irq = bind_evtchn_to_irqhandler(
   28.24  		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
   28.25  	blkif->status = CONNECTED;
   28.26  
   28.27 @@ -109,21 +106,13 @@ int blkif_map(blkif_t *blkif, unsigned l
   28.28  
   28.29  static void free_blkif(void *arg)
   28.30  {
   28.31 -	evtchn_op_t op = { .cmd = EVTCHNOP_close };
   28.32  	blkif_t *blkif = (blkif_t *)arg;
   28.33  
   28.34 -	op.u.close.port = blkif->evtchn;
   28.35 -	op.u.close.dom = DOMID_SELF;
   28.36 -	HYPERVISOR_event_channel_op(&op);
   28.37 -	op.u.close.port = blkif->remote_evtchn;
   28.38 -	op.u.close.dom = blkif->domid;
   28.39 -	HYPERVISOR_event_channel_op(&op);
   28.40 +	if (blkif->irq)
   28.41 +		unbind_evtchn_from_irqhandler(blkif->irq, blkif);
   28.42  
   28.43  	vbd_free(&blkif->vbd);
   28.44  
   28.45 -	if (blkif->evtchn)
   28.46 -		unbind_evtchn_from_irqhandler(blkif->evtchn, blkif);
   28.47 -
   28.48  	if (blkif->blk_ring.sring) {
   28.49  		unmap_frontend_page(blkif);
   28.50  		free_vm_area(blkif->blk_ring_area);
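
With the hunks above, the backend no longer issues EVTCHNOP_close itself: the IRQ handle returned by bind_evtchn_to_irqhandler() is kept in blkif->irq, and unbinding that IRQ also closes the event channel. A condensed sketch; backend_connect/backend_disconnect are hypothetical wrappers around the calls shown above:

/* Illustrative connect/disconnect for a backend that keeps evtchn+irq,
 * as blkif_st now does. */
static int backend_connect(blkif_t *blkif)
{
	blkif->irq = bind_evtchn_to_irqhandler(
		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
	return (blkif->irq > 0) ? 0 : blkif->irq;
}

static void backend_disconnect(blkif_t *blkif)
{
	if (blkif->irq) {
		/* Frees the handler and closes the event channel. */
		unbind_evtchn_from_irqhandler(blkif->irq, blkif);
		blkif->irq = 0;
	}
}
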
    29.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c	Thu Sep 29 16:22:02 2005 -0600
    29.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c	Thu Sep 29 17:28:44 2005 -0600
    29.3 @@ -80,6 +80,15 @@ static void frontend_changed(struct xenb
    29.4  		return;
    29.5  	}
    29.6  
    29.7 +	/* Map the shared frame, irq etc. */
    29.8 +	err = blkif_map(be->blkif, ring_ref, evtchn);
    29.9 +	if (err) {
   29.10 +		xenbus_dev_error(be->dev, err, "mapping ring-ref %lu port %u",
   29.11 +				 ring_ref, evtchn);
   29.12 +		return;
   29.13 +	}
   29.14 +	/* XXX From here on we should call 'blkif_unmap' on error. */
   29.15 +
   29.16  again:
   29.17  	/* Supply the information about the device the frontend needs */
   29.18  	err = xenbus_transaction_start();
   29.19 @@ -112,14 +121,6 @@ again:
   29.20  		goto abort;
   29.21  	}
   29.22  
   29.23 -	/* Map the shared frame, irq etc. */
   29.24 -	err = blkif_map(be->blkif, ring_ref, evtchn);
   29.25 -	if (err) {
   29.26 -		xenbus_dev_error(be->dev, err, "mapping ring-ref %lu port %u",
   29.27 -				 ring_ref, evtchn);
   29.28 -		goto abort;
   29.29 -	}
   29.30 -
   29.31  	err = xenbus_transaction_end(0);
   29.32  	if (err == -EAGAIN)
   29.33  		goto again;
    30.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Thu Sep 29 16:22:02 2005 -0600
    30.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Thu Sep 29 17:28:44 2005 -0600
    30.3 @@ -57,10 +57,7 @@ static unsigned int blkif_state = BLKIF_
    30.4  
    30.5  #define MAXIMUM_OUTSTANDING_BLOCK_REQS \
    30.6      (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLKIF_RING_SIZE)
    30.7 -#define GRANTREF_INVALID (1<<15)
    30.8 -#define GRANT_INVALID_REF	(0xFFFF)
    30.9 -
   30.10 -static int recovery = 0; /* Recovery in progress: protected by blkif_io_lock */
   30.11 +#define GRANT_INVALID_REF	0
   30.12  
   30.13  static void kick_pending_request_queues(struct blkfront_info *info);
   30.14  
   30.15 @@ -84,22 +81,10 @@ static inline void ADD_ID_TO_FREELIST(
   30.16  	info->shadow_free = id;
   30.17  }
   30.18  
   30.19 -static inline void pickle_request(struct blk_shadow *s, blkif_request_t *r)
   30.20 -{
   30.21 -
   30.22 -	s->req = *r;
   30.23 -}
   30.24 -
   30.25 -static inline void unpickle_request(blkif_request_t *r, struct blk_shadow *s)
   30.26 -{
   30.27 -
   30.28 -	*r = s->req;
   30.29 -}
   30.30 -
   30.31  static inline void flush_requests(struct blkfront_info *info)
   30.32  {
   30.33  	RING_PUSH_REQUESTS(&info->ring);
   30.34 -	notify_via_evtchn(info->evtchn);
   30.35 +	notify_remote_via_irq(info->irq);
   30.36  }
   30.37  
   30.38  static void kick_pending_request_queues(struct blkfront_info *info)
   30.39 @@ -235,7 +220,7 @@ static int blkif_queue_request(struct re
   30.40  				rq_data_dir(req) );
   30.41  
   30.42  			info->shadow[id].frame[ring_req->nr_segments] =
   30.43 -				buffer_mfn;
   30.44 +				mfn_to_pfn(buffer_mfn);
   30.45  
   30.46  			ring_req->frame_and_sects[ring_req->nr_segments] =
   30.47  				blkif_fas_from_gref(ref, fsect, lsect);
   30.48 @@ -247,7 +232,7 @@ static int blkif_queue_request(struct re
   30.49  	info->ring.req_prod_pvt++;
   30.50  
   30.51  	/* Keep a private copy so we can reissue requests when recovering. */
   30.52 -	pickle_request(&info->shadow[id], ring_req);
   30.53 +	info->shadow[id].req = *ring_req;
   30.54  
   30.55  	gnttab_free_grant_references(gref_head);
   30.56  
   30.57 @@ -312,7 +297,7 @@ static irqreturn_t blkif_int(int irq, vo
   30.58  
   30.59  	spin_lock_irqsave(&blkif_io_lock, flags);
   30.60  
   30.61 -	if (unlikely(info->connected != BLKIF_STATE_CONNECTED || recovery)) {
   30.62 +	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
   30.63  		spin_unlock_irqrestore(&blkif_io_lock, flags);
   30.64  		return IRQ_HANDLED;
   30.65  	}
   30.66 @@ -372,8 +357,9 @@ static void blkif_free(struct blkfront_i
   30.67  	if (info->ring_ref != GRANT_INVALID_REF)
   30.68  		gnttab_end_foreign_access(info->ring_ref, 0);
   30.69  	info->ring_ref = GRANT_INVALID_REF;
   30.70 -	unbind_evtchn_from_irqhandler(info->evtchn, info); 
   30.71 -	info->evtchn = 0;
   30.72 +	if (info->irq)
   30.73 +		unbind_evtchn_from_irqhandler(info->irq, info); 
   30.74 +	info->evtchn = info->irq = 0;
   30.75  }
   30.76  
   30.77  static void blkif_recover(struct blkfront_info *info)
   30.78 @@ -401,28 +387,24 @@ static void blkif_recover(struct blkfron
   30.79  		if (copy[i].request == 0)
   30.80  			continue;
   30.81  
   30.82 -		/* Grab a request slot and unpickle shadow state into it. */
   30.83 +		/* Grab a request slot and copy shadow state into it. */
   30.84  		req = RING_GET_REQUEST(
   30.85  			&info->ring, info->ring.req_prod_pvt);
   30.86 -		unpickle_request(req, &copy[i]);
   30.87 +		*req = copy[i].req;
   30.88  
   30.89  		/* We get a new request id, and must reset the shadow state. */
   30.90  		req->id = GET_ID_FROM_FREELIST(info);
   30.91  		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
   30.92  
   30.93  		/* Rewrite any grant references invalidated by susp/resume. */
   30.94 -		for (j = 0; j < req->nr_segments; j++) {
   30.95 -			if ( req->frame_and_sects[j] & GRANTREF_INVALID )
   30.96 -				gnttab_grant_foreign_access_ref(
   30.97 -					blkif_gref_from_fas(
   30.98 -						req->frame_and_sects[j]),
   30.99 -					info->backend_id,
  30.100 -					info->shadow[req->id].frame[j],
  30.101 -					rq_data_dir(
  30.102 -						(struct request *)
  30.103 -						info->shadow[req->id].request));
  30.104 -			req->frame_and_sects[j] &= ~GRANTREF_INVALID;
  30.105 -		}
  30.106 +		for (j = 0; j < req->nr_segments; j++)
  30.107 +			gnttab_grant_foreign_access_ref(
  30.108 +				blkif_gref_from_fas(req->frame_and_sects[j]),
  30.109 +				info->backend_id,
  30.110 +				pfn_to_mfn(info->shadow[req->id].frame[j]),
  30.111 +				rq_data_dir(
  30.112 +					(struct request *)
  30.113 +					info->shadow[req->id].request));
  30.114  		info->shadow[req->id].req = *req;
  30.115  
  30.116  		info->ring.req_prod_pvt++;
  30.117 @@ -430,15 +412,13 @@ static void blkif_recover(struct blkfron
  30.118  
  30.119  	kfree(copy);
  30.120  
  30.121 -	recovery = 0;
  30.122 -
  30.123  	/* info->ring->req_prod will be set when we flush_requests().*/
  30.124  	wmb();
  30.125  
  30.126  	/* Kicks things back into life. */
  30.127  	flush_requests(info);
  30.128  
  30.129 -	/* Now safe to left other people use the interface. */
  30.130 +	/* Now safe to let other people use the interface. */
  30.131  	info->connected = BLKIF_STATE_CONNECTED;
  30.132  }
  30.133  
  30.134 @@ -450,10 +430,12 @@ static void blkif_connect(struct blkfron
  30.135  
  30.136  	err = bind_evtchn_to_irqhandler(
  30.137  		info->evtchn, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
  30.138 -	if (err != 0) {
  30.139 +	if (err <= 0) {
  30.140  		WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
  30.141  		return;
  30.142  	}
  30.143 +
  30.144 +	info->irq = err;
  30.145  }
  30.146  
  30.147  
  30.148 @@ -487,8 +469,8 @@ static void watch_for_status(struct xenb
  30.149  		return;
  30.150  	}
  30.151  
  30.152 +	info->connected = BLKIF_STATE_CONNECTED;
  30.153  	xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
  30.154 -	info->connected = BLKIF_STATE_CONNECTED;
  30.155  
  30.156  	blkif_state = BLKIF_STATE_CONNECTED;
  30.157  
  30.158 @@ -591,17 +573,6 @@ again:
  30.159  		goto abort_transaction;
  30.160  	}
  30.161  
  30.162 -	info->backend = backend;
  30.163 -	backend = NULL;
  30.164 -
  30.165 -	info->watch.node = info->backend;
  30.166 -	info->watch.callback = watch_for_status;
  30.167 -	err = register_xenbus_watch(&info->watch);
  30.168 -	if (err) {
  30.169 -		message = "registering watch on backend";
  30.170 -		goto abort_transaction;
  30.171 -	}
  30.172 -
  30.173  	err = xenbus_transaction_end(0);
  30.174  	if (err) {
  30.175  		if (err == -EAGAIN)
  30.176 @@ -610,10 +581,17 @@ again:
  30.177  		goto destroy_blkring;
  30.178  	}
  30.179  
  30.180 - out:
  30.181 -	if (backend)
  30.182 -		kfree(backend);
  30.183 -	return err;
  30.184 +	info->watch.node = backend;
  30.185 +	info->watch.callback = watch_for_status;
  30.186 +	err = register_xenbus_watch(&info->watch);
  30.187 +	if (err) {
  30.188 +		message = "registering watch on backend";
  30.189 +		goto destroy_blkring;
  30.190 +	}
  30.191 +
  30.192 +	info->backend = backend;
  30.193 +
  30.194 +	return 0;
  30.195  
  30.196   abort_transaction:
  30.197  	xenbus_transaction_end(1);
  30.198 @@ -621,7 +599,10 @@ again:
  30.199  	xenbus_dev_error(dev, err, "%s", message);
  30.200   destroy_blkring:
  30.201  	blkif_free(info);
  30.202 -	goto out;
  30.203 + out:
  30.204 +	if (backend)
  30.205 +		kfree(backend);
  30.206 +	return err;
  30.207  }
  30.208  
  30.209  /* Setup supplies the backend dir, virtual device.
  30.210 @@ -702,9 +683,6 @@ static int blkfront_suspend(struct xenbu
  30.211  	kfree(info->backend);
  30.212  	info->backend = NULL;
  30.213  
  30.214 -	recovery = 1;
  30.215 -	blkif_free(info);
  30.216 -
  30.217  	return 0;
  30.218  }
  30.219  
  30.220 @@ -713,11 +691,12 @@ static int blkfront_resume(struct xenbus
  30.221  	struct blkfront_info *info = dev->data;
  30.222  	int err;
  30.223  
  30.224 -	/* FIXME: Check geometry hasn't changed here... */
  30.225 +	blkif_free(info);
  30.226 +
  30.227  	err = talk_to_backend(dev, info);
  30.228 -	if (!err) {
  30.229 +	if (!err)
  30.230  		blkif_recover(info);
  30.231 -	}
  30.232 +
  30.233  	return err;
  30.234  }
  30.235  
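
The frontend follows the same pattern: bind_evtchn_to_irqhandler() now returns the IRQ (or an error), which is cached in info->irq and used for all notifications. A trimmed sketch with hypothetical wrapper names; blkif_int, SA_SAMPLE_RANDOM, RING_PUSH_REQUESTS and the info fields are those from the hunks above:

/* Illustrative bind + ring kick for the block frontend. */
static int frontend_bind(struct blkfront_info *info)
{
	int irq = bind_evtchn_to_irqhandler(
		info->evtchn, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);

	if (irq <= 0)
		return irq;            /* <= 0 now signals failure */
	info->irq = irq;
	return 0;
}

static void frontend_kick(struct blkfront_info *info)
{
	RING_PUSH_REQUESTS(&info->ring);
	notify_remote_via_irq(info->irq);  /* dropped if the channel is gone */
}
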
    31.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h	Thu Sep 29 16:22:02 2005 -0600
    31.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h	Thu Sep 29 17:28:44 2005 -0600
    31.3 @@ -123,7 +123,7 @@ struct blkfront_info
    31.4  	int backend_id;
    31.5  	int ring_ref;
    31.6  	blkif_front_ring_t ring;
    31.7 -	unsigned int evtchn;
    31.8 +	unsigned int evtchn, irq;
    31.9  	struct xlbd_major_info *mi;
   31.10  	request_queue_t *rq;
   31.11  	struct work_struct work;
    32.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Thu Sep 29 16:22:02 2005 -0600
    32.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Thu Sep 29 17:28:44 2005 -0600
    32.3 @@ -842,7 +842,7 @@ static void make_response(blkif_t *blkif
    32.4  	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
    32.5  
    32.6  	/* Kick the relevant domain. */
    32.7 -	notify_via_evtchn(blkif->evtchn);
    32.8 +	notify_remote_via_irq(blkif->irq);
    32.9  }
   32.10  
   32.11  static struct miscdevice blktap_miscdev = {
    33.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/common.h	Thu Sep 29 16:22:02 2005 -0600
    33.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/common.h	Thu Sep 29 17:28:44 2005 -0600
    33.3 @@ -46,7 +46,7 @@ typedef struct blkif_st {
    33.4  	unsigned int      handle;
    33.5  	/* Physical parameters of the comms window. */
    33.6  	unsigned int      evtchn;
    33.7 -	unsigned int      remote_evtchn;
    33.8 +	unsigned int      irq;
    33.9  	/* Comms information. */
   33.10  	blkif_back_ring_t blk_ring;
   33.11  	struct vm_struct *blk_ring_area;
    34.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c	Thu Sep 29 16:22:02 2005 -0600
    34.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c	Thu Sep 29 17:28:44 2005 -0600
    34.3 @@ -71,8 +71,6 @@ int blkif_map(blkif_t *blkif, unsigned l
    34.4  	evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
    34.5  	int err;
    34.6  
    34.7 -	BUG_ON(blkif->remote_evtchn);
    34.8 -
    34.9  	if ((blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL)
   34.10  		return -ENOMEM;
   34.11  
   34.12 @@ -93,35 +91,26 @@ int blkif_map(blkif_t *blkif, unsigned l
   34.13  		return err;
   34.14  	}
   34.15  
   34.16 -
   34.17  	blkif->evtchn = op.u.bind_interdomain.port1;
   34.18 -	blkif->remote_evtchn = evtchn;
   34.19  
   34.20  	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
   34.21  	SHARED_RING_INIT(sring);
   34.22  	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
   34.23  
   34.24 -	bind_evtchn_to_irqhandler(
   34.25 +	blkif->irq = bind_evtchn_to_irqhandler(
   34.26  		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
   34.27 -	blkif->status        = CONNECTED;
   34.28 +
   34.29 +	blkif->status = CONNECTED;
   34.30  
   34.31  	return 0;
   34.32  }
   34.33  
   34.34  static void free_blkif(void *arg)
   34.35  {
   34.36 -	evtchn_op_t op = { .cmd = EVTCHNOP_close };
   34.37  	blkif_t *blkif = (blkif_t *)arg;
   34.38  
   34.39 -	op.u.close.port = blkif->evtchn;
   34.40 -	op.u.close.dom = DOMID_SELF;
   34.41 -	HYPERVISOR_event_channel_op(&op);
   34.42 -	op.u.close.port = blkif->remote_evtchn;
   34.43 -	op.u.close.dom = blkif->domid;
   34.44 -	HYPERVISOR_event_channel_op(&op);
   34.45 -
   34.46 -	if (blkif->evtchn)
   34.47 -		unbind_evtchn_from_irqhandler(blkif->evtchn, blkif);
   34.48 +	if (blkif->irq)
   34.49 +		unbind_evtchn_from_irqhandler(blkif->irq, blkif);
   34.50  
   34.51  	if (blkif->blk_ring.sring) {
   34.52  		unmap_frontend_page(blkif);
    35.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c	Thu Sep 29 16:22:02 2005 -0600
    35.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c	Thu Sep 29 17:28:44 2005 -0600
    35.3 @@ -21,7 +21,6 @@
    35.4  #include <linux/err.h>
    35.5  #include "xencons_ring.h"
    35.6  
    35.7 -
    35.8  struct ring_head
    35.9  {
   35.10  	u32 cons;
   35.11 @@ -29,6 +28,7 @@ struct ring_head
   35.12  	char buf[0];
   35.13  } __attribute__((packed));
   35.14  
   35.15 +static int xencons_irq;
   35.16  
   35.17  #define XENCONS_RING_SIZE (PAGE_SIZE/2 - sizeof (struct ring_head))
   35.18  #define XENCONS_IDX(cnt) ((cnt) % XENCONS_RING_SIZE)
   35.19 @@ -46,7 +46,8 @@ static inline struct ring_head *inring(v
   35.20  
   35.21  
   35.22  /* don't block -  write as much as possible and return */
   35.23 -static int __xencons_ring_send(struct ring_head *ring, const char *data, unsigned len)
   35.24 +static int __xencons_ring_send(
   35.25 +	struct ring_head *ring, const char *data, unsigned len)
   35.26  {
   35.27  	int copied = 0;
   35.28  
   35.29 @@ -63,13 +64,9 @@ static int __xencons_ring_send(struct ri
   35.30  
   35.31  int xencons_ring_send(const char *data, unsigned len)
   35.32  {
   35.33 -	struct ring_head *out = outring();
   35.34 -	int sent = 0;
   35.35 -	
   35.36 -	sent = __xencons_ring_send(out, data, len);
   35.37 -	notify_via_evtchn(xen_start_info->console_evtchn);
   35.38 +	int sent = __xencons_ring_send(outring(), data, len);
   35.39 +	notify_remote_via_irq(xencons_irq);
   35.40  	return sent;
   35.41 -
   35.42  }	
   35.43  
   35.44  
   35.45 @@ -97,32 +94,28 @@ int xencons_ring_init(void)
   35.46  {
   35.47  	int err;
   35.48  
   35.49 +	if (xencons_irq)
   35.50 +		unbind_evtchn_from_irqhandler(xencons_irq, inring());
   35.51 +	xencons_irq = 0;
   35.52 +
   35.53  	if (!xen_start_info->console_evtchn)
   35.54  		return 0;
   35.55  
   35.56 -	err = bind_evtchn_to_irqhandler(xen_start_info->console_evtchn,
   35.57 -					handle_input, 0, "xencons", inring());
   35.58 -	if (err) {
   35.59 +	err = bind_evtchn_to_irqhandler(
   35.60 +		xen_start_info->console_evtchn,
   35.61 +		handle_input, 0, "xencons", inring());
   35.62 +	if (err <= 0) {
   35.63  		xprintk("XEN console request irq failed %i\n", err);
   35.64  		return err;
   35.65  	}
   35.66  
   35.67 +	xencons_irq = err;
   35.68 +
   35.69  	return 0;
   35.70  }
   35.71  
   35.72 -void xencons_suspend(void)
   35.73 -{
   35.74 -
   35.75 -	if (!xen_start_info->console_evtchn)
   35.76 -		return;
   35.77 -
   35.78 -	unbind_evtchn_from_irqhandler(xen_start_info->console_evtchn,
   35.79 -				      inring());
   35.80 -}
   35.81 -
   35.82  void xencons_resume(void)
   35.83  {
   35.84 -
   35.85  	(void)xencons_ring_init();
   35.86  }
   35.87  
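
xencons_suspend() disappears because xencons_ring_init() now starts by discarding any stale binding, so resume simply re-runs it. A sketch of that rebind-on-resume idiom; console_reinit is a hypothetical name, while xencons_irq, handle_input and inring() come from the hunks above:

/* Illustrative re-initialisation across save/restore. */
static int console_reinit(void)
{
	int irq;

	if (xencons_irq)	/* stale binding from before the restore */
		unbind_evtchn_from_irqhandler(xencons_irq, inring());
	xencons_irq = 0;

	if (!xen_start_info->console_evtchn)
		return 0;

	irq = bind_evtchn_to_irqhandler(
		xen_start_info->console_evtchn, handle_input,
		0, "xencons", inring());
	if (irq <= 0)
		return irq;

	xencons_irq = irq;
	return 0;
}
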
    36.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Thu Sep 29 16:22:02 2005 -0600
    36.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Thu Sep 29 17:28:44 2005 -0600
    36.3 @@ -22,8 +22,6 @@
    36.4  #include <asm-xen/gnttab.h>
    36.5  #include <asm-xen/driver_util.h>
    36.6  
    36.7 -#define GRANT_INVALID_REF (0xFFFF)
    36.8 -
    36.9  #if 0
   36.10  #define ASSERT(_p) \
   36.11      if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
   36.12 @@ -52,7 +50,7 @@ typedef struct netif_st {
   36.13  	u16              rx_shmem_handle;
   36.14  	grant_ref_t      rx_shmem_ref; 
   36.15  	unsigned int     evtchn;
   36.16 -	unsigned int     remote_evtchn;
   36.17 +	unsigned int     irq;
   36.18  
   36.19  	/* The shared rings and indexes. */
   36.20  	netif_tx_interface_t *tx;
    37.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Thu Sep 29 16:22:02 2005 -0600
    37.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Thu Sep 29 17:28:44 2005 -0600
    37.3 @@ -15,18 +15,17 @@ static void __netif_up(netif_t *netif)
    37.4  	spin_lock_bh(&dev->xmit_lock);
    37.5  	netif->active = 1;
    37.6  	spin_unlock_bh(&dev->xmit_lock);
    37.7 -	(void)bind_evtchn_to_irqhandler(
    37.8 -		netif->evtchn, netif_be_int, 0, dev->name, netif);
    37.9 +	enable_irq(netif->irq);
   37.10  	netif_schedule_work(netif);
   37.11  }
   37.12  
   37.13  static void __netif_down(netif_t *netif)
   37.14  {
   37.15  	struct net_device *dev = netif->dev;
   37.16 +	disable_irq(netif->irq);
   37.17  	spin_lock_bh(&dev->xmit_lock);
   37.18  	netif->active = 0;
   37.19  	spin_unlock_bh(&dev->xmit_lock);
   37.20 -	unbind_evtchn_from_irqhandler(netif->evtchn, netif);
   37.21  	netif_deschedule_work(netif);
   37.22  }
   37.23  
   37.24 @@ -203,7 +202,10 @@ int netif_map(netif_t *netif, unsigned l
   37.25  	}
   37.26  
   37.27  	netif->evtchn = op.u.bind_interdomain.port1;
   37.28 -	netif->remote_evtchn = evtchn;
   37.29 +
   37.30 +	netif->irq = bind_evtchn_to_irqhandler(
   37.31 +		netif->evtchn, netif_be_int, 0, netif->dev->name, netif);
   37.32 +	disable_irq(netif->irq);
   37.33  
   37.34  	netif->tx = (netif_tx_interface_t *)netif->comms_area->addr;
   37.35  	netif->rx = (netif_rx_interface_t *)
   37.36 @@ -224,21 +226,15 @@ int netif_map(netif_t *netif, unsigned l
   37.37  
   37.38  static void free_netif_callback(void *arg)
   37.39  {
   37.40 -	evtchn_op_t op = { .cmd = EVTCHNOP_close };
   37.41  	netif_t *netif = (netif_t *)arg;
   37.42  
   37.43  	/*
   37.44 -	 * These can't be done in netif_disconnect() because at that point
   37.45 +	 * This can't be done in netif_disconnect() because at that point
   37.46  	 * there may be outstanding requests in the network stack whose
   37.47  	 * asynchronous responses must still be notified to the remote driver.
   37.48  	 */
   37.49 -
   37.50 -	op.u.close.port = netif->evtchn;
   37.51 -	op.u.close.dom = DOMID_SELF;
   37.52 -	HYPERVISOR_event_channel_op(&op);
   37.53 -	op.u.close.port = netif->remote_evtchn;
   37.54 -	op.u.close.dom = netif->domid;
   37.55 -	HYPERVISOR_event_channel_op(&op);
   37.56 +	if (netif->irq)
   37.57 +		unbind_evtchn_from_irqhandler(netif->irq, netif);
   37.58  
   37.59  	unregister_netdev(netif->dev);
   37.60  
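
Here the binding is made once in netif_map() and immediately disabled, so bringing the interface up or down only toggles the Linux IRQ instead of rebinding the event channel each time. A small sketch; example_netif_up/down are hypothetical, the calls are those used in the hunks above:

/* Illustrative up/down gating on a persistently bound IRQ. */
static void example_netif_up(netif_t *netif)
{
	enable_irq(netif->irq);        /* bound and disabled in netif_map() */
	netif_schedule_work(netif);
}

static void example_netif_down(netif_t *netif)
{
	disable_irq(netif->irq);
	netif_deschedule_work(netif);
}
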
    38.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Thu Sep 29 16:22:02 2005 -0600
    38.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Thu Sep 29 17:28:44 2005 -0600
    38.3 @@ -42,7 +42,7 @@ static multicall_entry_t rx_mcl[NETIF_RX
    38.4  static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
    38.5  
    38.6  static gnttab_transfer_t grant_rx_op[MAX_PENDING_REQS];
    38.7 -static unsigned char rx_notify[NR_EVENT_CHANNELS];
    38.8 +static unsigned char rx_notify[NR_IRQS];
    38.9  
   38.10  /* Don't currently gate addition of an interface to the tx scheduling list. */
   38.11  #define tx_work_exists(_if) (1)
   38.12 @@ -209,7 +209,7 @@ static void net_rx_action(unsigned long 
   38.13  {
   38.14  	netif_t *netif = NULL; 
   38.15  	s8 status;
   38.16 -	u16 size, id, evtchn;
   38.17 +	u16 size, id, irq;
   38.18  	multicall_entry_t *mcl;
   38.19  	mmu_update_t *mmu;
   38.20  	gnttab_transfer_t *gop;
   38.21 @@ -320,16 +320,16 @@ static void net_rx_action(unsigned long 
   38.22  				gop->status, netif->domid);
   38.23  			/* XXX SMH: should free 'old_mfn' here */
   38.24  			status = NETIF_RSP_ERROR; 
   38.25 -		} 
   38.26 -		evtchn = netif->evtchn;
   38.27 +		}
   38.28 +		irq = netif->irq;
   38.29  		id = netif->rx->ring[
   38.30  			MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
   38.31  		if (make_rx_response(netif, id, status,
   38.32  				     (unsigned long)skb->data & ~PAGE_MASK,
   38.33  				     size, skb->proto_csum_valid) &&
   38.34 -		    (rx_notify[evtchn] == 0)) {
   38.35 -			rx_notify[evtchn] = 1;
   38.36 -			notify_list[notify_nr++] = evtchn;
   38.37 +		    (rx_notify[irq] == 0)) {
   38.38 +			rx_notify[irq] = 1;
   38.39 +			notify_list[notify_nr++] = irq;
   38.40  		}
   38.41  
   38.42  		netif_put(netif);
   38.43 @@ -339,9 +339,9 @@ static void net_rx_action(unsigned long 
   38.44  	}
   38.45  
   38.46  	while (notify_nr != 0) {
   38.47 -		evtchn = notify_list[--notify_nr];
   38.48 -		rx_notify[evtchn] = 0;
   38.49 -		notify_via_evtchn(evtchn);
   38.50 +		irq = notify_list[--notify_nr];
   38.51 +		rx_notify[irq] = 0;
   38.52 +		notify_remote_via_irq(irq);
   38.53  	}
   38.54  
   38.55  	/* More work to do? */
   38.56 @@ -434,7 +434,6 @@ inline static void net_tx_action_dealloc
   38.57  		gop->host_addr    = MMAP_VADDR(pending_idx);
   38.58  		gop->dev_bus_addr = 0;
   38.59  		gop->handle       = grant_tx_ref[pending_idx];
   38.60 -		grant_tx_ref[pending_idx] = GRANT_INVALID_REF;
   38.61  		gop++;
   38.62  	}
   38.63  	BUG_ON(HYPERVISOR_grant_table_op(
   38.64 @@ -718,7 +717,7 @@ static void make_tx_response(netif_t *ne
   38.65  
   38.66  	mb(); /* Update producer before checking event threshold. */
   38.67  	if (i == netif->tx->event)
   38.68 -		notify_via_evtchn(netif->evtchn);
   38.69 +		notify_remote_via_irq(netif->irq);
   38.70  }
   38.71  
   38.72  static int make_rx_response(netif_t *netif, 
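
net_rx_action() batches its kicks: each IRQ is flagged at most once while responses are built, then the list is flushed with one notify_remote_via_irq() per remote. A self-contained sketch of that dedup-and-flush idiom; the pending_* helpers are illustrative, and the array sizes follow the rx_notify[NR_IRQS] change above:

/* Illustrative batched notification: at most one kick per IRQ. */
static unsigned char pending_notify[NR_IRQS];
static u16 pending_list[NR_IRQS];
static unsigned int pending_nr;

static void queue_notify(u16 irq)
{
	if (pending_notify[irq] == 0) {
		pending_notify[irq] = 1;
		pending_list[pending_nr++] = irq;
	}
}

static void flush_notifies(void)
{
	while (pending_nr != 0) {
		u16 irq = pending_list[--pending_nr];
		pending_notify[irq] = 0;
		notify_remote_via_irq(irq);
	}
}
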
    39.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Thu Sep 29 16:22:02 2005 -0600
    39.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Thu Sep 29 17:28:44 2005 -0600
    39.3 @@ -1,7 +1,7 @@
    39.4  /******************************************************************************
    39.5   * Virtual network driver for conversing with remote driver backends.
    39.6   * 
    39.7 - * Copyright (c) 2002-2004, K A Fraser
    39.8 + * Copyright (c) 2002-2005, K A Fraser
    39.9   * 
   39.10   * This file may be distributed separately from the Linux kernel, or
   39.11   * incorporated into other software packages, subject to the following license:
   39.12 @@ -57,7 +57,7 @@
   39.13  #include <asm-xen/xen-public/grant_table.h>
   39.14  #include <asm-xen/gnttab.h>
   39.15  
   39.16 -#define GRANT_INVALID_REF	(0xFFFF)
   39.17 +#define GRANT_INVALID_REF	0
   39.18  
   39.19  #ifndef __GFP_NOWARN
   39.20  #define __GFP_NOWARN 0
   39.21 @@ -127,7 +127,7 @@ struct net_private
   39.22  	spinlock_t   rx_lock;
   39.23  
   39.24  	unsigned int handle;
   39.25 -	unsigned int evtchn;
   39.26 +	unsigned int evtchn, irq;
   39.27  
   39.28  	/* What is the status of our connection to the remote backend? */
   39.29  #define BEST_CLOSED       0
   39.30 @@ -457,7 +457,7 @@ static int network_start_xmit(struct sk_
   39.31  	/* Only notify Xen if we really have to. */
   39.32  	mb();
   39.33  	if (np->tx->TX_TEST_IDX == i)
   39.34 -		notify_via_evtchn(np->evtchn);
   39.35 +		notify_remote_via_irq(np->irq);
   39.36  
   39.37  	return 0;
   39.38  
   39.39 @@ -700,6 +700,7 @@ static void network_connect(struct net_d
   39.40  	struct net_private *np;
   39.41  	int i, requeue_idx;
   39.42  	netif_tx_request_t *tx;
   39.43 +	struct sk_buff *skb;
   39.44  
   39.45  	np = netdev_priv(dev);
   39.46  	spin_lock_irq(&np->tx_lock);
   39.47 @@ -711,7 +712,8 @@ static void network_connect(struct net_d
   39.48  	np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
   39.49  	np->rx->event = np->tx->event = 1;
   39.50  
   39.51 -	/* Step 2: Rebuild the RX and TX ring contents.
   39.52 +	/*
   39.53 +	 * Step 2: Rebuild the RX and TX ring contents.
   39.54  	 * NB. We could just free the queued TX packets now but we hope
   39.55  	 * that sending them out might do some good.  We have to rebuild
   39.56  	 * the RX ring because some of our pages are currently flipped out
   39.57 @@ -722,56 +724,59 @@ static void network_connect(struct net_d
   39.58  	 * them.
   39.59  	 */
   39.60  
   39.61 -	/* Rebuild the TX buffer freelist and the TX ring itself.
   39.62 +	/*
   39.63 +	 * Rebuild the TX buffer freelist and the TX ring itself.
   39.64  	 * NB. This reorders packets.  We could keep more private state
   39.65  	 * to avoid this but maybe it doesn't matter so much given the
   39.66  	 * interface has been down.
   39.67  	 */
   39.68  	for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
   39.69 -		if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
   39.70 -			struct sk_buff *skb = np->tx_skbs[i];
   39.71 +		if ((unsigned long)np->tx_skbs[i] < __PAGE_OFFSET)
   39.72 +			continue;
   39.73  
   39.74 -			tx = &np->tx->ring[requeue_idx++].req;
   39.75 +		skb = np->tx_skbs[i];
   39.76 +
   39.77 +		tx = &np->tx->ring[requeue_idx++].req;
   39.78  
   39.79 -			tx->id   = i;
   39.80 -			gnttab_grant_foreign_access_ref(
   39.81 -				np->grant_tx_ref[i], np->backend_id, 
   39.82 -				virt_to_mfn(np->tx_skbs[i]->data),
   39.83 -				GNTMAP_readonly); 
   39.84 -			tx->gref = np->grant_tx_ref[i];
   39.85 -			tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
   39.86 -			tx->size = skb->len;
   39.87 +		tx->id = i;
   39.88 +		gnttab_grant_foreign_access_ref(
   39.89 +			np->grant_tx_ref[i], np->backend_id, 
   39.90 +			virt_to_mfn(np->tx_skbs[i]->data),
   39.91 +			GNTMAP_readonly); 
   39.92 +		tx->gref = np->grant_tx_ref[i];
   39.93 +		tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
   39.94 +		tx->size = skb->len;
   39.95 +		tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
   39.96  
   39.97 -			np->stats.tx_bytes += skb->len;
   39.98 -			np->stats.tx_packets++;
   39.99 -		}
  39.100 +		np->stats.tx_bytes += skb->len;
  39.101 +		np->stats.tx_packets++;
  39.102  	}
  39.103  	wmb();
  39.104  	np->tx->req_prod = requeue_idx;
  39.105  
  39.106  	/* Rebuild the RX buffer freelist and the RX ring itself. */
  39.107  	for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) { 
  39.108 -		if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET) {
  39.109 -			gnttab_grant_foreign_transfer_ref(
  39.110 -				np->grant_rx_ref[i], np->backend_id);
  39.111 -			np->rx->ring[requeue_idx].req.gref =
  39.112 -				np->grant_rx_ref[i];
  39.113 -			np->rx->ring[requeue_idx].req.id = i;
  39.114 -			requeue_idx++; 
  39.115 -		}
  39.116 +		if ((unsigned long)np->rx_skbs[i] < __PAGE_OFFSET)
  39.117 +			continue;
  39.118 +		gnttab_grant_foreign_transfer_ref(
  39.119 +			np->grant_rx_ref[i], np->backend_id);
  39.120 +		np->rx->ring[requeue_idx].req.gref =
  39.121 +			np->grant_rx_ref[i];
  39.122 +		np->rx->ring[requeue_idx].req.id = i;
  39.123 +		requeue_idx++; 
  39.124  	}
  39.125 -
  39.126  	wmb();                
  39.127  	np->rx->req_prod = requeue_idx;
  39.128  
  39.129 -	/* Step 3: All public and private state should now be sane.  Get
  39.130 +	/*
  39.131 +	 * Step 3: All public and private state should now be sane.  Get
  39.132  	 * ready to start sending and receiving packets and give the driver
  39.133  	 * domain a kick because we've probably just requeued some
  39.134  	 * packets.
  39.135  	 */
  39.136  	np->backend_state = BEST_CONNECTED;
  39.137  	wmb();
  39.138 -	notify_via_evtchn(np->evtchn);  
  39.139 +	notify_remote_via_irq(np->irq);
  39.140  	network_tx_buf_gc(dev);
  39.141  
  39.142  	if (np->user_state == UST_OPEN)
  39.143 @@ -798,7 +803,8 @@ static void show_device(struct net_priva
  39.144  #endif
  39.145  }
  39.146  
  39.147 -/* Move the vif into connected state.
  39.148 +/*
  39.149 + * Move the vif into connected state.
  39.150   * Sets the mac and event channel from the message.
  39.151   * Binds the irq to the event channel.
  39.152   */
  39.153 @@ -809,7 +815,7 @@ connect_device(struct net_private *np, u
  39.154  	memcpy(dev->dev_addr, np->mac, ETH_ALEN);
  39.155  	np->evtchn = evtchn;
  39.156  	network_connect(dev);
  39.157 -	(void)bind_evtchn_to_irqhandler(
  39.158 +	np->irq = bind_evtchn_to_irqhandler(
  39.159  		np->evtchn, netif_int, SA_SAMPLE_RANDOM, dev->name, dev);
  39.160  	(void)send_fake_arp(dev);
  39.161  	show_device(np);
  39.162 @@ -1049,12 +1055,12 @@ static void netif_free(struct netfront_i
  39.163  		gnttab_end_foreign_access(info->rx_ring_ref, 0);
  39.164  	info->rx_ring_ref = GRANT_INVALID_REF;
  39.165  
  39.166 -	unbind_evtchn_from_irqhandler(info->evtchn, info->netdev);
  39.167 -	info->evtchn = 0;
  39.168 +	if (info->irq)
  39.169 +		unbind_evtchn_from_irqhandler(info->irq, info->netdev);
  39.170 +	info->evtchn = info->irq = 0;
  39.171  }
  39.172  
  39.173 -/* Stop network device and free tx/rx queues and irq.
  39.174 - */
  39.175 +/* Stop network device and free tx/rx queues and irq. */
  39.176  static void shutdown_device(struct net_private *np)
  39.177  {
  39.178  	/* Stop old i/f to prevent errors whilst we rebuild the state. */
  39.179 @@ -1148,17 +1154,6 @@ again:
  39.180  		goto abort_transaction;
  39.181  	}
  39.182  
  39.183 -	info->backend = backend;
  39.184 -	backend = NULL;
  39.185 -
  39.186 -	info->watch.node = info->backend;
  39.187 -	info->watch.callback = watch_for_status;
  39.188 -	err = register_xenbus_watch(&info->watch);
  39.189 -	if (err) {
  39.190 -		message = "registering watch on backend";
  39.191 -		goto abort_transaction;
  39.192 -	}
  39.193 -
  39.194  	err = xenbus_transaction_end(0);
  39.195  	if (err) {
  39.196  		if (err == -EAGAIN)
  39.197 @@ -1167,12 +1162,19 @@ again:
  39.198  		goto destroy_ring;
  39.199  	}
  39.200  
  39.201 +	info->watch.node = backend;
  39.202 +	info->watch.callback = watch_for_status;
  39.203 +	err = register_xenbus_watch(&info->watch);
  39.204 +	if (err) {
  39.205 +		message = "registering watch on backend";
  39.206 +		goto destroy_ring;
  39.207 +	}
  39.208 +
  39.209 +	info->backend = backend;
  39.210 +
  39.211  	netif_state = NETIF_STATE_CONNECTED;
  39.212  
  39.213 - out:
  39.214 -	if (backend)
  39.215 -		kfree(backend);
  39.216 -	return err;
  39.217 +	return 0;
  39.218  
  39.219   abort_transaction:
  39.220  	xenbus_transaction_end(1);
  39.221 @@ -1180,13 +1182,17 @@ again:
  39.222  	xenbus_dev_error(dev, err, "%s", message);
  39.223   destroy_ring:
  39.224  	shutdown_device(info);
  39.225 -	goto out;
  39.226 + out:
  39.227 +	if (backend)
  39.228 +		kfree(backend);
  39.229 +	return err;
  39.230  }
  39.231  
  39.232 -/* Setup supplies the backend dir, virtual device.
  39.233 -
  39.234 -   We place an event channel and shared frame entries.
  39.235 -   We watch backend to wait if it's ok. */
  39.236 +/*
  39.237 + * Setup supplies the backend dir, virtual device.
  39.238 + * We place an event channel and shared frame entries.
  39.239 + * We watch the backend to learn when it is ready.
  39.240 + */
  39.241  static int netfront_probe(struct xenbus_device *dev,
  39.242  			  const struct xenbus_device_id *id)
  39.243  {
  39.244 @@ -1241,24 +1247,17 @@ static int netfront_remove(struct xenbus
  39.245  static int netfront_suspend(struct xenbus_device *dev)
  39.246  {
  39.247  	struct netfront_info *info = dev->data;
  39.248 -
  39.249  	unregister_xenbus_watch(&info->watch);
  39.250  	kfree(info->backend);
  39.251  	info->backend = NULL;
  39.252 -
  39.253 -	netif_free(info);
  39.254 -
  39.255  	return 0;
  39.256  }
  39.257  
  39.258  static int netfront_resume(struct xenbus_device *dev)
  39.259  {
  39.260 -	struct net_private *np = dev->data;
  39.261 -	int err;
  39.262 -
  39.263 -	err = talk_to_backend(dev, np);
  39.264 -
  39.265 -	return err;
  39.266 +	struct netfront_info *info = dev->data;
  39.267 +	netif_free(info);
  39.268 +	return talk_to_backend(dev, info);
  39.269  }
  39.270  
  39.271  static struct xenbus_driver netfront = {
    40.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h	Thu Sep 29 16:22:02 2005 -0600
    40.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h	Thu Sep 29 17:28:44 2005 -0600
    40.3 @@ -35,7 +35,7 @@ typedef struct tpmif_st {
    40.4  
    40.5  	/* Physical parameters of the comms window. */
    40.6  	unsigned int evtchn;
    40.7 -	unsigned int remote_evtchn;
    40.8 +	unsigned int irq;
    40.9  
   40.10  	/* The shared rings and indexes. */
   40.11  	tpmif_tx_interface_t *tx;
    41.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c	Thu Sep 29 16:22:02 2005 -0600
    41.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c	Thu Sep 29 17:28:44 2005 -0600
    41.3 @@ -120,8 +120,6 @@ tpmif_map(tpmif_t *tpmif, unsigned long 
    41.4  	evtchn_op_t op = {.cmd = EVTCHNOP_bind_interdomain };
    41.5  	int err;
    41.6  
    41.7 -	BUG_ON(tpmif->remote_evtchn);
    41.8 -
    41.9  	if ((tpmif->tx_area = alloc_vm_area(PAGE_SIZE)) == NULL)
   41.10  		return -ENOMEM;
   41.11  
   41.12 @@ -143,12 +141,11 @@ tpmif_map(tpmif_t *tpmif, unsigned long 
   41.13  	}
   41.14  
   41.15  	tpmif->evtchn = op.u.bind_interdomain.port1;
   41.16 -	tpmif->remote_evtchn = evtchn;
   41.17  
   41.18  	tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr;
   41.19  
   41.20 -	bind_evtchn_to_irqhandler(tpmif->evtchn,
   41.21 -				  tpmif_be_int, 0, "tpmif-backend", tpmif);
   41.22 +	tpmif->irq = bind_evtchn_to_irqhandler(
   41.23 +		tpmif->evtchn, tpmif_be_int, 0, "tpmif-backend", tpmif);
   41.24  	tpmif->status = CONNECTED;
   41.25  	tpmif->shmem_ref = shared_page;
   41.26  	tpmif->active = 1;
   41.27 @@ -159,18 +156,10 @@ tpmif_map(tpmif_t *tpmif, unsigned long 
   41.28  static void
   41.29  __tpmif_disconnect_complete(void *arg)
   41.30  {
   41.31 -	evtchn_op_t op = {.cmd = EVTCHNOP_close };
   41.32  	tpmif_t *tpmif = (tpmif_t *) arg;
   41.33  
   41.34 -	op.u.close.port = tpmif->evtchn;
   41.35 -	op.u.close.dom = DOMID_SELF;
   41.36 -	HYPERVISOR_event_channel_op(&op);
   41.37 -	op.u.close.port = tpmif->remote_evtchn;
   41.38 -	op.u.close.dom = tpmif->domid;
   41.39 -	HYPERVISOR_event_channel_op(&op);
   41.40 -
   41.41 -	if (tpmif->evtchn)
   41.42 -		unbind_evtchn_from_irqhandler(tpmif->evtchn, tpmif);
   41.43 +	if (tpmif->irq)
   41.44 +		unbind_evtchn_from_irqhandler(tpmif->irq, tpmif);
   41.45  
   41.46  	if (tpmif->tx) {
   41.47  		unmap_frontend_page(tpmif);
    42.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Thu Sep 29 16:22:02 2005 -0600
    42.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Thu Sep 29 17:28:44 2005 -0600
    42.3 @@ -308,7 +308,7 @@ static int
    42.4  	rc = offset;
    42.5  	DPRINTK("Notifying frontend via event channel %d\n",
    42.6  	        tpmif->evtchn);
    42.7 -	notify_via_evtchn(tpmif->evtchn);
    42.8 +	notify_remote_via_irq(tpmif->irq);
    42.9  
   42.10  	return rc;
   42.11  }
    43.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c	Thu Sep 29 16:22:02 2005 -0600
    43.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c	Thu Sep 29 17:28:44 2005 -0600
    43.3 @@ -88,6 +88,26 @@ static void frontend_changed(struct xenb
    43.4  		return;
    43.5  	}
    43.6  
    43.7 +	err = tpmif_map(be->tpmif, ringref, evtchn);
    43.8 +	if (err) {
    43.9 +		xenbus_dev_error(be->dev, err,
   43.10 +				 "mapping shared-frame %lu port %u",
   43.11 +				 ringref, evtchn);
   43.12 +		return;
   43.13 +	}
   43.14 +
   43.15 +	err = tpmif_vtpm_open(be->tpmif,
   43.16 +	                      be->frontend_id,
   43.17 +	                      be->instance);
   43.18 +	if (err) {
   43.19 +		xenbus_dev_error(be->dev, err,
   43.20 +		                 "queueing vtpm open packet");
   43.21 +		/*
   43.22 +		 * Should close down this device and notify FE
   43.23 +		 * about closure.
   43.24 +		 */
   43.25 +		return;
   43.26 +	}
   43.27  
   43.28  	/*
   43.29  	 * Tell the front-end that we are ready to go -
   43.30 @@ -107,27 +127,6 @@ again:
   43.31  		goto abort;
   43.32  	}
   43.33  
   43.34 -	err = tpmif_map(be->tpmif, ringref, evtchn);
   43.35 -	if (err) {
   43.36 -		xenbus_dev_error(be->dev, err,
   43.37 -				 "mapping shared-frame %lu port %u",
   43.38 -				 ringref, evtchn);
   43.39 -		goto abort;
   43.40 -	}
   43.41 -
   43.42 -	err = tpmif_vtpm_open(be->tpmif,
   43.43 -	                      be->frontend_id,
   43.44 -	                      be->instance);
   43.45 -	if (err) {
   43.46 -		xenbus_dev_error(be->dev, err,
   43.47 -		                 "queueing vtpm open packet");
   43.48 -		/*
   43.49 -		 * Should close down this device and notify FE
   43.50 -		 * about closure.
   43.51 -		 */
   43.52 -		goto abort;
   43.53 -	}
   43.54 -
   43.55  	err = xenbus_transaction_end(0);
   43.56  	if (err == -EAGAIN)
   43.57  		goto again;
    44.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Thu Sep 29 16:22:02 2005 -0600
    44.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Thu Sep 29 17:28:44 2005 -0600
    44.3 @@ -292,8 +292,10 @@ static void destroy_tpmring(struct tpmfr
    44.4  		free_page((unsigned long)tp->tx);
    44.5  		tp->tx = NULL;
    44.6  	}
    44.7 -	unbind_evtchn_from_irqhandler(tp->evtchn, NULL);
    44.8 -	tp->evtchn = 0;
    44.9 +
   44.10 +	if (tp->irq)
   44.11 +		unbind_evtchn_from_irqhandler(tp->irq, NULL);
   44.12 +	tp->evtchn = tp->irq = 0;
   44.13  }
   44.14  
   44.15  
   44.16 @@ -352,17 +354,6 @@ again:
   44.17  		goto abort_transaction;
   44.18  	}
   44.19  
   44.20 -	info->backend = backend;
   44.21 -	backend = NULL;
   44.22 -
   44.23 -	info->watch.node = info->backend;
   44.24 -	info->watch.callback = watch_for_status;
   44.25 -	err = register_xenbus_watch(&info->watch);
   44.26 -	if (err) {
   44.27 -		message = "registering watch on backend";
   44.28 -		goto abort_transaction;
   44.29 -	}
   44.30 -
   44.31  	err = xenbus_transaction_end(0);
   44.32  	if (err == -EAGAIN)
   44.33  		goto again;
   44.34 @@ -371,10 +362,17 @@ again:
   44.35  		goto destroy_tpmring;
   44.36  	}
   44.37  
   44.38 -out:
   44.39 -	if (backend)
   44.40 -		kfree(backend);
   44.41 -	return err;
   44.42 +	info->watch.node = backend;
   44.43 +	info->watch.callback = watch_for_status;
   44.44 +	err = register_xenbus_watch(&info->watch);
   44.45 +	if (err) {
   44.46 +		message = "registering watch on backend";
   44.47 +		goto destroy_tpmring;
   44.48 +	}
   44.49 +
   44.50 +	info->backend = backend;
   44.51 +
   44.52 +	return 0;
   44.53  
   44.54  abort_transaction:
   44.55  	xenbus_transaction_end(1);
   44.56 @@ -382,7 +380,10 @@ abort_transaction:
   44.57  	xenbus_dev_error(dev, err, "%s", message);
   44.58  destroy_tpmring:
   44.59  	destroy_tpmring(info, &my_private);
   44.60 -	goto out;
   44.61 +out:
   44.62 +	if (backend)
   44.63 +		kfree(backend);
   44.64 +	return err;
   44.65  }
   44.66  
   44.67  
   44.68 @@ -502,10 +503,12 @@ static void tpmif_connect(u16 evtchn, do
   44.69  	err = bind_evtchn_to_irqhandler(
   44.70  		tp->evtchn,
   44.71  		tpmif_int, SA_SAMPLE_RANDOM, "tpmif", tp);
   44.72 -	if ( err != 0 ) {
   44.73 +	if ( err <= 0 ) {
   44.74  		WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
   44.75  		return;
   44.76  	}
   44.77 +
   44.78 +	tp->irq = err;
   44.79  }
   44.80  
   44.81  static struct xenbus_device_id tpmfront_ids[] = {
   44.82 @@ -679,7 +682,7 @@ tpm_xmit(struct tpm_private *tp,
   44.83  	DPRINTK("Notifying backend via event channel %d\n",
   44.84  	        tp->evtchn);
   44.85  
   44.86 -	notify_via_evtchn(tp->evtchn);
   44.87 +	notify_remote_via_irq(tp->irq);
   44.88  
   44.89  	spin_unlock_irq(&tp->tx_lock);
   44.90  	return offset;
    45.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h	Thu Sep 29 16:22:02 2005 -0600
    45.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h	Thu Sep 29 17:28:44 2005 -0600
    45.3 @@ -5,7 +5,7 @@
    45.4  struct tpm_private
    45.5  {
    45.6  	tpmif_tx_interface_t *tx;
    45.7 -	unsigned int evtchn;
    45.8 +	unsigned int evtchn, irq;
    45.9  	int connected;
   45.10  
   45.11  	spinlock_t tx_lock;
    46.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c	Thu Sep 29 16:22:02 2005 -0600
    46.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c	Thu Sep 29 17:28:44 2005 -0600
    46.3 @@ -44,6 +44,8 @@ struct ringbuf_head
    46.4  	char buf[0];
    46.5  } __attribute__((packed));
    46.6  
    46.7 +static int xenbus_irq;
    46.8 +
    46.9  DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
   46.10  
   46.11  static inline struct ringbuf_head *outbuf(void)
   46.12 @@ -145,7 +147,7 @@ int xb_write(const void *data, unsigned 
   46.13  		data += avail;
   46.14  		len -= avail;
   46.15  		update_output_chunk(out, avail);
   46.16 -		notify_via_evtchn(xen_start_info->store_evtchn);
   46.17 +		notify_remote_via_irq(xenbus_irq);
   46.18  	} while (len != 0);
   46.19  
   46.20  	return 0;
   46.21 @@ -190,7 +192,7 @@ int xb_read(void *data, unsigned len)
   46.22  		pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
   46.23  		/* If it was full, tell them we've taken some. */
   46.24  		if (was_full)
   46.25 -			notify_via_evtchn(xen_start_info->store_evtchn);
   46.26 +			notify_remote_via_irq(xenbus_irq);
   46.27  	}
   46.28  
   46.29  	/* If we left something, wake watch thread to deal with it. */
   46.30 @@ -205,33 +207,29 @@ int xb_init_comms(void)
   46.31  {
   46.32  	int err;
   46.33  
   46.34 +	if (xenbus_irq)
   46.35 +		unbind_evtchn_from_irqhandler(xenbus_irq, &xb_waitq);
   46.36 +	xenbus_irq = 0;
   46.37 +
   46.38  	if (!xen_start_info->store_evtchn)
   46.39  		return 0;
   46.40  
   46.41  	err = bind_evtchn_to_irqhandler(
   46.42  		xen_start_info->store_evtchn, wake_waiting,
   46.43  		0, "xenbus", &xb_waitq);
   46.44 -	if (err) {
   46.45 +	if (err <= 0) {
   46.46  		printk(KERN_ERR "XENBUS request irq failed %i\n", err);
   46.47 -		unbind_evtchn_from_irq(xen_start_info->store_evtchn);
   46.48  		return err;
   46.49  	}
   46.50  
   46.51 +	xenbus_irq = err;
   46.52 +
   46.53  	/* FIXME zero out page -- domain builder should probably do this*/
   46.54  	memset(mfn_to_virt(xen_start_info->store_mfn), 0, PAGE_SIZE);
   46.55  
   46.56  	return 0;
   46.57  }
   46.58  
   46.59 -void xb_suspend_comms(void)
   46.60 -{
   46.61 -
   46.62 -	if (!xen_start_info->store_evtchn)
   46.63 -		return;
   46.64 -
   46.65 -	unbind_evtchn_from_irqhandler(xen_start_info->store_evtchn, &xb_waitq);
   46.66 -}
   46.67 -
   46.68  /*
   46.69   * Local variables:
   46.70   *  c-file-style: "linux"
    47.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h	Thu Sep 29 16:22:02 2005 -0600
    47.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.h	Thu Sep 29 17:28:44 2005 -0600
    47.3 @@ -30,7 +30,6 @@
    47.4  
    47.5  int xs_init(void);
    47.6  int xb_init_comms(void);
    47.7 -void xb_suspend_comms(void);
    47.8  
    47.9  /* Low level routines. */
   47.10  int xb_write(const void *data, unsigned len);
    48.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c	Thu Sep 29 16:22:02 2005 -0600
    48.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c	Thu Sep 29 17:28:44 2005 -0600
    48.3 @@ -607,7 +607,6 @@ void xenbus_suspend(void)
    48.4  	down(&xenbus_lock);
    48.5  	bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
    48.6  	bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, suspend_dev);
    48.7 -	xb_suspend_comms();
    48.8  }
    48.9  
   48.10  void xenbus_resume(void)
   48.11 @@ -651,7 +650,6 @@ int do_xenbus_probe(void *unused)
   48.12  	int err = 0;
   48.13  
   48.14  	/* Initialize xenstore comms unless already done. */
   48.15 -	printk("store_evtchn = %i\n", xen_start_info->store_evtchn);
   48.16  	err = xs_init();
   48.17  	if (err) {
   48.18  		printk("XENBUS: Error initializing xenstore comms:"
    50.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h	Thu Sep 29 16:22:02 2005 -0600
    50.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h	Thu Sep 29 17:28:44 2005 -0600
    50.3 @@ -80,11 +80,9 @@
    50.4   * the usable vector space is 0x20-0xff (224 vectors)
    50.5   */
    50.6  
    50.7 -#define NR_IPIS 8
    50.8 -
    50.9 -#define RESCHEDULE_VECTOR	1
   50.10 -#define INVALIDATE_TLB_VECTOR	2
   50.11 -#define CALL_FUNCTION_VECTOR	3
   50.12 +#define RESCHEDULE_VECTOR	0
   50.13 +#define CALL_FUNCTION_VECTOR	1
   50.14 +#define NR_IPIS			2
   50.15  
   50.16  /*
   50.17   * The maximum number of vectors supported by i386 processors
    52.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h	Thu Sep 29 16:22:02 2005 -0600
    52.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h	Thu Sep 29 17:28:44 2005 -0600
    52.3 @@ -78,11 +78,9 @@
    52.4   * the usable vector space is 0x20-0xff (224 vectors)
    52.5   */
    52.6  
    52.7 -#define NR_IPIS 8
    52.8 -
    52.9 -#define RESCHEDULE_VECTOR	1
   52.10 -#define INVALIDATE_TLB_VECTOR	2
   52.11 -#define CALL_FUNCTION_VECTOR	3
   52.12 +#define RESCHEDULE_VECTOR	0
   52.13 +#define CALL_FUNCTION_VECTOR	1
   52.14 +#define NR_IPIS			2
   52.15  
   52.16  /*
   52.17   * The maximum number of vectors supported by i386 processors
    53.1 --- a/linux-2.6-xen-sparse/include/asm-xen/balloon.h	Thu Sep 29 16:22:02 2005 -0600
    53.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/balloon.h	Thu Sep 29 17:28:44 2005 -0600
    53.3 @@ -58,3 +58,13 @@ extern spinlock_t balloon_lock;
    53.4  #define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
    53.5  
    53.6  #endif /* __ASM_BALLOON_H__ */
    53.7 +
    53.8 +/*
    53.9 + * Local variables:
   53.10 + *  c-file-style: "linux"
   53.11 + *  indent-tabs-mode: t
   53.12 + *  c-indent-level: 8
   53.13 + *  c-basic-offset: 8
   53.14 + *  tab-width: 8
   53.15 + * End:
   53.16 + */
    54.1 --- a/linux-2.6-xen-sparse/include/asm-xen/driver_util.h	Thu Sep 29 16:22:02 2005 -0600
    54.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/driver_util.h	Thu Sep 29 17:28:44 2005 -0600
    54.3 @@ -14,3 +14,13 @@ extern void lock_vm_area(struct vm_struc
    54.4  extern void unlock_vm_area(struct vm_struct *area);
    54.5  
    54.6  #endif /* __ASM_XEN_DRIVER_UTIL_H__ */
    54.7 +
    54.8 +/*
    54.9 + * Local variables:
   54.10 + *  c-file-style: "linux"
   54.11 + *  indent-tabs-mode: t
   54.12 + *  c-indent-level: 8
   54.13 + *  c-basic-offset: 8
   54.14 + *  tab-width: 8
   54.15 + * End:
   54.16 + */
    55.1 --- a/linux-2.6-xen-sparse/include/asm-xen/evtchn.h	Thu Sep 29 16:22:02 2005 -0600
    55.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/evtchn.h	Thu Sep 29 17:28:44 2005 -0600
    55.3 @@ -4,7 +4,7 @@
    55.4   * Communication via Xen event channels.
    55.5   * Also definitions for the device that demuxes notifications to userspace.
    55.6   * 
    55.7 - * Copyright (c) 2004, K A Fraser
    55.8 + * Copyright (c) 2004-2005, K A Fraser
    55.9   * 
   55.10   * This file may be distributed separately from the Linux kernel, or
   55.11   * incorporated into other software packages, subject to the following license:
   55.12 @@ -51,24 +51,36 @@ extern void unbind_virq_from_irq(int vir
   55.13  extern int  bind_ipi_to_irq(int ipi);
   55.14  extern void unbind_ipi_from_irq(int ipi);
   55.15  
   55.16 -/* Dynamically bind an event-channel port to Linux IRQ space. */
   55.17 +/*
   55.18 + * Dynamically bind an event-channel port to Linux IRQ space.
   55.19 + * BIND:   Returns IRQ or error.
   55.20 + * UNBIND: Takes IRQ to unbind from; automatically closes the event channel.
   55.21 + */
   55.22  extern int  bind_evtchn_to_irq(unsigned int evtchn);
   55.23 -extern void unbind_evtchn_from_irq(unsigned int evtchn);
   55.24 +extern void unbind_evtchn_from_irq(unsigned int irq);
   55.25  
   55.26  /*
   55.27   * Dynamically bind an event-channel port to an IRQ-like callback handler.
   55.28   * On some platforms this may not be implemented via the Linux IRQ subsystem.
   55.29 - * You *cannot* trust the irq argument passed to the callback handler.
   55.30 + * The IRQ argument passed to the callback handler is the same as returned
   55.31 + * from the bind call. It may not correspond to a Linux IRQ number.
   55.32 + * BIND:   Returns IRQ or error.
   55.33 + * UNBIND: Takes IRQ to unbind from; automatically closes the event channel.
   55.34   */
   55.35  extern int  bind_evtchn_to_irqhandler(
   55.36 -    unsigned int evtchn,
   55.37 -    irqreturn_t (*handler)(int, void *, struct pt_regs *),
   55.38 -    unsigned long irqflags,
   55.39 -    const char *devname,
   55.40 -    void *dev_id);
   55.41 -extern void unbind_evtchn_from_irqhandler(unsigned int evtchn, void *dev_id);
   55.42 +	unsigned int evtchn,
   55.43 +	irqreturn_t (*handler)(int, void *, struct pt_regs *),
   55.44 +	unsigned long irqflags,
   55.45 +	const char *devname,
   55.46 +	void *dev_id);
   55.47 +extern void unbind_evtchn_from_irqhandler(unsigned int irq, void *dev_id);
   55.48  
   55.49 -extern void irq_suspend(void);
   55.50 +/*
   55.51 + * Unlike notify_remote_via_evtchn(), this is safe to use across
   55.52 + * save/restore. Notifications on a broken connection are silently dropped.
   55.53 + */
   55.54 +void notify_remote_via_irq(int irq);
   55.55 +
   55.56  extern void irq_resume(void);
   55.57  
   55.58  /* Entry point for notifications into Linux subsystems. */
   55.59 @@ -79,42 +91,42 @@ void evtchn_device_upcall(int port);
   55.60  
   55.61  static inline void mask_evtchn(int port)
   55.62  {
   55.63 -    shared_info_t *s = HYPERVISOR_shared_info;
   55.64 -    synch_set_bit(port, &s->evtchn_mask[0]);
   55.65 +	shared_info_t *s = HYPERVISOR_shared_info;
   55.66 +	synch_set_bit(port, &s->evtchn_mask[0]);
   55.67  }
   55.68  
   55.69  static inline void unmask_evtchn(int port)
   55.70  {
   55.71 -    shared_info_t *s = HYPERVISOR_shared_info;
   55.72 -    vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
   55.73 +	shared_info_t *s = HYPERVISOR_shared_info;
   55.74 +	vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
   55.75  
   55.76 -    synch_clear_bit(port, &s->evtchn_mask[0]);
   55.77 +	synch_clear_bit(port, &s->evtchn_mask[0]);
   55.78  
   55.79 -    /*
   55.80 -     * The following is basically the equivalent of 'hw_resend_irq'. Just like
   55.81 -     * a real IO-APIC we 'lose the interrupt edge' if the channel is masked.
   55.82 -     */
   55.83 -    if (  synch_test_bit        (port,    &s->evtchn_pending[0]) && 
   55.84 -         !synch_test_and_set_bit(port>>5, &vcpu_info->evtchn_pending_sel) )
   55.85 -    {
   55.86 -        vcpu_info->evtchn_upcall_pending = 1;
   55.87 -        if ( !vcpu_info->evtchn_upcall_mask )
   55.88 -            force_evtchn_callback();
   55.89 -    }
   55.90 +	/*
   55.91 +	 * The following is basically the equivalent of 'hw_resend_irq'. Just
   55.92 +	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
   55.93 +	 * masked.
   55.94 +	 */
   55.95 +	if (synch_test_bit         (port,    &s->evtchn_pending[0]) && 
   55.96 +	    !synch_test_and_set_bit(port>>5, &vcpu_info->evtchn_pending_sel)) {
   55.97 +		vcpu_info->evtchn_upcall_pending = 1;
   55.98 +		if (!vcpu_info->evtchn_upcall_mask)
   55.99 +			force_evtchn_callback();
  55.100 +	}
  55.101  }
  55.102  
  55.103  static inline void clear_evtchn(int port)
  55.104  {
  55.105 -    shared_info_t *s = HYPERVISOR_shared_info;
  55.106 -    synch_clear_bit(port, &s->evtchn_pending[0]);
  55.107 +	shared_info_t *s = HYPERVISOR_shared_info;
  55.108 +	synch_clear_bit(port, &s->evtchn_pending[0]);
  55.109  }
  55.110  
  55.111 -static inline int notify_via_evtchn(int port)
  55.112 +static inline void notify_remote_via_evtchn(int port)
  55.113  {
  55.114 -    evtchn_op_t op;
  55.115 -    op.cmd = EVTCHNOP_send;
  55.116 -    op.u.send.local_port = port;
  55.117 -    return HYPERVISOR_event_channel_op(&op);
  55.118 +	evtchn_op_t op;
  55.119 +	op.cmd = EVTCHNOP_send;
  55.120 +	op.u.send.local_port = port;
  55.121 +	(void)HYPERVISOR_event_channel_op(&op);
  55.122  }
  55.123  
  55.124  /*
  55.125 @@ -133,3 +145,13 @@ static inline int notify_via_evtchn(int 
  55.126  #define EVTCHN_UNBIND _IO('E', 3)
  55.127  
  55.128  #endif /* __ASM_EVTCHN_H__ */
  55.129 +
  55.130 +/*
  55.131 + * Local variables:
  55.132 + *  c-file-style: "linux"
  55.133 + *  indent-tabs-mode: t
  55.134 + *  c-indent-level: 8
  55.135 + *  c-basic-offset: 8
  55.136 + *  tab-width: 8
  55.137 + * End:
  55.138 + */
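
The reworked evtchn interface above moves drivers from raw event-channel numbers to Linux irqs for notification and teardown. A minimal usage sketch follows, assuming the binder whose parameter list opens this hunk is bind_evtchn_to_irqhandler() and that it returns the irq number (negative on failure); the handler and helper names are illustrative only, not part of the patch.

#include <linux/interrupt.h>
#include <asm-xen/evtchn.h>

/* Hypothetical interrupt handler for a frontend's event channel. */
static irqreturn_t frontend_int(int irq, void *dev_id, struct pt_regs *regs)
{
	/* ... consume responses from the shared ring ... */
	return IRQ_HANDLED;
}

/* Bind the channel, then poke the remote end via the irq-based helper,
 * which (per the comment above) stays safe across save/restore. */
static int frontend_connect(unsigned int evtchn, void *info)
{
	int irq = bind_evtchn_to_irqhandler(evtchn, frontend_int,
					    0, "frontend", info);
	if (irq < 0)
		return irq;
	notify_remote_via_irq(irq);
	return irq;
}

/* Teardown mirrors the new prototype: it takes the irq, not the evtchn. */
static void frontend_disconnect(int irq, void *info)
{
	unbind_evtchn_from_irqhandler(irq, info);
}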
    56.1 --- a/linux-2.6-xen-sparse/include/asm-xen/foreign_page.h	Thu Sep 29 16:22:02 2005 -0600
    56.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/foreign_page.h	Thu Sep 29 17:28:44 2005 -0600
    56.3 @@ -28,3 +28,13 @@
    56.4  	( (void (*) (struct page *)) (page)->mapping )
    56.5  
    56.6  #endif /* __ASM_XEN_FOREIGN_PAGE_H__ */
    56.7 +
    56.8 +/*
    56.9 + * Local variables:
   56.10 + *  c-file-style: "linux"
   56.11 + *  indent-tabs-mode: t
   56.12 + *  c-indent-level: 8
   56.13 + *  c-basic-offset: 8
   56.14 + *  tab-width: 8
   56.15 + * End:
   56.16 + */
    57.1 --- a/linux-2.6-xen-sparse/include/asm-xen/gnttab.h	Thu Sep 29 16:22:02 2005 -0600
    57.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/gnttab.h	Thu Sep 29 17:28:44 2005 -0600
    57.3 @@ -6,7 +6,7 @@
    57.4   * 2. Accessing others' memory reservations via grant references.
    57.5   * (i.e., mechanisms for both sender and recipient of grant references)
    57.6   * 
    57.7 - * Copyright (c) 2004, K A Fraser
    57.8 + * Copyright (c) 2004-2005, K A Fraser
    57.9   * Copyright (c) 2005, Christopher Clark
   57.10   */
   57.11  
   57.12 @@ -25,10 +25,10 @@
   57.13  #endif
   57.14  
   57.15  struct gnttab_free_callback {
   57.16 -    struct gnttab_free_callback *next;
   57.17 -    void (*fn)(void *);
   57.18 -    void *arg;
   57.19 -    u16 count;
   57.20 +	struct gnttab_free_callback *next;
   57.21 +	void (*fn)(void *);
   57.22 +	void *arg;
   57.23 +	u16 count;
   57.24  };
   57.25  
   57.26  int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
   57.27 @@ -73,3 +73,13 @@ void gnttab_grant_foreign_transfer_ref(g
   57.28  #endif
   57.29  
   57.30  #endif /* __ASM_GNTTAB_H__ */
   57.31 +
   57.32 +/*
   57.33 + * Local variables:
   57.34 + *  c-file-style: "linux"
   57.35 + *  indent-tabs-mode: t
   57.36 + *  c-indent-level: 8
   57.37 + *  c-basic-offset: 8
   57.38 + *  tab-width: 8
   57.39 + * End:
   57.40 + */
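
For context, a sketch of the sender side of the grant interface this header declares. The gnttab_grant_foreign_access() prototype is truncated in this hunk, so the read-only flag as third argument and the grant reference as a non-negative return value are assumptions; the helper name and the xenstore hand-off are illustrative.

#include <asm-xen/gnttab.h>

/* Hypothetical helper: grant 'otherend' access to one of our frames and
 * return the grant reference the peer should use for its mapping. */
static int share_frame_with(domid_t otherend, unsigned long frame)
{
	int ref = gnttab_grant_foreign_access(otherend, frame,
					      0 /* writable; flag assumed */);
	if (ref < 0)
		return ref;

	/* ... advertise 'ref' to the other end, e.g. through xenstore ... */
	return ref;
}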
    58.1 --- a/linux-2.6-xen-sparse/include/asm-xen/queues.h	Thu Sep 29 16:22:02 2005 -0600
    58.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    58.3 @@ -1,81 +0,0 @@
    58.4 -
    58.5 -/*
    58.6 - * Oh dear. Task queues were removed from Linux 2.6 and replaced by work 
    58.7 - * queues. Unfortunately the semantics is not the same. With task queues we 
    58.8 - * can defer work until a particular event occurs -- this is not
    58.9 - * straightforwardly done with work queues (queued work is performed asap, or
   58.10 - * after some fixed timeout). Conversely, work queues are a (slightly) neater
   58.11 - * way of deferring work to a process context than using task queues in 2.4.
   58.12 - * 
   58.13 - * This is a bit of a needless reimplementation -- should have just pulled
   58.14 - * the code from 2.4, but I tried leveraging work queues to simplify things.
   58.15 - * They didn't help. :-(
   58.16 - */
   58.17 -
   58.18 -#ifndef __QUEUES_H__
   58.19 -#define __QUEUES_H__
   58.20 -
   58.21 -#include <linux/version.h>
   58.22 -#include <linux/list.h>
   58.23 -#include <linux/workqueue.h>
   58.24 -
   58.25 -struct tq_struct { 
   58.26 -    void (*fn)(void *);
   58.27 -    void *arg;
   58.28 -    struct list_head list;
   58.29 -    unsigned long pending;
   58.30 -};
   58.31 -#define INIT_TQUEUE(_name, _fn, _arg)               \
   58.32 -    do {                                            \
   58.33 -        INIT_LIST_HEAD(&(_name)->list);             \
   58.34 -        (_name)->pending = 0;                       \
   58.35 -        (_name)->fn = (_fn); (_name)->arg = (_arg); \
   58.36 -    } while ( 0 )
   58.37 -#define DECLARE_TQUEUE(_name, _fn, _arg)            \
   58.38 -    struct tq_struct _name = { (_fn), (_arg), LIST_HEAD_INIT((_name).list), 0 }
   58.39 -
   58.40 -typedef struct {
   58.41 -    struct list_head list;
   58.42 -    spinlock_t       lock;
   58.43 -} task_queue;
   58.44 -#define DECLARE_TASK_QUEUE(_name) \
   58.45 -    task_queue _name = { LIST_HEAD_INIT((_name).list), SPIN_LOCK_UNLOCKED }
   58.46 -
   58.47 -static inline int queue_task(struct tq_struct *tqe, task_queue *tql)
   58.48 -{
   58.49 -    unsigned long flags;
   58.50 -    if ( test_and_set_bit(0, &tqe->pending) )
   58.51 -        return 0;
   58.52 -    spin_lock_irqsave(&tql->lock, flags);
   58.53 -    list_add_tail(&tqe->list, &tql->list);
   58.54 -    spin_unlock_irqrestore(&tql->lock, flags);
   58.55 -    return 1;
   58.56 -}
   58.57 -
   58.58 -static inline void run_task_queue(task_queue *tql)
   58.59 -{
   58.60 -    struct list_head head, *ent;
   58.61 -    struct tq_struct *tqe;
   58.62 -    unsigned long flags;
   58.63 -    void (*fn)(void *);
   58.64 -    void *arg;
   58.65 -
   58.66 -    spin_lock_irqsave(&tql->lock, flags);
   58.67 -    list_add(&head, &tql->list);
   58.68 -    list_del_init(&tql->list);
   58.69 -    spin_unlock_irqrestore(&tql->lock, flags);
   58.70 -
   58.71 -    while ( !list_empty(&head) )
   58.72 -    {
   58.73 -        ent = head.next;
   58.74 -        list_del_init(ent);
   58.75 -        tqe = list_entry(ent, struct tq_struct, list);
   58.76 -        fn  = tqe->fn;
   58.77 -        arg = tqe->arg;
   58.78 -        wmb();
   58.79 -        tqe->pending = 0;
   58.80 -        fn(arg);
   58.81 -    }
   58.82 -}
   58.83 -
   58.84 -#endif /* __QUEUES_H__ */
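
The comment at the top of the deleted queues.h is the rationale for the whole file: 2.6 work queues run queued work as soon as possible (or after a fixed delay), while 2.4-style task queues let the caller choose the flush point. For reference, this is the plain 2.6 work-queue idiom the shim wrapped; the names here are hypothetical, and nothing below claims to show how individual callers in this tree were actually converted.

#include <linux/workqueue.h>

/* Hypothetical deferred handler, playing the role of tq_struct::fn. */
static void deferred_fn(void *arg)
{
	/* ... work that used to be queued with queue_task() ... */
}

static DECLARE_WORK(deferred_work, deferred_fn, NULL);

static void on_event(void)
{
	/*
	 * schedule_work() runs deferred_fn() in process context "soon",
	 * rather than at a caller-chosen run_task_queue() point; that is
	 * the semantic gap the deleted comment describes.
	 */
	schedule_work(&deferred_work);
}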
    59.1 --- a/linux-2.6-xen-sparse/include/asm-xen/xen_proc.h	Thu Sep 29 16:22:02 2005 -0600
    59.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/xen_proc.h	Thu Sep 29 17:28:44 2005 -0600
    59.3 @@ -6,8 +6,18 @@
    59.4  #include <linux/proc_fs.h>
    59.5  
    59.6  extern struct proc_dir_entry *create_xen_proc_entry(
    59.7 -    const char *name, mode_t mode);
    59.8 +	const char *name, mode_t mode);
    59.9  extern void remove_xen_proc_entry(
   59.10 -    const char *name);
   59.11 +	const char *name);
   59.12  
   59.13  #endif /* __ASM_XEN_PROC_H__ */
   59.14 +
   59.15 +/*
   59.16 + * Local variables:
   59.17 + *  c-file-style: "linux"
   59.18 + *  indent-tabs-mode: t
   59.19 + *  c-indent-level: 8
   59.20 + *  c-basic-offset: 8
   59.21 + *  tab-width: 8
   59.22 + * End:
   59.23 + */
    60.1 --- a/linux-2.6-xen-sparse/include/asm-xen/xenbus.h	Thu Sep 29 16:22:02 2005 -0600
    60.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/xenbus.h	Thu Sep 29 17:28:44 2005 -0600
    60.3 @@ -139,3 +139,13 @@ void xenbus_resume(void);
    60.4  #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
    60.5  
    60.6  #endif /* _ASM_XEN_XENBUS_H */
    60.7 +
    60.8 +/*
    60.9 + * Local variables:
   60.10 + *  c-file-style: "linux"
   60.11 + *  indent-tabs-mode: t
   60.12 + *  c-indent-level: 8
   60.13 + *  c-basic-offset: 8
   60.14 + *  tab-width: 8
   60.15 + * End:
   60.16 + */
    62.1 --- a/tools/console/daemon/io.c	Thu Sep 29 16:22:02 2005 -0600
    62.2 +++ b/tools/console/daemon/io.c	Thu Sep 29 17:28:44 2005 -0600
    62.3 @@ -399,7 +399,7 @@ void enum_domains(void)
    62.4  
    62.5  	while (xc_domain_getinfo(xc, domid, 1, &dominfo) == 1) {
    62.6  		dom = lookup_domain(dominfo.domid);
    62.7 -		if (dominfo.dying || dominfo.crashed || dominfo.shutdown) {
    62.8 +		if (dominfo.dying) {
    62.9  			if (dom)
   62.10  				shutdown_domain(dom);
   62.11  		} else {
    63.1 --- a/tools/debugger/gdb/README	Thu Sep 29 16:22:02 2005 -0600
    63.2 +++ b/tools/debugger/gdb/README	Thu Sep 29 17:28:44 2005 -0600
    63.3 @@ -20,10 +20,18 @@ To build a debuggable guest kernel image
    63.4  
    63.5  To debug a running guest:
    63.6   1. Use 'xm list' to discover its domain id ($domid). 
    63.7 - 2. Run 'gdbserver-xen 127.0.0.1:9999 --attach $domid'
    63.8 - 3. Run 'gdb /path/to/vmlinux-syms-2.6.xx-xenU'
    63.9 + 2. Run 'gdbserver-xen 127.0.0.1:9999 --attach $domid'.
   63.10 + 3. Run 'gdb /path/to/vmlinux-syms-2.6.xx-xenU'.
   63.11   4. From within the gdb client session:
   63.12      # directory /path/to/linux-2.6.xx-xenU [*]
   63.13      # target remote 127.0.0.1:9999
   63.14      # bt
   63.15      # disass
   63.16 +
   63.17 +To debug a crashed guest:
   63.18 + 1. Add '(enable-dump yes)' to /etc/xen/xend-config.sxp before
   63.19 +    starting xend.
   63.20 + 2. When the domain crashes, a core file is written to
   63.21 +    '/var/xen/dump/<domain-name>.<domain-id>.core'.
   63.22 + 3. Run 'gdbserver-xen 127.0.0.1:9999 --file <core-file>'.
   63.23 + 4. Connect to the server as for a running guest.
    67.1 --- a/tools/firmware/vmxassist/gen.c	Thu Sep 29 16:22:02 2005 -0600
    67.2 +++ b/tools/firmware/vmxassist/gen.c	Thu Sep 29 17:28:44 2005 -0600
    67.3 @@ -23,7 +23,7 @@
    67.4  #include <vm86.h>
    67.5  
    67.6  int
    67.7 -main()
    67.8 +main(void)
    67.9  {
   67.10  	printf("/* MACHINE GENERATED; DO NOT EDIT */\n");
   67.11  	printf("#define VMX_ASSIST_CTX_GS_SEL	0x%x\n",
    68.1 --- a/tools/firmware/vmxassist/head.S	Thu Sep 29 16:22:02 2005 -0600
    68.2 +++ b/tools/firmware/vmxassist/head.S	Thu Sep 29 17:28:44 2005 -0600
    68.3 @@ -110,6 +110,10 @@ 1:
    68.4  _start:
    68.5  	cli
    68.6  
    68.7 +	/* save register parameters to C land */
    68.8 +	movl	%edx, booting_cpu
    68.9 +	movl	%ebx, booting_vector
   68.10 +
   68.11  	/* clear bss */
   68.12  	cld
   68.13  	xorb	%al, %al
   68.14 @@ -129,7 +133,6 @@ 1:
   68.15  	call    main
   68.16  	jmp	halt
   68.17  
   68.18 -
   68.19  /*
   68.20   * Something bad happened, print invoking %eip and loop forever
   68.21   */
    69.1 --- a/tools/firmware/vmxassist/setup.c	Thu Sep 29 16:22:02 2005 -0600
    69.2 +++ b/tools/firmware/vmxassist/setup.c	Thu Sep 29 17:28:44 2005 -0600
    69.3 @@ -29,6 +29,9 @@
    69.4  
    69.5  #define	min(a, b)	((a) > (b) ? (b) : (a))
    69.6  
    69.7 +/* Which CPU are we booting, and what is the initial CS segment? */
    69.8 +int booting_cpu, booting_vector;
    69.9 +
   69.10  unsigned long long gdt[] __attribute__ ((aligned(32))) = {
   69.11  	0x0000000000000000ULL,		/* 0x00: reserved */
   69.12  	0x0000890000000000ULL,		/* 0x08: 32-bit TSS */
   69.13 @@ -201,12 +204,17 @@ enter_real_mode(struct regs *regs)
   69.14  		initialize_real_mode = 0;
   69.15  		regs->eflags |= EFLAGS_VM | 0x02;
   69.16  		regs->ves = regs->vds = regs->vfs = regs->vgs = 0xF000;
   69.17 -		regs->cs = 0xF000; /* ROM BIOS POST entry point */
   69.18 +		if (booting_cpu == 0) {
   69.19 +			regs->cs = 0xF000; /* ROM BIOS POST entry point */
   69.20  #ifdef TEST
   69.21 -		regs->eip = 0xFFE0;
   69.22 +			regs->eip = 0xFFE0;
   69.23  #else
   69.24 -		regs->eip = 0xFFF0;
   69.25 +			regs->eip = 0xFFF0;
   69.26  #endif
   69.27 +		} else {
   69.28 +			regs->cs = booting_vector << 8; /* AP entry point */
   69.29 +			regs->eip = 0;
   69.30 +		}
   69.31  		regs->uesp = 0;
   69.32  		regs->uss = 0;
   69.33  		printf("Starting emulated 16-bit real-mode: ip=%04x:%04x\n",
   69.34 @@ -215,8 +223,8 @@ enter_real_mode(struct regs *regs)
   69.35  		mode = VM86_REAL; /* becomes previous mode */
   69.36  		set_mode(regs, VM86_REAL);
   69.37  
   69.38 -                /* this should get us into 16-bit mode */
   69.39 -                return;
   69.40 +		/* this should get us into 16-bit mode */
   69.41 +		return;
   69.42  	} else {
   69.43  		/* go from protected to real mode */
   69.44  		regs->eflags |= EFLAGS_VM;
   69.45 @@ -334,7 +342,12 @@ start_bios(void)
   69.46  {
   69.47  	unsigned long cr0;
   69.48  
   69.49 -	printf("Start BIOS ...\n");
   69.50 +	if (booting_cpu == 0)
   69.51 +		printf("Start BIOS ...\n");
   69.52 +	else
   69.53 +		printf("Start AP %d from %08x ...\n",
   69.54 +		       booting_cpu, booting_vector << 12);
   69.55 +
   69.56  	initialize_real_mode = 1;
   69.57  	cr0 = get_cr0();
   69.58  #ifndef TEST
   69.59 @@ -345,20 +358,28 @@ start_bios(void)
   69.60  }
   69.61  
   69.62  int
   69.63 -main()
   69.64 +main(void)
   69.65  {
   69.66 -	banner();
   69.67 +	if (booting_cpu == 0)
   69.68 +		banner();
   69.69 +
   69.70  #ifdef TEST
   69.71  	setup_paging();
   69.72  #endif
   69.73 +
   69.74  	setup_gdt();
   69.75  	setup_idt();
   69.76 +
   69.77  #ifndef	TEST
   69.78 -	set_cr4(get_cr4() | CR4_VME); 
   69.79 +	set_cr4(get_cr4() | CR4_VME);
   69.80  #endif
   69.81 +
   69.82  	setup_ctx();
   69.83 -	setup_pic();
   69.84 +
   69.85 +	if (booting_cpu == 0)
   69.86 +		setup_pic();
   69.87 +
   69.88  	start_bios();
   69.89 +
   69.90  	return 0;
   69.91  }
   69.92 -
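
The AP path added above follows the usual real-mode start-up convention: the vector becomes CS shifted left by 8 with IP 0, so the physical entry address is the vector shifted left by 12, which is exactly what start_bios() prints. A small stand-alone check of that arithmetic (the vector value is an arbitrary example):

#include <stdio.h>

int main(void)
{
	unsigned int booting_vector = 0x9F;       /* example value only */
	unsigned int cs   = booting_vector << 8;  /* 0x9F00, as in enter_real_mode() */
	unsigned int ip   = 0;
	unsigned int phys = (cs << 4) + ip;       /* 0x9F000 == booting_vector << 12 */

	printf("cs=%04x ip=%04x phys=%08x\n", cs, ip, phys);
	return 0;
}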
    70.1 --- a/tools/firmware/vmxassist/vm86.c	Thu Sep 29 16:22:02 2005 -0600
    70.2 +++ b/tools/firmware/vmxassist/vm86.c	Thu Sep 29 17:28:44 2005 -0600
    70.3 @@ -470,10 +470,21 @@ load_seg(unsigned long sel, u32 *base, u
    70.4  	unsigned long long entry;
    70.5  
    70.6  	/* protected mode: use seg as index into gdt */
    70.7 -	if (sel == 0 || sel > oldctx.gdtr_limit)
    70.8 +	if (sel > oldctx.gdtr_limit)
    70.9  		return 0;
   70.10  
    70.11 +	if (sel == 0) {
    70.12 +		arbytes->fields.null_bit = 1;
    70.13 +		return 1;
    70.14 +	}
   70.15 +
   70.16  	entry =  ((unsigned long long *) oldctx.gdtr_base)[sel >> 3];
   70.17 +
    70.18 +	/* Check the descriptor's present (P) bit first */
    70.19 +	if (!((entry >> (15+32)) & 0x1) && sel != 0) {
    70.20 +		return 0;
    70.21 +	}
   70.22 +
   70.23  	*base =  (((entry >> (56-24)) & 0xFF000000) |
   70.24  		  ((entry >> (32-16)) & 0x00FF0000) |
   70.25  		  ((entry >> (   16)) & 0x0000FFFF));
   70.26 @@ -519,22 +530,42 @@ protected_mode(struct regs *regs)
   70.27  	if (load_seg(regs->ves, &oldctx.es_base,
   70.28  				&oldctx.es_limit, &oldctx.es_arbytes))
   70.29  		oldctx.es_sel = regs->ves;
    70.30 +	else {
    70.31 +		load_seg(0, &oldctx.es_base, &oldctx.es_limit, &oldctx.es_arbytes);
    70.32 +		oldctx.es_sel = 0;
    70.33 +	}
   70.34  
   70.35  	if (load_seg(regs->uss, &oldctx.ss_base,
   70.36  				&oldctx.ss_limit, &oldctx.ss_arbytes))
   70.37  		oldctx.ss_sel = regs->uss;
    70.38 +	else {
    70.39 +		load_seg(0, &oldctx.ss_base, &oldctx.ss_limit, &oldctx.ss_arbytes);
    70.40 +		oldctx.ss_sel = 0;
    70.41 +	}
   70.42  
   70.43  	if (load_seg(regs->vds, &oldctx.ds_base,
   70.44  				&oldctx.ds_limit, &oldctx.ds_arbytes))
   70.45  		oldctx.ds_sel = regs->vds;
    70.46 +	else {
    70.47 +		load_seg(0, &oldctx.ds_base, &oldctx.ds_limit, &oldctx.ds_arbytes);
    70.48 +		oldctx.ds_sel = 0;
    70.49 +	}
   70.50  
   70.51  	if (load_seg(regs->vfs, &oldctx.fs_base,
   70.52  				&oldctx.fs_limit, &oldctx.fs_arbytes))
   70.53  		oldctx.fs_sel = regs->vfs;
    70.54 +	else {
    70.55 +		load_seg(0, &oldctx.fs_base, &oldctx.fs_limit, &oldctx.fs_arbytes);
    70.56 +		oldctx.fs_sel = 0;
    70.57 +	}
   70.58  
   70.59  	if (load_seg(regs->vgs, &oldctx.gs_base,
   70.60  				&oldctx.gs_limit, &oldctx.gs_arbytes))
   70.61  		oldctx.gs_sel = regs->vgs;
    70.62 +	else {
    70.63 +		load_seg(0, &oldctx.gs_base, &oldctx.gs_limit, &oldctx.gs_arbytes);
    70.64 +		oldctx.gs_sel = 0;
    70.65 +	}
   70.66  
   70.67  	/* initialize jump environment to warp back to protected mode */
   70.68  	regs->cs = CODE_SELECTOR;
   70.69 @@ -752,6 +783,9 @@ opcode(struct regs *regs)
   70.70  					goto invalid;
   70.71  				}
   70.72  				break;
   70.73 +			case 0x09: /* wbinvd */
   70.74 +				asm volatile ( "wbinvd" );
   70.75 +				return OPC_EMULATED;
   70.76  			case 0x20: /* mov Rd, Cd (1h) */
   70.77  			case 0x22:
   70.78  				if (!movcr(regs, prefix, opc))
    71.1 --- a/tools/firmware/vmxassist/vmxloader.c	Thu Sep 29 16:22:02 2005 -0600
    71.2 +++ b/tools/firmware/vmxassist/vmxloader.c	Thu Sep 29 17:28:44 2005 -0600
    71.3 @@ -132,11 +132,12 @@ main(void)
    71.4  		 memcpy((void *)ACPI_PHYSICAL_ADDRESS, acpi, sizeof(acpi));
    71.5  	}
    71.6  #endif
    71.7 -			
    71.8 +
    71.9  	puts("Loading VMXAssist ...\n");
   71.10  	memcpy((void *)TEXTADDR, vmxassist, sizeof(vmxassist));
   71.11 +
   71.12  	puts("Go ...\n");
   71.13 -	((void (*)())TEXTADDR)();
   71.14 +	asm volatile ( "jmp *%%eax" : : "a" (TEXTADDR), "d" (0) );
   71.15 +
   71.16  	return 0;
   71.17  }
   71.18 -
    76.1 --- a/tools/ioemu/vl.c	Thu Sep 29 16:22:02 2005 -0600
    76.2 +++ b/tools/ioemu/vl.c	Thu Sep 29 17:28:44 2005 -0600
    76.3 @@ -2385,7 +2385,8 @@ int
    76.4  setup_mapping(int xc_handle, u32 dom, unsigned long toptab, unsigned long  *mem_page_array, unsigned long *page_table_array, unsigned long v_start, unsigned long v_end)
    76.5  {
    76.6      l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
    76.7 -    l2_pgentry_t *vl2tab[4], *vl2e=NULL, *vl2_table = NULL;
    76.8 +    l2_pgentry_t *vl2tab[4] = {NULL, NULL, NULL, NULL};
    76.9 +    l2_pgentry_t *vl2e=NULL, *vl2_table = NULL;
   76.10      unsigned long l1tab;
   76.11      unsigned long ppt_alloc = 0;
   76.12      unsigned long count;
    82.1 --- a/tools/python/xen/web/SrvBase.py	Thu Sep 29 16:22:02 2005 -0600
    82.2 +++ b/tools/python/xen/web/SrvBase.py	Thu Sep 29 17:28:44 2005 -0600
    82.3 @@ -81,7 +81,14 @@ class SrvBase(resource.Resource):
    82.4              req.write("Operation not implemented: " + op)
    82.5              return ''
    82.6          else:
    82.7 -            return op_method(op, req)
    82.8 +            try:
    82.9 +                return op_method(op, req)
   82.10 +            except Exception, exn:
   82.11 +                log.exception("Request %s failed.", op)
   82.12 +                if req.useSxp():
   82.13 +                    return ['xend.err', "Exception: " + str(exn)]
   82.14 +                else:
   82.15 +                    return "<p>%s</p>" % str(exn)
   82.16  
   82.17      def print_path(self, req):
   82.18          """Print the path with hyperlinks.
    83.1 --- a/tools/python/xen/web/http.py	Thu Sep 29 16:22:02 2005 -0600
    83.2 +++ b/tools/python/xen/web/http.py	Thu Sep 29 17:28:44 2005 -0600
    83.3 @@ -22,6 +22,7 @@
    83.4  from  mimetools import Message
    83.5  from cStringIO import StringIO
    83.6  import math
    83.7 +import socket
    83.8  import time
    83.9  import cgi
   83.10  
    84.1 --- a/tools/python/xen/xend/PrettyPrint.py	Thu Sep 29 16:22:02 2005 -0600
    84.2 +++ b/tools/python/xen/xend/PrettyPrint.py	Thu Sep 29 17:28:44 2005 -0600
    84.3 @@ -39,9 +39,9 @@ class PrettyItem:
    84.4          print '***PrettyItem>output>', self
    84.5          pass
    84.6  
    84.7 -    def prettyprint(self, _, width):
    84.8 +    def prettyprint(self, _):
    84.9          print '***PrettyItem>prettyprint>', self
   84.10 -        return width
   84.11 +        return self.width
   84.12  
   84.13  class PrettyString(PrettyItem):
   84.14  
   84.15 @@ -52,7 +52,7 @@ class PrettyString(PrettyItem):
   84.16      def output(self, out):
   84.17          out.write(self.value)
   84.18  
   84.19 -    def prettyprint(self, line, _):
   84.20 +    def prettyprint(self, line):
   84.21          line.output(self)
   84.22  
   84.23      def show(self, out):
   84.24 @@ -63,7 +63,7 @@ class PrettySpace(PrettyItem):
   84.25      def output(self, out):
   84.26          out.write(' ' * self.width)
   84.27  
   84.28 -    def prettyprint(self, line, _):
   84.29 +    def prettyprint(self, line):
   84.30          line.output(self)
   84.31  
   84.32      def show(self, out):
   84.33 @@ -80,7 +80,7 @@ class PrettyBreak(PrettyItem):
   84.34      def output(self, out):
   84.35          out.write(' ' * self.width)
   84.36  
   84.37 -    def prettyprint(self, line, _):
   84.38 +    def prettyprint(self, line):
   84.39          if line.breaks(self.space):
   84.40              self.active = 1
   84.41              line.newline(self.indent)
   84.42 @@ -97,7 +97,7 @@ class PrettyNewline(PrettySpace):
   84.43          block.newline()
   84.44          block.addtoline(self)
   84.45  
   84.46 -    def prettyprint(self, line, _):
   84.47 +    def prettyprint(self, line):
   84.48          line.newline(0)
   84.49          line.output(self)
   84.50  
   84.51 @@ -127,7 +127,7 @@ class PrettyLine(PrettyItem):
   84.52              lastbreak.space = (width - lastwidth)
   84.53          self.width = width
   84.54   
   84.55 -    def prettyprint(self, line, _):
   84.56 +    def prettyprint(self, line):
   84.57          for x in self.content:
   84.58              x.prettyprint(line)
   84.59  
   84.60 @@ -168,7 +168,7 @@ class PrettyBlock(PrettyItem):
   84.61      def addtoline(self, x):
   84.62          self.lines[-1].write(x)
   84.63  
   84.64 -    def prettyprint(self, line, _):
   84.65 +    def prettyprint(self, line):
   84.66          self.indent = line.used
   84.67          line.block = self
   84.68          if not line.fits(self.width):
   84.69 @@ -252,7 +252,7 @@ class PrettyPrinter:
   84.70          self.block = self.block.parent
   84.71  
   84.72      def prettyprint(self, out=sys.stdout):
   84.73 -        self.top.prettyprint(Line(out, self.width), self.width)
   84.74 +        self.top.prettyprint(Line(out, self.width))
   84.75  
   84.76  class SXPPrettyPrinter(PrettyPrinter):
   84.77      """An SXP prettyprinter.
    85.1 --- a/tools/python/xen/xend/XendCheckpoint.py	Thu Sep 29 16:22:02 2005 -0600
    85.2 +++ b/tools/python/xen/xend/XendCheckpoint.py	Thu Sep 29 17:28:44 2005 -0600
    85.3 @@ -10,8 +10,12 @@ import select
    85.4  import sxp
    85.5  from string import join
    85.6  from struct import pack, unpack, calcsize
    85.7 +
    85.8  from xen.util.xpopen import xPopen3
    85.9 +
   85.10  import xen.lowlevel.xc
   85.11 +
   85.12 +import XendDomainInfo
   85.13  from xen.xend.xenstore.xsutil import IntroduceDomain
   85.14  
   85.15  from XendError import XendError
   85.16 @@ -74,7 +78,7 @@ def save(xd, fd, dominfo, live):
   85.17                  if l.rstrip() == "suspend":
   85.18                      log.info("suspending %d" % dominfo.domid)
   85.19                      xd.domain_shutdown(dominfo.domid, reason='suspend')
   85.20 -                    dominfo.state_wait("suspended")
   85.21 +                    dominfo.state_wait(XendDomainInfo.STATE_VM_SUSPENDED)
   85.22                      log.info("suspend %d done" % dominfo.domid)
   85.23                      child.tochild.write("done\n")
   85.24                      child.tochild.flush()
    86.1 --- a/tools/python/xen/xend/XendClient.py	Thu Sep 29 16:22:02 2005 -0600
    86.2 +++ b/tools/python/xen/xend/XendClient.py	Thu Sep 29 17:28:44 2005 -0600
    86.3 @@ -302,12 +302,6 @@ class Xend:
    86.4                               {'op'      : 'devices',
    86.5                                'type'    : type })
    86.6  
    86.7 -    def xend_domain_device(self, id, type, idx):
    86.8 -        return self.xendPost(self.domainurl(id),
    86.9 -                             {'op'      : 'device',
   86.10 -                              'type'    : type,
   86.11 -                              'idx'     : idx })
   86.12 -    
   86.13      def xend_domain_device_create(self, id, config):
   86.14          return self.xendPost(self.domainurl(id),
   86.15                               {'op'      : 'device_create',
    87.1 --- a/tools/python/xen/xend/XendDomain.py	Thu Sep 29 16:22:02 2005 -0600
    87.2 +++ b/tools/python/xen/xend/XendDomain.py	Thu Sep 29 17:28:44 2005 -0600
    87.3 @@ -28,16 +28,11 @@ import xen.lowlevel.xc
    87.4  from xen.xend import sxp
    87.5  from xen.xend import XendRoot
    87.6  from xen.xend import XendCheckpoint
    87.7 -from xen.xend.XendDomainInfo import XendDomainInfo, shutdown_reason
    87.8 +from xen.xend.XendDomainInfo import XendDomainInfo
    87.9  from xen.xend import EventServer
   87.10  from xen.xend.XendError import XendError
   87.11  from xen.xend.XendLogging import log
   87.12 -from xen.xend import scheduler
   87.13  from xen.xend.server import relocate
   87.14 -from xen.xend.uuid import getUuid
   87.15 -from xen.xend.xenstore import XenNode, DBMap
   87.16 -from xen.xend.xenstore.xstransact import xstransact
   87.17 -from xen.xend.xenstore.xsutil import GetDomainPath
   87.18  
   87.19  
   87.20  xc = xen.lowlevel.xc.new()
   87.21 @@ -47,14 +42,7 @@ eserver = EventServer.instance()
   87.22  
   87.23  __all__ = [ "XendDomain" ]
   87.24  
   87.25 -SHUTDOWN_TIMEOUT = 30
   87.26 -PRIV_DOMAIN      =  0
   87.27 -
   87.28 -def is_dead(dom):
   87.29 -    return dom['crashed'] or dom['shutdown'] or (
   87.30 -        dom['dying'] and not(dom['running'] or dom['paused'] or
   87.31 -                             dom['blocked']))
   87.32 -
   87.33 +PRIV_DOMAIN = 0
   87.34  
   87.35  class XendDomainDict(dict):
   87.36      def get_by_name(self, name):
   87.37 @@ -77,11 +65,8 @@ class XendDomain:
   87.38          # So we stuff the XendDomain instance (self) into xroot's components.
   87.39          xroot.add_component("xen.xend.XendDomain", self)
   87.40          self.domains = XendDomainDict()
   87.41 -        self.domroot = "/domain"
   87.42 -        self.vmroot = "/domain"
   87.43 -        self.dbmap = DBMap(db=XenNode(self.vmroot))
   87.44          self.watchReleaseDomain()
   87.45 -        self.initial_refresh()
   87.46 +        self.refresh()
   87.47          self.dom0_setup()
   87.48  
   87.49      def list(self):
   87.50 @@ -110,9 +95,7 @@ class XendDomain:
   87.51          return map(lambda x: x.getName(), doms)
   87.52  
   87.53      def onReleaseDomain(self):
   87.54 -        self.reap()
   87.55          self.refresh()
   87.56 -        self.domain_restarts()
   87.57  
   87.58      def watchReleaseDomain(self):
   87.59          from xen.xend.xenstore.xswatch import xswatch
   87.60 @@ -141,43 +124,22 @@ class XendDomain:
   87.61              dominfo = dominfo[0]
   87.62          return dominfo
   87.63  
   87.64 -    def initial_refresh(self):
   87.65 -        """Refresh initial domain info from db.
   87.66 -        """
   87.67 -        doms = self.xen_domains()
   87.68 -        self.dbmap.readDB()             # XXX only needed for "xend"
   87.69 -        for dom in doms.values():
   87.70 -            domid = dom['dom']
   87.71 -            dompath = GetDomainPath(domid)
   87.72 -            if not dompath:
   87.73 -                continue
   87.74 -            vmpath = xstransact.Read(dompath, "vm")
   87.75 -            if not vmpath:
   87.76 -                continue
   87.77 -            uuid = xstransact.Read(vmpath, "uuid")
   87.78 -            if not uuid:
   87.79 -                continue
   87.80 -            log.info("recreating domain %d, uuid %s" % (domid, uuid))
   87.81 -            dompath = "/".join(dompath.split("/")[0:-1])
   87.82 -            try:
   87.83 -                dominfo = XendDomainInfo.recreate(uuid, dompath, domid, dom)
   87.84 -            except Exception, ex:
   87.85 -                log.exception("Error recreating domain info: id=%d", domid)
   87.86 -                continue
   87.87 -            self._add_domain(dominfo)
   87.88 -        self.reap()
   87.89 -        self.refresh()
   87.90 -        self.domain_restarts()
   87.91 +
   87.92 +    def recreate_domain(self, xeninfo):
   87.93 +        """Refresh initial domain info from db."""
   87.94 +
   87.95 +        dominfo = XendDomainInfo.recreate(xeninfo)
   87.96 +        self._add_domain(dominfo)
   87.97 +        return dominfo
   87.98 +
   87.99  
  87.100      def dom0_setup(self):
  87.101          dom0 = self.domain_lookup(PRIV_DOMAIN)
  87.102          if not dom0:
  87.103 -            dom0 = self.dom0_unknown()
  87.104 -        dom0.dom0_init_store()    
  87.105 +            dom0 = self.recreate_domain(self.xen_domain(PRIV_DOMAIN))
  87.106 +        dom0.dom0_init_store()
  87.107          dom0.dom0_enforce_vcpus()
  87.108  
  87.109 -    def close(self):
  87.110 -        pass
  87.111  
  87.112      def _add_domain(self, info, notify=True):
  87.113          """Add a domain entry to the tables.
  87.114 @@ -193,70 +155,45 @@ class XendDomain:
  87.115              eserver.inject('xend.domain.create', [info.getName(),
  87.116                                                    info.getDomid()])
  87.117  
  87.118 -    def _delete_domain(self, id, notify=True):
  87.119 +    def _delete_domain(self, domid, notify=True):
  87.120          """Remove a domain from the tables.
  87.121  
  87.122          @param id:     domain id
  87.123          @param notify: send a domain died event if true
  87.124          """
  87.125 -        info = self.domains.get(id)
  87.126 +        info = self.domains.get(domid)
  87.127          if info:
  87.128 -            del self.domains[id]
  87.129 +            del self.domains[domid]
  87.130              info.cleanup()
  87.131              info.delete()
  87.132              if notify:
  87.133                  eserver.inject('xend.domain.died', [info.getName(),
  87.134                                                      info.getDomid()])
  87.135 -        # XXX this should not be needed
  87.136 -        for domdb in self.dbmap.values():
  87.137 -            if not domdb.has_key("xend"):
  87.138 -                continue
  87.139 -            db = domdb.addChild("xend")
  87.140 -            try:
  87.141 -                domid = int(domdb["domid"].getData())
  87.142 -            except:
  87.143 -                domid = None
  87.144 -            if (domid is None) or (domid == id):
  87.145 -                domdb.delete()
  87.146  
  87.147 -    def reap(self):
  87.148 -        """Look for domains that have crashed or stopped.
  87.149 -        Tidy them up.
  87.150 -        """
  87.151 -        doms = self.xen_domains()
  87.152 -        for d in doms.values():
  87.153 -            if not is_dead(d):
  87.154 -                continue
  87.155 -            domid = d['dom']
  87.156 -            dominfo = self.domains.get(domid)
  87.157 -            if not dominfo or dominfo.is_terminated():
  87.158 -                continue
  87.159 -            log.debug('domain died name=%s domid=%d', dominfo.getName(), domid)
  87.160 -            if d['crashed'] and xroot.get_enable_dump():
  87.161 -                self.domain_dumpcore(domid)
  87.162 -            if d['shutdown']:
  87.163 -                reason = shutdown_reason(d['shutdown_reason'])
  87.164 -                log.debug('shutdown name=%s id=%d reason=%s',
  87.165 -                          dominfo.getName(), domid, reason)
  87.166 -                if reason == 'suspend':
  87.167 -                    dominfo.state_set("suspended")
  87.168 -                    continue
  87.169 -                if reason in ['poweroff', 'reboot']:
  87.170 -                    self.domain_restart_schedule(domid, reason)
  87.171 -            dominfo.destroy()
  87.172  
  87.173      def refresh(self):
  87.174          """Refresh domain list from Xen.
  87.175          """
  87.176          doms = self.xen_domains()
  87.177 -        # Remove entries for domains that no longer exist.
  87.178 -        # Update entries for existing domains.
  87.179          for d in self.domains.values():
  87.180              info = doms.get(d.getDomid())
  87.181              if info:
  87.182                  d.update(info)
  87.183 -            elif not d.restart_pending():
  87.184 +            else:
  87.185                  self._delete_domain(d.getDomid())
  87.186 +        for d in doms:
  87.187 +            if d not in self.domains:
  87.188 +                try:
  87.189 +                    self.recreate_domain(doms[d])
  87.190 +                except:
  87.191 +                    log.exception(
  87.192 +                        "Failed to recreate information for domain %d.  "
  87.193 +                        "Destroying it in the hope of recovery.", d)
  87.194 +                    try:
  87.195 +                        xc.domain_destroy(dom = d)
  87.196 +                    except:
  87.197 +                        log.exception('Destruction of %d failed.', d)
  87.198 +
  87.199  
  87.200      def update_domain(self, id):
  87.201          """Update information for a single domain.
  87.202 @@ -277,34 +214,10 @@ class XendDomain:
  87.203          @param config: configuration
  87.204          @return: domain
  87.205          """
  87.206 -        dominfo = XendDomainInfo.create(self.dbmap.getPath(), config)
  87.207 +        dominfo = XendDomainInfo.create(config)
  87.208          self._add_domain(dominfo)
  87.209          return dominfo
  87.210  
  87.211 -    def domain_restart(self, dominfo):
  87.212 -        """Restart a domain.
  87.213 -
  87.214 -        @param dominfo: domain object
  87.215 -        """
  87.216 -        log.info("Restarting domain: name=%s id=%s", dominfo.getName(),
  87.217 -                 dominfo.getDomid())
  87.218 -        eserver.inject("xend.domain.restart",
  87.219 -                       [dominfo.getName(), dominfo.getDomid(), "begin"])
  87.220 -        try:
  87.221 -            dominfo.restart()
  87.222 -            log.info('Restarted domain name=%s id=%s', dominfo.getName(),
  87.223 -                     dominfo.getDomid())
  87.224 -            eserver.inject("xend.domain.restart",
  87.225 -                           [dominfo.getName(), dominfo.getDomid(),
  87.226 -                            "success"])
  87.227 -            self.domain_unpause(dominfo.getDomid())
  87.228 -        except Exception, ex:
  87.229 -            log.exception("Exception restarting domain: name=%s id=%s",
  87.230 -                          dominfo.getName(), dominfo.getDomid())
  87.231 -            eserver.inject("xend.domain.restart",
  87.232 -                           [dominfo.getName(), dominfo.getDomid(), "fail"])
  87.233 -        return dominfo
  87.234 -
  87.235      def domain_configure(self, config):
  87.236          """Configure an existing domain. This is intended for internal
  87.237          use by domain restore and migrate.
  87.238 @@ -318,13 +231,12 @@ class XendDomain:
  87.239          nested = sxp.child_value(config, 'config')
  87.240          if nested:
  87.241              config = nested
  87.242 -        return XendDomainInfo.restore(self.dbmap.getPath(), config)
  87.243 +        return XendDomainInfo.restore(config)
  87.244  
  87.245 -    def domain_restore(self, src, progress=False):
  87.246 +    def domain_restore(self, src):
  87.247          """Restore a domain from file.
  87.248  
  87.249          @param src:      source file
  87.250 -        @param progress: output progress if true
  87.251          """
  87.252  
  87.253          try:
  87.254 @@ -345,33 +257,7 @@ class XendDomain:
  87.255          self.update_domain(id)
  87.256          return self.domains.get(id)
  87.257  
  87.258 -    def dom0_unknown(self):
  87.259 -        dom0 = PRIV_DOMAIN
  87.260 -        uuid = None
  87.261 -        info = self.xen_domain(dom0)
  87.262 -        dompath = GetDomainPath(dom0)
  87.263 -        if dompath:
  87.264 -            vmpath = xstransact.Read(dompath, "vm")
  87.265 -            if vmpath:
  87.266 -                uuid = xstransact.Read(vmpath, "uuid")
  87.267 -            if not uuid:
  87.268 -                uuid = dompath.split("/")[-1]
  87.269 -            dompath = "/".join(dompath.split("/")[0:-1])
  87.270 -        if not uuid:
  87.271 -            uuid = getUuid()
  87.272 -            dompath = self.domroot
  87.273 -        log.info("Creating entry for unknown xend domain: id=%d uuid=%s",
  87.274 -                 dom0, uuid)
  87.275 -        try:
  87.276 -            dominfo = XendDomainInfo.recreate(uuid, dompath, dom0, info)
  87.277 -            self._add_domain(dominfo)
  87.278 -            return dominfo
  87.279 -        except Exception, exn:
  87.280 -            log.exception(exn)
  87.281 -            raise XendError("Error recreating xend domain info: id=%d: %s" %
  87.282 -                            (dom0, str(exn)))
  87.283  
  87.284 -        
  87.285      def domain_lookup(self, id):
  87.286          return self.domains.get(id)
  87.287  
  87.288 @@ -410,8 +296,9 @@ class XendDomain:
  87.289              return xc.domain_pause(dom=dominfo.getDomid())
  87.290          except Exception, ex:
  87.291              raise XendError(str(ex))
  87.292 -    
  87.293 -    def domain_shutdown(self, id, reason='poweroff'):
  87.294 +
  87.295 +
  87.296 +    def domain_shutdown(self, domid, reason='poweroff'):
  87.297          """Shutdown domain (nicely).
  87.298           - poweroff: restart according to exit code and restart mode
  87.299           - reboot:   restart on exit
  87.300 @@ -422,89 +309,13 @@ class XendDomain:
  87.301          @param id:     domain id
  87.302          @param reason: shutdown type: poweroff, reboot, suspend, halt
  87.303          """
  87.304 -        dominfo = self.domain_lookup(id)
  87.305 -        self.domain_restart_schedule(dominfo.getDomid(), reason, force=True)
  87.306 -        eserver.inject('xend.domain.shutdown', [dominfo.getName(),
  87.307 -                                                dominfo.getDomid(), reason])
  87.308 -        if reason == 'halt':
  87.309 -            reason = 'poweroff'
  87.310 -        val = dominfo.shutdown(reason)
  87.311 -        if not reason in ['suspend']:
  87.312 -            self.domain_shutdowns()
  87.313 -        return val
  87.314 -
  87.315 -
  87.316 -    def domain_sysrq(self, id, key):
  87.317 -        """Send a SysRq to the specified domain."""
  87.318 -        return self.callInfo(id, XendDomainInfo.send_sysrq, key)
  87.319 +        self.callInfo(domid, XendDomainInfo.shutdown, reason)
  87.320  
  87.321  
  87.322 -    def domain_shutdowns(self):
  87.323 -        """Process pending domain shutdowns.
  87.324 -        Destroys domains whose shutdowns have timed out.
  87.325 -        """
  87.326 -        timeout = SHUTDOWN_TIMEOUT + 1
  87.327 -        for dominfo in self.domains.values():
  87.328 -            if not dominfo.shutdown_pending:
  87.329 -                # domain doesn't need shutdown
  87.330 -                continue
  87.331 -            id = dominfo.getDomid()
  87.332 -            left = dominfo.shutdown_time_left(SHUTDOWN_TIMEOUT)
  87.333 -            if left <= 0:
  87.334 -                # Shutdown expired - destroy domain.
  87.335 -                try:
  87.336 -                    log.info("Domain shutdown timeout expired: name=%s id=%s",
  87.337 -                             dominfo.getName(), id)
  87.338 -                    self.domain_destroy(id, reason=
  87.339 -                                        dominfo.shutdown_pending['reason'])
  87.340 -                except Exception:
  87.341 -                    pass
  87.342 -            else:
  87.343 -                # Shutdown still pending.
  87.344 -                timeout = min(timeout, left)
  87.345 -        if timeout <= SHUTDOWN_TIMEOUT:
  87.346 -            # Pending shutdowns remain - reschedule.
  87.347 -            scheduler.later(timeout, self.domain_shutdowns)
  87.348 -
  87.349 -    def domain_restart_schedule(self, id, reason, force=False):
  87.350 -        """Schedule a restart for a domain if it needs one.
  87.351 +    def domain_sysrq(self, domid, key):
  87.352 +        """Send a SysRq to the specified domain."""
  87.353 +        return self.callInfo(domid, XendDomainInfo.send_sysrq, key)
  87.354  
  87.355 -        @param id:     domain id
  87.356 -        @param reason: shutdown reason
  87.357 -        """
  87.358 -        log.debug('domain_restart_schedule> %d %s %d', id, reason, force)
  87.359 -        dominfo = self.domain_lookup(id)
  87.360 -        if not dominfo:
  87.361 -            return
  87.362 -        restart = (force and reason == 'reboot') or dominfo.restart_needed(reason)
  87.363 -        if restart:
  87.364 -            log.info('Scheduling restart for domain: name=%s id=%s',
  87.365 -                     dominfo.getName(), dominfo.getDomid())
  87.366 -            eserver.inject("xend.domain.restart",
  87.367 -                           [dominfo.getName(), dominfo.getDomid(),
  87.368 -                            "schedule"])
  87.369 -            dominfo.restarting()
  87.370 -        else:
  87.371 -            log.info('Cancelling restart for domain: name=%s id=%s',
  87.372 -                     dominfo.getName(), dominfo.getDomid())
  87.373 -            eserver.inject("xend.domain.restart",
  87.374 -                           [dominfo.getName(), dominfo.getDomid(), "cancel"])
  87.375 -            dominfo.restart_cancel()
  87.376 -
  87.377 -    def domain_restarts(self):
  87.378 -        """Execute any scheduled domain restarts for domains that have gone.
  87.379 -        """
  87.380 -        doms = self.xen_domains()
  87.381 -        for dominfo in self.domains.values():
  87.382 -            if not dominfo.restart_pending():
  87.383 -                continue
  87.384 -            info = doms.get(dominfo.getDomid())
  87.385 -            if info:
  87.386 -                # Don't execute restart for domains still running.
  87.387 -                continue
  87.388 -            # Remove it from the restarts.
  87.389 -            log.info('restarting: %s' % dominfo.getName())
  87.390 -            self.domain_restart(dominfo)
  87.391  
  87.392      def domain_destroy(self, domid, reason='halt'):
  87.393          """Terminate domain immediately.
  87.394 @@ -517,7 +328,6 @@ class XendDomain:
  87.395          if domid == PRIV_DOMAIN:
  87.396              raise XendError("Cannot destroy privileged domain %i" % domid)
  87.397          
  87.398 -        self.domain_restart_schedule(domid, reason, force=True)
  87.399          dominfo = self.domain_lookup(domid)
  87.400          if dominfo:
  87.401              val = dominfo.destroy()
  87.402 @@ -554,12 +364,11 @@ class XendDomain:
  87.403          
  87.404          return None
  87.405  
  87.406 -    def domain_save(self, id, dst, progress=False):
  87.407 +    def domain_save(self, id, dst):
  87.408          """Start saving a domain to file.
  87.409  
  87.410          @param id:       domain id
  87.411          @param dst:      destination file
  87.412 -        @param progress: output progress if true
  87.413          """
  87.414  
  87.415          try:
  87.416 @@ -661,14 +470,6 @@ class XendDomain:
  87.417          return self.callInfo(domid, XendDomainInfo.getDeviceSxprs, devtype)
  87.418  
  87.419  
  87.420 -    def domain_devtype_get(self, domid, devtype, devid):
  87.421 -        """Get a device from a domain.
  87.422 -        
  87.423 -        @return: device object (or None)
  87.424 -        """
  87.425 -        return self.callInfo(domid, XendDomainInfo.getDevice, devtype, devid)
  87.426 -
  87.427 -
  87.428      def domain_vif_limit_set(self, id, vif, credit, period):
  87.429          """Limit the vif's transmission rate
  87.430          """
  87.431 @@ -730,10 +531,16 @@ class XendDomain:
  87.432      ## private:
  87.433  
  87.434      def callInfo(self, domid, fn, *args, **kwargs):
  87.435 -        self.refresh()
  87.436 -        dominfo = self.domains.get(domid)
  87.437 -        if dominfo:
  87.438 -            return fn(dominfo, *args, **kwargs)
  87.439 +        try:
  87.440 +            self.refresh()
  87.441 +            dominfo = self.domains.get(domid)
  87.442 +            if dominfo:
  87.443 +                return fn(dominfo, *args, **kwargs)
  87.444 +        except XendError:
  87.445 +            raise
  87.446 +        except Exception, exn:
  87.447 +            log.exception("")
  87.448 +            raise XendError(str(exn))
  87.449  
  87.450  
  87.451  def instance():
    88.1 --- a/tools/python/xen/xend/XendDomainInfo.py	Thu Sep 29 16:22:02 2005 -0600
    88.2 +++ b/tools/python/xen/xend/XendDomainInfo.py	Thu Sep 29 17:28:44 2005 -0600
    88.3 @@ -35,7 +35,9 @@ from xen.util.blkif import blkdev_uname_
    88.4  from xen.xend.server.channel import EventChannel
    88.5  
    88.6  from xen.xend import image
    88.7 +from xen.xend import scheduler
    88.8  from xen.xend import sxp
    88.9 +from xen.xend import XendRoot
   88.10  from xen.xend.XendBootloader import bootloader
   88.11  from xen.xend.XendLogging import log
   88.12  from xen.xend.XendError import XendError, VmError
   88.13 @@ -43,7 +45,7 @@ from xen.xend.XendRoot import get_compon
   88.14  
   88.15  from xen.xend.uuid import getUuid
   88.16  from xen.xend.xenstore.xstransact import xstransact
   88.17 -from xen.xend.xenstore.xsutil import IntroduceDomain
   88.18 +from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain
   88.19  
   88.20  """Shutdown code for poweroff."""
   88.21  DOMAIN_POWEROFF = 0
   88.22 @@ -75,9 +77,6 @@ restart_modes = [
   88.23      RESTART_NEVER,
   88.24      ]
   88.25  
   88.26 -STATE_RESTART_PENDING = 'pending'
   88.27 -STATE_RESTART_BOOTING = 'booting'
   88.28 -
   88.29  STATE_VM_OK         = "ok"
   88.30  STATE_VM_TERMINATED = "terminated"
   88.31  STATE_VM_SUSPENDED  = "suspended"
   88.32 @@ -92,7 +91,29 @@ SIF_NET_BE_DOMAIN = (1<<5)
   88.33  SIF_TPM_BE_DOMAIN = (1<<7)
   88.34  
   88.35  
   88.36 +SHUTDOWN_TIMEOUT = 30
   88.37 +
   88.38 +
   88.39 +DOMROOT = '/domain'
   88.40 +VMROOT  = '/domain'
   88.41 +
   88.42 +
   88.43  xc = xen.lowlevel.xc.new()
   88.44 +xroot = XendRoot.instance()
   88.45 +
   88.46 +
   88.47 +## Configuration entries that we expect to round-trip -- be read from the
   88.48 +# config file or xc, written to save-files (i.e. through sxpr), and reused as
   88.49 +# config on restart or restore, all without munging.  Some configuration
   88.50 +# entries are munged for backwards compatibility reasons, or because they
   88.51 +# don't come out of xc in the same form as they are specified in the config
   88.52 +# file, so those are handled separately.
   88.53 +ROUNDTRIPPING_CONFIG_ENTRIES = [
   88.54 +        ('name',         str),
   88.55 +        ('ssidref',      int),
   88.56 +        ('cpu_weight',   float),
   88.57 +        ('bootloader',   str)
   88.58 +    ]
   88.59  
   88.60  
   88.61  def domain_exists(name):
   88.62 @@ -133,47 +154,64 @@ class XendDomainInfo:
   88.63      MINIMUM_RESTART_TIME = 20
   88.64  
   88.65  
   88.66 -    def create(cls, dompath, config):
   88.67 +    def create(cls, config):
   88.68          """Create a VM from a configuration.
   88.69  
   88.70 -        @param dompath:   The path to all domain information
   88.71          @param config    configuration
   88.72          @raise: VmError for invalid configuration
   88.73          """
   88.74  
   88.75 -        log.debug("XendDomainInfo.create(%s, ...)", dompath)
   88.76 +        log.debug("XendDomainInfo.create(...)")
   88.77          
   88.78 -        vm = cls(getUuid(), dompath, cls.parseConfig(config))
   88.79 +        vm = cls(getUuid(), cls.parseConfig(config))
   88.80          vm.construct()
   88.81 +        vm.refreshShutdown()
   88.82          return vm
   88.83  
   88.84      create = classmethod(create)
   88.85  
   88.86  
   88.87 -    def recreate(cls, uuid, dompath, domid, info):
   88.88 -        """Create the VM object for an existing domain.
   88.89 +    def recreate(cls, xeninfo):
   88.90 +        """Create the VM object for an existing domain."""
   88.91 +
   88.92 +        log.debug("XendDomainInfo.recreate(%s)", xeninfo)
   88.93  
   88.94 -        @param dompath:   The path to all domain information
   88.95 -        @param info:      domain info from xc
   88.96 -        """
   88.97 +        domid = xeninfo['dom']
   88.98 +        try:
   88.99 +            dompath = GetDomainPath(domid)
  88.100 +            if not dompath:
  88.101 +                raise XendError(
  88.102 +                    'No domain path in store for existing domain %d' % domid)
  88.103 +            vmpath = xstransact.Read(dompath, "vm")
  88.104 +            if not vmpath:
  88.105 +                raise XendError(
  88.106 +                    'No vm path in store for existing domain %d' % domid)
  88.107 +            uuid = xstransact.Read(vmpath, "uuid")
  88.108 +            if not uuid:
  88.109 +                raise XendError(
  88.110 +                    'No vm/uuid path in store for existing domain %d' % domid)
  88.111  
  88.112 -        log.debug("XendDomainInfo.recreate(%s, %s, %s, %s)", uuid, dompath,
  88.113 -                  domid, info)
  88.114 +        except Exception, exn:
  88.115 +            log.warn(str(exn))
  88.116 +            uuid = getUuid()
  88.117  
  88.118 -        return cls(uuid, dompath, info, domid, True)
  88.119 +        log.info("Recreating domain %d, uuid %s", domid, uuid)
  88.120 +
  88.121 +        vm = cls(uuid, xeninfo, domid, True)
  88.122 +        vm.refreshShutdown(xeninfo)
  88.123 +        return vm
  88.124  
  88.125      recreate = classmethod(recreate)
  88.126  
  88.127  
  88.128 -    def restore(cls, dompath, config, uuid = None):
  88.129 +    def restore(cls, config, uuid = None):
  88.130          """Create a domain and a VM object to do a restore.
  88.131  
  88.132 -        @param dompath:   The path to all domain information
  88.133          @param config:    domain configuration
  88.134          @param uuid:      uuid to use
  88.135          """
  88.136          
  88.137 -        log.debug("XendDomainInfo.restore(%s, %s, %s)", dompath, config, uuid)
  88.138 +        log.debug("XendDomainInfo.restore(%s, %s)", config, uuid)
  88.139  
  88.140          if not uuid:
  88.141              uuid = getUuid()
  88.142 @@ -183,14 +221,12 @@ class XendDomainInfo:
  88.143          except TypeError, exn:
  88.144              raise VmError('Invalid ssidref in config: %s' % exn)
  88.145  
  88.146 -        log.debug('restoring with ssidref = %d' % ssidref)
  88.147 -
  88.148 -        vm = cls(uuid, dompath, cls.parseConfig(config),
  88.149 +        vm = cls(uuid, cls.parseConfig(config),
  88.150                   xc.domain_create(ssidref = ssidref))
  88.151 -        vm.clear_shutdown()
  88.152          vm.create_channel()
  88.153          vm.configure()
  88.154          vm.exportToDB()
  88.155 +        vm.refreshShutdown()
  88.156          return vm
  88.157  
  88.158      restore = classmethod(restore)
  88.159 @@ -214,33 +250,28 @@ class XendDomainInfo:
  88.160          log.debug("parseConfig: config is %s" % str(config))
  88.161  
  88.162          result = {}
  88.163 -        imagecfg = "()"
  88.164  
  88.165 -        result['name']         = get_cfg('name')
  88.166 -        result['ssidref']      = get_cfg('ssidref',    int)
  88.167 +        for e in ROUNDTRIPPING_CONFIG_ENTRIES:
  88.168 +            result[e[0]] = get_cfg(e[0], e[1])
  88.169 +
  88.170          result['memory']       = get_cfg('memory',     int)
  88.171          result['mem_kb']       = get_cfg('mem_kb',     int)
  88.172          result['maxmem']       = get_cfg('maxmem',     int)
  88.173          result['maxmem_kb']    = get_cfg('maxmem_kb',  int)
  88.174          result['cpu']          = get_cfg('cpu',        int)
  88.175 -        result['cpu_weight']   = get_cfg('cpu_weight', float)
  88.176 -        result['bootloader']   = get_cfg('bootloader')
  88.177          result['restart_mode'] = get_cfg('restart')
  88.178 +        result['image']        = get_cfg('image')
  88.179  
  88.180          try:
  88.181 -            imagecfg = get_cfg('image')
  88.182 -
  88.183 -            if imagecfg:
  88.184 -                result['image'] = imagecfg
  88.185 -                result['vcpus'] = int(sxp.child_value(imagecfg, 'vcpus',
  88.186 -                                                      1))
  88.187 +            if result['image']:
  88.188 +                result['vcpus'] = int(sxp.child_value(result['image'],
  88.189 +                                                      'vcpus', 1))
  88.190              else:
  88.191                  result['vcpus'] = 1
  88.192          except TypeError, exn:
  88.193              raise VmError(
  88.194                  'Invalid configuration setting: vcpus = %s: %s' %
  88.195 -                (sxp.child_value(imagecfg, 'vcpus', 1),
  88.196 -                 str(exn)))
  88.197 +                (sxp.child_value(result['image'], 'vcpus', 1), str(exn)))
  88.198  
  88.199          result['backend'] = []
  88.200          for c in sxp.children(config, 'backend'):
  88.201 @@ -258,12 +289,12 @@ class XendDomainInfo:
  88.202      parseConfig = classmethod(parseConfig)
  88.203  
  88.204      
  88.205 -    def __init__(self, uuid, parentpath, info, domid = None, augment = False):
  88.206 +    def __init__(self, uuid, info, domid = None, augment = False):
  88.207  
  88.208          self.uuid = uuid
  88.209          self.info = info
  88.210  
  88.211 -        self.path = parentpath + "/" + uuid
  88.212 +        self.path = DOMROOT + "/" + uuid
  88.213  
  88.214          if domid:
  88.215              self.domid = domid
  88.216 @@ -283,26 +314,26 @@ class XendDomainInfo:
  88.217          self.store_mfn = None
  88.218          self.console_channel = None
  88.219          self.console_mfn = None
  88.220 -        
  88.221 -        #todo: state: running, suspended
  88.222 +
  88.223          self.state = STATE_VM_OK
  88.224          self.state_updated = threading.Condition()
  88.225 -        self.shutdown_pending = None
  88.226  
  88.227 -        self.restart_state = None
  88.228 -        self.restart_time = None
  88.229 -        self.restart_count = 0
  88.230 -        
  88.231          self.writeVm("uuid", self.uuid)
  88.232          self.storeDom("vm", self.path)
  88.233  
  88.234  
  88.235      def augmentInfo(self):
  88.236 +        """Augment self.info, as given to us through {@link #recreate}, with
  88.237 +        values taken from the store.  This recovers those values known to xend
  88.238 +        but not to the hypervisor.
  88.239 +        """
  88.240          def useIfNeeded(name, val):
  88.241              if not self.infoIsSet(name) and val is not None:
  88.242                  self.info[name] = val
  88.243  
  88.244          params = (("name", str),
  88.245 +                  ("restart-mode", str),
  88.246 +                  ("image",        str),
  88.247                    ("start-time", float))
  88.248  
  88.249          from_store = self.gatherVm(*params)
  88.250 @@ -322,13 +353,18 @@ class XendDomainInfo:
  88.251              defaultInfo('name',         lambda: "Domain-%d" % self.domid)
  88.252              defaultInfo('ssidref',      lambda: 0)
  88.253              defaultInfo('restart_mode', lambda: RESTART_ONREBOOT)
  88.254 +            defaultInfo('cpu',          lambda: None)
  88.255              defaultInfo('cpu_weight',   lambda: 1.0)
  88.256              defaultInfo('bootloader',   lambda: None)
  88.257              defaultInfo('backend',      lambda: [])
  88.258              defaultInfo('device',       lambda: [])
  88.259 +            defaultInfo('image',        lambda: None)
  88.260  
  88.261              self.check_name(self.info['name'])
  88.262  
  88.263 +            if isinstance(self.info['image'], str):
  88.264 +                self.info['image'] = sxp.from_string(self.info['image'])
  88.265 +
  88.266              # Internally, we keep only maxmem_KiB, and not maxmem or maxmem_kb
  88.267              # (which come from outside, and are in MiB and KiB respectively).
  88.268              # This means that any maxmem or maxmem_kb settings here have come
  88.269 @@ -451,17 +487,16 @@ class XendDomainInfo:
  88.270              'domid':              str(self.domid),
  88.271              'uuid':               self.uuid,
  88.272  
  88.273 -            'restart_time':       str(self.restart_time),
  88.274 -
  88.275 -            'xend/state':         self.state,
  88.276 -            'xend/restart_count': str(self.restart_count),
  88.277              'xend/restart_mode':  str(self.info['restart_mode']),
  88.278  
  88.279              'memory/target':      str(self.info['memory_KiB'])
  88.280              }
  88.281  
  88.282          for (k, v) in self.info.items():
  88.283 -            to_store[k] = str(v)
  88.284 +            if v:
  88.285 +                to_store[k] = str(v)
  88.286 +
  88.287 +        to_store['image'] = sxp.to_string(self.info['image'])
  88.288  
  88.289          log.debug("Storing %s" % str(to_store))
  88.290  
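
The two hunks above round-trip the structured 'image' entry through the store: exportToDB() serialises it with sxp.to_string(), and augmentInfo() parses it back with sxp.from_string() when the recovered value is still a string. A minimal sketch of that store-and-recover pattern, with repr()/ast.literal_eval() standing in for the sxp serialiser and a plain dict standing in for the store (both are stand-ins, not xend's API):

    import ast

    def export_info(info):
        """Flatten a config dict to the string-only form a store can hold."""
        to_store = {}
        for key, val in info.items():
            if val:
                to_store[key] = str(val)
        # Structured entries need a real serialiser, not str(); xend uses sxp.
        to_store['image'] = repr(info['image'])          # stand-in for sxp.to_string
        return to_store

    def augment_info(info, from_store):
        """Recover values known to the store but missing from info."""
        for key, val in from_store.items():
            if key not in info and val is not None:
                info[key] = val
        if isinstance(info.get('image'), str):
            info['image'] = ast.literal_eval(info['image'])   # stand-in for sxp.from_string
        return info

    stored = export_info({'name': 'vm1',
                          'image': ['linux', ['kernel', '/boot/vmlinuz']]})
    print(augment_info({'name': 'vm1'}, stored)['image'])
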
  88.291 @@ -513,6 +548,88 @@ class XendDomainInfo:
  88.292                        self.info['backend'], 0)
  88.293  
  88.294  
  88.295 +    def refreshShutdown(self, xeninfo = None):
  88.296 +        if xeninfo is None:
  88.297 +            xeninfo = dom_get(self.domid)
  88.298 +            if xeninfo is None:
  88.299 +                # The domain no longer exists.  This will occur if we have
  88.300 +                # scheduled a timer to check for shutdown timeouts and the
  88.301 +                # shutdown succeeded.
  88.302 +                return
  88.303 +
  88.304 +        if xeninfo['dying']:
  88.305 +            # Dying means that a domain has been destroyed, but has not yet
  88.306 +            # been cleaned up by Xen.  This could persist indefinitely if,
  88.307 +            # for example, another domain has some of its pages mapped.
  88.308 +            # We might like to diagnose this problem in the future, but for
  88.309 +            # now all we can sensibly do is ignore it.
  88.310 +            pass
  88.311 +
  88.312 +        elif xeninfo['crashed']:
  88.313 +            log.warn('Domain has crashed: name=%s id=%d.',
  88.314 +                     self.info['name'], self.domid)
  88.315 +
  88.316 +            if xroot.get_enable_dump():
  88.317 +                self.dumpCore()
  88.318 +
  88.319 +            self.maybeRestart('crashed')
  88.320 +
  88.321 +        elif xeninfo['shutdown']:
  88.322 +            reason = shutdown_reason(xeninfo['shutdown_reason'])
  88.323 +
  88.324 +            log.info('Domain has shutdown: name=%s id=%d reason=%s.',
  88.325 +                     self.info['name'], self.domid, reason)
  88.326 +
  88.327 +            self.clearRestart()
  88.328 +
  88.329 +            if reason == 'suspend':
  88.330 +                self.state_set(STATE_VM_SUSPENDED)
  88.331 +                # Don't destroy the domain.  XendCheckpoint will do this once
  88.332 +                # it has finished.
  88.333 +            elif reason in ['poweroff', 'reboot']:
  88.334 +                self.maybeRestart(reason)
  88.335 +            else:
  88.336 +                self.destroy()
  88.337 +
  88.338 +        else:
  88.339 +            # Domain is alive.  If we are shutting it down, then check
  88.340 +            # the timeout on that, and destroy it if necessary.
  88.341 +
  88.342 +            sst = self.readVm('xend/shutdown_start_time')
  88.343 +            if sst:
  88.344 +                sst = float(sst)
  88.345 +                timeout = SHUTDOWN_TIMEOUT - time.time() + sst
  88.346 +                if timeout < 0:
  88.347 +                    log.info(
  88.348 +                        "Domain shutdown timeout expired: name=%s id=%s",
  88.349 +                        self.info['name'], self.domid)
  88.350 +                    self.destroy()
  88.351 +                else:
  88.352 +                    log.debug(
  88.353 +                        "Scheduling refreshShutdown on domain %d in %ds.",
  88.354 +                        self.domid, timeout)
  88.355 +                    scheduler.later(timeout, self.refreshShutdown)
  88.356 +
  88.357 +
  88.358 +    def shutdown(self, reason):
  88.359 +        if not reason in shutdown_reasons.values():
  88.360 +            raise XendError('invalid reason:' + reason)
  88.361 +        self.storeVm("control/shutdown", reason)
  88.362 +        if not reason == 'suspend':
  88.363 +            self.storeVm('xend/shutdown_start_time', time.time())
  88.364 +
  88.365 +
  88.366 +    def clearRestart(self):
  88.367 +        self.removeVm("xend/shutdown_start_time")
  88.368 +
  88.369 +
  88.370 +    def maybeRestart(self, reason):
  88.371 +        if self.restart_needed(reason):
  88.372 +            self.restart()
  88.373 +        else:
  88.374 +            self.destroy()
  88.375 +
  88.376 +
  88.377      def dumpCore(self):
  88.378          """Create a core dump for this domain.  Nothrow guarantee."""
  88.379          
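
refreshShutdown() re-arms itself while a clean shutdown is in progress: it reads the recorded xend/shutdown_start_time, destroys the domain once SHUTDOWN_TIMEOUT has elapsed, and otherwise schedules another check for exactly when the deadline falls due. A self-contained sketch of that re-arming pattern, with threading.Timer standing in for xend's scheduler.later() and a toy domain class (illustrative names only):

    import threading, time

    SHUTDOWN_TIMEOUT = 30.0    # seconds; xend defines its own constant

    class ToyDomain:
        def __init__(self):
            self.shutdown_start_time = None
            self.alive = True

        def destroy(self):
            self.alive = False
            print("destroyed")

        def refresh_shutdown(self):
            if not self.alive or self.shutdown_start_time is None:
                return
            timeout = SHUTDOWN_TIMEOUT - time.time() + self.shutdown_start_time
            if timeout < 0:
                print("shutdown timeout expired")
                self.destroy()
            else:
                # Check again exactly when the deadline falls due.
                threading.Timer(timeout, self.refresh_shutdown).start()

    d = ToyDomain()
    d.shutdown_start_time = time.time() - SHUTDOWN_TIMEOUT - 1   # already overdue
    d.refresh_shutdown()
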
  88.380 @@ -526,18 +643,32 @@ class XendDomainInfo:
  88.381                        self.domid, self.info['name'], str(exn))
  88.382  
  88.383  
  88.384 -    def closeStoreChannel(self):
  88.385 -        """Close the store channel, if any.  Nothrow guarantee."""
  88.386 +    def closeChannel(self, channel, entry):
  88.387 +        """Close the given channel, if set, and remove the given entry in the
  88.388 +        store.  Nothrow guarantee."""
  88.389          
  88.390          try:
  88.391 -            if self.store_channel:
  88.392 -                try:
  88.393 -                    self.store_channel.close()
  88.394 -                    self.removeDom("store/port")
  88.395 -                finally:
  88.396 -                    self.store_channel = None
  88.397 +            try:
  88.398 +                if channel:
  88.399 +                    channel.close()
  88.400 +            finally:
  88.401 +                self.removeDom(entry)
  88.402          except Exception, exn:
  88.403              log.exception(exn)
  88.404 +        
  88.405 +
  88.406 +    def closeStoreChannel(self):
  88.407 +        """Close the store channel, if any.  Nothrow guarantee."""
  88.408 +
  88.409 +        self.closeChannel(self.store_channel, "store/port")
  88.410 +        self.store_channel = None
  88.411 +
  88.412 +
  88.413 +    def closeConsoleChannel(self):
  88.414 +        """Close the console channel, if any.  Nothrow guarantee."""
  88.415 +
  88.416 +        self.closeChannel(self.console_channel, "console/port")
  88.417 +        self.console_channel = None
  88.418  
  88.419  
  88.420      def setConsoleRef(self, ref):
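
closeChannel() folds the store and console teardown paths into one nothrow helper: the store entry is removed in a finally clause so it goes away even if close() raises, and any exception is logged rather than propagated. A small stand-alone sketch of that shape, with a placeholder channel class and a callable in place of removeDom():

    import logging

    logging.basicConfig()
    log = logging.getLogger("teardown")

    def close_channel(channel, remove_entry):
        """Close channel if set and remove its store entry.  Never raises."""
        try:
            try:
                if channel:
                    channel.close()
            finally:
                remove_entry()            # runs even if close() raised
        except Exception:
            log.exception("channel teardown failed")

    class FlakyChannel:
        def close(self):
            raise IOError("close failed")

    entries = {"store/port": 1}
    close_channel(FlakyChannel(), lambda: entries.pop("store/port", None))
    print(entries)    # {} -- the entry is gone despite the failed close()
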
  88.421 @@ -566,18 +697,23 @@ class XendDomainInfo:
  88.422              
  88.423          self.info.update(info)
  88.424          self.validateInfo()
  88.425 +        self.refreshShutdown(info)
  88.426  
  88.427          log.debug("XendDomainInfo.update done on domain %d: %s", self.domid,
  88.428                    self.info)
  88.429  
  88.430  
  88.431 +    ## private:
  88.432 +
  88.433      def state_set(self, state):
  88.434          self.state_updated.acquire()
  88.435          if self.state != state:
  88.436              self.state = state
  88.437              self.state_updated.notifyAll()
  88.438          self.state_updated.release()
  88.439 -        self.exportToDB()
  88.440 +
  88.441 +
  88.442 +    ## public:
  88.443  
  88.444      def state_wait(self, state):
  88.445          self.state_updated.acquire()
  88.446 @@ -585,6 +721,7 @@ class XendDomainInfo:
  88.447              self.state_updated.wait()
  88.448          self.state_updated.release()
  88.449  
  88.450 +
  88.451      def __str__(self):
  88.452          s = "<domain"
  88.453          s += " id=" + str(self.domid)
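
state_set() and state_wait() are a standard condition-variable pairing: the setter changes the state and notifies under the lock, while the waiter loops on the predicate until it holds. A generic sketch of the pattern (names here are illustrative, not xend's):

    import threading

    class StateHolder:
        def __init__(self, state):
            self.state = state
            self.cond = threading.Condition()

        def state_set(self, state):
            with self.cond:
                if self.state != state:
                    self.state = state
                    self.cond.notify_all()

        def state_wait(self, state):
            with self.cond:
                while self.state != state:     # loop: guard against spurious wakeups
                    self.cond.wait()

    holder = StateHolder("OK")
    t = threading.Thread(target=holder.state_wait, args=("TERMINATED",))
    t.start()
    holder.state_set("TERMINATED")
    t.join()
    print("waiter released")
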
  88.454 @@ -597,13 +734,6 @@ class XendDomainInfo:
  88.455      __repr__ = __str__
  88.456  
  88.457  
  88.458 -    def getDeviceController(self, name):
  88.459 -        if name not in controllerClasses:
  88.460 -            raise XendError("unknown device type: " + str(name))
  88.461 -
  88.462 -        return controllerClasses[name](self)
  88.463 -
  88.464 -
  88.465      def createDevice(self, deviceClass, devconfig):
  88.466          return self.getDeviceController(deviceClass).createDevice(devconfig)
  88.467  
  88.468 @@ -617,49 +747,66 @@ class XendDomainInfo:
  88.469          return self.getDeviceController(deviceClass).destroyDevice(devid)
  88.470  
  88.471  
  88.472 +    def getDeviceSxprs(self, deviceClass):
  88.473 +        return self.getDeviceController(deviceClass).sxprs()
  88.474 +
  88.475 +
  88.476 +    ## private:
  88.477 +
  88.478 +    def getDeviceController(self, name):
  88.479 +        if name not in controllerClasses:
  88.480 +            raise XendError("unknown device type: " + str(name))
  88.481 +
  88.482 +        return controllerClasses[name](self)
  88.483 +
  88.484 +
  88.485 +    ## public:
  88.486 +
  88.487      def sxpr(self):
  88.488          sxpr = ['domain',
  88.489 -                ['domid', self.domid],
  88.490 -                ['name', self.info['name']],
  88.491 -                ['memory', self.info['memory_KiB'] / 1024],
  88.492 -                ['ssidref', self.info['ssidref']]]
  88.493 -        if self.uuid:
  88.494 -            sxpr.append(['uuid', self.uuid])
  88.495 -        if self.info:
  88.496 -            sxpr.append(['maxmem', self.info['maxmem_KiB'] / 1024])
  88.497 +                ['domid',   self.domid],
  88.498 +                ['uuid',    self.uuid],
  88.499 +                ['memory',  self.info['memory_KiB'] / 1024]]
  88.500 +
  88.501 +        for e in ROUNDTRIPPING_CONFIG_ENTRIES:
  88.502 +            if self.infoIsSet(e[0]):
  88.503 +                sxpr.append([e[0], self.info[e[0]]])
  88.504 +        
  88.505 +        sxpr.append(['maxmem', self.info['maxmem_KiB'] / 1024])
  88.506  
  88.507 -            if self.infoIsSet('device'):
  88.508 -                for (_, c) in self.info['device']:
  88.509 -                    sxpr.append(['device', c])
  88.510 +        if self.infoIsSet('image'):
  88.511 +            sxpr.append(['image', self.info['image']])
  88.512  
  88.513 -            def stateChar(name):
  88.514 -                if name in self.info:
  88.515 -                    if self.info[name]:
  88.516 -                        return name[0]
  88.517 -                    else:
  88.518 -                        return '-'
  88.519 +        if self.infoIsSet('device'):
  88.520 +            for (_, c) in self.info['device']:
  88.521 +                sxpr.append(['device', c])
  88.522 +
  88.523 +        def stateChar(name):
  88.524 +            if name in self.info:
  88.525 +                if self.info[name]:
  88.526 +                    return name[0]
  88.527                  else:
  88.528 -                    return '?'
  88.529 +                    return '-'
  88.530 +            else:
  88.531 +                return '?'
  88.532  
  88.533 -            state = reduce(
  88.534 -                lambda x, y: x + y,
  88.535 -                map(stateChar,
  88.536 -                    ['running', 'blocked', 'paused', 'shutdown', 'crashed']))
  88.537 +        state = reduce(
  88.538 +            lambda x, y: x + y,
  88.539 +            map(stateChar,
  88.540 +                ['running', 'blocked', 'paused', 'shutdown', 'crashed',
  88.541 +                 'dying']))
  88.542  
  88.543 -            sxpr.append(['state', state])
  88.544 -            if self.infoIsSet('shutdown'):
  88.545 -                reason = shutdown_reason(self.info['shutdown_reason'])
  88.546 -                sxpr.append(['shutdown_reason', reason])
  88.547 -            if self.infoIsSet('cpu_time'):
  88.548 -                sxpr.append(['cpu_time', self.info['cpu_time']/1e9])    
  88.549 -            sxpr.append(['vcpus', self.info['vcpus']])
  88.550 -            sxpr.append(['cpumap', self.info['cpumap']])
  88.551 -            if self.infoIsSet('vcpu_to_cpu'):
  88.552 -                sxpr.append(['cpu', self.info['vcpu_to_cpu'][0]])
  88.553 -                # build a string, using '|' to separate items, show only up
  88.554 -                # to number of vcpus in domain, and trim the trailing '|'
  88.555 -                sxpr.append(['vcpu_to_cpu', ''.join(map(lambda x: str(x)+'|',
  88.556 -                            self.info['vcpu_to_cpu'][0:self.info['vcpus']]))[:-1]])
  88.557 +        sxpr.append(['state', state])
  88.558 +        if self.infoIsSet('shutdown'):
  88.559 +            reason = shutdown_reason(self.info['shutdown_reason'])
  88.560 +            sxpr.append(['shutdown_reason', reason])
  88.561 +        if self.infoIsSet('cpu_time'):
  88.562 +            sxpr.append(['cpu_time', self.info['cpu_time']/1e9])
  88.563 +        sxpr.append(['vcpus', self.info['vcpus']])
  88.564 +        sxpr.append(['cpumap', self.info['cpumap']])
  88.565 +        if self.infoIsSet('vcpu_to_cpu'):
  88.566 +            sxpr.append(['cpu', self.info['vcpu_to_cpu'][0]])
  88.567 +            sxpr.append(['vcpu_to_cpu', self.prettyVCpuMap()])
  88.568              
  88.569          if self.infoIsSet('start_time'):
  88.570              up_time =  time.time() - self.info['start_time']
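
The reworked sxpr() builds the run-state string by mapping stateChar over the flag names, now including 'dying', and concatenating the results. The same construction with a plain dict in place of self.info (''.join is equivalent to the reduce used in the patch):

    info = {'running': False, 'blocked': True, 'paused': False,
            'shutdown': False, 'crashed': False, 'dying': False}

    def state_char(name):
        if name in info:
            return name[0] if info[name] else '-'
        return '?'

    flags = ['running', 'blocked', 'paused', 'shutdown', 'crashed', 'dying']
    state = ''.join(state_char(f) for f in flags)
    print(state)    # "-b----"
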
  88.571 @@ -674,14 +821,17 @@ class XendDomainInfo:
  88.572              sxpr.append(['console_channel', self.console_channel.sxpr()])
  88.573          if self.console_mfn:
  88.574              sxpr.append(['console_mfn', self.console_mfn])
  88.575 -        if self.restart_count:
  88.576 -            sxpr.append(['restart_count', self.restart_count])
  88.577 -        if self.restart_state:
  88.578 -            sxpr.append(['restart_state', self.restart_state])
  88.579 -        if self.restart_time:
  88.580 -            sxpr.append(['restart_time', str(self.restart_time)])
  88.581 +
  88.582          return sxpr
  88.583  
  88.584 +
  88.585 +    ## private:
  88.586 +
  88.587 +    def prettyVCpuMap(self):
  88.588 +        return '|'.join(map(str,
  88.589 +                            self.info['vcpu_to_cpu'][0:self.info['vcpus']]))
  88.590 +
  88.591 +
  88.592      def check_name(self, name):
  88.593          """Check if a vm name is valid. Valid names contain alphabetic characters,
  88.594          digits, or characters in '_-.:/+'.
  88.595 @@ -719,11 +869,19 @@ class XendDomainInfo:
  88.596          @param config: configuration
  88.597          @raise: VmError on error
  88.598          """
  88.599 -        # todo - add support for scheduling params?
  88.600 +
  88.601 +        log.debug('XendDomainInfo.construct: %s %s',
  88.602 +                  str(self.domid),
  88.603 +                  str(self.info['ssidref']))
  88.604 +
  88.605 +        self.domid = xc.domain_create(dom = 0, ssidref = self.info['ssidref'])
  88.606 +
  88.607 +        if self.domid <= 0:
  88.608 +            raise VmError('Creating domain failed: name=%s' %
  88.609 +                          self.info['name'])
  88.610 +
  88.611          try:
  88.612              self.initDomain()
  88.613 -
  88.614 -            # Create domain devices.
  88.615              self.construct_image()
  88.616              self.configure()
  88.617              self.exportToDB()
  88.618 @@ -737,30 +895,23 @@ class XendDomainInfo:
  88.619  
  88.620  
  88.621      def initDomain(self):
  88.622 -        log.debug('XendDomainInfo.initDomain: %s %s %s %s)',
  88.623 +        log.debug('XendDomainInfo.initDomain: %s %s %s',
  88.624                    str(self.domid),
  88.625                    str(self.info['memory_KiB']),
  88.626 -                  str(self.info['ssidref']),
  88.627                    str(self.info['cpu_weight']))
  88.628  
  88.629 -        self.domid = xc.domain_create(dom = self.domid or 0,
  88.630 -                                      ssidref = self.info['ssidref'])
  88.631 -
  88.632 -        if 'image' not in self.info:
  88.633 +        if not self.infoIsSet('image'):
  88.634              raise VmError('Missing image in configuration')
  88.635  
  88.636          self.image = image.create(self,
  88.637                                    self.info['image'],
  88.638                                    self.info['device'])
  88.639  
  88.640 -        if self.domid <= 0:
  88.641 -            raise VmError('Creating domain failed: name=%s' %
  88.642 -                          self.info['name'])
  88.643 -
  88.644          if self.info['bootloader']:
  88.645              self.image.handleBootloading()
  88.646  
  88.647          xc.domain_setcpuweight(self.domid, self.info['cpu_weight'])
  88.648 +        # XXX Merge with configure_maxmem?
  88.649          m = self.image.getDomainMemory(self.info['memory_KiB'])
  88.650          xc.domain_setmaxmem(self.domid, m)
  88.651          xc.domain_memory_increase_reservation(self.domid, m, 0, 0)
  88.652 @@ -794,6 +945,8 @@ class XendDomainInfo:
  88.653          self.configure_vcpus(self.info['vcpus'])
  88.654  
  88.655  
  88.656 +    ## public:
  88.657 +
  88.658      def delete(self):
  88.659          """Delete the vm's db.
  88.660          """
  88.661 @@ -803,48 +956,46 @@ class XendDomainInfo:
  88.662              log.warning("error in domain db delete: %s", ex)
  88.663  
  88.664  
  88.665 -    def destroy_domain(self):
  88.666 -        """Destroy the vm's domain.
  88.667 -        The domain will not finally go away unless all vm
  88.668 -        devices have been released.
  88.669 -        """
  88.670 -        if self.domid is None:
  88.671 -            return
  88.672 -        try:
  88.673 -            xc.domain_destroy(dom=self.domid)
  88.674 -        except Exception, err:
  88.675 -            log.exception("Domain destroy failed: %s", self.info['name'])
  88.676 +    def cleanup(self):
  88.677 +        """Cleanup vm resources: release devices.  Nothrow guarantee."""
  88.678  
  88.679 -    def cleanup(self):
  88.680 -        """Cleanup vm resources: release devices.
  88.681 -        """
  88.682 -        self.state = STATE_VM_TERMINATED
  88.683 +        self.state_set(STATE_VM_TERMINATED)
  88.684          self.release_devices()
  88.685          self.closeStoreChannel()
  88.686 -        if self.console_channel:
  88.687 -            # notify processes using this console?
  88.688 -            try:
  88.689 -                self.console_channel.close()
  88.690 -                self.console_channel = None
  88.691 -            except:
  88.692 -                pass
  88.693 +        self.closeConsoleChannel()
  88.694 +
  88.695          if self.image:
  88.696              try:
  88.697                  self.image.destroy()
  88.698 -                self.image = None
  88.699              except:
  88.700 -                pass
  88.701 +                log.exception(
  88.702 +                    "XendDomainInfo.cleanup: image.destroy() failed.")
  88.703 +            self.image = None
  88.704 +
  88.705  
  88.706      def destroy(self):
  88.707 -        """Cleanup vm and destroy domain.
  88.708 -        """
  88.709 +        """Cleanup vm and destroy domain.  Nothrow guarantee."""
  88.710 +
  88.711 +        log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))
  88.712  
  88.713 -        log.debug("XendDomainInfo.destroy")
  88.714 +        self.cleanup()
  88.715 +
  88.716 +        try:
  88.717 +            self.removeVm()
  88.718 +        except Exception:
  88.719 +            log.exception("Removing VM path failed.")
  88.720  
  88.721 -        self.destroy_domain()
  88.722 -        self.cleanup()
  88.723 -        self.exportToDB()
  88.724 -        return 0
  88.725 +        try:
  88.726 +            self.removeDom()
  88.727 +        except Exception:
  88.728 +            log.exception("Removing domain path failed.")
  88.729 +
  88.730 +        try:
  88.731 +            if self.domid is not None:
  88.732 +                xc.domain_destroy(dom=self.domid)
  88.733 +        except Exception:
  88.734 +            log.exception("XendDomainInfo.destroy: xc.domain_destroy failed.")
  88.735 +
  88.736  
  88.737      def is_terminated(self):
  88.738          """Check if a domain has been terminated.
  88.739 @@ -852,8 +1003,7 @@ class XendDomainInfo:
  88.740          return self.state == STATE_VM_TERMINATED
  88.741  
  88.742      def release_devices(self):
  88.743 -        """Release all vm devices.
  88.744 -        """
  88.745 +        """Release all vm devices.  Nothrow guarantee."""
  88.746  
  88.747          while True:
  88.748              t = xstransact("%s/device" % self.path)
  88.749 @@ -865,8 +1015,8 @@ class XendDomainInfo:
  88.750                          # Log and swallow any exceptions in removal --
  88.751                          # there's nothing more we can do.
  88.752                          log.exception(
  88.753 -                           "Device release failed: %s; %s; %s; %s" %
  88.754 -                            (self.info['name'], n, d, str(ex)))
  88.755 +                           "Device release failed: %s; %s; %s",
  88.756 +                           self.info['name'], n, d)
  88.757              if t.commit():
  88.758                  break
  88.759  
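
release_devices() uses xend's usual xenstore idiom: open a transaction on the device directory, do the work, and retry the whole body until commit() succeeds, since commit() reports failure when another writer raced the transaction. A generic sketch of that retry-until-commit loop, with a toy transaction class standing in for xstransact:

    import random

    class ToyTransaction:
        """Stands in for xstransact; commit() fails when a concurrent write races us."""
        def __init__(self, path):
            self.path = path

        def list(self):
            return ['vif', 'vbd']

        def remove(self, name):
            print("removing %s/%s" % (self.path, name))

        def commit(self):
            return random.random() < 0.5     # pretend a race loses half the time

    def release_devices(path):
        while True:
            t = ToyTransaction(path)
            for name in t.list():
                try:
                    t.remove(name)
                except Exception:
                    pass      # log and swallow: nothing more can be done here
            if t.commit():
                break

    release_devices('/vm/1234/device')
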
  88.760 @@ -902,8 +1052,7 @@ class XendDomainInfo:
  88.761  
  88.762          @raise: VmError for invalid devices
  88.763          """
  88.764 -        if not self.rebooting():
  88.765 -            self.create_configured_devices()
  88.766 +        self.create_configured_devices()
  88.767          if self.image:
  88.768              self.image.createDeviceModel()
  88.769  
  88.770 @@ -942,23 +1091,6 @@ class XendDomainInfo:
  88.771              return reason == 'reboot'
  88.772          return False
  88.773  
  88.774 -    def restart_cancel(self):
  88.775 -        """Cancel a vm restart.
  88.776 -        """
  88.777 -        self.restart_state = None
  88.778 -
  88.779 -    def restarting(self):
  88.780 -        """Put the vm into restart mode.
  88.781 -        """
  88.782 -        self.restart_state = STATE_RESTART_PENDING
  88.783 -
  88.784 -    def restart_pending(self):
  88.785 -        """Test if the vm has a pending restart.
  88.786 -        """
  88.787 -        return self.restart_state == STATE_RESTART_PENDING
  88.788 -
  88.789 -    def rebooting(self):
  88.790 -        return self.restart_state == STATE_RESTART_BOOTING
  88.791  
  88.792      def restart_check(self):
  88.793          """Check if domain restart is OK.
  88.794 @@ -976,23 +1108,37 @@ class XendDomainInfo:
  88.795          self.restart_time = tnow
  88.796          self.restart_count += 1
  88.797  
  88.798 +
  88.799      def restart(self):
  88.800 -        """Restart the domain after it has exited.
  88.801 -        Reuses the domain id
  88.802 +        """Restart the domain after it has exited. """
  88.803 +
  88.804 +        #            self.restart_check()
  88.805 +        self.cleanup()
  88.806 +
  88.807 +        config = self.sxpr()
  88.808  
  88.809 -        """
  88.810 +        if self.readVm('xend/restart_in_progress'):
  88.811 +            log.error('Xend failed during restart of domain %d.  '
  88.812 +                      'Refusing to restart to avoid loops.',
  88.813 +                      self.domid)
  88.814 +            self.destroy()
  88.815 +            return
  88.816 +
  88.817 +        self.writeVm('xend/restart_in_progress', 'True')
  88.818 +
  88.819          try:
  88.820 -            self.clear_shutdown()
  88.821 -            self.state = STATE_VM_OK
  88.822 -            self.shutdown_pending = None
  88.823 -            self.restart_check()
  88.824 -            self.exportToDB()
  88.825 -            self.restart_state = STATE_RESTART_BOOTING
  88.826 -            self.configure_bootloader()
  88.827 -            self.construct()
  88.828 -            self.exportToDB()
  88.829 +            self.destroy()
  88.830 +            try:
  88.831 +                xd = get_component('xen.xend.XendDomain')
  88.832 +                xd.domain_unpause(xd.domain_create(config).getDomid())
  88.833 +            except Exception, exn:
  88.834 +                log.exception('Failed to restart domain %d.', self.domid)
  88.835          finally:
  88.836 -            self.restart_state = None
  88.837 +            self.removeVm('xend/restart_in_progress')
  88.838 +            
  88.839 +        # self.configure_bootloader()
  88.840 +        #        self.exportToDB()
  88.841 +
  88.842  
  88.843      def configure_bootloader(self):
  88.844          if not self.info['bootloader']:
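
The rewritten restart() defends against restart loops: if a previous restart died part-way through, xend/restart_in_progress is still present in the store and the domain is destroyed rather than restarted again; otherwise the flag is written, the restart attempted, and the flag removed in a finally clause. A sketch of that guard, with a plain dict for the per-VM store and stub actions (stand-ins only):

    vm_store = {}                       # stand-in for the per-VM xenstore path

    def do_restart():
        print("recreating domain from saved config")

    def destroy():
        print("destroying domain")

    def restart():
        if vm_store.get('xend/restart_in_progress'):
            # A previous restart never completed; refuse to loop.
            destroy()
            return
        vm_store['xend/restart_in_progress'] = 'True'
        try:
            do_restart()
        finally:
            del vm_store['xend/restart_in_progress']

    restart()          # normal path: flag is set, then cleared on the way out
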
  88.845 @@ -1006,7 +1152,8 @@ class XendDomainInfo:
  88.846          if dev:
  88.847              disk = sxp.child_value(dev, "uname")
  88.848              fn = blkdev_uname_to_file(disk)
  88.849 -            blcfg = bootloader(self.info['bootloader'], fn, 1, self.info['vcpus'])
  88.850 +            blcfg = bootloader(self.info['bootloader'], fn, 1,
  88.851 +                               self.info['vcpus'])
  88.852          if blcfg is None:
  88.853              msg = "Had a bootloader specified, but can't find disk"
  88.854              log.error(msg)
  88.855 @@ -1023,7 +1170,9 @@ class XendDomainInfo:
  88.856  
  88.857  
  88.858      def configure_maxmem(self):
  88.859 -        xc.domain_setmaxmem(self.domid, maxmem_kb = self.info['maxmem_KiB'])
  88.860 +        if self.image:
  88.861 +            m = self.image.getDomainMemory(self.info['memory_KiB'])
  88.862 +            xc.domain_setmaxmem(self.domid, maxmem_kb = m)
  88.863  
  88.864  
  88.865      def vcpu_hotplug(self, vcpu, state):
  88.866 @@ -1038,24 +1187,9 @@ class XendDomainInfo:
  88.867              availability = "online"
  88.868          self.storeVm("cpu/%d/availability" % vcpu, availability)
  88.869  
  88.870 -    def shutdown(self, reason):
  88.871 -        if not reason in shutdown_reasons.values():
  88.872 -            raise XendError('invalid reason:' + reason)
  88.873 -        self.storeVm("control/shutdown", reason)
  88.874 -        if not reason in ['suspend']:
  88.875 -            self.shutdown_pending = {'start':time.time(), 'reason':reason}
  88.876 -
  88.877 -    def clear_shutdown(self):
  88.878 -        self.removeVm("control/shutdown")
  88.879 -
  88.880      def send_sysrq(self, key=0):
  88.881          self.storeVm("control/sysrq", '%c' % key)
  88.882  
  88.883 -    def shutdown_time_left(self, timeout):
  88.884 -        if not self.shutdown_pending:
  88.885 -            return 0
  88.886 -        return timeout - (time.time() - self.shutdown_pending['start'])
  88.887 -
  88.888      def dom0_init_store(self):
  88.889          if not self.store_channel:
  88.890              self.store_channel = self.eventChannel("store/port")
  88.891 @@ -1078,8 +1212,6 @@ class XendDomainInfo:
  88.892      def dom0_enforce_vcpus(self):
  88.893          dom = 0
  88.894          # get max number of vcpus to use for dom0 from config
  88.895 -        from xen.xend import XendRoot
  88.896 -        xroot = XendRoot.instance()
  88.897          target = int(xroot.get_dom0_vcpus())
  88.898          log.debug("number of vcpus to use is %d" % (target))
  88.899     
    90.1 --- a/tools/python/xen/xend/server/DevController.py	Thu Sep 29 16:22:02 2005 -0600
    90.2 +++ b/tools/python/xen/xend/server/DevController.py	Thu Sep 29 17:28:44 2005 -0600
    90.3 @@ -81,6 +81,13 @@ class DevController:
    90.4          xstransact.Remove(backpath)
    90.5  
    90.6  
    90.7 +    def sxprs(self):
    90.8 +        """@return an s-expression describing all the devices of this
    90.9 +        controller's device-class.
   90.10 +        """
   90.11 +        return xstransact.ListRecursive(self.frontendRoot())
   90.12 +
   90.13 +
   90.14      def sxpr(self, devid):
   90.15          """@return an s-expression describing the specified device.
   90.16          """
   90.17 @@ -126,8 +133,8 @@ class DevController:
   90.18          compulsory to use it; subclasses may prefer to allocate IDs based upon
   90.19          the device configuration instead.
   90.20          """
   90.21 +        path = self.frontendMiscPath()
   90.22          while True:
   90.23 -            path = self.frontendMiscPath()
   90.24              t = xstransact(path)
   90.25              try:
   90.26                  result = t.read("nextDeviceID")
   90.27 @@ -196,8 +203,11 @@ class DevController:
   90.28  
   90.29  
   90.30      def frontendPath(self, devid):
   90.31 -        return "%s/device/%s/%d" % (self.vm.getPath(), self.deviceClass,
   90.32 -                                    devid)
   90.33 +        return "%s/%d" % (self.frontendRoot(), devid)
   90.34 +
   90.35 +
   90.36 +    def frontendRoot(self):
   90.37 +        return "%s/device/%s" % (self.vm.getPath(), self.deviceClass)
   90.38  
   90.39  
   90.40      def frontendMiscPath(self):
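
sxprs() now describes a whole device class by recursively listing its frontend directory, and frontendPath() is re-expressed in terms of the new frontendRoot() so both share one definition of the per-class directory. A sketch of that path layout with example values:

    def frontend_root(vm_path, device_class):
        return "%s/device/%s" % (vm_path, device_class)

    def frontend_path(vm_path, device_class, devid):
        return "%s/%d" % (frontend_root(vm_path, device_class), devid)

    print(frontend_root('/vm/1234', 'vbd'))       # /vm/1234/device/vbd
    print(frontend_path('/vm/1234', 'vbd', 0))    # /vm/1234/device/vbd/0

The same hunk also hoists frontendMiscPath() out of the device-ID retry loop, since the path does not change between retries.
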
    91.1 --- a/tools/python/xen/xend/server/SrvDaemon.py	Thu Sep 29 16:22:02 2005 -0600
    91.2 +++ b/tools/python/xen/xend/server/SrvDaemon.py	Thu Sep 29 17:28:44 2005 -0600
    91.3 @@ -9,26 +9,24 @@ import signal
    91.4  import sys
    91.5  import threading
    91.6  import linecache
    91.7 -import socket
    91.8  import pwd
    91.9  import re
   91.10 -import StringIO
   91.11  import traceback
   91.12 -import time
   91.13 -import glob
   91.14  
   91.15 -from xen.xend import sxp
   91.16 -from xen.xend import PrettyPrint
   91.17 -from xen.xend import EventServer; eserver = EventServer.instance()
   91.18 -from xen.xend.XendError import XendError
   91.19 +from xen.xend import EventServer
   91.20  from xen.xend.server import SrvServer
   91.21  from xen.xend.XendLogging import log
   91.22 -from xen.xend import XendRoot; xroot = XendRoot.instance()
   91.23 +from xen.xend import XendRoot
   91.24  
   91.25  import event
   91.26  import relocate
   91.27  from params import *
   91.28  
   91.29 +
   91.30 +eserver = EventServer.instance()
   91.31 +xroot = XendRoot.instance()
   91.32 +
   91.33 +
   91.34  class Daemon:
   91.35      """The xend daemon.
   91.36      """
   91.37 @@ -233,7 +231,7 @@ class Daemon:
   91.38              except:
   91.39                  pass
   91.40  
   91.41 -    def print_trace(self, str):
   91.42 +    def print_trace(self, string):
   91.43          for i in range(self.traceindent):
   91.44              ch = " "
   91.45              if (i % 5):
   91.46 @@ -241,7 +239,7 @@ class Daemon:
   91.47              else:
   91.48                  ch = '|'
   91.49              self.tracefile.write(ch)
   91.50 -        self.tracefile.write(str)
   91.51 +        self.tracefile.write(string)
   91.52              
   91.53      def trace(self, frame, event, arg):
   91.54          if not self.traceon:
   91.55 @@ -289,7 +287,7 @@ class Daemon:
   91.56          try:
   91.57              os.setuid(pwd.getpwnam(XEND_USER)[2])
   91.58              return 0
   91.59 -        except KeyError, error:
   91.60 +        except KeyError:
   91.61              print >>sys.stderr, "Error: no such user '%s'" % XEND_USER
   91.62              return 1
   91.63  
    92.1 --- a/tools/python/xen/xend/server/SrvDmesg.py	Thu Sep 29 16:22:02 2005 -0600
    92.2 +++ b/tools/python/xen/xend/server/SrvDmesg.py	Thu Sep 29 17:28:44 2005 -0600
    92.3 @@ -13,15 +13,15 @@
    92.4  # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    92.5  #============================================================================
    92.6  # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
    92.7 +# Copyright (C) 2005 XenSource Ltd
    92.8  #============================================================================
    92.9  
   92.10 -import os
   92.11  
   92.12 -from xen.xend import sxp
   92.13  from xen.xend import XendDmesg
   92.14  
   92.15  from xen.web.SrvDir import SrvDir
   92.16  
   92.17 +
   92.18  class SrvDmesg(SrvDir):
   92.19      """Xen Dmesg output.
   92.20      """
   92.21 @@ -47,6 +47,6 @@ class SrvDmesg(SrvDir):
   92.22      def info(self):
   92.23          return self.xd.info()
   92.24  
   92.25 -    def op_clear(self, op, req):
   92.26 +    def op_clear(self, _1, _2):
   92.27          self.xd.clear()
   92.28          return 0
    93.1 --- a/tools/python/xen/xend/server/SrvDomain.py	Thu Sep 29 16:22:02 2005 -0600
    93.2 +++ b/tools/python/xen/xend/server/SrvDomain.py	Thu Sep 29 17:28:44 2005 -0600
    93.3 @@ -150,17 +150,6 @@ class SrvDomain(SrvDir):
    93.4          val = fn(req.args, {'dom': self.dom.domid})
    93.5          return val
    93.6  
    93.7 -    def op_device(self, op, req):
    93.8 -        fn = FormFn(self.xd.domain_devtype_get,
    93.9 -                    [['dom',    'int'],
   93.10 -                     ['type',   'str'],
   93.11 -                     ['idx',    'int']])
   93.12 -        val = fn(req.args, {'dom': self.dom.domid})
   93.13 -        if val:
   93.14 -            return val.sxpr()
   93.15 -        else:
   93.16 -            raise XendError("invalid device")
   93.17 -
   93.18      def op_device_create(self, op, req):
   93.19          fn = FormFn(self.xd.domain_device_create,
   93.20                      [['dom',    'int'],
   93.21 @@ -172,7 +161,7 @@ class SrvDomain(SrvDir):
   93.22          fn = FormFn(self.xd.domain_device_refresh,
   93.23                      [['dom',  'int'],
   93.24                       ['type', 'str'],
   93.25 -                     ['idx',  'str']])
   93.26 +                     ['idx',  'int']])
   93.27          val = fn(req.args, {'dom': self.dom.domid})
   93.28          return val
   93.29  
   93.30 @@ -180,7 +169,7 @@ class SrvDomain(SrvDir):
   93.31          fn = FormFn(self.xd.domain_device_destroy,
   93.32                      [['dom',  'int'],
   93.33                       ['type', 'str'],
   93.34 -                     ['idx',  'str']])
   93.35 +                     ['idx',  'int']])
   93.36          val = fn(req.args, {'dom': self.dom.domid})
   93.37          return val
   93.38                  
   93.39 @@ -188,7 +177,7 @@ class SrvDomain(SrvDir):
   93.40          fn = FormFn(self.xd.domain_device_configure,
   93.41                      [['dom',    'int'],
   93.42                       ['config', 'sxpr'],
   93.43 -                     ['idx',    'str']])
   93.44 +                     ['idx',    'int']])
   93.45          val = fn(req.args, {'dom': self.dom.domid})
   93.46          return val
   93.47  
   93.48 @@ -230,10 +219,6 @@ class SrvDomain(SrvDir):
   93.49              self.print_path(req)
   93.50              #self.ls()
   93.51              req.write('<p>%s</p>' % self.dom)
   93.52 -            if self.dom.config:
   93.53 -                req.write("<code><pre>")
   93.54 -                PrettyPrint.prettyprint(self.dom.config, out=req)
   93.55 -                req.write("</pre></code>")
   93.56              self.form(req)
   93.57              req.write('</body></html>')
   93.58          return ''
    94.1 --- a/tools/python/xen/xend/server/SrvDomainDir.py	Thu Sep 29 16:22:02 2005 -0600
    94.2 +++ b/tools/python/xen/xend/server/SrvDomainDir.py	Thu Sep 29 17:28:44 2005 -0600
    94.3 @@ -38,19 +38,17 @@ class SrvDomainDir(SrvDir):
    94.4          self.xd = XendDomain.instance()
    94.5  
    94.6      def domain(self, x):
    94.7 -        val = None
    94.8          dom = self.xd.domain_lookup_by_name(x)
    94.9          if not dom:
   94.10              raise XendError('No such domain ' + str(x))
   94.11 -        val = SrvDomain(dom)
   94.12 -        return val
   94.13 +        return SrvDomain(dom)
   94.14  
   94.15      def get(self, x):
   94.16          v = SrvDir.get(self, x)
   94.17          if v is not None:
   94.18              return v
   94.19 -        v = self.domain(x)
   94.20 -        return v
   94.21 +        else:
   94.22 +            return self.domain(x)
   94.23  
   94.24      def op_create(self, op, req):
   94.25          """Create a domain.
   94.26 @@ -152,11 +150,11 @@ class SrvDomainDir(SrvDir):
   94.27              domains = self.xd.list_sorted()
   94.28              req.write('<ul>')
   94.29              for d in domains:
   94.30 -                req.write('<li><a href="%s%s"> Domain %s</a>'
   94.31 -                          % (url, d.getName(), d.getName()))
   94.32 -                req.write('id=%s' % d.getDomain())
   94.33 -                req.write('memory=%d'% d.getMemoryTarget())
   94.34 -                req.write('ssidref=%d'% d.getSsidref())
   94.35 +                req.write(
   94.36 +                    '<li><a href="%s%s">Domain %s</a>: id = %s, memory = %d, '
   94.37 +                    'ssidref = %d.'
   94.38 +                    % (url, d.getName(), d.getName(), d.getDomid(),
   94.39 +                       d.getMemoryTarget(), d.getSsidref()))
   94.40                  req.write('</li>')
   94.41              req.write('</ul>')
   94.42  
    95.1 --- a/tools/python/xen/xend/server/SrvNode.py	Thu Sep 29 16:22:02 2005 -0600
    95.2 +++ b/tools/python/xen/xend/server/SrvNode.py	Thu Sep 29 17:28:44 2005 -0600
    95.3 @@ -62,7 +62,7 @@ class SrvNode(SrvDir):
    95.4              for d in self.info():
    95.5                  req.write('<li> %10s: %s' % (d[0], str(d[1])))
    95.6              req.write('<li><a href="%sdmesg">Xen dmesg output</a>' % url)
    95.7 -            req.write('<li><a href="%slog>Xend log</a>' % url)
    95.8 +            req.write('<li><a href="%slog">Xend log</a>' % url)
    95.9              req.write('</ul>')
   95.10              req.write('</body></html>')
   95.11              
    97.1 --- a/tools/python/xen/xend/xenstore/xstransact.py	Thu Sep 29 16:22:02 2005 -0600
    97.2 +++ b/tools/python/xen/xend/xenstore/xstransact.py	Thu Sep 29 17:28:44 2005 -0600
    97.3 @@ -1,4 +1,5 @@
    97.4  # Copyright (C) 2005 Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
    97.5 +# Copyright (C) 2005 XenSource Ltd
    97.6  
    97.7  # This file is subject to the terms and conditions of the GNU General
    97.8  # Public License.  See the file "COPYING" in the main directory of
    97.9 @@ -9,6 +10,7 @@ import threading
   97.10  from xen.lowlevel import xs
   97.11  from xen.xend.xenstore.xsutil import xshandle
   97.12  
   97.13 +
   97.14  class xstransact:
   97.15  
   97.16      def __init__(self, path):
   97.17 @@ -40,8 +42,15 @@ class xstransact:
   97.18                                 '%s, while reading %s' % (ex.args[1], path))
   97.19  
   97.20      def read(self, *args):
   97.21 +        """If no arguments are given, return the value at this transaction's
   97.22 +        path.  If one argument is given, treat that argument as a subpath to
   97.23 +        this transaction's path, and return the value at that path.
   97.24 +        Otherwise, treat each argument as a subpath to this transaction's
   97.25 +        path, and return a list composed of the values at each of those
   97.26 +        instead.
   97.27 +        """
   97.28          if len(args) == 0:
   97.29 -            raise TypeError
   97.30 +            return xshandle().read(self.path)
   97.31          if len(args) == 1:
   97.32              return self._read(args[0])
   97.33          ret = []
   97.34 @@ -105,13 +114,50 @@ class xstransact:
   97.35          return []
   97.36  
   97.37      def list(self, *args):
   97.38 +        """If no arguments are given, list this transaction's path, returning
   97.39 +        the entries therein, or the empty list if no entries are found.
   97.40 +        Otherwise, treat each argument as a subpath to this transaction's
   97.41 +        path, and return the cumulative listing of each of those instead.
   97.42 +        """
   97.43          if len(args) == 0:
   97.44 -            raise TypeError
   97.45 +            ret = xshandle().ls(self.path)
   97.46 +            if ret is None:
   97.47 +                return []
   97.48 +            else:
   97.49 +                return ret
   97.50 +        else:
   97.51 +            ret = []
   97.52 +            for key in args:
   97.53 +                ret.extend(self._list(key))
   97.54 +            return ret
   97.55 +
   97.56 +
   97.57 +    def list_recursive_(self, subdir, keys):
   97.58          ret = []
   97.59 -        for key in args:
   97.60 -            ret.extend(self._list(key))
   97.61 +        for key in keys:
   97.62 +            new_subdir = subdir + "/" + key
   97.63 +            l = xshandle().ls(new_subdir)
   97.64 +            if l:
   97.65 +                ret.append([key, self.list_recursive_(new_subdir, l)])
   97.66 +            else:
   97.67 +                ret.append([key, xshandle().read(new_subdir)])
   97.68          return ret
   97.69  
   97.70 +
   97.71 +    def list_recursive(self, *args):
   97.72 +        """If no arguments are given, recursively list this transaction's
   97.73 +        path, returning the nested entries therein, or the empty list if no
   97.74 +        entries are found.  Otherwise, treat each argument as a subpath to
   97.75 +        this transaction's path, and return its cumulative recursive listing.
   97.76 +        """
   97.77 +        if len(args) == 0:
   97.78 +            args = self.list()
   97.79 +            if args is None or len(args) == 0:
   97.80 +                return []
   97.81 +
   97.82 +        return self.list_recursive_(self.path, args)
   97.83 +
   97.84 +
   97.85      def gather(self, *args):
   97.86          if len(args) and type(args[0]) != tuple:
   97.87              args = args,
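
list_recursive_() walks the store depth-first: each key whose directory listing is non-empty recurses, an empty listing marks a leaf whose value is read, and the result is a nested [key, ...] list. The same walk over an in-memory tree, with toy ls()/read() helpers in place of the xshandle() calls:

    TREE = {
        'device': {'vif': {'0': {'mac': 'aa:00:00:00:00:01'}},
                   'vbd': {'769': {'dev': 'xvda'}}},
        'name': 'vm1',
    }

    def ls(node):
        return sorted(node) if isinstance(node, dict) else None

    def read(node):
        return node

    def list_recursive(node, keys):
        out = []
        for key in keys:
            child = node[key]
            entries = ls(child)
            if entries:
                out.append([key, list_recursive(child, entries)])   # directory
            else:
                out.append([key, read(child)])                       # leaf value
        return out

    print(list_recursive(TREE, ls(TREE)))
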
   97.88 @@ -163,6 +209,13 @@ class xstransact:
   97.89  
   97.90  
   97.91      def Read(cls, path, *args):
   97.92 +        """If only one argument is given (path), return the value stored at
   97.93 +        that path.  If two arguments are given, treat the second argument as a
   97.94 +        subpath within the first, and return the value at the composed path.
   97.95 +        Otherwise, treat each argument after the first as a subpath to the
   97.96 +        given path, and return a list composed of the values at each of those
   97.97 +        instead.  This operation is performed inside a transaction.
   97.98 +        """
   97.99          while True:
  97.100              t = cls(path)
  97.101              try:
  97.102 @@ -206,6 +259,12 @@ class xstransact:
  97.103      Remove = classmethod(Remove)
  97.104  
  97.105      def List(cls, path, *args):
  97.106 +        """If only one argument is given (path), list its contents, returning
  97.107 +        the entries therein, or the empty list if no entries are found.
  97.108 +        Otherwise, treat each further argument as a subpath to the given path,
  97.109 +        and return the cumulative listing of each of those instead.  This
  97.110 +        operation is performed inside a transaction.
  97.111 +        """
  97.112          while True:
  97.113              t = cls(path)
  97.114              try:
  97.115 @@ -218,6 +277,25 @@ class xstransact:
  97.116  
  97.117      List = classmethod(List)
  97.118  
  97.119 +    def ListRecursive(cls, path, *args):
  97.120 +        """If only one argument is given (path), list its contents
  97.121 +        recursively, returning the entries therein, or the empty list if no
  97.122 +        entries are found.  Otherwise, treat each further argument as a
  97.123 +        subpath to the given path, and return the cumulative listing of each
  97.124 +        of those instead.  This operation is performed inside a transaction.
  97.125 +        """
  97.126 +        while True:
  97.127 +            t = cls(path)
  97.128 +            try:
  97.129 +                v = t.list_recursive(*args)
  97.130 +                if t.commit():
  97.131 +                    return v
  97.132 +            except:
  97.133 +                t.abort()
  97.134 +                raise
  97.135 +
  97.136 +    ListRecursive = classmethod(ListRecursive)
  97.137 +
  97.138      def Gather(cls, path, *args):
  97.139          while True:
  97.140              t = cls(path)
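
ListRecursive follows the same classmethod template as Read and List: build a fresh transaction on the path, perform the operation, return the result if commit() succeeds, retry from scratch if it does not, and abort-and-reraise on any exception. A generic sketch of that wrapper (toy transaction class, not xstransact):

    class ToyTransaction:
        def __init__(self, path):
            self.path = path
        def list_recursive(self):
            return [['name', 'vm1']]
        def commit(self):
            return True
        def abort(self):
            print("aborted")

    def ListRecursive(path):
        """Run list_recursive inside a transaction, retrying on commit races."""
        while True:
            t = ToyTransaction(path)
            try:
                v = t.list_recursive()
                if t.commit():
                    return v
                # commit() returned False: another writer raced us; retry.
            except Exception:
                t.abort()
                raise

    print(ListRecursive('/vm/1234'))
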
    98.1 --- a/tools/python/xen/xm/main.py	Thu Sep 29 16:22:02 2005 -0600
    98.2 +++ b/tools/python/xen/xm/main.py	Thu Sep 29 17:28:44 2005 -0600
    98.3 @@ -175,6 +175,10 @@ def xm_save(args):
    98.4  
    98.5      dom = args[0] # TODO: should check if this exists
    98.6      savefile = os.path.abspath(args[1])
    98.7 +
    98.8 +    if not os.access(os.path.dirname(savefile), os.W_OK):
    98.9 +        err("xm save: Unable to create file %s" % savefile)
   98.10 +        sys.exit(1)
   98.11      
   98.12      from xen.xend.XendClient import server
   98.13      server.xend_domain_save(dom, savefile)
   98.14 @@ -184,6 +188,10 @@ def xm_restore(args):
   98.15  
   98.16      savefile = os.path.abspath(args[0])
   98.17  
   98.18 +    if not os.access(savefile, os.R_OK):
   98.19 +        err("xm restore: Unable to read file %s" % savefile)
   98.20 +        sys.exit(1)
   98.21 +
   98.22      from xen.xend.XendClient import server
   98.23      info = server.xend_domain_restore(savefile)
   98.24      PrettyPrint.prettyprint(info)
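
xm save and xm restore now fail fast with a readable error instead of sending the request to xend and getting an opaque failure back: save checks that the target directory is writable, restore that the file is readable. A minimal stand-alone version of the same pre-flight checks (not wired to xend):

    import os, sys

    def check_save_target(savefile):
        """Fail fast if the directory for savefile is not writable."""
        savefile = os.path.abspath(savefile)
        if not os.access(os.path.dirname(savefile), os.W_OK):
            sys.stderr.write("xm save: unable to create file %s\n" % savefile)
            sys.exit(1)
        return savefile

    def check_restore_source(savefile):
        """Fail fast if savefile cannot be read."""
        savefile = os.path.abspath(savefile)
        if not os.access(savefile, os.R_OK):
            sys.stderr.write("xm restore: unable to read file %s\n" % savefile)
            sys.exit(1)
        return savefile

    print(check_restore_source(sys.argv[0]))   # the running script is readable
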
   113.1 --- a/tools/xenstore/xenstored_domain.c	Thu Sep 29 16:22:02 2005 -0600
   113.2 +++ b/tools/xenstore/xenstored_domain.c	Thu Sep 29 17:28:44 2005 -0600
   113.3 @@ -63,6 +63,8 @@ struct domain
   113.4  	/* The connection associated with this. */
   113.5  	struct connection *conn;
   113.6  
   113.7 +	/* Have we noticed that this domain is shutdown? */
   113.8 +	int shutdown;
   113.9  };
  113.10  
  113.11  static LIST_HEAD(domains);
  113.12 @@ -222,19 +224,25 @@ static void domain_cleanup(void)
  113.13  {
  113.14  	xc_dominfo_t dominfo;
  113.15  	struct domain *domain, *tmp;
  113.16 -	int released = 0;
  113.17 +	int notify = 0;
  113.18  
  113.19  	list_for_each_entry_safe(domain, tmp, &domains, list) {
  113.20  		if (xc_domain_getinfo(*xc_handle, domain->domid, 1,
  113.21  				      &dominfo) == 1 &&
  113.22 -		    dominfo.domid == domain->domid &&
  113.23 -		    !dominfo.dying && !dominfo.crashed && !dominfo.shutdown)
  113.24 -			continue;
  113.25 +		    dominfo.domid == domain->domid) {
  113.26 +			if ((dominfo.crashed || dominfo.shutdown)
  113.27 +			    && !domain->shutdown) {
  113.28 +				domain->shutdown = 1;
  113.29 +				notify = 1;
  113.30 +			}
  113.31 +			if (!dominfo.dying)
  113.32 +				continue;
  113.33 +		}
  113.34  		talloc_free(domain->conn);
  113.35 -		released++;
  113.36 +		notify = 1;
  113.37  	}
  113.38  
  113.39 -	if (released)
  113.40 +	if (notify)
  113.41  		fire_watches(NULL, "@releaseDomain", false);
  113.42  }
  113.43  
  113.44 @@ -272,6 +280,7 @@ static struct domain *new_domain(void *c
  113.45  	struct domain *domain;
  113.46  	domain = talloc(context, struct domain);
  113.47  	domain->port = 0;
  113.48 +	domain->shutdown = 0;
  113.49  	domain->domid = domid;
  113.50  	domain->path = talloc_strdup(domain, path);
  113.51  	domain->page = xc_map_foreign_range(*xc_handle, domain->domid,
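
domain_cleanup() now distinguishes two events: the first time a domain is observed crashed or shut down, its per-domain shutdown flag is set and @releaseDomain fires exactly once; only a dying (or vanished) domain has its connection freed. A Python sketch of that edge-triggered notification over a toy list of domain records (the real code iterates xc_domain_getinfo() results in C):

    class Dom:
        def __init__(self, domid):
            self.domid = domid
            self.noticed_shutdown = False    # mirrors domain->shutdown in xenstored

    def domain_cleanup(domains, xeninfo):
        """xeninfo maps domid -> dict with 'crashed'/'shutdown'/'dying' flags."""
        notify = False
        for d in list(domains):
            info = xeninfo.get(d.domid)
            if info is not None:
                if (info['crashed'] or info['shutdown']) and not d.noticed_shutdown:
                    d.noticed_shutdown = True     # report this transition only once
                    notify = True
                if not info['dying']:
                    continue
            domains.remove(d)                     # dying or gone: drop the connection
            notify = True
        if notify:
            print("fire_watches(@releaseDomain)")

    doms = [Dom(5)]
    info = {5: {'crashed': False, 'shutdown': True, 'dying': False}}
    domain_cleanup(doms, info)   # fires once
    domain_cleanup(doms, info)   # second pass: no new notification
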
   131.1 --- a/xen/arch/x86/shadow32.c	Thu Sep 29 16:22:02 2005 -0600
   131.2 +++ b/xen/arch/x86/shadow32.c	Thu Sep 29 17:28:44 2005 -0600
   131.3 @@ -755,9 +755,13 @@ void free_monitor_pagetable(struct vcpu 
   131.4  
   131.5      /*
   131.6       * Then free monitor_table.
  131.7 +     * Note: for a VMX guest, only the BSP needs to do this free.
   131.8       */
   131.9 -    mfn = pagetable_get_pfn(v->arch.monitor_table);
  131.10 -    free_domheap_page(&frame_table[mfn]);
  131.11 +    if (!(VMX_DOMAIN(v) && v->vcpu_id)) {
  131.12 +        mfn = pagetable_get_pfn(v->arch.monitor_table);
  131.13 +        unmap_domain_page(v->arch.monitor_vtable);
  131.14 +        free_domheap_page(&frame_table[mfn]);
  131.15 +    }
  131.16  
  131.17      v->arch.monitor_table = mk_pagetable(0);
  131.18      v->arch.monitor_vtable = 0;
  131.19 @@ -1832,7 +1836,7 @@ shadow_mark_mfn_out_of_sync(struct vcpu 
  131.20      }
  131.21  #endif
  131.22  
  131.23 -    FSH_LOG("%s(gpfn=%lx, mfn=%lx) c=%08x t=%08x", __func__,
  131.24 +    FSH_LOG("%s(gpfn=%lx, mfn=%lx) c=%08x t=%08lx", __func__,
  131.25              gpfn, mfn, page->count_info, page->u.inuse.type_info);
  131.26  
  131.27      // XXX this will require some more thought...  Cross-domain sharing and
  131.28 @@ -3013,7 +3017,7 @@ static int check_pte(
  131.29      l1_pgentry_t guest_pte = *p_guest_pte;
  131.30      l1_pgentry_t shadow_pte = *p_shadow_pte;
  131.31      l1_pgentry_t snapshot_pte = p_snapshot_pte ? *p_snapshot_pte : l1e_empty();
  131.32 -    l1_pgentry_t eff_guest_pte;
  131.33 +    l1_pgentry_t eff_guest_pte = l1e_empty();
  131.34      unsigned long mask, eff_guest_pfn, eff_guest_mfn, shadow_mfn;
  131.35      int errors = 0, guest_writable;
  131.36      int page_table_page;
  131.37 @@ -3070,7 +3074,7 @@ static int check_pte(
  131.38  
  131.39      if ( (l1e_get_flags(shadow_pte) & _PAGE_RW ) && !guest_writable )
  131.40      {
  131.41 -        printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08x page_table_page=%d\n",
  131.42 +        printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=%lx page_table_page=%d\n",
  131.43                 eff_guest_pfn, eff_guest_mfn, shadow_mfn,
  131.44                 frame_table[eff_guest_mfn].u.inuse.type_info,
  131.45                 page_table_page);
  131.46 @@ -3081,7 +3085,7 @@ static int check_pte(
  131.47           (l1e_get_flags(shadow_pte) & _PAGE_RW ) &&
  131.48           !(guest_writable && (l1e_get_flags(eff_guest_pte) & _PAGE_DIRTY)) )
  131.49      {
  131.50 -        printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08x page_table_page=%d\n",
  131.51 +        printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=%lx page_table_page=%d\n",
  131.52                 eff_guest_pfn, eff_guest_mfn, shadow_mfn,
  131.53                 frame_table[eff_guest_mfn].u.inuse.type_info,
  131.54                 page_table_page);
   132.1 --- a/xen/arch/x86/shadow_public.c	Thu Sep 29 16:22:02 2005 -0600
   132.2 +++ b/xen/arch/x86/shadow_public.c	Thu Sep 29 17:28:44 2005 -0600
   132.3 @@ -256,14 +256,15 @@ void free_monitor_pagetable(struct vcpu 
   132.4  {
   132.5      unsigned long mfn;
   132.6  
   132.7 -//    ASSERT( pagetable_val(v->arch.monitor_table) );
   132.8      /*
   132.9       * free monitor_table.
  132.10 +     * Note: for a VMX guest, only the BSP needs to do this free.
  132.11       */
  132.12 -    //mfn = (pagetable_val(v->arch.monitor_table)) >> PAGE_SHIFT;
  132.13 -    mfn = pagetable_get_pfn(v->arch.monitor_table);
  132.14 -    unmap_domain_page(v->arch.monitor_vtable);
  132.15 -    free_domheap_page(&frame_table[mfn]);
  132.16 +    if (!(VMX_DOMAIN(v) && v->vcpu_id)) {
  132.17 +        mfn = pagetable_get_pfn(v->arch.monitor_table);
  132.18 +        unmap_domain_page(v->arch.monitor_vtable);
  132.19 +        free_domheap_page(&frame_table[mfn]);
  132.20 +    }
  132.21      v->arch.monitor_table = mk_pagetable(0);
  132.22      v->arch.monitor_vtable = 0;
  132.23  }
  132.24 @@ -358,9 +359,13 @@ void free_monitor_pagetable(struct vcpu 
  132.25  
  132.26      /*
  132.27       * Then free monitor_table.
  132.28 +     * Note: for a VMX guest, only the BSP needs to do this free.
  132.29       */
  132.30 -    mfn = pagetable_get_pfn(v->arch.monitor_table);
  132.31 -    free_domheap_page(&frame_table[mfn]);
  132.32 +    if (!(VMX_DOMAIN(v) && v->vcpu_id)) {
  132.33 +        mfn = pagetable_get_pfn(v->arch.monitor_table);
  132.34 +        unmap_domain_page(v->arch.monitor_vtable);
  132.35 +        free_domheap_page(&frame_table[mfn]);
  132.36 +    }
  132.37  
  132.38      v->arch.monitor_table = mk_pagetable(0);
  132.39      v->arch.monitor_vtable = 0;
   133.1 --- a/xen/arch/x86/time.c	Thu Sep 29 16:22:02 2005 -0600
   133.2 +++ b/xen/arch/x86/time.c	Thu Sep 29 17:28:44 2005 -0600
   133.3 @@ -119,8 +119,8 @@ static inline u64 scale_delta(u64 delta,
   133.4          "mov  %4,%%eax ; "
   133.5          "mov  %%edx,%4 ; "
   133.6          "mul  %5       ; "
   133.7 +        "xor  %5,%5    ; "
   133.8          "add  %4,%%eax ; "
   133.9 -        "xor  %5,%5    ; "
  133.10          "adc  %5,%%edx ; "
  133.11          : "=A" (product), "=r" (tmp1), "=r" (tmp2)
  133.12          : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (scale->mul_frac) );
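
The time.c change reorders the assembler so that xor %5,%5 runs before the add: xor clears the carry flag, so in the old ordering the carry produced by add %4,%%eax was wiped out before adc %5,%%edx could fold it into the high word. A small Python check of the same two-limb addition, showing the carry out of the low 32 bits being added into the high 32 bits (plain arithmetic, independent of scale_delta's other steps):

    MASK32 = 0xFFFFFFFF

    def add_lo_with_carry(lo, hi, addend):
        """Add a 32-bit addend into the low word and propagate the carry."""
        s = lo + addend
        carry = s >> 32          # this is what adc consumes; xor-after-add loses it
        return s & MASK32, (hi + carry) & MASK32

    lo, hi = 0xFFFFFFFF, 0x00000001
    new_lo, new_hi = add_lo_with_carry(lo, hi, 1)
    assert (new_hi << 32 | new_lo) == ((hi << 32 | lo) + 1)
    print("lo=%#x hi=%#x" % (new_lo, new_hi))   # lo=0x0 hi=0x2: carry reached the high word
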
   134.1 --- a/xen/arch/x86/vmx_platform.c	Thu Sep 29 16:22:02 2005 -0600
   134.2 +++ b/xen/arch/x86/vmx_platform.c	Thu Sep 29 17:28:44 2005 -0600
   134.3 @@ -667,6 +667,7 @@ static void mmio_operands(int type, unsi
   134.4      mpcip->instr = inst->instr;
   134.5      mpcip->operand[0] = inst->operand[0]; /* source */
   134.6      mpcip->operand[1] = inst->operand[1]; /* destination */
   134.7 +    mpcip->immediate = inst->immediate;
   134.8  
   134.9      if (inst->operand[0] & REGISTER) { /* dest is memory */
  134.10          index = operand_index(inst->operand[0]);
  134.11 @@ -833,12 +834,16 @@ void handle_mmio(unsigned long va, unsig
  134.12          mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mpcip, regs);
  134.13          break;
  134.14  
  134.15 -    case INSTR_CMP:
  134.16 -        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
  134.17 -        break;
  134.18 +    case INSTR_CMP:        /* Fall through */
  134.19 +    case INSTR_TEST:
  134.20 +        mpcip->flags = mmio_inst.flags;
  134.21 +        mpcip->instr = mmio_inst.instr;
  134.22 +        mpcip->operand[0] = mmio_inst.operand[0]; /* source */
  134.23 +        mpcip->operand[1] = mmio_inst.operand[1]; /* destination */
  134.24 +        mpcip->immediate = mmio_inst.immediate;
  134.25  
  134.26 -    case INSTR_TEST:
  134.27 -        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
  134.28 +        /* send the request and wait for the value */
  134.29 +        send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, mmio_inst.op_size, 0, IOREQ_READ, 0);
  134.30          break;
  134.31  
  134.32      default: