ia64/xen-unstable

changeset 7126:c317e0aca9f1

Reindent more xenlinux files. Remove defunct header file.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Sep 29 11:10:27 2005 +0100 (2005-09-29)
parents e04b0805febb
children 805ee053e61f
files	linux-2.6-xen-sparse/arch/xen/kernel/devmem.c
	linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c
	linux-2.6-xen-sparse/arch/xen/kernel/fixup.c
	linux-2.6-xen-sparse/arch/xen/kernel/reboot.c
	linux-2.6-xen-sparse/arch/xen/kernel/smp.c
	linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c
	linux-2.6-xen-sparse/include/asm-xen/balloon.h
	linux-2.6-xen-sparse/include/asm-xen/driver_util.h
	linux-2.6-xen-sparse/include/asm-xen/evtchn.h
	linux-2.6-xen-sparse/include/asm-xen/foreign_page.h
	linux-2.6-xen-sparse/include/asm-xen/gnttab.h
	linux-2.6-xen-sparse/include/asm-xen/xen_proc.h
	linux-2.6-xen-sparse/include/asm-xen/xenbus.h
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/devmem.c	Thu Sep 29 09:59:46 2005 +0100
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/devmem.c	Thu Sep 29 11:10:27 2005 +0100
     1.3 @@ -144,3 +144,13 @@ struct file_operations mem_fops = {
     1.4  	.mmap		= mmap_mem,
     1.5  	.open		= open_mem,
     1.6  };
     1.7 +
     1.8 +/*
     1.9 + * Local variables:
    1.10 + *  c-file-style: "linux"
    1.11 + *  indent-tabs-mode: t
    1.12 + *  c-indent-level: 8
    1.13 + *  c-basic-offset: 8
    1.14 + *  tab-width: 8
    1.15 + * End:
    1.16 + */
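
The block appended above is an Emacs file-local-variables footer; the changeset adds the same footer to every file it touches so that editors default to Linux kernel style (tabs, tab-width 8, c-basic-offset 8). The hunks that follow are then almost entirely whitespace conversions from the old four-space xenlinux style to that style. A minimal before/after illustration, using a made-up function rather than code from the diff:

/* Old xenlinux style: 4-space indent, padded parentheses. */
int old_style(int x)
{
    if ( x != 0 )
        return 1;
    return 0;
}

/* Reindented kernel style: tabs, unpadded conditions. */
int new_style(int x)
{
	if (x != 0)
		return 1;
	return 0;
}
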
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Thu Sep 29 09:59:46 2005 +0100
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Thu Sep 29 11:10:27 2005 +0100
     2.3 @@ -3,7 +3,7 @@
     2.4   * 
     2.5   * Communication via Xen event channels.
     2.6   * 
     2.7 - * Copyright (c) 2002-2004, K A Fraser
     2.8 + * Copyright (c) 2002-2005, K A Fraser
     2.9   * 
    2.10   * This file may be distributed separately from the Linux kernel, or
    2.11   * incorporated into other software packages, subject to the following license:
    2.12 @@ -73,23 +73,23 @@ static unsigned long pirq_needs_unmask_n
    2.13  static u8  cpu_evtchn[NR_EVENT_CHANNELS];
    2.14  static u32 cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/32];
    2.15  
    2.16 -#define active_evtchns(cpu,sh,idx)              \
    2.17 -    ((sh)->evtchn_pending[idx] &                \
    2.18 -     cpu_evtchn_mask[cpu][idx] &                \
    2.19 -     ~(sh)->evtchn_mask[idx])
    2.20 +#define active_evtchns(cpu,sh,idx)		\
    2.21 +	((sh)->evtchn_pending[idx] &		\
    2.22 +	 cpu_evtchn_mask[cpu][idx] &		\
    2.23 +	 ~(sh)->evtchn_mask[idx])
    2.24  
    2.25  void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
    2.26  {
    2.27 -    clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
    2.28 -    set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
    2.29 -    cpu_evtchn[chn] = cpu;
    2.30 +	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
    2.31 +	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
    2.32 +	cpu_evtchn[chn] = cpu;
    2.33  }
    2.34  
    2.35  #else
    2.36  
    2.37 -#define active_evtchns(cpu,sh,idx)              \
    2.38 -    ((sh)->evtchn_pending[idx] &                \
    2.39 -     ~(sh)->evtchn_mask[idx])
    2.40 +#define active_evtchns(cpu,sh,idx)		\
    2.41 +	((sh)->evtchn_pending[idx] &		\
    2.42 +	 ~(sh)->evtchn_mask[idx])
    2.43  
    2.44  void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
    2.45  {
    2.46 @@ -108,9 +108,9 @@ extern asmlinkage unsigned int do_IRQ(st
    2.47  #elif defined (__x86_64__)
    2.48  #define IRQ_REG orig_rax
    2.49  #endif
    2.50 -#define do_IRQ(irq, regs) do {                  \
    2.51 -    (regs)->IRQ_REG = (irq);                    \
    2.52 -    do_IRQ((regs));                             \
    2.53 +#define do_IRQ(irq, regs) do {			\
    2.54 +	(regs)->IRQ_REG = (irq);		\
    2.55 +	do_IRQ((regs));				\
    2.56  } while (0)
    2.57  #endif
    2.58  
    2.59 @@ -123,249 +123,236 @@ extern asmlinkage unsigned int do_IRQ(st
    2.60   */
    2.61  void force_evtchn_callback(void)
    2.62  {
    2.63 -    (void)HYPERVISOR_xen_version(0, NULL);
    2.64 +	(void)HYPERVISOR_xen_version(0, NULL);
    2.65  }
    2.66  EXPORT_SYMBOL(force_evtchn_callback);
    2.67  
    2.68  /* NB. Interrupts are disabled on entry. */
    2.69  asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
    2.70  {
    2.71 -    u32     l1, l2;
    2.72 -    unsigned int   l1i, l2i, port;
    2.73 -    int            irq, cpu = smp_processor_id();
    2.74 -    shared_info_t *s = HYPERVISOR_shared_info;
    2.75 -    vcpu_info_t   *vcpu_info = &s->vcpu_data[cpu];
    2.76 -
    2.77 -    vcpu_info->evtchn_upcall_pending = 0;
    2.78 +	u32     l1, l2;
    2.79 +	unsigned int   l1i, l2i, port;
    2.80 +	int            irq, cpu = smp_processor_id();
    2.81 +	shared_info_t *s = HYPERVISOR_shared_info;
    2.82 +	vcpu_info_t   *vcpu_info = &s->vcpu_data[cpu];
    2.83  
    2.84 -    /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
    2.85 -    l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
    2.86 -    while ( l1 != 0 )
    2.87 -    {
    2.88 -        l1i = __ffs(l1);
    2.89 -        l1 &= ~(1 << l1i);
    2.90 +	vcpu_info->evtchn_upcall_pending = 0;
    2.91 +
    2.92 +	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
    2.93 +	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
    2.94 +	while (l1 != 0) {
    2.95 +		l1i = __ffs(l1);
    2.96 +		l1 &= ~(1 << l1i);
    2.97          
    2.98 -        while ( (l2 = active_evtchns(cpu, s, l1i)) != 0 )
    2.99 -        {
   2.100 -            l2i = __ffs(l2);
   2.101 -            l2 &= ~(1 << l2i);
   2.102 +		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
   2.103 +			l2i = __ffs(l2);
   2.104 +			l2 &= ~(1 << l2i);
   2.105              
   2.106 -            port = (l1i << 5) + l2i;
   2.107 -            if ( (irq = evtchn_to_irq[port]) != -1 ) {
   2.108 -                do_IRQ(irq, regs);
   2.109 -	    } else
   2.110 -                evtchn_device_upcall(port);
   2.111 -        }
   2.112 -    }
   2.113 +			port = (l1i << 5) + l2i;
   2.114 +			if ((irq = evtchn_to_irq[port]) != -1)
   2.115 +				do_IRQ(irq, regs);
   2.116 +			else
   2.117 +				evtchn_device_upcall(port);
   2.118 +		}
   2.119 +	}
   2.120  }
   2.121  EXPORT_SYMBOL(evtchn_do_upcall);
   2.122  
   2.123  static int find_unbound_irq(void)
   2.124  {
   2.125 -    int irq;
   2.126 +	int irq;
   2.127  
   2.128 -    for ( irq = 0; irq < NR_IRQS; irq++ )
   2.129 -        if ( irq_bindcount[irq] == 0 )
   2.130 -            break;
   2.131 +	for (irq = 0; irq < NR_IRQS; irq++)
   2.132 +		if (irq_bindcount[irq] == 0)
   2.133 +			break;
   2.134  
   2.135 -    if ( irq == NR_IRQS )
   2.136 -        panic("No available IRQ to bind to: increase NR_IRQS!\n");
   2.137 +	if (irq == NR_IRQS)
   2.138 +		panic("No available IRQ to bind to: increase NR_IRQS!\n");
   2.139  
   2.140 -    return irq;
   2.141 +	return irq;
   2.142  }
   2.143  
   2.144  int bind_virq_to_irq(int virq)
   2.145  {
   2.146 -    evtchn_op_t op;
   2.147 -    int evtchn, irq;
   2.148 -    int cpu = smp_processor_id();
   2.149 -
   2.150 -    spin_lock(&irq_mapping_update_lock);
   2.151 -
   2.152 -    if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
   2.153 -    {
   2.154 -        op.cmd              = EVTCHNOP_bind_virq;
   2.155 -        op.u.bind_virq.virq = virq;
   2.156 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
   2.157 -            panic("Failed to bind virtual IRQ %d\n", virq);
   2.158 -        evtchn = op.u.bind_virq.port;
   2.159 +	evtchn_op_t op;
   2.160 +	int evtchn, irq;
   2.161 +	int cpu = smp_processor_id();
   2.162  
   2.163 -        irq = find_unbound_irq();
   2.164 -        evtchn_to_irq[evtchn] = irq;
   2.165 -        irq_to_evtchn[irq]    = evtchn;
   2.166 -
   2.167 -        per_cpu(virq_to_irq, cpu)[virq] = irq;
   2.168 +	spin_lock(&irq_mapping_update_lock);
   2.169  
   2.170 -        bind_evtchn_to_cpu(evtchn, cpu);
   2.171 -    }
   2.172 +	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
   2.173 +		op.cmd              = EVTCHNOP_bind_virq;
   2.174 +		op.u.bind_virq.virq = virq;
   2.175 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   2.176 +		evtchn = op.u.bind_virq.port;
   2.177  
   2.178 -    irq_bindcount[irq]++;
   2.179 +		irq = find_unbound_irq();
   2.180 +		evtchn_to_irq[evtchn] = irq;
   2.181 +		irq_to_evtchn[irq]    = evtchn;
   2.182  
   2.183 -    spin_unlock(&irq_mapping_update_lock);
   2.184 +		per_cpu(virq_to_irq, cpu)[virq] = irq;
   2.185 +
   2.186 +		bind_evtchn_to_cpu(evtchn, cpu);
   2.187 +	}
   2.188 +
   2.189 +	irq_bindcount[irq]++;
   2.190 +
   2.191 +	spin_unlock(&irq_mapping_update_lock);
   2.192      
   2.193 -    return irq;
   2.194 +	return irq;
   2.195  }
   2.196  EXPORT_SYMBOL(bind_virq_to_irq);
   2.197  
   2.198  void unbind_virq_from_irq(int virq)
   2.199  {
   2.200 -    evtchn_op_t op;
   2.201 -    int cpu    = smp_processor_id();
   2.202 -    int irq    = per_cpu(virq_to_irq, cpu)[virq];
   2.203 -    int evtchn = irq_to_evtchn[irq];
   2.204 -
   2.205 -    spin_lock(&irq_mapping_update_lock);
   2.206 +	evtchn_op_t op;
   2.207 +	int cpu    = smp_processor_id();
   2.208 +	int irq    = per_cpu(virq_to_irq, cpu)[virq];
   2.209 +	int evtchn = irq_to_evtchn[irq];
   2.210  
   2.211 -    if ( --irq_bindcount[irq] == 0 )
   2.212 -    {
   2.213 -        op.cmd          = EVTCHNOP_close;
   2.214 -        op.u.close.dom  = DOMID_SELF;
   2.215 -        op.u.close.port = evtchn;
   2.216 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
   2.217 -            panic("Failed to unbind virtual IRQ %d\n", virq);
   2.218 +	spin_lock(&irq_mapping_update_lock);
   2.219  
   2.220 -        /*
   2.221 -         * This is a slight hack. Interdomain ports can be allocated directly 
   2.222 -         * by userspace, and at that point they get bound by Xen to vcpu 0. We 
   2.223 -         * therefore need to make sure that if we get an event on an event 
   2.224 -         * channel we don't know about vcpu 0 handles it. Binding channels to 
   2.225 -         * vcpu 0 when closing them achieves this.
   2.226 -         */
   2.227 -        bind_evtchn_to_cpu(evtchn, 0);
   2.228 -        evtchn_to_irq[evtchn] = -1;
   2.229 -        irq_to_evtchn[irq]    = -1;
   2.230 -        per_cpu(virq_to_irq, cpu)[virq]     = -1;
   2.231 -    }
   2.232 +	if (--irq_bindcount[irq] == 0) {
   2.233 +		op.cmd          = EVTCHNOP_close;
   2.234 +		op.u.close.dom  = DOMID_SELF;
   2.235 +		op.u.close.port = evtchn;
   2.236 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   2.237  
   2.238 -    spin_unlock(&irq_mapping_update_lock);
   2.239 +		/*
   2.240 +		 * This is a slight hack. Interdomain ports can be allocated 
   2.241 +		 * directly by userspace, and at that point they get bound by 
   2.242 +		 * Xen to vcpu 0. We therefore need to make sure that if we get
   2.243 +		 * an event on an event channel we don't know about vcpu 0 
   2.244 +		 * handles it. Binding channels to vcpu 0 when closing them
   2.245 +		 * achieves this.
   2.246 +		 */
   2.247 +		bind_evtchn_to_cpu(evtchn, 0);
   2.248 +		evtchn_to_irq[evtchn] = -1;
   2.249 +		irq_to_evtchn[irq]    = -1;
   2.250 +		per_cpu(virq_to_irq, cpu)[virq] = -1;
   2.251 +	}
   2.252 +
   2.253 +	spin_unlock(&irq_mapping_update_lock);
   2.254  }
   2.255  EXPORT_SYMBOL(unbind_virq_from_irq);
   2.256  
   2.257  int bind_ipi_to_irq(int ipi)
   2.258  {
   2.259 -    evtchn_op_t op;
   2.260 -    int evtchn, irq;
   2.261 -    int cpu = smp_processor_id();
   2.262 -
   2.263 -    spin_lock(&irq_mapping_update_lock);
   2.264 -
   2.265 -    if ( (evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0 )
   2.266 -    {
   2.267 -        op.cmd = EVTCHNOP_bind_ipi;
   2.268 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
   2.269 -            panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, cpu);
   2.270 -        evtchn = op.u.bind_ipi.port;
   2.271 +	evtchn_op_t op;
   2.272 +	int evtchn, irq;
   2.273 +	int cpu = smp_processor_id();
   2.274  
   2.275 -        irq = find_unbound_irq();
   2.276 -        evtchn_to_irq[evtchn] = irq;
   2.277 -        irq_to_evtchn[irq]    = evtchn;
   2.278 -
   2.279 -        per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
   2.280 +	spin_lock(&irq_mapping_update_lock);
   2.281  
   2.282 -        bind_evtchn_to_cpu(evtchn, cpu);
   2.283 -    } 
   2.284 -    else
   2.285 -    {
   2.286 -        irq = evtchn_to_irq[evtchn];
   2.287 -    }
   2.288 +	if ((evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0) {
   2.289 +		op.cmd = EVTCHNOP_bind_ipi;
   2.290 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   2.291 +		evtchn = op.u.bind_ipi.port;
   2.292  
   2.293 -    irq_bindcount[irq]++;
   2.294 +		irq = find_unbound_irq();
   2.295 +		evtchn_to_irq[evtchn] = irq;
   2.296 +		irq_to_evtchn[irq]    = evtchn;
   2.297  
   2.298 -    spin_unlock(&irq_mapping_update_lock);
   2.299 +		per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
   2.300  
   2.301 -    return irq;
   2.302 +		bind_evtchn_to_cpu(evtchn, cpu);
   2.303 +	} else {
   2.304 +		irq = evtchn_to_irq[evtchn];
   2.305 +	}
   2.306 +
   2.307 +	irq_bindcount[irq]++;
   2.308 +
   2.309 +	spin_unlock(&irq_mapping_update_lock);
   2.310 +
   2.311 +	return irq;
   2.312  }
   2.313  EXPORT_SYMBOL(bind_ipi_to_irq);
   2.314  
   2.315  void unbind_ipi_from_irq(int ipi)
   2.316  {
   2.317 -    evtchn_op_t op;
   2.318 -    int cpu    = smp_processor_id();
   2.319 -    int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
   2.320 -    int irq    = evtchn_to_irq[evtchn];
   2.321 -
   2.322 -    spin_lock(&irq_mapping_update_lock);
   2.323 +	evtchn_op_t op;
   2.324 +	int cpu    = smp_processor_id();
   2.325 +	int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
   2.326 +	int irq    = evtchn_to_irq[evtchn];
   2.327  
   2.328 -    if ( --irq_bindcount[irq] == 0 )
   2.329 -    {
   2.330 -        op.cmd          = EVTCHNOP_close;
   2.331 -        op.u.close.dom  = DOMID_SELF;
   2.332 -        op.u.close.port = evtchn;
   2.333 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
   2.334 -            panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);
   2.335 +	spin_lock(&irq_mapping_update_lock);
   2.336  
   2.337 -        /* See comments in unbind_virq_from_irq */
   2.338 -        bind_evtchn_to_cpu(evtchn, 0);
   2.339 -        evtchn_to_irq[evtchn] = -1;
   2.340 -        irq_to_evtchn[irq]    = -1;
   2.341 -        per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
   2.342 -    }
   2.343 +	if (--irq_bindcount[irq] == 0) {
   2.344 +		op.cmd          = EVTCHNOP_close;
   2.345 +		op.u.close.dom  = DOMID_SELF;
   2.346 +		op.u.close.port = evtchn;
   2.347 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   2.348  
   2.349 -    spin_unlock(&irq_mapping_update_lock);
   2.350 +		/* See comments in unbind_virq_from_irq */
   2.351 +		bind_evtchn_to_cpu(evtchn, 0);
   2.352 +		evtchn_to_irq[evtchn] = -1;
   2.353 +		irq_to_evtchn[irq]    = -1;
   2.354 +		per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
   2.355 +	}
   2.356 +
   2.357 +	spin_unlock(&irq_mapping_update_lock);
   2.358  }
   2.359  EXPORT_SYMBOL(unbind_ipi_from_irq);
   2.360  
   2.361  int bind_evtchn_to_irq(unsigned int evtchn)
   2.362  {
   2.363 -    int irq;
   2.364 -
   2.365 -    spin_lock(&irq_mapping_update_lock);
   2.366 +	int irq;
   2.367  
   2.368 -    if ( (irq = evtchn_to_irq[evtchn]) == -1 )
   2.369 -    {
   2.370 -        irq = find_unbound_irq();
   2.371 -        evtchn_to_irq[evtchn] = irq;
   2.372 -        irq_to_evtchn[irq]    = evtchn;
   2.373 -    }
   2.374 +	spin_lock(&irq_mapping_update_lock);
   2.375  
   2.376 -    irq_bindcount[irq]++;
   2.377 +	if ((irq = evtchn_to_irq[evtchn]) == -1) {
   2.378 +		irq = find_unbound_irq();
   2.379 +		evtchn_to_irq[evtchn] = irq;
   2.380 +		irq_to_evtchn[irq]    = evtchn;
   2.381 +	}
   2.382  
   2.383 -    spin_unlock(&irq_mapping_update_lock);
   2.384 +	irq_bindcount[irq]++;
   2.385 +
   2.386 +	spin_unlock(&irq_mapping_update_lock);
   2.387      
   2.388 -    return irq;
   2.389 +	return irq;
   2.390  }
   2.391  EXPORT_SYMBOL(bind_evtchn_to_irq);
   2.392  
   2.393  void unbind_evtchn_from_irq(unsigned int evtchn)
   2.394  {
   2.395 -    int irq = evtchn_to_irq[evtchn];
   2.396 -
   2.397 -    spin_lock(&irq_mapping_update_lock);
   2.398 +	int irq = evtchn_to_irq[evtchn];
   2.399  
   2.400 -    if ( --irq_bindcount[irq] == 0 )
   2.401 -    {
   2.402 -        evtchn_to_irq[evtchn] = -1;
   2.403 -        irq_to_evtchn[irq]    = -1;
   2.404 -    }
   2.405 +	spin_lock(&irq_mapping_update_lock);
   2.406  
   2.407 -    spin_unlock(&irq_mapping_update_lock);
   2.408 +	if (--irq_bindcount[irq] == 0) {
   2.409 +		evtchn_to_irq[evtchn] = -1;
   2.410 +		irq_to_evtchn[irq]    = -1;
   2.411 +	}
   2.412 +
   2.413 +	spin_unlock(&irq_mapping_update_lock);
   2.414  }
   2.415  EXPORT_SYMBOL(unbind_evtchn_from_irq);
   2.416  
   2.417  int bind_evtchn_to_irqhandler(
   2.418 -    unsigned int evtchn,
   2.419 -    irqreturn_t (*handler)(int, void *, struct pt_regs *),
   2.420 -    unsigned long irqflags,
   2.421 -    const char *devname,
   2.422 -    void *dev_id)
   2.423 +	unsigned int evtchn,
   2.424 +	irqreturn_t (*handler)(int, void *, struct pt_regs *),
   2.425 +	unsigned long irqflags,
   2.426 +	const char *devname,
   2.427 +	void *dev_id)
   2.428  {
   2.429 -    unsigned int irq;
   2.430 -    int retval;
   2.431 +	unsigned int irq;
   2.432 +	int retval;
   2.433  
   2.434 -    irq = bind_evtchn_to_irq(evtchn);
   2.435 -    retval = request_irq(irq, handler, irqflags, devname, dev_id);
   2.436 -    if ( retval != 0 )
   2.437 -        unbind_evtchn_from_irq(evtchn);
   2.438 +	irq = bind_evtchn_to_irq(evtchn);
   2.439 +	retval = request_irq(irq, handler, irqflags, devname, dev_id);
   2.440 +	if (retval != 0)
   2.441 +		unbind_evtchn_from_irq(evtchn);
   2.442  
   2.443 -    return retval;
   2.444 +	return retval;
   2.445  }
   2.446  EXPORT_SYMBOL(bind_evtchn_to_irqhandler);
   2.447  
   2.448  void unbind_evtchn_from_irqhandler(unsigned int evtchn, void *dev_id)
   2.449  {
   2.450 -    unsigned int irq = evtchn_to_irq[evtchn];
   2.451 -    free_irq(irq, dev_id);
   2.452 -    unbind_evtchn_from_irq(evtchn);
   2.453 +	unsigned int irq = evtchn_to_irq[evtchn];
   2.454 +	free_irq(irq, dev_id);
   2.455 +	unbind_evtchn_from_irq(evtchn);
   2.456  }
   2.457  EXPORT_SYMBOL(unbind_evtchn_from_irqhandler);
   2.458  
   2.459 @@ -378,50 +365,50 @@ static void do_nothing_function(void *ig
   2.460  /* Rebind an evtchn so that it gets delivered to a specific cpu */
   2.461  static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
   2.462  {
   2.463 -    evtchn_op_t op;
   2.464 -    int evtchn;
   2.465 -
   2.466 -    spin_lock(&irq_mapping_update_lock);
   2.467 -    evtchn = irq_to_evtchn[irq];
   2.468 -    if (!VALID_EVTCHN(evtchn)) {
   2.469 -        spin_unlock(&irq_mapping_update_lock);
   2.470 -        return;
   2.471 -    }
   2.472 -
   2.473 -    /* Tell Xen to send future instances of this interrupt to other vcpu. */
   2.474 -    op.cmd = EVTCHNOP_bind_vcpu;
   2.475 -    op.u.bind_vcpu.port = evtchn;
   2.476 -    op.u.bind_vcpu.vcpu = tcpu;
   2.477 +	evtchn_op_t op;
   2.478 +	int evtchn;
   2.479  
   2.480 -    /*
   2.481 -     * If this fails, it usually just indicates that we're dealing with a virq 
   2.482 -     * or IPI channel, which don't actually need to be rebound. Ignore it, 
   2.483 -     * but don't do the xenlinux-level rebind in that case.
   2.484 -     */
   2.485 -    if (HYPERVISOR_event_channel_op(&op) >= 0)
   2.486 -        bind_evtchn_to_cpu(evtchn, tcpu);
   2.487 -
   2.488 -    spin_unlock(&irq_mapping_update_lock);
   2.489 +	spin_lock(&irq_mapping_update_lock);
   2.490 +	evtchn = irq_to_evtchn[irq];
   2.491 +	if (!VALID_EVTCHN(evtchn)) {
   2.492 +		spin_unlock(&irq_mapping_update_lock);
   2.493 +		return;
   2.494 +	}
   2.495  
   2.496 -    /*
   2.497 -     * Now send the new target processor a NOP IPI. When this returns, it 
   2.498 -     * will check for any pending interrupts, and so service any that got 
   2.499 -     * delivered to the wrong processor by mistake.
   2.500 -     * 
   2.501 -     * XXX: The only time this is called with interrupts disabled is from the 
   2.502 -     * hotplug/hotunplug path. In that case, all cpus are stopped with 
   2.503 -     * interrupts disabled, and the missed interrupts will be picked up when 
   2.504 -     * they start again. This is kind of a hack.
   2.505 -     */
   2.506 -    if (!irqs_disabled())
   2.507 -        smp_call_function(do_nothing_function, NULL, 0, 0);
   2.508 +	/* Send future instances of this interrupt to other vcpu. */
   2.509 +	op.cmd = EVTCHNOP_bind_vcpu;
   2.510 +	op.u.bind_vcpu.port = evtchn;
   2.511 +	op.u.bind_vcpu.vcpu = tcpu;
   2.512 +
   2.513 +	/*
   2.514 +	 * If this fails, it usually just indicates that we're dealing with a 
   2.515 +	 * virq or IPI channel, which don't actually need to be rebound. Ignore
   2.516 +	 * it, but don't do the xenlinux-level rebind in that case.
   2.517 +	 */
   2.518 +	if (HYPERVISOR_event_channel_op(&op) >= 0)
   2.519 +		bind_evtchn_to_cpu(evtchn, tcpu);
   2.520 +
   2.521 +	spin_unlock(&irq_mapping_update_lock);
   2.522 +
   2.523 +	/*
   2.524 +	 * Now send the new target processor a NOP IPI. When this returns, it
   2.525 +	 * will check for any pending interrupts, and so service any that got 
   2.526 +	 * delivered to the wrong processor by mistake.
   2.527 +	 * 
   2.528 +	 * XXX: The only time this is called with interrupts disabled is from
   2.529 +	 * the hotplug/hotunplug path. In that case, all cpus are stopped with 
   2.530 +	 * interrupts disabled, and the missed interrupts will be picked up
   2.531 +	 * when they start again. This is kind of a hack.
   2.532 +	 */
   2.533 +	if (!irqs_disabled())
   2.534 +		smp_call_function(do_nothing_function, NULL, 0, 0);
   2.535  }
   2.536  
   2.537  
   2.538  static void set_affinity_irq(unsigned irq, cpumask_t dest)
   2.539  {
   2.540 -    unsigned tcpu = first_cpu(dest);
   2.541 -    rebind_irq_to_cpu(irq, tcpu);
   2.542 +	unsigned tcpu = first_cpu(dest);
   2.543 +	rebind_irq_to_cpu(irq, tcpu);
   2.544  }
   2.545  
   2.546  /*
   2.547 @@ -430,83 +417,82 @@ static void set_affinity_irq(unsigned ir
   2.548  
   2.549  static unsigned int startup_dynirq(unsigned int irq)
   2.550  {
   2.551 -    int evtchn = irq_to_evtchn[irq];
   2.552 +	int evtchn = irq_to_evtchn[irq];
   2.553  
   2.554 -    if ( !VALID_EVTCHN(evtchn) )
   2.555 -        return 0;
   2.556 -    unmask_evtchn(evtchn);
   2.557 -    return 0;
   2.558 +	if (!VALID_EVTCHN(evtchn))
   2.559 +		return 0;
   2.560 +	unmask_evtchn(evtchn);
   2.561 +	return 0;
   2.562  }
   2.563  
   2.564  static void shutdown_dynirq(unsigned int irq)
   2.565  {
   2.566 -    int evtchn = irq_to_evtchn[irq];
   2.567 +	int evtchn = irq_to_evtchn[irq];
   2.568  
   2.569 -    if ( !VALID_EVTCHN(evtchn) )
   2.570 -        return;
   2.571 -    mask_evtchn(evtchn);
   2.572 +	if (!VALID_EVTCHN(evtchn))
   2.573 +		return;
   2.574 +	mask_evtchn(evtchn);
   2.575  }
   2.576  
   2.577  static void enable_dynirq(unsigned int irq)
   2.578  {
   2.579 -    int evtchn = irq_to_evtchn[irq];
   2.580 +	int evtchn = irq_to_evtchn[irq];
   2.581  
   2.582 -    unmask_evtchn(evtchn);
   2.583 +	unmask_evtchn(evtchn);
   2.584  }
   2.585  
   2.586  static void disable_dynirq(unsigned int irq)
   2.587  {
   2.588 -    int evtchn = irq_to_evtchn[irq];
   2.589 +	int evtchn = irq_to_evtchn[irq];
   2.590  
   2.591 -    mask_evtchn(evtchn);
   2.592 +	mask_evtchn(evtchn);
   2.593  }
   2.594  
   2.595  static void ack_dynirq(unsigned int irq)
   2.596  {
   2.597 -    int evtchn = irq_to_evtchn[irq];
   2.598 +	int evtchn = irq_to_evtchn[irq];
   2.599  
   2.600 -    mask_evtchn(evtchn);
   2.601 -    clear_evtchn(evtchn);
   2.602 +	mask_evtchn(evtchn);
   2.603 +	clear_evtchn(evtchn);
   2.604  }
   2.605  
   2.606  static void end_dynirq(unsigned int irq)
   2.607  {
   2.608 -    int evtchn = irq_to_evtchn[irq];
   2.609 +	int evtchn = irq_to_evtchn[irq];
   2.610  
   2.611 -    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
   2.612 -        unmask_evtchn(evtchn);
   2.613 +	if (!(irq_desc[irq].status & IRQ_DISABLED))
   2.614 +		unmask_evtchn(evtchn);
   2.615  }
   2.616  
   2.617  static struct hw_interrupt_type dynirq_type = {
   2.618 -    "Dynamic-irq",
   2.619 -    startup_dynirq,
   2.620 -    shutdown_dynirq,
   2.621 -    enable_dynirq,
   2.622 -    disable_dynirq,
   2.623 -    ack_dynirq,
   2.624 -    end_dynirq,
   2.625 -    set_affinity_irq
   2.626 +	"Dynamic-irq",
   2.627 +	startup_dynirq,
   2.628 +	shutdown_dynirq,
   2.629 +	enable_dynirq,
   2.630 +	disable_dynirq,
   2.631 +	ack_dynirq,
   2.632 +	end_dynirq,
   2.633 +	set_affinity_irq
   2.634  };
   2.635  
   2.636  static inline void pirq_unmask_notify(int pirq)
   2.637  {
   2.638 -    physdev_op_t op;
   2.639 -    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
   2.640 -    {
   2.641 -        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
   2.642 -        (void)HYPERVISOR_physdev_op(&op);
   2.643 -    }
   2.644 +	physdev_op_t op;
   2.645 +	if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
   2.646 +		op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
   2.647 +		(void)HYPERVISOR_physdev_op(&op);
   2.648 +	}
   2.649  }
   2.650  
   2.651  static inline void pirq_query_unmask(int pirq)
   2.652  {
   2.653 -    physdev_op_t op;
   2.654 -    op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
   2.655 -    op.u.irq_status_query.irq = pirq;
   2.656 -    (void)HYPERVISOR_physdev_op(&op);
   2.657 -    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
   2.658 -    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
   2.659 -        set_bit(pirq, &pirq_needs_unmask_notify[0]);
   2.660 +	physdev_op_t op;
   2.661 +	op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
   2.662 +	op.u.irq_status_query.irq = pirq;
   2.663 +	(void)HYPERVISOR_physdev_op(&op);
   2.664 +	clear_bit(pirq, &pirq_needs_unmask_notify[0]);
   2.665 +	if (op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
   2.666 +		set_bit(pirq, &pirq_needs_unmask_notify[0]);
   2.667  }
   2.668  
   2.669  /*
   2.670 @@ -517,218 +503,222 @@ static inline void pirq_query_unmask(int
   2.671  
   2.672  static unsigned int startup_pirq(unsigned int irq)
   2.673  {
   2.674 -    evtchn_op_t op;
   2.675 -    int evtchn;
   2.676 +	evtchn_op_t op;
   2.677 +	int evtchn;
   2.678  
   2.679 -    op.cmd               = EVTCHNOP_bind_pirq;
   2.680 -    op.u.bind_pirq.pirq  = irq;
   2.681 -    /* NB. We are happy to share unless we are probing. */
   2.682 -    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
   2.683 -    if ( HYPERVISOR_event_channel_op(&op) != 0 )
   2.684 -    {
   2.685 -        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
   2.686 -            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
   2.687 -        return 0;
   2.688 -    }
   2.689 -    evtchn = op.u.bind_pirq.port;
   2.690 +	op.cmd               = EVTCHNOP_bind_pirq;
   2.691 +	op.u.bind_pirq.pirq  = irq;
   2.692 +	/* NB. We are happy to share unless we are probing. */
   2.693 +	op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
   2.694 +	if (HYPERVISOR_event_channel_op(&op) != 0) {
   2.695 +		if ( !probing_irq(irq) )
   2.696 +			printk(KERN_INFO "Failed to obtain physical "
   2.697 +			       "IRQ %d\n", irq);
   2.698 +		return 0;
   2.699 +	}
   2.700 +	evtchn = op.u.bind_pirq.port;
   2.701  
   2.702 -    pirq_query_unmask(irq_to_pirq(irq));
   2.703 +	pirq_query_unmask(irq_to_pirq(irq));
   2.704  
   2.705 -    bind_evtchn_to_cpu(evtchn, 0);
   2.706 -    evtchn_to_irq[evtchn] = irq;
   2.707 -    irq_to_evtchn[irq]    = evtchn;
   2.708 +	bind_evtchn_to_cpu(evtchn, 0);
   2.709 +	evtchn_to_irq[evtchn] = irq;
   2.710 +	irq_to_evtchn[irq]    = evtchn;
   2.711  
   2.712 -    unmask_evtchn(evtchn);
   2.713 -    pirq_unmask_notify(irq_to_pirq(irq));
   2.714 +	unmask_evtchn(evtchn);
   2.715 +	pirq_unmask_notify(irq_to_pirq(irq));
   2.716  
   2.717 -    return 0;
   2.718 +	return 0;
   2.719  }
   2.720  
   2.721  static void shutdown_pirq(unsigned int irq)
   2.722  {
   2.723 -    evtchn_op_t op;
   2.724 -    int evtchn = irq_to_evtchn[irq];
   2.725 -
   2.726 -    if ( !VALID_EVTCHN(evtchn) )
   2.727 -        return;
   2.728 -
   2.729 -    mask_evtchn(evtchn);
   2.730 +	evtchn_op_t op;
   2.731 +	int evtchn = irq_to_evtchn[irq];
   2.732  
   2.733 -    op.cmd          = EVTCHNOP_close;
   2.734 -    op.u.close.dom  = DOMID_SELF;
   2.735 -    op.u.close.port = evtchn;
   2.736 -    if ( HYPERVISOR_event_channel_op(&op) != 0 )
   2.737 -        panic("Failed to unbind physical IRQ %d\n", irq);
   2.738 +	if (!VALID_EVTCHN(evtchn))
   2.739 +		return;
   2.740  
   2.741 -    bind_evtchn_to_cpu(evtchn, 0);
   2.742 -    evtchn_to_irq[evtchn] = -1;
   2.743 -    irq_to_evtchn[irq]    = -1;
   2.744 +	mask_evtchn(evtchn);
   2.745 +
   2.746 +	op.cmd          = EVTCHNOP_close;
   2.747 +	op.u.close.dom  = DOMID_SELF;
   2.748 +	op.u.close.port = evtchn;
   2.749 +	BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   2.750 +
   2.751 +	bind_evtchn_to_cpu(evtchn, 0);
   2.752 +	evtchn_to_irq[evtchn] = -1;
   2.753 +	irq_to_evtchn[irq]    = -1;
   2.754  }
   2.755  
   2.756  static void enable_pirq(unsigned int irq)
   2.757  {
   2.758 -    int evtchn = irq_to_evtchn[irq];
   2.759 -    if ( !VALID_EVTCHN(evtchn) )
   2.760 -        return;
   2.761 -    unmask_evtchn(evtchn);
   2.762 -    pirq_unmask_notify(irq_to_pirq(irq));
   2.763 +	int evtchn = irq_to_evtchn[irq];
   2.764 +	if (!VALID_EVTCHN(evtchn))
   2.765 +		return;
   2.766 +	unmask_evtchn(evtchn);
   2.767 +	pirq_unmask_notify(irq_to_pirq(irq));
   2.768  }
   2.769  
   2.770  static void disable_pirq(unsigned int irq)
   2.771  {
   2.772 -    int evtchn = irq_to_evtchn[irq];
   2.773 -    if ( !VALID_EVTCHN(evtchn) )
   2.774 -        return;
   2.775 -    mask_evtchn(evtchn);
   2.776 +	int evtchn = irq_to_evtchn[irq];
   2.777 +	if (!VALID_EVTCHN(evtchn))
   2.778 +		return;
   2.779 +	mask_evtchn(evtchn);
   2.780  }
   2.781  
   2.782  static void ack_pirq(unsigned int irq)
   2.783  {
   2.784 -    int evtchn = irq_to_evtchn[irq];
   2.785 -    if ( !VALID_EVTCHN(evtchn) )
   2.786 -        return;
   2.787 -    mask_evtchn(evtchn);
   2.788 -    clear_evtchn(evtchn);
   2.789 +	int evtchn = irq_to_evtchn[irq];
   2.790 +	if (!VALID_EVTCHN(evtchn))
   2.791 +		return;
   2.792 +	mask_evtchn(evtchn);
   2.793 +	clear_evtchn(evtchn);
   2.794  }
   2.795  
   2.796  static void end_pirq(unsigned int irq)
   2.797  {
   2.798 -    int evtchn = irq_to_evtchn[irq];
   2.799 -    if ( !VALID_EVTCHN(evtchn) )
   2.800 -        return;
   2.801 -    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
   2.802 -    {
   2.803 -        unmask_evtchn(evtchn);
   2.804 -        pirq_unmask_notify(irq_to_pirq(irq));
   2.805 -    }
   2.806 +	int evtchn = irq_to_evtchn[irq];
   2.807 +	if (!VALID_EVTCHN(evtchn))
   2.808 +		return;
   2.809 +	if (!(irq_desc[irq].status & IRQ_DISABLED)) {
   2.810 +		unmask_evtchn(evtchn);
   2.811 +		pirq_unmask_notify(irq_to_pirq(irq));
   2.812 +	}
   2.813  }
   2.814  
   2.815  static struct hw_interrupt_type pirq_type = {
   2.816 -    "Phys-irq",
   2.817 -    startup_pirq,
   2.818 -    shutdown_pirq,
   2.819 -    enable_pirq,
   2.820 -    disable_pirq,
   2.821 -    ack_pirq,
   2.822 -    end_pirq,
   2.823 -    set_affinity_irq
   2.824 +	"Phys-irq",
   2.825 +	startup_pirq,
   2.826 +	shutdown_pirq,
   2.827 +	enable_pirq,
   2.828 +	disable_pirq,
   2.829 +	ack_pirq,
   2.830 +	end_pirq,
   2.831 +	set_affinity_irq
   2.832  };
   2.833  
   2.834  void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
   2.835  {
   2.836 -    int evtchn = irq_to_evtchn[i];
   2.837 -    shared_info_t *s = HYPERVISOR_shared_info;
   2.838 -    if ( !VALID_EVTCHN(evtchn) )
   2.839 -        return;
   2.840 -    BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
   2.841 -    synch_set_bit(evtchn, &s->evtchn_pending[0]);
   2.842 +	int evtchn = irq_to_evtchn[i];
   2.843 +	shared_info_t *s = HYPERVISOR_shared_info;
   2.844 +	if (!VALID_EVTCHN(evtchn))
   2.845 +		return;
   2.846 +	BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
   2.847 +	synch_set_bit(evtchn, &s->evtchn_pending[0]);
   2.848  }
   2.849  
   2.850  void irq_suspend(void)
   2.851  {
   2.852 -    int pirq, virq, irq, evtchn;
   2.853 -    int cpu = smp_processor_id(); /* XXX */
   2.854 -
   2.855 -    /* Unbind VIRQs from event channels. */
   2.856 -    for ( virq = 0; virq < NR_VIRQS; virq++ )
   2.857 -    {
   2.858 -        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
   2.859 -            continue;
   2.860 -        evtchn = irq_to_evtchn[irq];
   2.861 +	int pirq, virq, irq, evtchn;
   2.862 +	int cpu = smp_processor_id(); /* XXX */
   2.863  
   2.864 -        /* Mark the event channel as unused in our table. */
   2.865 -        evtchn_to_irq[evtchn] = -1;
   2.866 -        irq_to_evtchn[irq]    = -1;
   2.867 -    }
   2.868 +	/* Unbind VIRQs from event channels. */
   2.869 +	for (virq = 0; virq < NR_VIRQS; virq++) {
   2.870 +		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
   2.871 +			continue;
   2.872 +		evtchn = irq_to_evtchn[irq];
   2.873  
   2.874 -    /* Check that no PIRQs are still bound. */
   2.875 -    for ( pirq = 0; pirq < NR_PIRQS; pirq++ )
   2.876 -        if ( (evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1 )
   2.877 -            panic("Suspend attempted while PIRQ %d bound to evtchn %d.\n",
   2.878 -                  pirq, evtchn);
   2.879 +		/* Mark the event channel as unused in our table. */
   2.880 +		evtchn_to_irq[evtchn] = -1;
   2.881 +		irq_to_evtchn[irq]    = -1;
   2.882 +	}
   2.883 +
   2.884 +	/* Check that no PIRQs are still bound. */
   2.885 +	for (pirq = 0; pirq < NR_PIRQS; pirq++)
   2.886 +		if ((evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1)
   2.887 +			panic("Suspend attempted while PIRQ %d bound "
   2.888 +			      "to evtchn %d.\n", pirq, evtchn);
   2.889  }
   2.890  
   2.891  void irq_resume(void)
   2.892  {
   2.893 -    evtchn_op_t op;
   2.894 -    int         virq, irq, evtchn;
   2.895 -    int cpu = smp_processor_id(); /* XXX */
   2.896 -
   2.897 -    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
   2.898 -        mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
   2.899 -
   2.900 -    for ( virq = 0; virq < NR_VIRQS; virq++ )
   2.901 -    {
   2.902 -        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
   2.903 -            continue;
   2.904 +	evtchn_op_t op;
   2.905 +	int         virq, irq, evtchn;
   2.906 +	int cpu = smp_processor_id(); /* XXX */
   2.907  
   2.908 -        /* Get a new binding from Xen. */
   2.909 -        op.cmd              = EVTCHNOP_bind_virq;
   2.910 -        op.u.bind_virq.virq = virq;
   2.911 -        if ( HYPERVISOR_event_channel_op(&op) != 0 )
   2.912 -            panic("Failed to bind virtual IRQ %d\n", virq);
   2.913 -        evtchn = op.u.bind_virq.port;
   2.914 +	/* New event-channel space is not 'live' yet. */
   2.915 +	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
   2.916 +		mask_evtchn(evtchn);
   2.917 +
   2.918 +	for (virq = 0; virq < NR_VIRQS; virq++) {
   2.919 +		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
   2.920 +			continue;
   2.921 +
   2.922 +		/* Get a new binding from Xen. */
   2.923 +		op.cmd              = EVTCHNOP_bind_virq;
   2.924 +		op.u.bind_virq.virq = virq;
   2.925 +		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   2.926 +		evtchn = op.u.bind_virq.port;
   2.927          
   2.928 -        /* Record the new mapping. */
   2.929 -        bind_evtchn_to_cpu(evtchn, 0);
   2.930 -        evtchn_to_irq[evtchn] = irq;
   2.931 -        irq_to_evtchn[irq]    = evtchn;
   2.932 +		/* Record the new mapping. */
   2.933 +		bind_evtchn_to_cpu(evtchn, 0);
   2.934 +		evtchn_to_irq[evtchn] = irq;
   2.935 +		irq_to_evtchn[irq]    = evtchn;
   2.936  
   2.937 -        /* Ready for use. */
   2.938 -        unmask_evtchn(evtchn);
   2.939 -    }
   2.940 +		/* Ready for use. */
   2.941 +		unmask_evtchn(evtchn);
   2.942 +	}
   2.943  }
   2.944  
   2.945  void __init init_IRQ(void)
   2.946  {
   2.947 -    int i;
   2.948 -    int cpu;
   2.949 +	int i;
   2.950 +	int cpu;
   2.951  
   2.952 -    irq_ctx_init(0);
   2.953 +	irq_ctx_init(0);
   2.954  
   2.955 -    spin_lock_init(&irq_mapping_update_lock);
   2.956 +	spin_lock_init(&irq_mapping_update_lock);
   2.957  
   2.958  #ifdef CONFIG_SMP
   2.959 -    /* By default all event channels notify CPU#0. */
   2.960 -    memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
   2.961 +	/* By default all event channels notify CPU#0. */
   2.962 +	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
   2.963  #endif
   2.964  
   2.965 -    for ( cpu = 0; cpu < NR_CPUS; cpu++ ) {
   2.966 -        /* No VIRQ -> IRQ mappings. */
   2.967 -        for ( i = 0; i < NR_VIRQS; i++ )
   2.968 -            per_cpu(virq_to_irq, cpu)[i] = -1;
   2.969 -    }
   2.970 -
   2.971 -    /* No event-channel -> IRQ mappings. */
   2.972 -    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
   2.973 -    {
   2.974 -        evtchn_to_irq[i] = -1;
   2.975 -        mask_evtchn(i); /* No event channels are 'live' right now. */
   2.976 -    }
   2.977 -
   2.978 -    /* No IRQ -> event-channel mappings. */
   2.979 -    for ( i = 0; i < NR_IRQS; i++ )
   2.980 -        irq_to_evtchn[i] = -1;
   2.981 +	for (cpu = 0; cpu < NR_CPUS; cpu++) {
   2.982 +		/* No VIRQ -> IRQ mappings. */
   2.983 +		for (i = 0; i < NR_VIRQS; i++)
   2.984 +			per_cpu(virq_to_irq, cpu)[i] = -1;
   2.985 +	}
   2.986  
   2.987 -    for ( i = 0; i < NR_DYNIRQS; i++ )
   2.988 -    {
   2.989 -        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
   2.990 -        irq_bindcount[dynirq_to_irq(i)] = 0;
   2.991 -
   2.992 -        irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
   2.993 -        irq_desc[dynirq_to_irq(i)].action  = 0;
   2.994 -        irq_desc[dynirq_to_irq(i)].depth   = 1;
   2.995 -        irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
   2.996 -    }
   2.997 +	/* No event-channel -> IRQ mappings. */
   2.998 +	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
   2.999 +		evtchn_to_irq[i] = -1;
  2.1000 +		mask_evtchn(i); /* No event channels are 'live' right now. */
  2.1001 +	}
  2.1002  
  2.1003 -    for ( i = 0; i < NR_PIRQS; i++ )
  2.1004 -    {
  2.1005 -        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
  2.1006 -        irq_bindcount[pirq_to_irq(i)] = 1;
  2.1007 +	/* No IRQ -> event-channel mappings. */
  2.1008 +	for (i = 0; i < NR_IRQS; i++)
  2.1009 +		irq_to_evtchn[i] = -1;
  2.1010  
  2.1011 -        irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
  2.1012 -        irq_desc[pirq_to_irq(i)].action  = 0;
  2.1013 -        irq_desc[pirq_to_irq(i)].depth   = 1;
  2.1014 -        irq_desc[pirq_to_irq(i)].handler = &pirq_type;
  2.1015 -    }
  2.1016 +	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
  2.1017 +	for (i = 0; i < NR_DYNIRQS; i++) {
  2.1018 +		irq_bindcount[dynirq_to_irq(i)] = 0;
  2.1019 +
  2.1020 +		irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
  2.1021 +		irq_desc[dynirq_to_irq(i)].action  = 0;
  2.1022 +		irq_desc[dynirq_to_irq(i)].depth   = 1;
  2.1023 +		irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
  2.1024 +	}
  2.1025 +
  2.1026 +	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
  2.1027 +	for (i = 0; i < NR_PIRQS; i++)
  2.1028 +	{
  2.1029 +		irq_bindcount[pirq_to_irq(i)] = 1;
  2.1030 +
  2.1031 +		irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
  2.1032 +		irq_desc[pirq_to_irq(i)].action  = 0;
  2.1033 +		irq_desc[pirq_to_irq(i)].depth   = 1;
  2.1034 +		irq_desc[pirq_to_irq(i)].handler = &pirq_type;
  2.1035 +	}
  2.1036  }
  2.1037 +
  2.1038 +/*
  2.1039 + * Local variables:
  2.1040 + *  c-file-style: "linux"
  2.1041 + *  indent-tabs-mode: t
  2.1042 + *  c-indent-level: 8
  2.1043 + *  c-basic-offset: 8
  2.1044 + *  tab-width: 8
  2.1045 + * End:
  2.1046 + */
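
For reference, evtchn_do_upcall() above demultiplexes pending events with a two-level bitmap: the per-vcpu selector word marks which 32-bit words of the shared evtchn_pending[] array need scanning, and each set bit within such a word yields port = (l1i << 5) + l2i. A self-contained sketch of that scan with mocked-up data (the real code reads the Xen shared-info page, and __builtin_ctz stands in for the kernel's __ffs):

#include <stdio.h>
#include <stdint.h>

#define NR_WORDS 32	/* 32 words x 32 bits = 1024 event channels */

int main(void)
{
	uint32_t pending[NR_WORDS] = { 0 };
	uint32_t l1, l2;
	unsigned int l1i, l2i, port;

	/* Pretend ports 3, 35 and 100 are pending. */
	pending[0] |= 1u << 3;	/* port   3 = (0 << 5) + 3 */
	pending[1] |= 1u << 3;	/* port  35 = (1 << 5) + 3 */
	pending[3] |= 1u << 4;	/* port 100 = (3 << 5) + 4 */

	/* Level-1 selector: one bit per non-empty pending word. */
	l1 = (1u << 0) | (1u << 1) | (1u << 3);

	while (l1 != 0) {
		l1i = __builtin_ctz(l1);	/* lowest set bit */
		l1 &= ~(1u << l1i);

		while ((l2 = pending[l1i]) != 0) {
			l2i = __builtin_ctz(l2);
			pending[l1i] &= ~(1u << l2i);

			port = (l1i << 5) + l2i;
			printf("servicing port %u\n", port);
		}
	}
	return 0;
}

The kernel version additionally snapshots and clears the selector word with xchg() before scanning, which is why, as the comment in the diff notes, no extra barrier is needed on x86.
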
     3.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/fixup.c	Thu Sep 29 09:59:46 2005 +0100
     3.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/fixup.c	Thu Sep 29 11:10:27 2005 +0100
     3.3 @@ -37,51 +37,57 @@
     3.4  
     3.5  #define DP(_f, _args...) printk(KERN_ALERT "  " _f "\n" , ## _args )
     3.6  
     3.7 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
     3.8 -#define __LINKAGE fastcall
     3.9 -#else
    3.10 -#define __LINKAGE asmlinkage
    3.11 -#endif
    3.12 -
    3.13 -__LINKAGE void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
    3.14 +fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
    3.15  {
    3.16 -    static unsigned long printed = 0;
    3.17 -    char info[100];
    3.18 -    int i;
    3.19 -
    3.20 -    if ( !test_and_set_bit(0, &printed) )
    3.21 -    {
    3.22 -        HYPERVISOR_vm_assist(VMASST_CMD_disable,
    3.23 -			     VMASST_TYPE_4gb_segments_notify);
    3.24 -
    3.25 -        sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
    3.26 +	static unsigned long printed = 0;
    3.27 +	char info[100];
    3.28 +	int i;
    3.29  
    3.30 -        DP("");
    3.31 -        DP("***************************************************************");
    3.32 -        DP("***************************************************************");
    3.33 -        DP("** WARNING: Currently emulating unsupported memory accesses  **");
    3.34 -        DP("**          in /lib/tls libraries. The emulation is very     **");
    3.35 -        DP("**          slow. To ensure full performance you should      **");
    3.36 -        DP("**          execute the following as root:                   **");
    3.37 -        DP("**          mv /lib/tls /lib/tls.disabled                    **");
    3.38 -        DP("** Offending process: %-38.38s **", info);
    3.39 -        DP("***************************************************************");
    3.40 -        DP("***************************************************************");
    3.41 -        DP("");
    3.42 +	if (test_and_set_bit(0, &printed))
    3.43 +		return;
    3.44  
    3.45 -        for ( i = 5; i > 0; i-- )
    3.46 -        {
    3.47 -            printk("Pausing... %d", i);
    3.48 -            mdelay(1000);
    3.49 -            printk("\b\b\b\b\b\b\b\b\b\b\b\b");
    3.50 -        }
    3.51 -        printk("Continuing...\n\n");
    3.52 -    }
    3.53 +	HYPERVISOR_vm_assist(
    3.54 +		VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify);
    3.55 +
    3.56 +	sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
    3.57 +
    3.58 +
    3.59 +	DP("");
    3.60 +	DP("***************************************************************");
    3.61 +	DP("***************************************************************");
    3.62 +	DP("** WARNING: Currently emulating unsupported memory accesses  **");
    3.63 +	DP("**          in /lib/tls libraries. The emulation is very     **");
    3.64 +	DP("**          slow. To ensure full performance you should      **");
    3.65 +	DP("**          execute the following as root:                   **");
    3.66 +	DP("**          mv /lib/tls /lib/tls.disabled                    **");
    3.67 +	DP("** Offending process: %-38.38s **", info);
    3.68 +	DP("***************************************************************");
    3.69 +	DP("***************************************************************");
    3.70 +	DP("");
    3.71 +
    3.72 +	for (i = 5; i > 0; i--) {
    3.73 +		printk("Pausing... %d", i);
    3.74 +		mdelay(1000);
    3.75 +		printk("\b\b\b\b\b\b\b\b\b\b\b\b");
    3.76 +	}
    3.77 +
    3.78 +	printk("Continuing...\n\n");
    3.79  }
    3.80  
    3.81  static int __init fixup_init(void)
    3.82  {
    3.83 -    HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
    3.84 -    return 0;
    3.85 +	HYPERVISOR_vm_assist(
    3.86 +		VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
    3.87 +	return 0;
    3.88  }
    3.89  __initcall(fixup_init);
    3.90 +
    3.91 +/*
    3.92 + * Local variables:
    3.93 + *  c-file-style: "linux"
    3.94 + *  indent-tabs-mode: t
    3.95 + *  c-indent-level: 8
    3.96 + *  c-basic-offset: 8
    3.97 + *  tab-width: 8
    3.98 + * End:
    3.99 + */
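
The reworked do_fixup_4gb_segment() above also switches to an early-return form of the print-once idiom: test_and_set_bit() atomically returns the old bit value, so only the first caller ever prints the warning banner. A userspace sketch of the same idiom, using a C11 atomic flag in place of the kernel's test_and_set_bit() (the names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag printed = ATOMIC_FLAG_INIT;

static void warn_once(const char *comm)
{
	/* test-and-set returns the previous value: nonzero means
	   another caller already claimed the single print. */
	if (atomic_flag_test_and_set(&printed))
		return;

	printf("** WARNING: emulating slow /lib/tls accesses **\n");
	printf("** Offending process: %s **\n", comm);
}

int main(void)
{
	warn_once("ld-linux");	/* prints the banner */
	warn_once("bash");	/* returns silently */
	return 0;
}

The kernel helper operates on an unsigned long bitmap and is safe in interrupt context; the C11 flag above only mirrors its return-the-old-value semantics.
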
     4.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c	Thu Sep 29 09:59:46 2005 +0100
     4.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c	Thu Sep 29 11:10:27 2005 +0100
     4.3 @@ -12,7 +12,6 @@
     4.4  #include <asm-xen/evtchn.h>
     4.5  #include <asm/hypervisor.h>
     4.6  #include <asm-xen/xen-public/dom0_ops.h>
     4.7 -#include <asm-xen/queues.h>
     4.8  #include <asm-xen/xenbus.h>
     4.9  #include <linux/cpu.h>
    4.10  #include <linux/kthread.h>
    4.11 @@ -43,12 +42,10 @@ void machine_power_off(void)
    4.12  	HYPERVISOR_shutdown();
    4.13  }
    4.14  
    4.15 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
    4.16  int reboot_thru_bios = 0;	/* for dmi_scan.c */
    4.17  EXPORT_SYMBOL(machine_restart);
    4.18  EXPORT_SYMBOL(machine_halt);
    4.19  EXPORT_SYMBOL(machine_power_off);
    4.20 -#endif
    4.21  
    4.22  
    4.23  /******************************************************************************
    4.24 @@ -66,227 +63,221 @@ static int shutting_down = SHUTDOWN_INVA
    4.25  
    4.26  static int __do_suspend(void *ignore)
    4.27  {
    4.28 -    int i, j, k, fpp;
    4.29 +	int i, j, k, fpp;
    4.30  
    4.31  #ifdef CONFIG_XEN_USB_FRONTEND
    4.32 -    extern void usbif_resume();
    4.33 +	extern void usbif_resume();
    4.34  #else
    4.35  #define usbif_resume() do{}while(0)
    4.36  #endif
    4.37  
    4.38 -    extern int gnttab_suspend(void);
    4.39 -    extern int gnttab_resume(void);
    4.40 +	extern int gnttab_suspend(void);
    4.41 +	extern int gnttab_resume(void);
    4.42  
    4.43 -    extern void time_suspend(void);
    4.44 -    extern void time_resume(void);
    4.45 -    extern unsigned long max_pfn;
    4.46 -    extern unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[];
    4.47 +	extern void time_suspend(void);
    4.48 +	extern void time_resume(void);
    4.49 +	extern unsigned long max_pfn;
    4.50 +	extern unsigned long *pfn_to_mfn_frame_list_list;
    4.51 +	extern unsigned long *pfn_to_mfn_frame_list[];
    4.52  
    4.53  #ifdef CONFIG_SMP
    4.54 -    extern void smp_suspend(void);
    4.55 -    extern void smp_resume(void);
    4.56 -
    4.57 -    static vcpu_guest_context_t suspended_cpu_records[NR_CPUS];
    4.58 -    cpumask_t prev_online_cpus, prev_present_cpus;
    4.59 -
    4.60 -    void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
    4.61 -    int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
    4.62 -#endif
    4.63 +	extern void smp_suspend(void);
    4.64 +	extern void smp_resume(void);
    4.65  
    4.66 -    extern void xencons_suspend(void);
    4.67 -    extern void xencons_resume(void);
    4.68 -
    4.69 -    int err = 0;
    4.70 +	static vcpu_guest_context_t suspended_cpu_records[NR_CPUS];
    4.71 +	cpumask_t prev_online_cpus, prev_present_cpus;
    4.72  
    4.73 -    BUG_ON(smp_processor_id() != 0);
    4.74 -    BUG_ON(in_interrupt());
    4.75 -
    4.76 -#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
    4.77 -    if (num_online_cpus() > 1) {
    4.78 -	printk(KERN_WARNING 
    4.79 -               "Can't suspend SMP guests without CONFIG_HOTPLUG_CPU\n");
    4.80 -	return -EOPNOTSUPP;
    4.81 -    }
    4.82 +	void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
    4.83 +	int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
    4.84  #endif
    4.85  
    4.86 -    preempt_disable();
    4.87 -#ifdef CONFIG_SMP
    4.88 -    /* Take all of the other cpus offline.  We need to be careful not
    4.89 -       to get preempted between the final test for num_online_cpus()
    4.90 -       == 1 and disabling interrupts, since otherwise userspace could
    4.91 -       bring another cpu online, and then we'd be stuffed.  At the
    4.92 -       same time, cpu_down can reschedule, so we need to enable
    4.93 -       preemption while doing that.  This kind of sucks, but should be
    4.94 -       correct. */
    4.95 -    /* (We don't need to worry about other cpus bringing stuff up,
    4.96 -       since by the time num_online_cpus() == 1, there aren't any
    4.97 -       other cpus) */
    4.98 -    cpus_clear(prev_online_cpus);
    4.99 -    while (num_online_cpus() > 1) {
   4.100 -	preempt_enable();
   4.101 -	for_each_online_cpu(i) {
   4.102 -	    if (i == 0)
   4.103 -		continue;
   4.104 -	    err = cpu_down(i);
   4.105 -	    if (err != 0) {
   4.106 -		printk(KERN_CRIT "Failed to take all CPUs down: %d.\n", err);
   4.107 -		goto out_reenable_cpus;
   4.108 -	    }
   4.109 -	    cpu_set(i, prev_online_cpus);
   4.110 +	extern void xencons_suspend(void);
   4.111 +	extern void xencons_resume(void);
   4.112 +
   4.113 +	int err = 0;
   4.114 +
   4.115 +	BUG_ON(smp_processor_id() != 0);
   4.116 +	BUG_ON(in_interrupt());
   4.117 +
   4.118 +#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
   4.119 +	if (num_online_cpus() > 1) {
   4.120 +		printk(KERN_WARNING "Can't suspend SMP guests "
   4.121 +		       "without CONFIG_HOTPLUG_CPU\n");
   4.122 +		return -EOPNOTSUPP;
   4.123  	}
   4.124 -	preempt_disable();
   4.125 -    }
   4.126  #endif
   4.127  
   4.128 -    __cli();
   4.129 +	preempt_disable();
   4.130 +#ifdef CONFIG_SMP
   4.131 +	/* Take all of the other cpus offline.  We need to be careful not
   4.132 +	   to get preempted between the final test for num_online_cpus()
   4.133 +	   == 1 and disabling interrupts, since otherwise userspace could
   4.134 +	   bring another cpu online, and then we'd be stuffed.  At the
   4.135 +	   same time, cpu_down can reschedule, so we need to enable
   4.136 +	   preemption while doing that.  This kind of sucks, but should be
   4.137 +	   correct. */
   4.138 +	/* (We don't need to worry about other cpus bringing stuff up,
   4.139 +	   since by the time num_online_cpus() == 1, there aren't any
   4.140 +	   other cpus) */
   4.141 +	cpus_clear(prev_online_cpus);
   4.142 +	while (num_online_cpus() > 1) {
   4.143 +		preempt_enable();
   4.144 +		for_each_online_cpu(i) {
   4.145 +			if (i == 0)
   4.146 +				continue;
   4.147 +			err = cpu_down(i);
   4.148 +			if (err != 0) {
   4.149 +				printk(KERN_CRIT "Failed to take all CPUs "
   4.150 +				       "down: %d.\n", err);
   4.151 +				goto out_reenable_cpus;
   4.152 +			}
   4.153 +			cpu_set(i, prev_online_cpus);
   4.154 +		}
   4.155 +		preempt_disable();
   4.156 +	}
   4.157 +#endif
   4.158  
   4.159 -    preempt_enable();
   4.160 +	__cli();
   4.161 +
   4.162 +	preempt_enable();
   4.163  
   4.164  #ifdef CONFIG_SMP
   4.165 -    cpus_clear(prev_present_cpus);
   4.166 -    for_each_present_cpu(i) {
   4.167 -	if (i == 0)
   4.168 -	    continue;
   4.169 -	save_vcpu_context(i, &suspended_cpu_records[i]);
   4.170 -	cpu_set(i, prev_present_cpus);
   4.171 -    }
   4.172 +	cpus_clear(prev_present_cpus);
   4.173 +	for_each_present_cpu(i) {
   4.174 +		if (i == 0)
   4.175 +			continue;
   4.176 +		save_vcpu_context(i, &suspended_cpu_records[i]);
   4.177 +		cpu_set(i, prev_present_cpus);
   4.178 +	}
   4.179  #endif
   4.180  
   4.181  #ifdef __i386__
   4.182 -    mm_pin_all();
   4.183 -    kmem_cache_shrink(pgd_cache);
   4.184 -#endif
   4.185 -
   4.186 -    time_suspend();
   4.187 -
   4.188 -#ifdef CONFIG_SMP
   4.189 -    smp_suspend();
   4.190 +	mm_pin_all();
   4.191 +	kmem_cache_shrink(pgd_cache);
   4.192  #endif
   4.193  
   4.194 -    xenbus_suspend();
   4.195 -
   4.196 -    xencons_suspend();
   4.197 -
   4.198 -    irq_suspend();
   4.199 -
   4.200 -    gnttab_suspend();
   4.201 -
   4.202 -    HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
   4.203 -    clear_fixmap(FIX_SHARED_INFO);
   4.204 +	time_suspend();
   4.205  
   4.206 -    xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
   4.207 -    xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);
   4.208 -
   4.209 -    /* We'll stop somewhere inside this hypercall.  When it returns,
   4.210 -       we'll start resuming after the restore. */
   4.211 -    HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
   4.212 +#ifdef CONFIG_SMP
   4.213 +	smp_suspend();
   4.214 +#endif
   4.215  
   4.216 -    shutting_down = SHUTDOWN_INVALID; 
   4.217 -
   4.218 -    set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
   4.219 +	xenbus_suspend();
   4.220  
   4.221 -    HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
   4.222 +	xencons_suspend();
   4.223  
   4.224 -    memset(empty_zero_page, 0, PAGE_SIZE);
   4.225 +	irq_suspend();
   4.226 +
   4.227 +	gnttab_suspend();
   4.228 +
   4.229 +	HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
   4.230 +	clear_fixmap(FIX_SHARED_INFO);
   4.231 +
   4.232 +	xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
   4.233 +	xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);
   4.234 +
   4.235 +	/* We'll stop somewhere inside this hypercall.  When it returns,
   4.236 +	   we'll start resuming after the restore. */
   4.237 +	HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
   4.238 +
   4.239 +	shutting_down = SHUTDOWN_INVALID; 
   4.240 +
   4.241 +	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
   4.242 +
   4.243 +	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
   4.244 +
   4.245 +	memset(empty_zero_page, 0, PAGE_SIZE);
   4.246  	     
   4.247 -    HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
   4.248 +	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
   4.249  		virt_to_mfn(pfn_to_mfn_frame_list_list);
   4.250    
   4.251 -    fpp = PAGE_SIZE/sizeof(unsigned long);
   4.252 -    for ( i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++ )
   4.253 -    {
   4.254 -	if ( (j % fpp) == 0 )
   4.255 -	{
   4.256 -	    k++;
   4.257 -	    pfn_to_mfn_frame_list_list[k] = 
   4.258 -		    virt_to_mfn(pfn_to_mfn_frame_list[k]);
   4.259 -	    j=0;
   4.260 +	fpp = PAGE_SIZE/sizeof(unsigned long);
   4.261 +	for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
   4.262 +		if ((j % fpp) == 0) {
   4.263 +			k++;
   4.264 +			pfn_to_mfn_frame_list_list[k] = 
   4.265 +				virt_to_mfn(pfn_to_mfn_frame_list[k]);
   4.266 +			j = 0;
   4.267 +		}
   4.268 +		pfn_to_mfn_frame_list[k][j] = 
   4.269 +			virt_to_mfn(&phys_to_machine_mapping[i]);
   4.270  	}
   4.271 -	pfn_to_mfn_frame_list[k][j] = 
   4.272 -		virt_to_mfn(&phys_to_machine_mapping[i]);
   4.273 -    }
   4.274 -    HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
   4.275 +	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
   4.276  
   4.277 -    gnttab_resume();
   4.278 +	gnttab_resume();
   4.279  
   4.280 -    irq_resume();
   4.281 +	irq_resume();
   4.282  
   4.283 -    xencons_resume();
   4.284 +	xencons_resume();
   4.285  
   4.286 -    xenbus_resume();
   4.287 +	xenbus_resume();
   4.288  
   4.289  #ifdef CONFIG_SMP
   4.290 -    smp_resume();
   4.291 +	smp_resume();
   4.292  #endif
   4.293  
   4.294 -    time_resume();
   4.295 +	time_resume();
   4.296  
   4.297 -    usbif_resume();
   4.298 +	usbif_resume();
   4.299  
   4.300  #ifdef CONFIG_SMP
   4.301 -    for_each_cpu_mask(i, prev_present_cpus)
   4.302 -	restore_vcpu_context(i, &suspended_cpu_records[i]);
   4.303 +	for_each_cpu_mask(i, prev_present_cpus)
   4.304 +		restore_vcpu_context(i, &suspended_cpu_records[i]);
   4.305  #endif
   4.306  
   4.307 -    __sti();
   4.308 +	__sti();
   4.309  
   4.310  #ifdef CONFIG_SMP
   4.311   out_reenable_cpus:
   4.312 -    for_each_cpu_mask(i, prev_online_cpus) {
   4.313 -	j = cpu_up(i);
   4.314 -	if (j != 0) {
   4.315 -	    printk(KERN_CRIT "Failed to bring cpu %d back up (%d).\n",
   4.316 -		   i, j);
   4.317 -	    err = j;
   4.318 +	for_each_cpu_mask(i, prev_online_cpus) {
   4.319 +		j = cpu_up(i);
   4.320 +		if (j != 0) {
   4.321 +			printk(KERN_CRIT "Failed to bring cpu "
   4.322 +			       "%d back up (%d).\n",
   4.323 +			       i, j);
   4.324 +			err = j;
   4.325 +		}
   4.326  	}
   4.327 -    }
   4.328  #endif
   4.329  
   4.330 -    return err;
   4.331 +	return err;
   4.332  }
   4.333  
   4.334  static int shutdown_process(void *__unused)
   4.335  {
   4.336 -    static char *envp[] = { "HOME=/", "TERM=linux", 
   4.337 -                            "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
   4.338 -    static char *restart_argv[]  = { "/sbin/reboot", NULL };
   4.339 -    static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
   4.340 -
   4.341 -    extern asmlinkage long sys_reboot(int magic1, int magic2,
   4.342 -                                      unsigned int cmd, void *arg);
   4.343 -
   4.344 -    daemonize(
   4.345 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
   4.346 -        "shutdown"
   4.347 -#endif
   4.348 -        );
   4.349 +	static char *envp[] = { "HOME=/", "TERM=linux", 
   4.350 +				"PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
   4.351 +	static char *restart_argv[]  = { "/sbin/reboot", NULL };
   4.352 +	static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
   4.353  
   4.354 -    switch ( shutting_down )
   4.355 -    {
   4.356 -    case SHUTDOWN_POWEROFF:
   4.357 -        if ( execve("/sbin/poweroff", poweroff_argv, envp) < 0 )
   4.358 -        {
   4.359 -            sys_reboot(LINUX_REBOOT_MAGIC1,
   4.360 -                       LINUX_REBOOT_MAGIC2,
   4.361 -                       LINUX_REBOOT_CMD_POWER_OFF,
   4.362 -                       NULL);
   4.363 -        }
   4.364 -        break;
   4.365 +	extern asmlinkage long sys_reboot(int magic1, int magic2,
   4.366 +					  unsigned int cmd, void *arg);
   4.367  
   4.368 -    case SHUTDOWN_REBOOT:
   4.369 -        if ( execve("/sbin/reboot", restart_argv, envp) < 0 )
   4.370 -        {
   4.371 -            sys_reboot(LINUX_REBOOT_MAGIC1,
   4.372 -                       LINUX_REBOOT_MAGIC2,
   4.373 -                       LINUX_REBOOT_CMD_RESTART,
   4.374 -                       NULL);
   4.375 -        }
   4.376 -        break;
   4.377 -    }
   4.378 +	daemonize("shutdown");
   4.379  
   4.380 -    shutting_down = SHUTDOWN_INVALID; /* could try again */
   4.381 +	switch (shutting_down) {
   4.382 +	case SHUTDOWN_POWEROFF:
   4.383 +		if (execve("/sbin/poweroff", poweroff_argv, envp) < 0) {
   4.384 +			sys_reboot(LINUX_REBOOT_MAGIC1,
   4.385 +				   LINUX_REBOOT_MAGIC2,
   4.386 +				   LINUX_REBOOT_CMD_POWER_OFF,
   4.387 +				   NULL);
   4.388 +		}
   4.389 +		break;
   4.390  
   4.391 -    return 0;
   4.392 +	case SHUTDOWN_REBOOT:
   4.393 +		if (execve("/sbin/reboot", restart_argv, envp) < 0) {
   4.394 +			sys_reboot(LINUX_REBOOT_MAGIC1,
   4.395 +				   LINUX_REBOOT_MAGIC2,
   4.396 +				   LINUX_REBOOT_CMD_RESTART,
   4.397 +				   NULL);
   4.398 +		}
   4.399 +		break;
   4.400 +	}
   4.401 +
   4.402 +	shutting_down = SHUTDOWN_INVALID; /* could try again */
   4.403 +
   4.404 +	return 0;
   4.405  }
   4.406  
   4.407  static struct task_struct *kthread_create_on_cpu(int (*f)(void *arg),
   4.408 @@ -294,113 +285,109 @@ static struct task_struct *kthread_creat
   4.409  						 const char *name,
   4.410  						 int cpu)
   4.411  {
   4.412 -    struct task_struct *p;
   4.413 -    p = kthread_create(f, arg, name);
   4.414 -    kthread_bind(p, cpu);
   4.415 -    wake_up_process(p);
   4.416 -    return p;
   4.417 +	struct task_struct *p;
   4.418 +	p = kthread_create(f, arg, name);
   4.419 +	kthread_bind(p, cpu);
   4.420 +	wake_up_process(p);
   4.421 +	return p;
   4.422  }
   4.423  
   4.424  static void __shutdown_handler(void *unused)
   4.425  {
   4.426 -    int err;
   4.427 +	int err;
   4.428  
   4.429 -    if ( shutting_down != SHUTDOWN_SUSPEND )
   4.430 -    {
   4.431 -        err = kernel_thread(shutdown_process, NULL, CLONE_FS | CLONE_FILES);
   4.432 -        if ( err < 0 )
   4.433 -            printk(KERN_ALERT "Error creating shutdown process!\n");
   4.434 -    }
   4.435 -    else
   4.436 -    {
   4.437 -	kthread_create_on_cpu(__do_suspend, NULL, "suspender", 0);
   4.438 -    }
   4.439 +	if (shutting_down != SHUTDOWN_SUSPEND) {
   4.440 +		err = kernel_thread(shutdown_process, NULL,
   4.441 +				    CLONE_FS | CLONE_FILES);
   4.442 +		if (err < 0)
   4.443 +			printk(KERN_ALERT "Error creating shutdown "
   4.444 +			       "process!\n");
   4.445 +	} else {
   4.446 +		kthread_create_on_cpu(__do_suspend, NULL, "suspender", 0);
   4.447 +	}
   4.448  }
   4.449  
   4.450  static void shutdown_handler(struct xenbus_watch *watch, const char *node)
   4.451  {
   4.452 -    static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
   4.453 -    char *str;
   4.454 -    int err;
   4.455 +	static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
   4.456 +	char *str;
   4.457 +	int err;
   4.458  
   4.459   again:
   4.460 -    err = xenbus_transaction_start();
   4.461 -    if (err)
   4.462 -	return;
   4.463 -    str = (char *)xenbus_read("control", "shutdown", NULL);
   4.464 -    /* Ignore read errors and empty reads. */
   4.465 -    if (XENBUS_IS_ERR_READ(str)) {
   4.466 -	xenbus_transaction_end(1);
   4.467 -	return;
   4.468 -    }
   4.469 +	err = xenbus_transaction_start();
   4.470 +	if (err)
   4.471 +		return;
   4.472 +	str = (char *)xenbus_read("control", "shutdown", NULL);
   4.473 +	/* Ignore read errors and empty reads. */
   4.474 +	if (XENBUS_IS_ERR_READ(str)) {
   4.475 +		xenbus_transaction_end(1);
   4.476 +		return;
   4.477 +	}
   4.478  
   4.479 -    xenbus_write("control", "shutdown", "");
   4.480 +	xenbus_write("control", "shutdown", "");
   4.481  
   4.482 -    err = xenbus_transaction_end(0);
   4.483 -    if (err == -EAGAIN) {
   4.484 +	err = xenbus_transaction_end(0);
   4.485 +	if (err == -EAGAIN) {
   4.486 +		kfree(str);
   4.487 +		goto again;
   4.488 +	}
   4.489 +
   4.490 +	if (strcmp(str, "poweroff") == 0)
   4.491 +		shutting_down = SHUTDOWN_POWEROFF;
   4.492 +	else if (strcmp(str, "reboot") == 0)
   4.493 +		shutting_down = SHUTDOWN_REBOOT;
   4.494 +	else if (strcmp(str, "suspend") == 0)
   4.495 +		shutting_down = SHUTDOWN_SUSPEND;
   4.496 +	else {
   4.497 +		printk(KERN_WARNING "Ignoring shutdown request: %s\n", str);
   4.498 +		shutting_down = SHUTDOWN_INVALID;
   4.499 +	}
   4.500 +
   4.501  	kfree(str);
   4.502 -	goto again;
   4.503 -    }
   4.504  
   4.505 -    if (strcmp(str, "poweroff") == 0)
   4.506 -        shutting_down = SHUTDOWN_POWEROFF;
   4.507 -    else if (strcmp(str, "reboot") == 0)
   4.508 -        shutting_down = SHUTDOWN_REBOOT;
   4.509 -    else if (strcmp(str, "suspend") == 0)
   4.510 -        shutting_down = SHUTDOWN_SUSPEND;
   4.511 -    else {
   4.512 -        printk("Ignoring shutdown request: %s\n", str);
   4.513 -        shutting_down = SHUTDOWN_INVALID;
   4.514 -    }
   4.515 -
   4.516 -    kfree(str);
   4.517 -
   4.518 -    if (shutting_down != SHUTDOWN_INVALID)
   4.519 -        schedule_work(&shutdown_work);
   4.520 +	if (shutting_down != SHUTDOWN_INVALID)
   4.521 +		schedule_work(&shutdown_work);
   4.522  }
   4.523  
   4.524  #ifdef CONFIG_MAGIC_SYSRQ
   4.525  static void sysrq_handler(struct xenbus_watch *watch, const char *node)
   4.526  {
   4.527 -    char sysrq_key = '\0';
   4.528 -    int err;
   4.529 +	char sysrq_key = '\0';
   4.530 +	int err;
   4.531  
   4.532   again:
   4.533 -    err = xenbus_transaction_start();
   4.534 -    if (err)
   4.535 -	return;
   4.536 -    if (!xenbus_scanf("control", "sysrq", "%c", &sysrq_key)) {
   4.537 -        printk(KERN_ERR "Unable to read sysrq code in control/sysrq\n");
   4.538 -	xenbus_transaction_end(1);
   4.539 -	return;
   4.540 -    }
   4.541 +	err = xenbus_transaction_start();
   4.542 +	if (err)
   4.543 +		return;
   4.544 +	if (!xenbus_scanf("control", "sysrq", "%c", &sysrq_key)) {
   4.545 +		printk(KERN_ERR "Unable to read sysrq code in "
   4.546 +		       "control/sysrq\n");
   4.547 +		xenbus_transaction_end(1);
   4.548 +		return;
   4.549 +	}
   4.550  
   4.551 -    if (sysrq_key != '\0')
   4.552 -	xenbus_printf("control", "sysrq", "%c", '\0');
   4.553 -
   4.554 -    err = xenbus_transaction_end(0);
   4.555 -    if (err == -EAGAIN)
   4.556 -	goto again;
   4.557 +	if (sysrq_key != '\0')
   4.558 +		xenbus_printf("control", "sysrq", "%c", '\0');
   4.559  
   4.560 -    if (sysrq_key != '\0') {
   4.561 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
   4.562 -        handle_sysrq(sysrq_key, NULL, NULL);
   4.563 -#else
   4.564 -        handle_sysrq(sysrq_key, NULL, NULL, NULL);
   4.565 -#endif
   4.566 -    }
   4.567 +	err = xenbus_transaction_end(0);
   4.568 +	if (err == -EAGAIN)
   4.569 +		goto again;
   4.570 +
   4.571 +	if (sysrq_key != '\0') {
   4.572 +		handle_sysrq(sysrq_key, NULL, NULL);
   4.573 +	}
   4.574  }
   4.575  #endif
   4.576  
   4.577  static struct xenbus_watch shutdown_watch = {
   4.578 -    .node = "control/shutdown",
   4.579 -    .callback = shutdown_handler
   4.580 +	.node = "control/shutdown",
   4.581 +	.callback = shutdown_handler
   4.582  };
   4.583  
   4.584  #ifdef CONFIG_MAGIC_SYSRQ
   4.585  static struct xenbus_watch sysrq_watch = {
   4.586 -    .node ="control/sysrq",
   4.587 -    .callback = sysrq_handler
   4.588 +	.node = "control/sysrq",
   4.589 +	.callback = sysrq_handler
   4.590  };
   4.591  #endif
   4.592  
   4.593 @@ -413,39 +400,50 @@ static int setup_shutdown_watcher(struct
   4.594                                    unsigned long event,
   4.595                                    void *data)
   4.596  {
   4.597 -    int err1 = 0;
   4.598 +	int err1 = 0;
   4.599  #ifdef CONFIG_MAGIC_SYSRQ
   4.600 -    int err2 = 0;
   4.601 +	int err2 = 0;
   4.602  #endif
   4.603  
   4.604 -    BUG_ON(down_trylock(&xenbus_lock) == 0);
   4.605 +	BUG_ON(down_trylock(&xenbus_lock) == 0);
   4.606  
   4.607 -    err1 = register_xenbus_watch(&shutdown_watch);
   4.608 +	err1 = register_xenbus_watch(&shutdown_watch);
   4.609  #ifdef CONFIG_MAGIC_SYSRQ
   4.610 -    err2 = register_xenbus_watch(&sysrq_watch);
   4.611 +	err2 = register_xenbus_watch(&sysrq_watch);
   4.612  #endif
   4.613  
   4.614 -    if (err1) {
   4.615 -        printk(KERN_ERR "Failed to set shutdown watcher\n");
   4.616 -    }
   4.617 +	if (err1) {
   4.618 +		printk(KERN_ERR "Failed to set shutdown watcher\n");
   4.619 +	}
   4.620      
   4.621  #ifdef CONFIG_MAGIC_SYSRQ
   4.622 -    if (err2) {
   4.623 -        printk(KERN_ERR "Failed to set sysrq watcher\n");
   4.624 -    }
   4.625 +	if (err2) {
   4.626 +		printk(KERN_ERR "Failed to set sysrq watcher\n");
   4.627 +	}
   4.628  #endif
   4.629  
   4.630 -    return NOTIFY_DONE;
   4.631 +	return NOTIFY_DONE;
   4.632  }
   4.633  
   4.634  static int __init setup_shutdown_event(void)
   4.635  {
   4.636      
   4.637 -    xenstore_notifier.notifier_call = setup_shutdown_watcher;
   4.638 +	xenstore_notifier.notifier_call = setup_shutdown_watcher;
   4.639  
   4.640 -    register_xenstore_notifier(&xenstore_notifier);
   4.641 +	register_xenstore_notifier(&xenstore_notifier);
   4.642      
   4.643 -    return 0;
   4.644 +	return 0;
   4.645  }
   4.646  
   4.647  subsys_initcall(setup_shutdown_event);
   4.648 +
   4.649 +/*
   4.650 + * Local variables:
   4.651 + *  c-file-style: "linux"
   4.652 + *  indent-tabs-mode: t
   4.653 + *  c-indent-level: 8
   4.654 + *  c-basic-offset: 8
   4.655 + *  tab-width: 8
   4.656 + * End:
   4.657 + */
   4.658 +#
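
The suspend path rewritten above is order-sensitive: xenbus, the console,
interrupts and the grant tables are quiesced first; the shared-info fixmap is
detached and the store/console machine frames are converted to pseudo-physical
frames so they stay meaningful across relocation; only then does
HYPERVISOR_suspend() run, returning once the domain has been restored, at
which point the pfn-to-mfn frame lists are rebuilt and the same subsystems
come back in reverse order. A minimal sketch of that symmetry, with the
inline arch fix-ups folded into hypothetical do_suspend_arch() and
do_resume_arch() helpers:

	/*
	 * Sketch only: the ordering contract of __do_suspend(), not a
	 * drop-in replacement.  do_suspend_arch()/do_resume_arch() are
	 * hypothetical names for the fixmap, mfn and p2m fix-ups done
	 * inline in the real function.
	 */
	static int suspend_ordering_sketch(void)
	{
		xenbus_suspend();
		xencons_suspend();
		irq_suspend();
		gnttab_suspend();
		do_suspend_arch();	/* detach shared info; mfn -> pfn */

		/* Returns only once the domain has been restored. */
		HYPERVISOR_suspend(virt_to_mfn(xen_start_info));

		do_resume_arch();	/* reattach shared info; rebuild p2m */
		gnttab_resume();
		irq_resume();
		xencons_resume();
		xenbus_resume();
		time_resume();
		return 0;
	}
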
     5.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/smp.c	Thu Sep 29 09:59:46 2005 +0100
     5.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/smp.c	Thu Sep 29 11:10:27 2005 +0100
     5.3 @@ -11,6 +11,15 @@
     5.4  int setup_profiling_timer(unsigned int multiplier)
     5.5  {
     5.6  	printk("setup_profiling_timer\n");
     5.7 -
     5.8  	return 0;
     5.9  }
    5.10 +
    5.11 +/*
    5.12 + * Local variables:
    5.13 + *  c-file-style: "linux"
    5.14 + *  indent-tabs-mode: t
    5.15 + *  c-indent-level: 8
    5.16 + *  c-basic-offset: 8
    5.17 + *  tab-width: 8
    5.18 + * End:
    5.19 + */
     6.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c	Thu Sep 29 09:59:46 2005 +0100
     6.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/xen_proc.c	Thu Sep 29 11:10:27 2005 +0100
     6.3 @@ -6,13 +6,23 @@ static struct proc_dir_entry *xen_base;
     6.4  
     6.5  struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
     6.6  {
     6.7 -    if ( xen_base == NULL )
     6.8 -        if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
     6.9 -            panic("Couldn't create /proc/xen");
    6.10 -    return create_proc_entry(name, mode, xen_base);
    6.11 +	if (xen_base == NULL)
    6.12 +		if ((xen_base = proc_mkdir("xen", &proc_root)) == NULL)
    6.13 +			panic("Couldn't create /proc/xen");
    6.14 +	return create_proc_entry(name, mode, xen_base);
    6.15  }
    6.16  
    6.17  void remove_xen_proc_entry(const char *name)
    6.18  {
    6.19 -    remove_proc_entry(name, xen_base);
    6.20 +	remove_proc_entry(name, xen_base);
    6.21  }
    6.22 +
    6.23 +/*
    6.24 + * Local variables:
    6.25 + *  c-file-style: "linux"
    6.26 + *  indent-tabs-mode: t
    6.27 + *  c-indent-level: 8
    6.28 + *  c-basic-offset: 8
    6.29 + *  tab-width: 8
    6.30 + * End:
    6.31 + */
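
create_xen_proc_entry() above lazily creates the /proc/xen directory on first
use and panics if that fails, so callers only deal with the per-entry result.
A hedged caller sketch ("demo" and the 0400 mode are illustrative, not taken
from this changeset):

	static int demo_register(void)
	{
		struct proc_dir_entry *ent;

		/* Creates /proc/xen/demo, making /proc/xen on demand. */
		ent = create_xen_proc_entry("demo", 0400);
		if (ent == NULL)
			return -ENOMEM;
		/* ... hook up ent->proc_fops and ent->data as usual ... */
		return 0;
	}
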
     7.1 --- a/linux-2.6-xen-sparse/include/asm-xen/balloon.h	Thu Sep 29 09:59:46 2005 +0100
     7.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/balloon.h	Thu Sep 29 11:10:27 2005 +0100
     7.3 @@ -58,3 +58,13 @@ extern spinlock_t balloon_lock;
     7.4  #define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
     7.5  
     7.6  #endif /* __ASM_BALLOON_H__ */
     7.7 +
     7.8 +/*
     7.9 + * Local variables:
    7.10 + *  c-file-style: "linux"
    7.11 + *  indent-tabs-mode: t
    7.12 + *  c-indent-level: 8
    7.13 + *  c-basic-offset: 8
    7.14 + *  tab-width: 8
    7.15 + * End:
    7.16 + */
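
balloon_unlock() above pairs with a balloon_lock() macro over the same
spinlock; both take the caller's flags word because they wrap the irqsave
variants. A usage sketch, assuming balloon_lock() mirrors the unlock macro
shown (i.e. spin_lock_irqsave on &balloon_lock):

	static void demo_critical_section(void)
	{
		unsigned long flags;

		balloon_lock(flags);	/* assumed irqsave counterpart */
		/* ... update the phys-to-machine mapping atomically ... */
		balloon_unlock(flags);
	}
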
     8.1 --- a/linux-2.6-xen-sparse/include/asm-xen/driver_util.h	Thu Sep 29 09:59:46 2005 +0100
     8.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/driver_util.h	Thu Sep 29 11:10:27 2005 +0100
     8.3 @@ -14,3 +14,13 @@ extern void lock_vm_area(struct vm_struc
     8.4  extern void unlock_vm_area(struct vm_struct *area);
     8.5  
     8.6  #endif /* __ASM_XEN_DRIVER_UTIL_H__ */
     8.7 +
     8.8 +/*
     8.9 + * Local variables:
    8.10 + *  c-file-style: "linux"
    8.11 + *  indent-tabs-mode: t
    8.12 + *  c-indent-level: 8
    8.13 + *  c-basic-offset: 8
    8.14 + *  tab-width: 8
    8.15 + * End:
    8.16 + */
     9.1 --- a/linux-2.6-xen-sparse/include/asm-xen/evtchn.h	Thu Sep 29 09:59:46 2005 +0100
     9.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/evtchn.h	Thu Sep 29 11:10:27 2005 +0100
     9.3 @@ -4,7 +4,7 @@
     9.4   * Communication via Xen event channels.
     9.5   * Also definitions for the device that demuxes notifications to userspace.
     9.6   * 
     9.7 - * Copyright (c) 2004, K A Fraser
     9.8 + * Copyright (c) 2004-2005, K A Fraser
     9.9   * 
    9.10   * This file may be distributed separately from the Linux kernel, or
    9.11   * incorporated into other software packages, subject to the following license:
    9.12 @@ -61,11 +61,11 @@ extern void unbind_evtchn_from_irq(unsig
    9.13   * You *cannot* trust the irq argument passed to the callback handler.
    9.14   */
    9.15  extern int  bind_evtchn_to_irqhandler(
    9.16 -    unsigned int evtchn,
    9.17 -    irqreturn_t (*handler)(int, void *, struct pt_regs *),
    9.18 -    unsigned long irqflags,
    9.19 -    const char *devname,
    9.20 -    void *dev_id);
    9.21 +	unsigned int evtchn,
    9.22 +	irqreturn_t (*handler)(int, void *, struct pt_regs *),
    9.23 +	unsigned long irqflags,
    9.24 +	const char *devname,
    9.25 +	void *dev_id);
    9.26  extern void unbind_evtchn_from_irqhandler(unsigned int evtchn, void *dev_id);
    9.27  
    9.28  extern void irq_suspend(void);
    9.29 @@ -79,42 +79,42 @@ void evtchn_device_upcall(int port);
    9.30  
    9.31  static inline void mask_evtchn(int port)
    9.32  {
    9.33 -    shared_info_t *s = HYPERVISOR_shared_info;
    9.34 -    synch_set_bit(port, &s->evtchn_mask[0]);
    9.35 +	shared_info_t *s = HYPERVISOR_shared_info;
    9.36 +	synch_set_bit(port, &s->evtchn_mask[0]);
    9.37  }
    9.38  
    9.39  static inline void unmask_evtchn(int port)
    9.40  {
    9.41 -    shared_info_t *s = HYPERVISOR_shared_info;
    9.42 -    vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
    9.43 -
    9.44 -    synch_clear_bit(port, &s->evtchn_mask[0]);
    9.45 +	shared_info_t *s = HYPERVISOR_shared_info;
    9.46 +	vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
    9.47  
    9.48 -    /*
    9.49 -     * The following is basically the equivalent of 'hw_resend_irq'. Just like
    9.50 -     * a real IO-APIC we 'lose the interrupt edge' if the channel is masked.
    9.51 -     */
    9.52 -    if (  synch_test_bit        (port,    &s->evtchn_pending[0]) && 
    9.53 -         !synch_test_and_set_bit(port>>5, &vcpu_info->evtchn_pending_sel) )
    9.54 -    {
    9.55 -        vcpu_info->evtchn_upcall_pending = 1;
    9.56 -        if ( !vcpu_info->evtchn_upcall_mask )
    9.57 -            force_evtchn_callback();
    9.58 -    }
    9.59 +	synch_clear_bit(port, &s->evtchn_mask[0]);
    9.60 +
    9.61 +	/*
    9.62 +	 * The following is basically the equivalent of 'hw_resend_irq'. Just
    9.63 +	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
    9.64 +	 * masked.
    9.65 +	 */
    9.66 +	if (synch_test_bit         (port,    &s->evtchn_pending[0]) && 
    9.67 +	    !synch_test_and_set_bit(port>>5, &vcpu_info->evtchn_pending_sel)) {
    9.68 +		vcpu_info->evtchn_upcall_pending = 1;
    9.69 +		if (!vcpu_info->evtchn_upcall_mask)
    9.70 +			force_evtchn_callback();
    9.71 +	}
    9.72  }
    9.73  
    9.74  static inline void clear_evtchn(int port)
    9.75  {
    9.76 -    shared_info_t *s = HYPERVISOR_shared_info;
    9.77 -    synch_clear_bit(port, &s->evtchn_pending[0]);
    9.78 +	shared_info_t *s = HYPERVISOR_shared_info;
    9.79 +	synch_clear_bit(port, &s->evtchn_pending[0]);
    9.80  }
    9.81  
    9.82  static inline int notify_via_evtchn(int port)
    9.83  {
    9.84 -    evtchn_op_t op;
    9.85 -    op.cmd = EVTCHNOP_send;
    9.86 -    op.u.send.local_port = port;
    9.87 -    return HYPERVISOR_event_channel_op(&op);
    9.88 +	evtchn_op_t op;
    9.89 +	op.cmd = EVTCHNOP_send;
    9.90 +	op.u.send.local_port = port;
    9.91 +	return HYPERVISOR_event_channel_op(&op);
    9.92  }
    9.93  
    9.94  /*
    9.95 @@ -133,3 +133,13 @@ static inline int notify_via_evtchn(int 
    9.96  #define EVTCHN_UNBIND _IO('E', 3)
    9.97  
    9.98  #endif /* __ASM_EVTCHN_H__ */
    9.99 +
   9.100 +/*
   9.101 + * Local variables:
   9.102 + *  c-file-style: "linux"
   9.103 + *  indent-tabs-mode: t
   9.104 + *  c-indent-level: 8
   9.105 + *  c-basic-offset: 8
   9.106 + *  tab-width: 8
   9.107 + * End:
   9.108 + */
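
bind_evtchn_to_irqhandler(), reindented above, is the usual way a driver
attaches a handler to an event channel, and the header warns that the irq
argument passed to the callback cannot be trusted. A usage sketch (handler
body, devname and flags are illustrative):

	static irqreturn_t demo_interrupt(int irq, void *dev_id,
					  struct pt_regs *regs)
	{
		/* Don't trust 'irq' here; key any lookups off dev_id. */
		return IRQ_HANDLED;
	}

	/* Assumes 'evtchn' came from an earlier event-channel
	 * allocation and 'state' is per-device driver data. */
	static int demo_bind(unsigned int evtchn, void *state)
	{
		return bind_evtchn_to_irqhandler(evtchn, demo_interrupt,
						 0, "demo", state);
	}
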
    10.1 --- a/linux-2.6-xen-sparse/include/asm-xen/foreign_page.h	Thu Sep 29 09:59:46 2005 +0100
    10.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/foreign_page.h	Thu Sep 29 11:10:27 2005 +0100
    10.3 @@ -28,3 +28,13 @@
    10.4  	( (void (*) (struct page *)) (page)->mapping )
    10.5  
    10.6  #endif /* __ASM_XEN_FOREIGN_PAGE_H__ */
    10.7 +
    10.8 +/*
    10.9 + * Local variables:
   10.10 + *  c-file-style: "linux"
   10.11 + *  indent-tabs-mode: t
   10.12 + *  c-indent-level: 8
   10.13 + *  c-basic-offset: 8
   10.14 + *  tab-width: 8
   10.15 + * End:
   10.16 + */
    11.1 --- a/linux-2.6-xen-sparse/include/asm-xen/gnttab.h	Thu Sep 29 09:59:46 2005 +0100
    11.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/gnttab.h	Thu Sep 29 11:10:27 2005 +0100
    11.3 @@ -6,7 +6,7 @@
    11.4   * 2. Accessing others' memory reservations via grant references.
    11.5   * (i.e., mechanisms for both sender and recipient of grant references)
    11.6   * 
    11.7 - * Copyright (c) 2004, K A Fraser
    11.8 + * Copyright (c) 2004-2005, K A Fraser
    11.9   * Copyright (c) 2005, Christopher Clark
   11.10   */
   11.11  
   11.12 @@ -25,10 +25,10 @@
   11.13  #endif
   11.14  
   11.15  struct gnttab_free_callback {
   11.16 -    struct gnttab_free_callback *next;
   11.17 -    void (*fn)(void *);
   11.18 -    void *arg;
   11.19 -    u16 count;
   11.20 +	struct gnttab_free_callback *next;
   11.21 +	void (*fn)(void *);
   11.22 +	void *arg;
   11.23 +	u16 count;
   11.24  };
   11.25  
   11.26  int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
   11.27 @@ -73,3 +73,13 @@ void gnttab_grant_foreign_transfer_ref(g
   11.28  #endif
   11.29  
   11.30  #endif /* __ASM_GNTTAB_H__ */
   11.31 +
   11.32 +/*
   11.33 + * Local variables:
   11.34 + *  c-file-style: "linux"
   11.35 + *  indent-tabs-mode: t
   11.36 + *  c-indent-level: 8
   11.37 + *  c-basic-offset: 8
   11.38 + *  tab-width: 8
   11.39 + * End:
   11.40 + */
    12.1 --- a/linux-2.6-xen-sparse/include/asm-xen/queues.h	Thu Sep 29 09:59:46 2005 +0100
    12.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.3 @@ -1,81 +0,0 @@
    12.4 -
    12.5 -/*
    12.6 - * Oh dear. Task queues were removed from Linux 2.6 and replaced by work 
    12.7 - * queues. Unfortunately the semantics is not the same. With task queues we 
    12.8 - * can defer work until a particular event occurs -- this is not
    12.9 - * straightforwardly done with work queues (queued work is performed asap, or
   12.10 - * after some fixed timeout). Conversely, work queues are a (slightly) neater
   12.11 - * way of deferring work to a process context than using task queues in 2.4.
   12.12 - * 
   12.13 - * This is a bit of a needless reimplementation -- should have just pulled
   12.14 - * the code from 2.4, but I tried leveraging work queues to simplify things.
   12.15 - * They didn't help. :-(
   12.16 - */
   12.17 -
   12.18 -#ifndef __QUEUES_H__
   12.19 -#define __QUEUES_H__
   12.20 -
   12.21 -#include <linux/version.h>
   12.22 -#include <linux/list.h>
   12.23 -#include <linux/workqueue.h>
   12.24 -
   12.25 -struct tq_struct { 
   12.26 -    void (*fn)(void *);
   12.27 -    void *arg;
   12.28 -    struct list_head list;
   12.29 -    unsigned long pending;
   12.30 -};
   12.31 -#define INIT_TQUEUE(_name, _fn, _arg)               \
   12.32 -    do {                                            \
   12.33 -        INIT_LIST_HEAD(&(_name)->list);             \
   12.34 -        (_name)->pending = 0;                       \
   12.35 -        (_name)->fn = (_fn); (_name)->arg = (_arg); \
   12.36 -    } while ( 0 )
   12.37 -#define DECLARE_TQUEUE(_name, _fn, _arg)            \
   12.38 -    struct tq_struct _name = { (_fn), (_arg), LIST_HEAD_INIT((_name).list), 0 }
   12.39 -
   12.40 -typedef struct {
   12.41 -    struct list_head list;
   12.42 -    spinlock_t       lock;
   12.43 -} task_queue;
   12.44 -#define DECLARE_TASK_QUEUE(_name) \
   12.45 -    task_queue _name = { LIST_HEAD_INIT((_name).list), SPIN_LOCK_UNLOCKED }
   12.46 -
   12.47 -static inline int queue_task(struct tq_struct *tqe, task_queue *tql)
   12.48 -{
   12.49 -    unsigned long flags;
   12.50 -    if ( test_and_set_bit(0, &tqe->pending) )
   12.51 -        return 0;
   12.52 -    spin_lock_irqsave(&tql->lock, flags);
   12.53 -    list_add_tail(&tqe->list, &tql->list);
   12.54 -    spin_unlock_irqrestore(&tql->lock, flags);
   12.55 -    return 1;
   12.56 -}
   12.57 -
   12.58 -static inline void run_task_queue(task_queue *tql)
   12.59 -{
   12.60 -    struct list_head head, *ent;
   12.61 -    struct tq_struct *tqe;
   12.62 -    unsigned long flags;
   12.63 -    void (*fn)(void *);
   12.64 -    void *arg;
   12.65 -
   12.66 -    spin_lock_irqsave(&tql->lock, flags);
   12.67 -    list_add(&head, &tql->list);
   12.68 -    list_del_init(&tql->list);
   12.69 -    spin_unlock_irqrestore(&tql->lock, flags);
   12.70 -
   12.71 -    while ( !list_empty(&head) )
   12.72 -    {
   12.73 -        ent = head.next;
   12.74 -        list_del_init(ent);
   12.75 -        tqe = list_entry(ent, struct tq_struct, list);
   12.76 -        fn  = tqe->fn;
   12.77 -        arg = tqe->arg;
   12.78 -        wmb();
   12.79 -        tqe->pending = 0;
   12.80 -        fn(arg);
   12.81 -    }
   12.82 -}
   12.83 -
   12.84 -#endif /* __QUEUES_H__ */
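
With queues.h deleted outright, callers fall back on the stock 2.6 work-queue
API that its own comment concedes is the neater way to defer work to process
context; the semantic difference it flags still stands, since queued work runs
as soon as possible rather than at a caller-chosen event. A rough equivalent
of a DECLARE_TQUEUE()/queue_task() pair (names hypothetical):

	#include <linux/workqueue.h>

	static void deferred_fn(void *arg)
	{
		/* Runs later in keventd's process context, like a
		 * tq_struct's fn. */
	}

	static DECLARE_WORK(deferred_work, deferred_fn, NULL);

	static void demo_kick(void)
	{
		/* queue_task() + run_task_queue() collapse into this. */
		schedule_work(&deferred_work);
	}
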
    13.1 --- a/linux-2.6-xen-sparse/include/asm-xen/xen_proc.h	Thu Sep 29 09:59:46 2005 +0100
    13.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/xen_proc.h	Thu Sep 29 11:10:27 2005 +0100
    13.3 @@ -6,8 +6,18 @@
    13.4  #include <linux/proc_fs.h>
    13.5  
    13.6  extern struct proc_dir_entry *create_xen_proc_entry(
    13.7 -    const char *name, mode_t mode);
    13.8 +	const char *name, mode_t mode);
    13.9  extern void remove_xen_proc_entry(
   13.10 -    const char *name);
   13.11 +	const char *name);
   13.12  
   13.13  #endif /* __ASM_XEN_PROC_H__ */
   13.14 +
   13.15 +/*
   13.16 + * Local variables:
   13.17 + *  c-file-style: "linux"
   13.18 + *  indent-tabs-mode: t
   13.19 + *  c-indent-level: 8
   13.20 + *  c-basic-offset: 8
   13.21 + *  tab-width: 8
   13.22 + * End:
   13.23 + */
    14.1 --- a/linux-2.6-xen-sparse/include/asm-xen/xenbus.h	Thu Sep 29 09:59:46 2005 +0100
    14.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/xenbus.h	Thu Sep 29 11:10:27 2005 +0100
    14.3 @@ -139,3 +139,13 @@ void xenbus_resume(void);
    14.4  #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
    14.5  
    14.6  #endif /* _ASM_XEN_XENBUS_H */
    14.7 +
    14.8 +/*
    14.9 + * Local variables:
   14.10 + *  c-file-style: "linux"
   14.11 + *  indent-tabs-mode: t
   14.12 + *  c-indent-level: 8
   14.13 + *  c-basic-offset: 8
   14.14 + *  tab-width: 8
   14.15 + * End:
   14.16 + */
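
The xenbus suspend/resume declarations above sit alongside the transaction
API that shutdown_handler() and sysrq_handler() drive: start a transaction,
read and clear the control node, and restart from scratch if the commit
fails with -EAGAIN. A sketch of that retry idiom (read_and_clear_node() is a
hypothetical stand-in for the xenbus_read()/xenbus_write() calls made inside
the transaction):

	static void transaction_retry_sketch(void)
	{
	again:
		if (xenbus_transaction_start())
			return;
		read_and_clear_node();
		if (xenbus_transaction_end(0) == -EAGAIN)
			goto again;
	}
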