ia64/xen-unstable

changeset 1506:729cac1fb14e

bitkeeper revision 1.983 (40d46e62pNngJp16CZ2sqZwmplr_Kw)

More cleanups.
author kaf24@scramble.cl.cam.ac.uk
date Sat Jun 19 16:48:34 2004 +0000 (2004-06-19)
parents ead91151a0e6
children b535339f6ed0
files .rootkeys xen/arch/x86/apic.c xen/arch/x86/nmi.c xen/arch/x86/pci-irq.c xen/arch/x86/process.c xen/arch/x86/setup.c xen/arch/x86/smp.c xen/arch/x86/smpboot.c xen/arch/x86/time.c xen/arch/x86/traps.c xen/common/ac_timer.c xen/common/debug.c xen/common/dom0_ops.c xen/common/domain.c xen/common/event_channel.c xen/common/kernel.c xen/common/keyhandler.c xen/common/memory.c xen/common/sched_bvt.c xen/common/schedule.c xen/common/shadow.c xen/common/softirq.c xen/drivers/char/serial.c xen/include/xen/event.h xen/include/xen/interrupt.h xen/include/xen/irq.h xen/include/xen/sched.h xen/include/xen/smp.h xen/include/xen/softirq.h
line diff
     1.1 --- a/.rootkeys	Fri Jun 18 14:46:29 2004 +0000
     1.2 +++ b/.rootkeys	Sat Jun 19 16:48:34 2004 +0000
     1.3 @@ -465,7 +465,6 @@ 3ddb79c2O729EttZTYu1c8LcsUO_GQ xen/inclu
     1.4  3ddb79c0HIghfBF8zFUdmXhOU8i6hA xen/include/xen/errno.h
     1.5  3ddb79c1W0lQca8gRV7sN6j3iY4Luw xen/include/xen/event.h
     1.6  3ddb79c0GurNF9tDWqQbAwJFH8ugfA xen/include/xen/init.h
     1.7 -3ddb79c1Vi5VleJAOKHAlY0G2zAsgw xen/include/xen/interrupt.h
     1.8  3ddb79c1nzaWu8NoF4xCCMSFJR4MlA xen/include/xen/ioport.h
     1.9  3ddb79c2qAxCOABlkKtD8Txohe-qEw xen/include/xen/irq.h
    1.10  3ddb79c2b3qe-6Ann09FqZBF4IrJaQ xen/include/xen/irq_cpustat.h
    1.11 @@ -487,6 +486,7 @@ 403a06a7H0hpHcKpAiDe5BPnaXWTlA xen/inclu
    1.12  405b8599BsDsDwKEJLS0XipaiQW3TA xen/include/xen/shadow.h
    1.13  3ddb79c14dXIhP7C2ahnoD08K90G_w xen/include/xen/slab.h
    1.14  3ddb79c09xbS-xxfKxuV3JETIhBzmg xen/include/xen/smp.h
    1.15 +3ddb79c1Vi5VleJAOKHAlY0G2zAsgw xen/include/xen/softirq.h
    1.16  3ddb79c2iIcESrDAB8samy_yAh6olQ xen/include/xen/spinlock.h
    1.17  3e7f358aMtFMUVvN_Zjg5qvEJIqEBA xen/include/xen/string.h
    1.18  3ddb79c0BnA20PbgmuMPSGIBljNRQw xen/include/xen/time.h
     2.1 --- a/xen/arch/x86/apic.c	Fri Jun 18 14:46:29 2004 +0000
     2.2 +++ b/xen/arch/x86/apic.c	Sat Jun 19 16:48:34 2004 +0000
     2.3 @@ -14,24 +14,24 @@
     2.4  
     2.5  
     2.6  #include <xen/config.h>
     2.7 +#include <xen/ac_timer.h>
     2.8 +#include <xen/perfc.h>
     2.9 +#include <xen/errno.h>
    2.10  #include <xen/init.h>
    2.11 +#include <xen/mm.h>
    2.12  #include <xen/sched.h>
    2.13  #include <xen/irq.h>
    2.14  #include <xen/delay.h>
    2.15 +#include <xen/smp.h>
    2.16 +#include <xen/softirq.h>
    2.17  #include <asm/mc146818rtc.h>
    2.18  #include <asm/msr.h>
    2.19 -#include <xen/errno.h>
    2.20  #include <asm/atomic.h>
    2.21 -#include <xen/smp.h>
    2.22 -#include <xen/interrupt.h>
    2.23  #include <asm/mpspec.h>
    2.24  #include <asm/flushtlb.h>
    2.25  #include <asm/hardirq.h>
    2.26  #include <asm/apic.h>
    2.27 -#include <xen/mm.h>
    2.28  #include <asm/io_apic.h>
    2.29 -#include <xen/ac_timer.h>
    2.30 -#include <xen/perfc.h>
    2.31  
    2.32  
    2.33  /* Using APIC to generate smp_local_timer_interrupt? */
    2.34 @@ -726,14 +726,12 @@ unsigned int apic_timer_irqs [NR_CPUS];
    2.35  
    2.36  void smp_apic_timer_interrupt(struct pt_regs * regs)
    2.37  {
    2.38 -    int cpu = smp_processor_id();
    2.39 -
    2.40      ack_APIC_irq();
    2.41  
    2.42 -    apic_timer_irqs[cpu]++;
    2.43 +    apic_timer_irqs[smp_processor_id()]++;
    2.44      perfc_incrc(apic_timer);
    2.45  
    2.46 -    __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
    2.47 +    raise_softirq(AC_TIMER_SOFTIRQ);
    2.48  }
    2.49  
    2.50  /*
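
The timer-interrupt hunks above (and the matching one in time.c below) replace __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ) with raise_softirq(AC_TIMER_SOFTIRQ): in interrupt context the target is always the CPU taking the interrupt, so the smp_processor_id() bookkeeping can go. A minimal sketch of the shorthand, assuming Linux-2.4-style softirq plumbing (the body is an assumption; only the name comes from this changeset):

    /* Assumed shape of raise_softirq(): mark the softirq pending on the
     * local CPU; it runs on the way out of the interrupt. */
    static inline void raise_softirq(unsigned int nr)
    {
        __cpu_raise_softirq(smp_processor_id(), nr);
    }
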
     3.1 --- a/xen/arch/x86/nmi.c	Fri Jun 18 14:46:29 2004 +0000
     3.2 +++ b/xen/arch/x86/nmi.c	Sat Jun 19 16:48:34 2004 +0000
     3.3 @@ -18,10 +18,8 @@
     3.4  #include <xen/mm.h>
     3.5  #include <xen/irq.h>
     3.6  #include <xen/delay.h>
     3.7 -#include <xen/interrupt.h>
     3.8  #include <xen/time.h>
     3.9  #include <xen/sched.h>
    3.10 -
    3.11  #include <asm/mc146818rtc.h>
    3.12  #include <asm/smp.h>
    3.13  #include <asm/msr.h>
     4.1 --- a/xen/arch/x86/pci-irq.c	Fri Jun 18 14:46:29 2004 +0000
     4.2 +++ b/xen/arch/x86/pci-irq.c	Sat Jun 19 16:48:34 2004 +0000
     4.3 @@ -10,13 +10,10 @@
     4.4  #include <xen/pci.h>
     4.5  #include <xen/init.h>
     4.6  #include <xen/slab.h>
     4.7 -#include <xen/interrupt.h>
     4.8  #include <xen/irq.h>
     4.9 -
    4.10  #include <asm/io.h>
    4.11  #include <asm/smp.h>
    4.12  #include <asm/io_apic.h>
    4.13 -
    4.14  #include "pci-x86.h"
    4.15  
    4.16  #define PIRQ_SIGNATURE	(('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
     5.1 --- a/xen/arch/x86/process.c	Fri Jun 18 14:46:29 2004 +0000
     5.2 +++ b/xen/arch/x86/process.c	Sat Jun 19 16:48:34 2004 +0000
     5.3 @@ -17,11 +17,10 @@
     5.4  #include <xen/errno.h>
     5.5  #include <xen/sched.h>
     5.6  #include <xen/smp.h>
     5.7 +#include <xen/delay.h>
     5.8 +#include <xen/softirq.h>
     5.9  #include <asm/ptrace.h>
    5.10 -#include <xen/delay.h>
    5.11 -#include <xen/interrupt.h>
    5.12  #include <asm/mc146818rtc.h>
    5.13 -
    5.14  #include <asm/system.h>
    5.15  #include <asm/io.h>
    5.16  #include <asm/processor.h>
    5.17 @@ -77,7 +76,7 @@ void startup_cpu_idle_loop(void)
    5.18  {
    5.19      /* Just some sanity to ensure that the scheduler is set up okay. */
    5.20      ASSERT(current->domain == IDLE_DOMAIN_ID);
    5.21 -    domain_controller_unpause(current);
    5.22 +    domain_start(current);
    5.23      __enter_scheduler();
    5.24  
    5.25      /*
     6.1 --- a/xen/arch/x86/setup.c	Fri Jun 18 14:46:29 2004 +0000
     6.2 +++ b/xen/arch/x86/setup.c	Sat Jun 19 16:48:34 2004 +0000
     6.3 @@ -1,11 +1,11 @@
     6.4  
     6.5  #include <xen/config.h>
     6.6  #include <xen/init.h>
     6.7 -#include <xen/interrupt.h>
     6.8  #include <xen/lib.h>
     6.9  #include <xen/sched.h>
    6.10  #include <xen/pci.h>
    6.11  #include <xen/serial.h>
    6.12 +#include <xen/softirq.h>
    6.13  #include <xen/acpi.h>
    6.14  #include <asm/bitops.h>
    6.15  #include <asm/smp.h>
     7.1 --- a/xen/arch/x86/smp.c	Fri Jun 18 14:46:29 2004 +0000
     7.2 +++ b/xen/arch/x86/smp.c	Sat Jun 19 16:48:34 2004 +0000
     7.3 @@ -305,7 +305,9 @@ void flush_tlb_all_pge(void)
     7.4  
     7.5  void smp_send_event_check_mask(unsigned long cpu_mask)
     7.6  {
     7.7 -    send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
     7.8 +    cpu_mask &= ~(1<<smp_processor_id());
     7.9 +    if ( cpu_mask != 0 )
    7.10 +        send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
    7.11  }
    7.12  
    7.13  /*
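
The added guard strips the sender from the IPI mask: the local CPU notices pending events on its own interrupt-return path anyway, so a self-IPI is wasted work, and a mask that reduces to zero now sends no IPI at all. A hedged wrapper built on the same call, for kicking a single CPU (the wrapper name is illustrative, not necessarily in this tree):

    /* Illustrative one-CPU wrapper: with the new guard this is a
     * harmless no-op when cpu is the caller itself. */
    static inline void smp_send_event_check_cpu(int cpu)
    {
        smp_send_event_check_mask(1UL << cpu);
    }
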
     8.1 --- a/xen/arch/x86/smpboot.c	Fri Jun 18 14:46:29 2004 +0000
     8.2 +++ b/xen/arch/x86/smpboot.c	Sat Jun 19 16:48:34 2004 +0000
     8.3 @@ -34,7 +34,6 @@
     8.4  
     8.5  #include <xen/config.h>
     8.6  #include <xen/init.h>
     8.7 -#include <xen/interrupt.h>
     8.8  #include <xen/irq.h>
     8.9  #include <xen/mm.h>
    8.10  #include <xen/slab.h>
     9.1 --- a/xen/arch/x86/time.c	Fri Jun 18 14:46:29 2004 +0000
     9.2 +++ b/xen/arch/x86/time.c	Sat Jun 19 16:48:34 2004 +0000
     9.3 @@ -19,13 +19,12 @@
     9.4  #include <xen/lib.h>
     9.5  #include <xen/config.h>
     9.6  #include <xen/init.h>
     9.7 -#include <xen/interrupt.h>
     9.8  #include <xen/time.h>
     9.9  #include <xen/ac_timer.h>
    9.10 -
    9.11 -#include <asm/io.h>
    9.12  #include <xen/smp.h>
    9.13  #include <xen/irq.h>
    9.14 +#include <xen/softirq.h>
    9.15 +#include <asm/io.h>
    9.16  #include <asm/msr.h>
    9.17  #include <asm/mpspec.h>
    9.18  #include <asm/processor.h>
    9.19 @@ -93,7 +92,7 @@ static void timer_interrupt(int irq, voi
    9.20  
    9.21      /* Rough hack to allow accurate timers to sort-of-work with no APIC. */
    9.22      if ( do_timer_lists_from_pit )
    9.23 -        __cpu_raise_softirq(smp_processor_id(), AC_TIMER_SOFTIRQ);
    9.24 +        raise_softirq(AC_TIMER_SOFTIRQ);
    9.25  }
    9.26  
    9.27  static struct irqaction irq0 = { timer_interrupt, "timer", NULL};
    10.1 --- a/xen/arch/x86/traps.c	Fri Jun 18 14:46:29 2004 +0000
    10.2 +++ b/xen/arch/x86/traps.c	Sat Jun 19 16:48:34 2004 +0000
    10.3 @@ -29,7 +29,6 @@
    10.4  
    10.5  #include <xen/config.h>
    10.6  #include <xen/init.h>
    10.7 -#include <xen/interrupt.h>
    10.8  #include <xen/sched.h>
    10.9  #include <xen/lib.h>
   10.10  #include <xen/errno.h>
    11.1 --- a/xen/common/ac_timer.c	Fri Jun 18 14:46:29 2004 +0000
    11.2 +++ b/xen/common/ac_timer.c	Sat Jun 19 16:48:34 2004 +0000
    11.3 @@ -21,7 +21,7 @@
    11.4  #include <xen/smp.h>
    11.5  #include <xen/perfc.h>
    11.6  #include <xen/time.h>
    11.7 -#include <xen/interrupt.h>
    11.8 +#include <xen/softirq.h>
    11.9  #include <xen/ac_timer.h>
   11.10  #include <xen/keyhandler.h>
   11.11  #include <asm/system.h>
   11.12 @@ -154,84 +154,58 @@ static int add_entry(struct ac_timer **h
   11.13   * TIMER OPERATIONS.
   11.14   */
   11.15  
   11.16 -static inline unsigned long __add_ac_timer(struct ac_timer *timer)
   11.17 +static inline void __add_ac_timer(struct ac_timer *timer)
   11.18  {
   11.19      int cpu = timer->cpu;
   11.20 -    unsigned long cpu_mask = 0;
   11.21 -
   11.22      if ( add_entry(ac_timers[cpu].heap, timer) )
   11.23 -    {
   11.24 -        __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
   11.25 -        cpu_mask = (cpu != smp_processor_id()) ? 1<<cpu : 0;
   11.26 -    }
   11.27 -
   11.28 -    return cpu_mask;
   11.29 +        cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
   11.30  }
   11.31  
   11.32  void add_ac_timer(struct ac_timer *timer) 
   11.33  {
   11.34      int           cpu = timer->cpu;
   11.35 -    unsigned long flags, cpu_mask;
   11.36 +    unsigned long flags;
   11.37  
   11.38      spin_lock_irqsave(&ac_timers[cpu].lock, flags);
   11.39      ASSERT(timer != NULL);
   11.40      ASSERT(!active_ac_timer(timer));
   11.41 -    cpu_mask = __add_ac_timer(timer);
   11.42 +    __add_ac_timer(timer);
   11.43      spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
   11.44 -
   11.45 -    if ( cpu_mask ) 
   11.46 -        smp_send_event_check_mask(cpu_mask);
   11.47  }
   11.48  
   11.49  
   11.50 -static inline unsigned long __rem_ac_timer(struct ac_timer *timer)
   11.51 +static inline void __rem_ac_timer(struct ac_timer *timer)
   11.52  {
   11.53      int cpu = timer->cpu;
   11.54 -    unsigned long cpu_mask = 0;
   11.55 -
   11.56      if ( remove_entry(ac_timers[cpu].heap, timer) )
   11.57 -    {
   11.58 -        __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
   11.59 -        cpu_mask = (cpu != smp_processor_id()) ? 1<<cpu : 0;
   11.60 -    }
   11.61 -
   11.62 -    return cpu_mask;
   11.63 +        cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
   11.64  }
   11.65  
   11.66  void rem_ac_timer(struct ac_timer *timer)
   11.67  {
   11.68      int           cpu = timer->cpu;
   11.69 -    unsigned long flags, cpu_mask = 0;
   11.70 +    unsigned long flags;
   11.71  
   11.72      spin_lock_irqsave(&ac_timers[cpu].lock, flags);
   11.73      ASSERT(timer != NULL);
   11.74      if ( active_ac_timer(timer) )
   11.75 -        cpu_mask = __rem_ac_timer(timer);
   11.76 +        __rem_ac_timer(timer);
   11.77      spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
   11.78 -
   11.79 -    if ( cpu_mask ) 
   11.80 -        smp_send_event_check_mask(cpu_mask);
   11.81  }
   11.82  
   11.83  
   11.84  void mod_ac_timer(struct ac_timer *timer, s_time_t new_time)
   11.85  {
   11.86      int           cpu = timer->cpu;
   11.87 -    unsigned long flags, cpu_mask = 0;
   11.88 +    unsigned long flags;
   11.89  
   11.90      spin_lock_irqsave(&ac_timers[cpu].lock, flags);
   11.91 -
   11.92      ASSERT(timer != NULL);
   11.93 -
   11.94      if ( active_ac_timer(timer) )
   11.95 -        cpu_mask = __rem_ac_timer(timer);
   11.96 +        __rem_ac_timer(timer);
   11.97      timer->expires = new_time;
   11.98 -    cpu_mask |= __add_ac_timer(timer);
   11.99 -
  11.100 +    __add_ac_timer(timer);
  11.101      spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
  11.102 -
  11.103 -    if ( cpu_mask ) 
  11.104 -        smp_send_event_check_mask(cpu_mask);
  11.105  }
  11.106  
  11.107  
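
Previously every timer operation threaded a cpu_mask back up the call chain so the outermost caller could issue smp_send_event_check_mask() after dropping the lock. That notification now lives inside cpu_raise_softirq(), shrinking add/rem/mod to lock, heap operation, unlock. A sketch of what cpu_raise_softirq() is assumed to do, reconstructed from the open-coded pattern deleted above (the body is an assumption; only the name appears in the diff):

    /* Assumed: raise a softirq on an arbitrary CPU, and interrupt that
     * CPU if it is not the caller so it notices promptly. */
    static inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
    {
        __cpu_raise_softirq(cpu, nr);
        if ( cpu != smp_processor_id() )
            smp_send_event_check_mask(1UL << cpu);
    }
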
    12.1 --- a/xen/common/debug.c	Fri Jun 18 14:46:29 2004 +0000
    12.2 +++ b/xen/common/debug.c	Sat Jun 19 16:48:34 2004 +0000
    12.3 @@ -49,11 +49,11 @@ void pdb_do_debug (dom0_op_t *op)
    12.4      {
    12.5          case 'c' :
    12.6  	{
    12.7 -	    struct domain *p = find_domain_by_id(op->u.debug.domain);
    12.8 -	    if ( p != NULL )
    12.9 +	    struct domain *d = find_domain_by_id(op->u.debug.domain);
   12.10 +	    if ( d != NULL )
   12.11  	    {
   12.12 -                domain_controller_unpause(p);
   12.13 -		put_domain(p);
   12.14 +                domain_start(d);
   12.15 +		put_domain(d);
   12.16  	    }
   12.17  	    else
   12.18  	    {
   12.19 @@ -66,13 +66,13 @@ void pdb_do_debug (dom0_op_t *op)
   12.20              int loop;
   12.21              u_char x;
   12.22  	    unsigned long cr3;
   12.23 -	    struct domain *p;
   12.24 +	    struct domain *d;
   12.25  
   12.26 -	    p = find_domain_by_id(op->u.debug.domain);
   12.27 -	    if (p->mm.shadow_mode)
   12.28 -	      cr3 = pagetable_val(p->mm.shadow_table);
   12.29 +	    d = find_domain_by_id(op->u.debug.domain);
   12.30 +	    if ( d->mm.shadow_mode )
   12.31 +	      cr3 = pagetable_val(d->mm.shadow_table);
   12.32  	    else
   12.33 -	      cr3 = pagetable_val(p->mm.pagetable);
   12.34 +	      cr3 = pagetable_val(d->mm.pagetable);
   12.35  
   12.36              for (loop = 0; loop < op->u.debug.in2; loop++)         /* length */
   12.37              { 
   12.38 @@ -85,17 +85,17 @@ void pdb_do_debug (dom0_op_t *op)
   12.39                  printk (" %02x", x);
   12.40              }
   12.41              printk ("\n");
   12.42 -	    put_domain(p);
   12.43 +	    put_domain(d);
   12.44              break;
   12.45          }
   12.46          case 's' :
   12.47  	{
   12.48 -	    struct domain * p = find_domain_by_id(op->u.debug.domain);
   12.49 +	    struct domain *d = find_domain_by_id(op->u.debug.domain);
   12.50  
   12.51 -	    if (p != NULL)
   12.52 +	    if ( d != NULL )
   12.53  	    {
   12.54 -                domain_controller_pause(p);
   12.55 -		put_domain(p);
   12.56 +                domain_stop(d);
   12.57 +		put_domain(d);
   12.58  	    }
   12.59  	    else
   12.60  	    {
   12.61 @@ -109,5 +109,4 @@ void pdb_do_debug (dom0_op_t *op)
   12.62  		   op->u.debug.opcode, op->u.debug.opcode);
   12.63  	}
   12.64      }
   12.65 -    return;
   12.66  }
    13.1 --- a/xen/common/dom0_ops.c	Fri Jun 18 14:46:29 2004 +0000
    13.2 +++ b/xen/common/dom0_ops.c	Sat Jun 19 16:48:34 2004 +0000
    13.3 @@ -89,7 +89,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    13.4              ret = -EINVAL;
    13.5              if ( test_bit(DF_CONSTRUCTED, &d->flags) )
    13.6              {
    13.7 -                domain_controller_unpause(d);
    13.8 +                domain_start(d);
    13.9                  ret = 0;
   13.10              }
   13.11              put_domain(d);
   13.12 @@ -103,7 +103,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   13.13          ret = -ESRCH;
   13.14          if ( d != NULL )
   13.15          {
   13.16 -            domain_controller_pause(d);
   13.17 +            domain_stop(d);
   13.18              put_domain(d);
   13.19              ret = 0;
   13.20          }
   13.21 @@ -196,14 +196,14 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   13.22              {
   13.23                  if ( cpu == -1 )
   13.24                  {
   13.25 -                    p->cpupinned = 0;
   13.26 +                    clear_bit(DF_CPUPINNED, &p->flags);
   13.27                  }
   13.28                  else
   13.29                  {
   13.30                      domain_pause(p);
   13.31 +                    set_bit(DF_CPUPINNED, &p->flags);
   13.32                      cpu = cpu % smp_num_cpus;
   13.33                      p->processor = cpu;
   13.34 -                    p->cpupinned = 1;                    
   13.35                      domain_unpause(p);
   13.36                  }
   13.37                  put_domain(p);
   13.38 @@ -295,14 +295,18 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   13.39                  op->u.getdomaininfo.flags = DOMSTATE_CRASHED;
   13.40              else if ( test_bit(DF_SUSPENDED, &p->flags) )
   13.41                  op->u.getdomaininfo.flags = DOMSTATE_SUSPENDED;
   13.42 -            else if ( test_bit(DF_CONTROLPAUSE, &p->flags) )
   13.43 +            else if ( test_bit(DF_STOPPED, &p->flags) )
   13.44                  op->u.getdomaininfo.flags = DOMSTATE_PAUSED;
   13.45              else if ( test_bit(DF_BLOCKED, &p->flags) )
   13.46                  op->u.getdomaininfo.flags = DOMSTATE_BLOCKED;
   13.47 +            else if ( test_bit(DF_RUNNING, &p->flags) )
   13.48 +            {
   13.49 +                op->u.getdomaininfo.flags = DOMSTATE_RUNNING;
   13.50 +                dump_state = 1;
   13.51 +            }
   13.52              else
   13.53              {
   13.54 -                op->u.getdomaininfo.flags = 
   13.55 -                    p->has_cpu ? DOMSTATE_RUNNING : DOMSTATE_RUNNABLE;
   13.56 +                op->u.getdomaininfo.flags = DOMSTATE_RUNNABLE;
   13.57                  dump_state = 1;
   13.58              }
   13.59  
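
Here p->cpupinned, a bare int, becomes the DF_CPUPINNED bit in p->flags, matching the other DF_* states and making the update atomic. Restated outside the switch for clarity, the pin path is exactly this (taken from the hunk above, with d in place of p):

    domain_pause(d);                   /* take it off any CPU first   */
    set_bit(DF_CPUPINNED, &d->flags);  /* atomic flag, not a bare int */
    cpu = cpu % smp_num_cpus;
    d->processor = cpu;                /* safe: nothing is running it */
    domain_unpause(d);
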
    14.1 --- a/xen/common/domain.c	Fri Jun 18 14:46:29 2004 +0000
    14.2 +++ b/xen/common/domain.c	Sat Jun 19 16:48:34 2004 +0000
    14.3 @@ -5,19 +5,18 @@
    14.4  #include <xen/errno.h>
    14.5  #include <xen/sched.h>
    14.6  #include <xen/mm.h>
    14.7 -#include <xen/interrupt.h>
    14.8  #include <xen/delay.h>
    14.9  #include <xen/event.h>
   14.10  #include <xen/time.h>
   14.11  #include <xen/shadow.h>
   14.12 -#include <hypervisor-ifs/dom0_ops.h>
   14.13 +#include <xen/console.h>
   14.14 +#include <xen/shadow.h>
   14.15  #include <asm/io.h>
   14.16  #include <asm/domain_page.h>
   14.17  #include <asm/flushtlb.h>
   14.18  #include <asm/msr.h>
   14.19 -#include <xen/console.h>
   14.20  #include <asm/i387.h>
   14.21 -#include <xen/shadow.h>
   14.22 +#include <hypervisor-ifs/dom0_ops.h>
   14.23  
   14.24  #if defined(__x86_64__)
   14.25  #define ELFSIZE 64
   14.26 @@ -185,7 +184,7 @@ void domain_crash(void)
   14.27      struct domain *d;
   14.28  
   14.29      set_bit(DF_CRASHED, &current->flags);
   14.30 -    
   14.31 +
   14.32      d = find_domain_by_id(0);
   14.33      send_guest_virq(d, VIRQ_DOM_EXC);
   14.34      put_domain(d);
    15.1 --- a/xen/common/event_channel.c	Fri Jun 18 14:46:29 2004 +0000
    15.2 +++ b/xen/common/event_channel.c	Sat Jun 19 16:48:34 2004 +0000
    15.3 @@ -29,13 +29,13 @@
    15.4  #define INIT_EVENT_CHANNELS   16
    15.5  #define MAX_EVENT_CHANNELS  1024
    15.6  
    15.7 -static int get_free_port(struct domain *p)
    15.8 +static int get_free_port(struct domain *d)
    15.9  {
   15.10      int max, port;
   15.11      event_channel_t *chn;
   15.12  
   15.13 -    max = p->max_event_channel;
   15.14 -    chn = p->event_channel;
   15.15 +    max = d->max_event_channel;
   15.16 +    chn = d->event_channel;
   15.17  
   15.18      for ( port = 0; port < max; port++ )
   15.19          if ( chn[port].state == ECS_FREE )
   15.20 @@ -54,14 +54,14 @@ static int get_free_port(struct domain *
   15.21  
   15.22          memset(chn, 0, max * sizeof(event_channel_t));
   15.23  
   15.24 -        if ( p->event_channel != NULL )
   15.25 +        if ( d->event_channel != NULL )
   15.26          {
   15.27 -            memcpy(chn, p->event_channel, (max/2) * sizeof(event_channel_t));
   15.28 -            kfree(p->event_channel);
   15.29 +            memcpy(chn, d->event_channel, (max/2) * sizeof(event_channel_t));
   15.30 +            kfree(d->event_channel);
   15.31          }
   15.32  
   15.33 -        p->event_channel     = chn;
   15.34 -        p->max_event_channel = max;
   15.35 +        d->event_channel     = chn;
   15.36 +        d->max_event_channel = max;
   15.37      }
   15.38  
   15.39      return port;
   15.40 @@ -69,10 +69,10 @@ static int get_free_port(struct domain *
   15.41  
   15.42  static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
   15.43  {
   15.44 -    struct domain *p1, *p2;
   15.45 -    int                 port1 = 0, port2 = 0;
   15.46 -    domid_t             dom1 = bind->dom1, dom2 = bind->dom2;
   15.47 -    long                rc = 0;
   15.48 +    struct domain *d1, *d2;
   15.49 +    int            port1 = 0, port2 = 0;
   15.50 +    domid_t        dom1 = bind->dom1, dom2 = bind->dom2;
   15.51 +    long           rc = 0;
   15.52  
   15.53      if ( !IS_PRIV(current) )
   15.54          return -EPERM;
   15.55 @@ -82,60 +82,60 @@ static long evtchn_bind_interdomain(evtc
   15.56      if ( dom2 == DOMID_SELF )
   15.57          dom2 = current->domain;
   15.58  
   15.59 -    if ( ((p1 = find_domain_by_id(dom1)) == NULL) ||
   15.60 -         ((p2 = find_domain_by_id(dom2)) == NULL) )
   15.61 +    if ( ((d1 = find_domain_by_id(dom1)) == NULL) ||
   15.62 +         ((d2 = find_domain_by_id(dom2)) == NULL) )
   15.63      {
   15.64 -        if ( p1 != NULL )
   15.65 -            put_domain(p1);
   15.66 +        if ( d1 != NULL )
   15.67 +            put_domain(d1);
   15.68          return -ESRCH;
   15.69      }
   15.70  
   15.71      /* Avoid deadlock by first acquiring lock of domain with smaller id. */
   15.72      if ( dom1 < dom2 )
   15.73      {
   15.74 -        spin_lock(&p1->event_channel_lock);
   15.75 -        spin_lock(&p2->event_channel_lock);
   15.76 +        spin_lock(&d1->event_channel_lock);
   15.77 +        spin_lock(&d2->event_channel_lock);
   15.78      }
   15.79      else
   15.80      {
   15.81 -        if ( p1 != p2 )
   15.82 -            spin_lock(&p2->event_channel_lock);
   15.83 -        spin_lock(&p1->event_channel_lock);
   15.84 +        if ( d1 != d2 )
   15.85 +            spin_lock(&d2->event_channel_lock);
   15.86 +        spin_lock(&d1->event_channel_lock);
   15.87      }
   15.88  
   15.89 -    if ( (port1 = get_free_port(p1)) < 0 )
   15.90 +    if ( (port1 = get_free_port(d1)) < 0 )
   15.91      {
   15.92          rc = port1;
   15.93          goto out;
   15.94      }
   15.95  
   15.96      /* 'Allocate' port1 before searching for a free port2. */
   15.97 -    p1->event_channel[port1].state = ECS_INTERDOMAIN;
   15.98 +    d1->event_channel[port1].state = ECS_INTERDOMAIN;
   15.99  
  15.100 -    if ( (port2 = get_free_port(p2)) < 0 )
  15.101 +    if ( (port2 = get_free_port(d2)) < 0 )
  15.102      {
  15.103 -        p1->event_channel[port1].state = ECS_FREE;
  15.104 +        d1->event_channel[port1].state = ECS_FREE;
  15.105          rc = port2;
  15.106          goto out;
  15.107      }
  15.108  
  15.109 -    p1->event_channel[port1].u.remote.dom  = p2;
  15.110 -    p1->event_channel[port1].u.remote.port = (u16)port2;
  15.111 +    d1->event_channel[port1].u.remote.dom  = d2;
  15.112 +    d1->event_channel[port1].u.remote.port = (u16)port2;
  15.113  
  15.114 -    p2->event_channel[port2].u.remote.dom  = p1;
  15.115 -    p2->event_channel[port2].u.remote.port = (u16)port1;
  15.116 -    p2->event_channel[port2].state         = ECS_INTERDOMAIN;
  15.117 +    d2->event_channel[port2].u.remote.dom  = d1;
  15.118 +    d2->event_channel[port2].u.remote.port = (u16)port1;
  15.119 +    d2->event_channel[port2].state         = ECS_INTERDOMAIN;
  15.120  
  15.121 -    evtchn_set_pending(p1, port1);
  15.122 -    evtchn_set_pending(p2, port2);
  15.123 +    evtchn_set_pending(d1, port1);
  15.124 +    evtchn_set_pending(d2, port2);
  15.125      
  15.126   out:
  15.127 -    spin_unlock(&p1->event_channel_lock);
  15.128 -    if ( p1 != p2 )
  15.129 -        spin_unlock(&p2->event_channel_lock);
  15.130 +    spin_unlock(&d1->event_channel_lock);
  15.131 +    if ( d1 != d2 )
  15.132 +        spin_unlock(&d2->event_channel_lock);
  15.133      
  15.134 -    put_domain(p1);
  15.135 -    put_domain(p2);
  15.136 +    put_domain(d1);
  15.137 +    put_domain(d2);
  15.138  
  15.139      bind->port1 = port1;
  15.140      bind->port2 = port2;
  15.141 @@ -146,32 +146,31 @@ static long evtchn_bind_interdomain(evtc
  15.142  
  15.143  static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
  15.144  {
  15.145 -    struct domain *p = current;
  15.146 -    int virq = bind->virq;
  15.147 -    int port;
  15.148 +    struct domain *d = current;
  15.149 +    int            port, virq = bind->virq;
  15.150  
  15.151 -    if ( virq >= ARRAY_SIZE(p->virq_to_evtchn) )
  15.152 +    if ( virq >= ARRAY_SIZE(d->virq_to_evtchn) )
  15.153          return -EINVAL;
  15.154  
  15.155 -    spin_lock(&p->event_channel_lock);
  15.156 +    spin_lock(&d->event_channel_lock);
  15.157  
  15.158      /*
  15.159       * Port 0 is the fallback port for VIRQs that haven't been explicitly
  15.160       * bound yet. The exception is the 'misdirect VIRQ', which is permanently 
  15.161       * bound to port 0.
  15.162       */
  15.163 -    if ( ((port = p->virq_to_evtchn[virq]) != 0) ||
  15.164 +    if ( ((port = d->virq_to_evtchn[virq]) != 0) ||
  15.165           (virq == VIRQ_MISDIRECT) ||
  15.166 -         ((port = get_free_port(p)) < 0) )
  15.167 +         ((port = get_free_port(d)) < 0) )
  15.168          goto out;
  15.169  
  15.170 -    p->event_channel[port].state  = ECS_VIRQ;
  15.171 -    p->event_channel[port].u.virq = virq;
  15.172 +    d->event_channel[port].state  = ECS_VIRQ;
  15.173 +    d->event_channel[port].u.virq = virq;
  15.174  
  15.175 -    p->virq_to_evtchn[virq] = port;
  15.176 +    d->virq_to_evtchn[virq] = port;
  15.177  
  15.178   out:
  15.179 -    spin_unlock(&p->event_channel_lock);
  15.180 +    spin_unlock(&d->event_channel_lock);
  15.181  
  15.182      if ( port < 0 )
  15.183          return port;
  15.184 @@ -183,34 +182,33 @@ static long evtchn_bind_virq(evtchn_bind
  15.185  
  15.186  static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
  15.187  {
  15.188 -    struct domain *p = current;
  15.189 -    int pirq = bind->pirq;
  15.190 -    int port, rc;
  15.191 +    struct domain *d = current;
  15.192 +    int            port, rc, pirq = bind->pirq;
  15.193  
  15.194 -    if ( pirq >= ARRAY_SIZE(p->pirq_to_evtchn) )
  15.195 +    if ( pirq >= ARRAY_SIZE(d->pirq_to_evtchn) )
  15.196          return -EINVAL;
  15.197  
  15.198 -    spin_lock(&p->event_channel_lock);
  15.199 +    spin_lock(&d->event_channel_lock);
  15.200  
  15.201 -    if ( ((rc = port = p->pirq_to_evtchn[pirq]) != 0) ||
  15.202 -         ((rc = port = get_free_port(p)) < 0) )
  15.203 +    if ( ((rc = port = d->pirq_to_evtchn[pirq]) != 0) ||
  15.204 +         ((rc = port = get_free_port(d)) < 0) )
  15.205          goto out;
  15.206  
  15.207 -    p->pirq_to_evtchn[pirq] = port;
  15.208 -    rc = pirq_guest_bind(p, pirq, 
  15.209 +    d->pirq_to_evtchn[pirq] = port;
  15.210 +    rc = pirq_guest_bind(d, pirq, 
  15.211                           !!(bind->flags & BIND_PIRQ__WILL_SHARE));
  15.212      if ( rc != 0 )
  15.213      {
  15.214 -        p->pirq_to_evtchn[pirq] = 0;
  15.215 +        d->pirq_to_evtchn[pirq] = 0;
  15.216          DPRINTK("Couldn't bind to PIRQ %d (error=%d)\n", pirq, rc);
  15.217          goto out;
  15.218      }
  15.219  
  15.220 -    p->event_channel[port].state  = ECS_PIRQ;
  15.221 -    p->event_channel[port].u.pirq = pirq;
  15.222 +    d->event_channel[port].state  = ECS_PIRQ;
  15.223 +    d->event_channel[port].u.pirq = pirq;
  15.224  
  15.225   out:
  15.226 -    spin_unlock(&p->event_channel_lock);
  15.227 +    spin_unlock(&d->event_channel_lock);
  15.228  
  15.229      if ( rc < 0 )
  15.230          return rc;
  15.231 @@ -220,20 +218,20 @@ static long evtchn_bind_pirq(evtchn_bind
  15.232  }
  15.233  
  15.234  
  15.235 -static long __evtchn_close(struct domain *p1, int port1)
  15.236 +static long __evtchn_close(struct domain *d1, int port1)
  15.237  {
  15.238 -    struct domain *p2 = NULL;
  15.239 -    event_channel_t    *chn1, *chn2;
  15.240 -    int                 port2;
  15.241 -    long                rc = 0;
  15.242 +    struct domain   *d2 = NULL;
  15.243 +    event_channel_t *chn1, *chn2;
  15.244 +    int              port2;
  15.245 +    long             rc = 0;
  15.246  
  15.247   again:
  15.248 -    spin_lock(&p1->event_channel_lock);
  15.249 +    spin_lock(&d1->event_channel_lock);
  15.250  
  15.251 -    chn1 = p1->event_channel;
  15.252 +    chn1 = d1->event_channel;
  15.253  
  15.254      /* NB. Port 0 is special (VIRQ_MISDIRECT). Never let it be closed. */
  15.255 -    if ( (port1 <= 0) || (port1 >= p1->max_event_channel) )
  15.256 +    if ( (port1 <= 0) || (port1 >= d1->max_event_channel) )
  15.257      {
  15.258          rc = -EINVAL;
  15.259          goto out;
  15.260 @@ -249,59 +247,59 @@ static long __evtchn_close(struct domain
  15.261          break;
  15.262  
  15.263      case ECS_PIRQ:
  15.264 -        if ( (rc = pirq_guest_unbind(p1, chn1[port1].u.pirq)) == 0 )
  15.265 -            p1->pirq_to_evtchn[chn1[port1].u.pirq] = 0;
  15.266 +        if ( (rc = pirq_guest_unbind(d1, chn1[port1].u.pirq)) == 0 )
  15.267 +            d1->pirq_to_evtchn[chn1[port1].u.pirq] = 0;
  15.268          break;
  15.269  
  15.270      case ECS_VIRQ:
  15.271 -        p1->virq_to_evtchn[chn1[port1].u.virq] = 0;
  15.272 +        d1->virq_to_evtchn[chn1[port1].u.virq] = 0;
  15.273          break;
  15.274  
  15.275      case ECS_INTERDOMAIN:
  15.276 -        if ( p2 == NULL )
  15.277 +        if ( d2 == NULL )
  15.278          {
  15.279 -            p2 = chn1[port1].u.remote.dom;
  15.280 +            d2 = chn1[port1].u.remote.dom;
  15.281  
  15.282 -            /* If we unlock p1 then we could lose p2. Must get a reference. */
  15.283 -            if ( unlikely(!get_domain(p2)) )
  15.284 +            /* If we unlock d1 then we could lose d2. Must get a reference. */
  15.285 +            if ( unlikely(!get_domain(d2)) )
  15.286              {
  15.287                  /*
  15.288 -                 * Failed to obtain a reference. No matter: p2 must be dying
  15.289 +                 * Failed to obtain a reference. No matter: d2 must be dying
  15.290                   * and so will close this event channel for us.
  15.291                   */
  15.292 -                p2 = NULL;
  15.293 +                d2 = NULL;
  15.294                  goto out;
  15.295              }
  15.296  
  15.297 -            if ( p1->domain < p2->domain )
  15.298 +            if ( d1->domain < d2->domain )
  15.299              {
  15.300 -                spin_lock(&p2->event_channel_lock);
  15.301 +                spin_lock(&d2->event_channel_lock);
  15.302              }
  15.303 -            else if ( p1 != p2 )
  15.304 +            else if ( d1 != d2 )
  15.305              {
  15.306 -                spin_unlock(&p1->event_channel_lock);
  15.307 -                spin_lock(&p2->event_channel_lock);
  15.308 +                spin_unlock(&d1->event_channel_lock);
  15.309 +                spin_lock(&d2->event_channel_lock);
  15.310                  goto again;
  15.311              }
  15.312          }
  15.313 -        else if ( p2 != chn1[port1].u.remote.dom )
  15.314 +        else if ( d2 != chn1[port1].u.remote.dom )
  15.315          {
  15.316              rc = -EINVAL;
  15.317              goto out;
  15.318          }
  15.319      
  15.320 -        chn2  = p2->event_channel;
  15.321 +        chn2  = d2->event_channel;
  15.322          port2 = chn1[port1].u.remote.port;
  15.323  
  15.324 -        if ( port2 >= p2->max_event_channel )
  15.325 +        if ( port2 >= d2->max_event_channel )
  15.326              BUG();
  15.327          if ( chn2[port2].state != ECS_INTERDOMAIN )
  15.328              BUG();
  15.329 -        if ( chn2[port2].u.remote.dom != p1 )
  15.330 +        if ( chn2[port2].u.remote.dom != d1 )
  15.331              BUG();
  15.332  
  15.333          chn2[port2].state = ECS_UNBOUND;
  15.334 -        evtchn_set_exception(p2, port2);
  15.335 +        evtchn_set_exception(d2, port2);
  15.336  
  15.337          break;
  15.338  
  15.339 @@ -310,17 +308,17 @@ static long __evtchn_close(struct domain
  15.340      }
  15.341  
  15.342      chn1[port1].state = ECS_FREE;
  15.343 -    evtchn_set_exception(p1, port1);
  15.344 +    evtchn_set_exception(d1, port1);
  15.345  
  15.346   out:
  15.347 -    if ( p2 != NULL )
  15.348 +    if ( d2 != NULL )
  15.349      {
  15.350 -        if ( p1 != p2 )
  15.351 -            spin_unlock(&p2->event_channel_lock);
  15.352 -        put_domain(p2);
  15.353 +        if ( d1 != d2 )
  15.354 +            spin_unlock(&d2->event_channel_lock);
  15.355 +        put_domain(d2);
  15.356      }
  15.357      
  15.358 -    spin_unlock(&p1->event_channel_lock);
  15.359 +    spin_unlock(&d1->event_channel_lock);
  15.360  
  15.361      return rc;
  15.362  }
  15.363 @@ -328,46 +326,46 @@ static long __evtchn_close(struct domain
  15.364  
  15.365  static long evtchn_close(evtchn_close_t *close)
  15.366  {
  15.367 -    struct domain *p;
  15.368 -    long                rc;
  15.369 -    domid_t             dom = close->dom;
  15.370 +    struct domain *d;
  15.371 +    long           rc;
  15.372 +    domid_t        dom = close->dom;
  15.373  
  15.374      if ( dom == DOMID_SELF )
  15.375          dom = current->domain;
  15.376      else if ( !IS_PRIV(current) )
  15.377          return -EPERM;
  15.378  
  15.379 -    if ( (p = find_domain_by_id(dom)) == NULL )
  15.380 +    if ( (d = find_domain_by_id(dom)) == NULL )
  15.381          return -ESRCH;
  15.382  
  15.383 -    rc = __evtchn_close(p, close->port);
  15.384 +    rc = __evtchn_close(d, close->port);
  15.385  
  15.386 -    put_domain(p);
  15.387 +    put_domain(d);
  15.388      return rc;
  15.389  }
  15.390  
  15.391  
  15.392  static long evtchn_send(int lport)
  15.393  {
  15.394 -    struct domain *lp = current, *rp;
  15.395 -    int                 rport;
  15.396 +    struct domain *ld = current, *rd;
  15.397 +    int            rport;
  15.398  
  15.399 -    spin_lock(&lp->event_channel_lock);
  15.400 +    spin_lock(&ld->event_channel_lock);
  15.401  
  15.402      if ( unlikely(lport < 0) ||
  15.403 -         unlikely(lport >= lp->max_event_channel) || 
  15.404 -         unlikely(lp->event_channel[lport].state != ECS_INTERDOMAIN) )
  15.405 +         unlikely(lport >= ld->max_event_channel) || 
  15.406 +         unlikely(ld->event_channel[lport].state != ECS_INTERDOMAIN) )
  15.407      {
  15.408 -        spin_unlock(&lp->event_channel_lock);
  15.409 +        spin_unlock(&ld->event_channel_lock);
  15.410          return -EINVAL;
  15.411      }
  15.412  
  15.413 -    rp    = lp->event_channel[lport].u.remote.dom;
  15.414 -    rport = lp->event_channel[lport].u.remote.port;
  15.415 +    rd    = ld->event_channel[lport].u.remote.dom;
  15.416 +    rport = ld->event_channel[lport].u.remote.port;
  15.417  
  15.418 -    evtchn_set_pending(rp, rport);
  15.419 +    evtchn_set_pending(rd, rport);
  15.420  
  15.421 -    spin_unlock(&lp->event_channel_lock);
  15.422 +    spin_unlock(&ld->event_channel_lock);
  15.423  
  15.424      return 0;
  15.425  }
  15.426 @@ -375,25 +373,25 @@ static long evtchn_send(int lport)
  15.427  
  15.428  static long evtchn_status(evtchn_status_t *status)
  15.429  {
  15.430 -    struct domain *p;
  15.431 -    domid_t             dom = status->dom;
  15.432 -    int                 port = status->port;
  15.433 -    event_channel_t    *chn;
  15.434 -    long                rc = 0;
  15.435 +    struct domain   *d;
  15.436 +    domid_t          dom = status->dom;
  15.437 +    int              port = status->port;
  15.438 +    event_channel_t *chn;
  15.439 +    long             rc = 0;
  15.440  
  15.441      if ( dom == DOMID_SELF )
  15.442          dom = current->domain;
  15.443      else if ( !IS_PRIV(current) )
  15.444          return -EPERM;
  15.445  
  15.446 -    if ( (p = find_domain_by_id(dom)) == NULL )
  15.447 +    if ( (d = find_domain_by_id(dom)) == NULL )
  15.448          return -ESRCH;
  15.449  
  15.450 -    spin_lock(&p->event_channel_lock);
  15.451 +    spin_lock(&d->event_channel_lock);
  15.452  
  15.453 -    chn = p->event_channel;
  15.454 +    chn = d->event_channel;
  15.455  
  15.456 -    if ( (port < 0) || (port >= p->max_event_channel) )
  15.457 +    if ( (port < 0) || (port >= d->max_event_channel) )
  15.458      {
  15.459          rc = -EINVAL;
  15.460          goto out;
  15.461 @@ -425,8 +423,8 @@ static long evtchn_status(evtchn_status_
  15.462      }
  15.463  
  15.464   out:
  15.465 -    spin_unlock(&p->event_channel_lock);
  15.466 -    put_domain(p);
  15.467 +    spin_unlock(&d->event_channel_lock);
  15.468 +    put_domain(d);
  15.469      return rc;
  15.470  }
  15.471  
  15.472 @@ -482,28 +480,28 @@ long do_event_channel_op(evtchn_op_t *uo
  15.473  }
  15.474  
  15.475  
  15.476 -int init_event_channels(struct domain *p)
  15.477 +int init_event_channels(struct domain *d)
  15.478  {
  15.479 -    spin_lock_init(&p->event_channel_lock);
  15.480 -    p->event_channel = kmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t), 
  15.481 +    spin_lock_init(&d->event_channel_lock);
  15.482 +    d->event_channel = kmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t), 
  15.483                                 GFP_KERNEL);
  15.484 -    if ( unlikely(p->event_channel == NULL) )
  15.485 +    if ( unlikely(d->event_channel == NULL) )
  15.486          return -ENOMEM;
  15.487 -    p->max_event_channel = INIT_EVENT_CHANNELS;
  15.488 -    memset(p->event_channel, 0, INIT_EVENT_CHANNELS * sizeof(event_channel_t));
  15.489 -    p->event_channel[0].state  = ECS_VIRQ;
  15.490 -    p->event_channel[0].u.virq = VIRQ_MISDIRECT;
  15.491 +    d->max_event_channel = INIT_EVENT_CHANNELS;
  15.492 +    memset(d->event_channel, 0, INIT_EVENT_CHANNELS * sizeof(event_channel_t));
  15.493 +    d->event_channel[0].state  = ECS_VIRQ;
  15.494 +    d->event_channel[0].u.virq = VIRQ_MISDIRECT;
  15.495      return 0;
  15.496  }
  15.497  
  15.498  
  15.499 -void destroy_event_channels(struct domain *p)
  15.500 +void destroy_event_channels(struct domain *d)
  15.501  {
  15.502      int i;
  15.503 -    if ( p->event_channel != NULL )
  15.504 +    if ( d->event_channel != NULL )
  15.505      {
  15.506 -        for ( i = 0; i < p->max_event_channel; i++ )
  15.507 -            (void)__evtchn_close(p, i);
  15.508 -        kfree(p->event_channel);
  15.509 +        for ( i = 0; i < d->max_event_channel; i++ )
  15.510 +            (void)__evtchn_close(d, i);
  15.511 +        kfree(d->event_channel);
  15.512      }
  15.513  }
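
The p1/p2 to d1/d2 rename is cosmetic, but it is worth restating the deadlock rule the whole file leans on: whenever two domains' event_channel_locks must be held at once, the lock of the domain with the smaller id is taken first, so every path agrees on one total order. Distilled into a hypothetical helper (the helper itself is not in this tree; the logic is copied from the hunks above):

    /* Hypothetical: acquire both event-channel locks in the canonical
     * smaller-domain-id-first order; a single lock if a == b. */
    static void lock_domain_pair(struct domain *a, struct domain *b)
    {
        if ( a->domain < b->domain )
        {
            spin_lock(&a->event_channel_lock);
            spin_lock(&b->event_channel_lock);
        }
        else
        {
            if ( a != b )
                spin_lock(&b->event_channel_lock);
            spin_lock(&a->event_channel_lock);
        }
    }
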
    16.1 --- a/xen/common/kernel.c	Fri Jun 18 14:46:29 2004 +0000
    16.2 +++ b/xen/common/kernel.c	Sat Jun 19 16:48:34 2004 +0000
    16.3 @@ -16,7 +16,6 @@
    16.4  #include <xen/sched.h>
    16.5  #include <xen/mm.h>
    16.6  #include <xen/delay.h>
    16.7 -#include <xen/interrupt.h>
    16.8  #include <xen/compile.h>
    16.9  #include <xen/console.h>
   16.10  #include <xen/serial.h>
   16.11 @@ -287,8 +286,8 @@ void cmain(unsigned long magic, multiboo
   16.12  
   16.13      init_trace_bufs();
   16.14  
   16.15 -    domain_controller_unpause(current);
   16.16 -    domain_controller_unpause(new_dom);
   16.17 +    domain_start(current);
   16.18 +    domain_start(new_dom);
   16.19      startup_cpu_idle_loop();
   16.20  }
   16.21  
    17.1 --- a/xen/common/keyhandler.c	Fri Jun 18 14:46:29 2004 +0000
    17.2 +++ b/xen/common/keyhandler.c	Sat Jun 19 16:48:34 2004 +0000
    17.3 @@ -84,7 +84,8 @@ void do_task_queues(u_char key, void *de
    17.4      for_each_domain ( p )
    17.5      {
    17.6          printk("Xen: DOM %u, CPU %d [has=%c]\n",
    17.7 -               p->domain, p->processor, p->has_cpu ? 'T':'F'); 
    17.8 +               p->domain, p->processor, 
    17.9 +               test_bit(DF_RUNNING, &p->flags) ? 'T':'F'); 
   17.10          s = p->shared_info; 
   17.11          printk("Guest: upcall_pend = %02x, upcall_mask = %02x\n", 
   17.12                 s->vcpu_data[0].evtchn_upcall_pending, 
    18.1 --- a/xen/common/memory.c	Fri Jun 18 14:46:29 2004 +0000
    18.2 +++ b/xen/common/memory.c	Sat Jun 19 16:48:34 2004 +0000
    18.3 @@ -132,7 +132,7 @@
    18.4  #include <xen/sched.h>
    18.5  #include <xen/errno.h>
    18.6  #include <xen/perfc.h>
    18.7 -#include <xen/interrupt.h>
    18.8 +#include <xen/irq.h>
    18.9  #include <xen/shadow.h>
   18.10  #include <asm/page.h>
   18.11  #include <asm/flushtlb.h>
    19.1 --- a/xen/common/sched_bvt.c	Fri Jun 18 14:46:29 2004 +0000
    19.2 +++ b/xen/common/sched_bvt.c	Sat Jun 19 16:48:34 2004 +0000
    19.3 @@ -22,7 +22,6 @@
    19.4  #include <xen/event.h>
    19.5  #include <xen/time.h>
    19.6  #include <xen/ac_timer.h>
    19.7 -#include <xen/interrupt.h>
    19.8  #include <xen/perfc.h>
    19.9  #include <xen/sched-if.h>
   19.10  #include <xen/slab.h>
    20.1 --- a/xen/common/schedule.c	Fri Jun 18 14:46:29 2004 +0000
    20.2 +++ b/xen/common/schedule.c	Sat Jun 19 16:48:34 2004 +0000
    20.3 @@ -22,16 +22,16 @@
    20.4  #include <xen/event.h>
    20.5  #include <xen/time.h>
    20.6  #include <xen/ac_timer.h>
    20.7 -#include <xen/interrupt.h>
    20.8  #include <xen/perfc.h>
    20.9  #include <xen/sched-if.h>
   20.10 -#include <hypervisor-ifs/sched_ctl.h>
   20.11 +#include <xen/softirq.h>
   20.12  #include <xen/trace.h>
   20.13 +#include <hypervisor-ifs/sched_ctl.h>
   20.14  
   20.15 -/*#define WAKEUP_HISTO*/
   20.16 +/*#define WAKE_HISTO*/
   20.17  /*#define BLOCKTIME_HISTO*/
   20.18  
   20.19 -#if defined(WAKEUP_HISTO)
   20.20 +#if defined(WAKE_HISTO)
   20.21  #define BUCKETS 31
   20.22  #elif defined(BLOCKTIME_HISTO)
   20.23  #define BUCKETS 200
   20.24 @@ -100,83 +100,104 @@ static struct ac_timer fallback_timer[NR
   20.25  
   20.26  extern kmem_cache_t *domain_struct_cachep;
   20.27  
   20.28 -void free_domain_struct(struct domain *p)
   20.29 +void free_domain_struct(struct domain *d)
   20.30  {
   20.31 -    SCHED_OP(free_task, p);
   20.32 -    kmem_cache_free(domain_struct_cachep, p);
   20.33 +    SCHED_OP(free_task, d);
   20.34 +    kmem_cache_free(domain_struct_cachep, d);
   20.35  }
   20.36  
   20.37  struct domain *alloc_domain_struct(void)
   20.38  {
   20.39 -    struct domain *p;
   20.40 +    struct domain *d;
   20.41  
   20.42 -    if ( (p = kmem_cache_alloc(domain_struct_cachep,GFP_KERNEL)) == NULL )
   20.43 +    if ( (d = kmem_cache_alloc(domain_struct_cachep,GFP_KERNEL)) == NULL )
   20.44          return NULL;
   20.45      
   20.46 -    memset(p, 0, sizeof(*p));
   20.47 +    memset(d, 0, sizeof(*d));
   20.48  
   20.49 -    if ( SCHED_OP(alloc_task, p) < 0 )
   20.50 +    if ( SCHED_OP(alloc_task, d) < 0 )
   20.51      {
   20.52 -        kmem_cache_free(domain_struct_cachep,p);
   20.53 +        kmem_cache_free(domain_struct_cachep, d);
   20.54          return NULL;
   20.55      }
   20.56  
   20.57 -    return p;
   20.58 +    return d;
   20.59  }
   20.60  
   20.61  /*
   20.62   * Add and remove a domain
   20.63   */
   20.64 -void sched_add_domain(struct domain *p) 
   20.65 +void sched_add_domain(struct domain *d) 
   20.66  {
   20.67 -    domain_controller_pause(p);
   20.68 +    set_bit(DF_STOPPED, &d->flags);
   20.69  
   20.70 -    if ( p->domain != IDLE_DOMAIN_ID )
   20.71 +    if ( d->domain != IDLE_DOMAIN_ID )
   20.72      {
   20.73          /* Initialise the per-domain timer. */
   20.74 -        init_ac_timer(&p->timer);
   20.75 -        p->timer.cpu      =  p->processor;
   20.76 -        p->timer.data     = (unsigned long)p;
   20.77 -        p->timer.function = &dom_timer_fn;
   20.78 +        init_ac_timer(&d->timer);
   20.79 +        d->timer.cpu      = d->processor;
   20.80 +        d->timer.data     = (unsigned long)d;
   20.81 +        d->timer.function = &dom_timer_fn;
   20.82      }
   20.83      else
   20.84      {
   20.85 -        schedule_data[p->processor].idle = p;
   20.86 +        schedule_data[d->processor].idle = d;
   20.87      }
   20.88  
   20.89 -    SCHED_OP(add_task, p);
   20.90 +    SCHED_OP(add_task, d);
   20.91  
   20.92 -    TRACE_3D(TRC_SCHED_DOM_ADD, _HIGH32(p->domain), _LOW32(p->domain), p);
   20.93 +    TRACE_3D(TRC_SCHED_DOM_ADD, _HIGH32(d->domain), _LOW32(d->domain), d);
   20.94  }
   20.95  
   20.96 -void sched_rem_domain(struct domain *p) 
   20.97 +void sched_rem_domain(struct domain *d) 
   20.98  {
   20.99 -    rem_ac_timer(&p->timer);
  20.100 -    SCHED_OP(rem_task, p);
  20.101 -    TRACE_3D(TRC_SCHED_DOM_REM, _HIGH32(p->domain), _LOW32(p->domain), p);
  20.102 +    rem_ac_timer(&d->timer);
  20.103 +    SCHED_OP(rem_task, d);
  20.104 +    TRACE_3D(TRC_SCHED_DOM_REM, _HIGH32(d->domain), _LOW32(d->domain), d);
  20.105  }
  20.106  
  20.107  void init_idle_task(void)
  20.108  {
  20.109      unsigned long flags;
  20.110 -    struct domain *p = current;
  20.111 +    struct domain *d = current;
  20.112  
  20.113 -    if ( SCHED_OP(alloc_task, p) < 0)
  20.114 +    if ( SCHED_OP(alloc_task, d) < 0)
  20.115          panic("Failed to allocate scheduler private data for idle task");
  20.116 -    SCHED_OP(add_task, p);
  20.117 +    SCHED_OP(add_task, d);
  20.118  
  20.119 -    spin_lock_irqsave(&schedule_lock[p->processor], flags);
  20.120 -    p->has_cpu = 1;
  20.121 -    if ( !__task_on_runqueue(p) )
  20.122 -        __add_to_runqueue_head(p);
  20.123 -    spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
  20.124 +    spin_lock_irqsave(&schedule_lock[d->processor], flags);
  20.125 +    set_bit(DF_RUNNING, &d->flags);
  20.126 +    if ( !__task_on_runqueue(d) )
  20.127 +        __add_to_runqueue_head(d);
  20.128 +    spin_unlock_irqrestore(&schedule_lock[d->processor], flags);
  20.129  }
  20.130  
  20.131 -/* Returns TRUE if the domain was actually woken up. */
  20.132 -int domain_wakeup(struct domain *d)
  20.133 +void domain_sleep(struct domain *d)
  20.134 +{
  20.135 +    unsigned long flags;
  20.136 +    int           cpu = d->processor;
  20.137 +
  20.138 +    spin_lock_irqsave(&schedule_lock[cpu], flags);
  20.139 +
  20.140 +    if ( test_bit(DF_RUNNING, &d->flags) )
  20.141 +        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
  20.142 +    else if ( __task_on_runqueue(d) )
  20.143 +        __del_from_runqueue(d);
  20.144 +
  20.145 +    spin_unlock_irqrestore(&schedule_lock[cpu], flags);
  20.146 +
  20.147 +    /* Synchronous. */
  20.148 +    while ( test_bit(DF_RUNNING, &d->flags) )
  20.149 +    {
  20.150 +        smp_mb();
  20.151 +        cpu_relax();
  20.152 +    }
  20.153 +}
  20.154 +
  20.155 +void domain_wake(struct domain *d)
  20.156  {
  20.157      unsigned long       flags;
  20.158 -    int                 cpu = d->processor, woken_up = 0;
  20.159 +    int                 cpu = d->processor;
  20.160      struct domain      *curr;
  20.161      s_time_t            now, min_time;
  20.162  
  20.163 @@ -184,17 +205,12 @@ int domain_wakeup(struct domain *d)
  20.164  
  20.165      if ( likely(domain_runnable(d)) && likely(!__task_on_runqueue(d)) )
  20.166      {
  20.167 -        woken_up = 1;
  20.168 -
  20.169          TRACE_3D(TRC_SCHED_WAKE, _HIGH32(d->domain), _LOW32(d->domain), d);
  20.170          SCHED_OP(wake_up, d);
  20.171 -#ifdef WAKEUP_HISTO
  20.172 +#ifdef WAKE_HISTO
  20.173          p->wokenup = NOW();
  20.174  #endif
  20.175  
  20.176 -        ASSERT(__task_on_runqueue(d));
  20.177 -        ASSERT(!d->has_cpu);
  20.178 -
  20.179          now = NOW();
  20.180          curr = schedule_data[cpu].curr;
  20.181  
  20.182 @@ -208,34 +224,8 @@ int domain_wakeup(struct domain *d)
  20.183      }
  20.184  
  20.185      spin_unlock_irqrestore(&schedule_lock[cpu], flags);
  20.186 -
  20.187 -    return woken_up;
  20.188  }
  20.189  
  20.190 -
  20.191 -void __domain_pause(struct domain *d)
  20.192 -{
  20.193 -    unsigned long flags;
  20.194 -    int           cpu = d->processor;
  20.195 -
  20.196 -    spin_lock_irqsave(&schedule_lock[cpu], flags);
  20.197 -
  20.198 -    if ( d->has_cpu )
  20.199 -        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
  20.200 -    else if ( __task_on_runqueue(d) )
  20.201 -        __del_from_runqueue(d);
  20.202 -
  20.203 -    spin_unlock_irqrestore(&schedule_lock[cpu], flags);
  20.204 -
  20.205 -    /* Synchronous. */
  20.206 -    while ( d->has_cpu )
  20.207 -    {
  20.208 -        smp_mb();
  20.209 -        cpu_relax();
  20.210 -    }
  20.211 -}
  20.212 -
  20.213 -
  20.214  /* Block the currently-executing domain until a pertinent event occurs. */
  20.215  long do_block(void)
  20.216  {
  20.217 @@ -247,9 +237,7 @@ long do_block(void)
  20.218      return 0;
  20.219  }
  20.220  
  20.221 -/*
  20.222 - * Voluntarily yield the processor for this allocation.
  20.223 - */
  20.224 +/* Voluntarily yield the processor for this allocation. */
  20.225  static long do_yield(void)
  20.226  {
  20.227      TRACE_2D(TRC_SCHED_YIELD, current->domain, current);
  20.228 @@ -394,9 +382,6 @@ void __enter_scheduler(void)
  20.229      r_time = next_slice.time;
  20.230      next = next_slice.task;
  20.231  
  20.232 -    prev->has_cpu = 0;
  20.233 -    next->has_cpu = 1;
  20.234 -
  20.235      schedule_data[cpu].curr = next;
  20.236  
  20.237      next->lastschd = now;
  20.238 @@ -416,7 +401,7 @@ void __enter_scheduler(void)
  20.239      
  20.240      perfc_incrc(sched_ctx);
  20.241  
  20.242 -#if defined(WAKEUP_HISTO)
  20.243 +#if defined(WAKE_HISTO)
  20.244      if ( !is_idle_task(next) && next->wokenup ) {
  20.245          ulong diff = (ulong)(now - next->wokenup);
  20.246          diff /= (ulong)MILLISECS(1);
  20.247 @@ -437,7 +422,16 @@ void __enter_scheduler(void)
  20.248      TRACE_2D(TRC_SCHED_SWITCH, next->domain, next);
  20.249  
  20.250      switch_to(prev, next);
  20.251 -    
  20.252 +
  20.253 +    /*
  20.254 +     * We do this late on because it doesn't need to be protected by the
  20.255 +     * schedule_lock, and because we want this to be the very last use of
  20.256 +     * 'prev' (after this point, a dying domain's info structure may be freed
  20.257 +     * without warning). 
  20.258 +     */
  20.259 +    clear_bit(DF_RUNNING, &prev->flags);
  20.260 +    set_bit(DF_RUNNING, &next->flags);
  20.261 +
  20.262      /* Mark a timer event for the newly-scheduled domain. */
  20.263      if ( !is_idle_task(next) )
  20.264          send_guest_virq(next, VIRQ_TIMER);
  20.265 @@ -578,56 +572,61 @@ static void dump_rqueue(struct list_head
  20.266  {
  20.267      struct list_head *list;
  20.268      int loop = 0;
  20.269 -    struct domain  *p;
  20.270 +    struct domain *d;
  20.271 +
  20.272 +    printk("QUEUE %s %lx   n: %lx, p: %lx\n", name,  (unsigned long)queue,
  20.273 +           (unsigned long) queue->next, (unsigned long) queue->prev);
  20.274  
  20.275 -    printk ("QUEUE %s %lx   n: %lx, p: %lx\n", name,  (unsigned long)queue,
  20.276 -            (unsigned long) queue->next, (unsigned long) queue->prev);
  20.277 -    list_for_each (list, queue) {
  20.278 -        p = list_entry(list, struct domain, run_list);
  20.279 -        printk("%3d: %u has=%c ", loop++, p->domain, p->has_cpu ? 'T':'F');
  20.280 -        SCHED_OP(dump_runq_el, p);
  20.281 -        printk("c=0x%X%08X\n", (u32)(p->cpu_time>>32), (u32)p->cpu_time);
  20.282 +    list_for_each ( list, queue )
  20.283 +    {
  20.284 +        d = list_entry(list, struct domain, run_list);
  20.285 +        printk("%3d: %u has=%c ", loop++, d->domain, 
  20.286 +               test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
  20.287 +        SCHED_OP(dump_runq_el, d);
  20.288 +        printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
  20.289          printk("         l: %lx n: %lx  p: %lx\n",
  20.290                 (unsigned long)list, (unsigned long)list->next,
  20.291                 (unsigned long)list->prev);
  20.292      }
  20.293 -    return; 
  20.294  }
  20.295  
  20.296  void dump_runq(u_char key, void *dev_id, struct pt_regs *regs)
  20.297  {
  20.298 -    u_long   flags; 
  20.299 -    s_time_t now = NOW();
  20.300 -    int i;
  20.301 +    unsigned long flags; 
  20.302 +    s_time_t      now = NOW();
  20.303 +    int           i;
  20.304  
  20.305      printk("Scheduler: %s (%s)\n", ops.name, ops.opt_name);
  20.306      SCHED_OP(dump_settings);
  20.307      printk("NOW=0x%08X%08X\n",  (u32)(now>>32), (u32)now); 
  20.308 -    for (i = 0; i < smp_num_cpus; i++) {
  20.309 +    for ( i = 0; i < smp_num_cpus; i++ )
  20.310 +    {
  20.311          spin_lock_irqsave(&schedule_lock[i], flags);
  20.312          printk("CPU[%02d] ", i);
  20.313          SCHED_OP(dump_cpu_state,i);
  20.314          dump_rqueue(&schedule_data[i].runqueue, "rq"); 
  20.315          spin_unlock_irqrestore(&schedule_lock[i], flags);
  20.316      }
  20.317 -    return; 
  20.318  }
  20.319  
  20.320 -#if defined(WAKEUP_HISTO) || defined(BLOCKTIME_HISTO)
  20.321 +#if defined(WAKE_HISTO) || defined(BLOCKTIME_HISTO)
  20.322  void print_sched_histo(u_char key, void *dev_id, struct pt_regs *regs)
  20.323  {
  20.324 -    int loop, i, j;
  20.325 -    for (loop = 0; loop < smp_num_cpus; loop++) {
  20.326 +    int i, j, k;
  20.327 +    for ( k = 0; k < smp_num_cpus; k++ )
  20.328 +    {
  20.329          j = 0;
  20.330 -        printf ("CPU[%02d]: scheduler latency histogram (ms:[count])\n", loop);
  20.331 -        for (i=0; i<BUCKETS; i++) {
  20.332 -            if (schedule_data[loop].hist[i]) {
  20.333 -                if (i < BUCKETS-1)
  20.334 -                    printk("%2d:[%7u]    ", i, schedule_data[loop].hist[i]);
  20.335 +        printf ("CPU[%02d]: scheduler latency histogram (ms:[count])\n", k);
  20.336 +        for ( i = 0; i < BUCKETS; i++ )
  20.337 +        {
  20.338 +            if ( schedule_data[k].hist[i] != 0 )
  20.339 +            {
  20.340 +                if ( i < BUCKETS-1 )
  20.341 +                    printk("%2d:[%7u]    ", i, schedule_data[k].hist[i]);
  20.342                  else
  20.343 -                    printk(" >:[%7u]    ", schedule_data[loop].hist[i]);
  20.344 -                j++;
  20.345 -                if (!(j % 5)) printk("\n");
  20.346 +                    printk(" >:[%7u]    ", schedule_data[k].hist[i]);
  20.347 +                if ( !(++j % 5) )
  20.348 +                    printk("\n");
  20.349              }
  20.350          }
  20.351          printk("\n");
  20.352 @@ -636,10 +635,10 @@ void print_sched_histo(u_char key, void 
  20.353  }
  20.354  void reset_sched_histo(u_char key, void *dev_id, struct pt_regs *regs)
  20.355  {
  20.356 -    int loop, i;
  20.357 -    for (loop = 0; loop < smp_num_cpus; loop++)
  20.358 -        for (i=0; i<BUCKETS; i++) 
  20.359 -            schedule_data[loop].hist[i]=0;
  20.360 +    int i, j;
  20.361 +    for ( j = 0; j < smp_num_cpus; j++ )
  20.362 +        for ( i=0; i < BUCKETS; i++ ) 
  20.363 +            schedule_data[j].hist[i] = 0;
  20.364  }
  20.365  #else
  20.366  void print_sched_histo(u_char key, void *dev_id, struct pt_regs *regs)
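
The has_cpu field is gone: DF_RUNNING in d->flags is now the single record of "currently on a CPU", and the ordering is the point. __enter_scheduler() clears prev's bit only after switch_to() (per the new comment, the very last use of prev), while domain_sleep() spins on that same bit, so its callers get a synchronous guarantee. A hypothetical composition, assuming only the names this changeset introduces:

    /* Hypothetical: stop a domain and wait until it is truly off-CPU.
     * domain_stop() in this changeset is assumed to compose like this. */
    void stop_domain_sync(struct domain *d)
    {
        set_bit(DF_STOPPED, &d->flags); /* make it non-runnable           */
        domain_sleep(d);                /* kick its CPU; spin on RUNNING  */
        /* from here, d is guaranteed not to be executing anywhere */
    }
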
    21.1 --- a/xen/common/shadow.c	Fri Jun 18 14:46:29 2004 +0000
    21.2 +++ b/xen/common/shadow.c	Sat Jun 19 16:48:34 2004 +0000
    21.3 @@ -331,18 +331,18 @@ void shadow_mode_disable( struct domain 
    21.4      kfree( &m->shadow_ht[0] );
    21.5  }
    21.6  
    21.7 -static int shadow_mode_table_op( struct domain *p, 
    21.8 -								 dom0_shadow_control_t *sc )
    21.9 +static int shadow_mode_table_op(struct domain *d, 
   21.10 +							    dom0_shadow_control_t *sc)
   21.11  {
   21.12      unsigned int op = sc->op;
   21.13 -    struct mm_struct *m = &p->mm;
   21.14 +    struct mm_struct *m = &d->mm;
   21.15      int rc = 0;
   21.16  
    21.17      // since Dom0 did the hypercall, we should be running with its page
   21.18      // tables right now. Calling flush on yourself would be really
   21.19      // stupid.
   21.20  
   21.21 -    ASSERT(spin_is_locked(&p->mm.shadow_lock));
   21.22 +    ASSERT(spin_is_locked(&d->mm.shadow_lock));
   21.23  
   21.24      if ( m == &current->mm )
   21.25      {
   21.26 @@ -380,44 +380,44 @@ static int shadow_mode_table_op( struct 
   21.27  		
   21.28  	send_bitmap:
   21.29  
   21.30 -		if( p->tot_pages > sc->pages || 
   21.31 -			!sc->dirty_bitmap || !p->mm.shadow_dirty_bitmap )
   21.32 +		if( d->tot_pages > sc->pages || 
   21.33 +			!sc->dirty_bitmap || !d->mm.shadow_dirty_bitmap )
   21.34  		{
   21.35  			rc = -EINVAL;
   21.36  			goto out;
   21.37  		}
   21.38  
   21.39 -		sc->fault_count = p->mm.shadow_fault_count;
   21.40 -		sc->dirty_count = p->mm.shadow_dirty_count;
   21.41 -		p->mm.shadow_fault_count = 0;
   21.42 -		p->mm.shadow_dirty_count = 0;
   21.43 +		sc->fault_count = d->mm.shadow_fault_count;
   21.44 +		sc->dirty_count = d->mm.shadow_dirty_count;
   21.45 +		d->mm.shadow_fault_count = 0;
   21.46 +		d->mm.shadow_dirty_count = 0;
   21.47  	
   21.48 -		sc->pages = p->tot_pages;
   21.49 +		sc->pages = d->tot_pages;
   21.50  	
   21.51  #define chunk (8*1024) // do this in 1KB chunks for L1 cache
   21.52  	
   21.53 -		for(i=0;i<p->tot_pages;i+=chunk)
   21.54 +		for(i=0;i<d->tot_pages;i+=chunk)
   21.55  		{
   21.56 -			int bytes = ((  ((p->tot_pages-i) > (chunk))?
   21.57 -							(chunk):(p->tot_pages-i) ) + 7) / 8;
   21.58 +			int bytes = ((  ((d->tot_pages-i) > (chunk))?
   21.59 +							(chunk):(d->tot_pages-i) ) + 7) / 8;
   21.60  	    
   21.61  			copy_to_user( sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
   21.62 -						  p->mm.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
   21.63 +						  d->mm.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
   21.64  						  bytes );
   21.65  	    
   21.66  			for(j=0; zero && j<bytes/sizeof(unsigned long);j++)
   21.67  			{
   21.68 -				if( p->mm.shadow_dirty_bitmap[j] != 0 )
   21.69 +				if( d->mm.shadow_dirty_bitmap[j] != 0 )
   21.70  					zero = 0;
   21.71  			}
   21.72  
   21.73 -			memset( p->mm.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
   21.74 +			memset( d->mm.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
   21.75  					0, bytes);
   21.76  		}
   21.77  
   21.78          /* Might as well stop the domain as an optimization. */
   21.79  		if ( zero )
   21.80 -            domain_controller_pause(p);
   21.81 +            domain_stop(d);
   21.82  
   21.83  		break;
   21.84      }
   21.85 @@ -426,24 +426,24 @@ static int shadow_mode_table_op( struct 
   21.86      {
   21.87  		int i;
   21.88  	
   21.89 -		if( p->tot_pages > sc->pages || 
   21.90 -			!sc->dirty_bitmap || !p->mm.shadow_dirty_bitmap )
   21.91 +		if( d->tot_pages > sc->pages || 
   21.92 +			!sc->dirty_bitmap || !d->mm.shadow_dirty_bitmap )
   21.93  		{
   21.94  			rc = -EINVAL;
   21.95  			goto out;
   21.96  		}
   21.97  	
   21.98 -		sc->pages = p->tot_pages;
   21.99 +		sc->pages = d->tot_pages;
  21.100  	
  21.101  #define chunk (8*1024) // do this in 1KB chunks for L1 cache
  21.102  	
  21.103 -		for(i=0;i<p->tot_pages;i+=chunk)
  21.104 +		for(i=0;i<d->tot_pages;i+=chunk)
  21.105  		{
  21.106 -			int bytes = ((  ((p->tot_pages-i) > (chunk))?
  21.107 -							(chunk):(p->tot_pages-i) ) + 7) / 8;
  21.108 +			int bytes = ((  ((d->tot_pages-i) > (chunk))?
  21.109 +							(chunk):(d->tot_pages-i) ) + 7) / 8;
  21.110  	    
  21.111  			copy_to_user( sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
  21.112 -						  p->mm.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
  21.113 +						  d->mm.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
  21.114  						  bytes );	    
  21.115  		}
  21.116  
    22.1 --- a/xen/common/softirq.c	Fri Jun 18 14:46:29 2004 +0000
    22.2 +++ b/xen/common/softirq.c	Sat Jun 19 16:48:34 2004 +0000
    22.3 @@ -10,10 +10,10 @@
    22.4   */
    22.5  
    22.6  #include <xen/config.h>
    22.7 +#include <xen/init.h>
    22.8  #include <xen/mm.h>
    22.9  #include <xen/sched.h>
   22.10 -#include <xen/interrupt.h>
   22.11 -#include <xen/init.h>
   22.12 +#include <xen/softirq.h>
   22.13  
   22.14  irq_cpustat_t irq_stat[NR_CPUS];
   22.15  
   22.16 @@ -37,20 +37,6 @@ asmlinkage void do_softirq()
   22.17      }
   22.18  }
   22.19  
   22.20 -inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
   22.21 -{
   22.22 -    __cpu_raise_softirq(cpu, nr);
   22.23 -#ifdef CONFIG_SMP
   22.24 -    if ( cpu != smp_processor_id() )
   22.25 -        smp_send_event_check_cpu(cpu);
   22.26 -#endif
   22.27 -}
   22.28 -
   22.29 -void raise_softirq(unsigned int nr)
   22.30 -{
   22.31 -    __cpu_raise_softirq(smp_processor_id(), nr);
   22.32 -}
   22.33 -
   22.34  void open_softirq(int nr, softirq_handler handler)
   22.35  {
   22.36      softirq_handlers[nr] = handler;
    23.1 --- a/xen/drivers/char/serial.c	Fri Jun 18 14:46:29 2004 +0000
    23.2 +++ b/xen/drivers/char/serial.c	Sat Jun 19 16:48:34 2004 +0000
    23.3 @@ -8,14 +8,14 @@
    23.4   * Copyright (c) 2003-2004, K A Fraser
    23.5   */
    23.6  
    23.7 -#include <asm/io.h>
    23.8 -#include <xen/sched.h>
    23.9 +#include <xen/config.h>
   23.10 +#include <xen/irq.h>
   23.11  #include <xen/keyhandler.h> 
   23.12 +#include <asm/pdb.h>
   23.13  #include <xen/reboot.h>
   23.14 -#include <xen/interrupt.h>
   23.15 -#include <xen/irq.h>
   23.16 +#include <xen/sched.h>
   23.17  #include <xen/serial.h>
   23.18 -#include <asm/pdb.h>
   23.19 +#include <asm/io.h>
   23.20  
   23.21  /* Register offsets */
   23.22  #define RBR             0x00    /* receive buffer       */
    24.1 --- a/xen/include/xen/event.h	Fri Jun 18 14:46:29 2004 +0000
    24.2 +++ b/xen/include/xen/event.h	Sat Jun 19 16:48:34 2004 +0000
    24.3 @@ -18,10 +18,12 @@
    24.4   */
    24.5  
    24.6  /* Schedule an asynchronous callback for the specified domain. */
    24.7 -static inline void guest_async_callback(struct domain *p)
    24.8 +static inline void guest_async_callback(struct domain *d)
    24.9  {
   24.10 -    if ( !domain_unblock(p) && p->has_cpu && (p != current) )
   24.11 -        smp_send_event_check_mask(1 << p->processor);
   24.12 +    int running = test_bit(DF_RUNNING, &d->flags);
   24.13 +    domain_unblock(d);
   24.14 +    if ( running )
   24.15 +        smp_send_event_check_cpu(d->processor);
   24.16  }
   24.17  
   24.18  /*
   24.19 @@ -31,43 +33,43 @@ static inline void guest_async_callback(
   24.20   * may require explicit memory barriers.
   24.21   */
   24.22  
   24.23 -static inline void evtchn_set_pending(struct domain *p, int port)
   24.24 +static inline void evtchn_set_pending(struct domain *d, int port)
   24.25  {
   24.26 -    shared_info_t *s = p->shared_info;
   24.27 +    shared_info_t *s = d->shared_info;
   24.28      if ( !test_and_set_bit(port,    &s->evtchn_pending[0]) &&
   24.29           !test_bit        (port,    &s->evtchn_mask[0])    &&
   24.30           !test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
   24.31      {
   24.32          /* The VCPU pending flag must be set /after/ update to evtchn-pend. */
   24.33          s->vcpu_data[0].evtchn_upcall_pending = 1;
   24.34 -        guest_async_callback(p);
   24.35 +        guest_async_callback(d);
   24.36      }
   24.37  }
   24.38  
   24.39 -static inline void evtchn_set_exception(struct domain *p, int port)
   24.40 +static inline void evtchn_set_exception(struct domain *d, int port)
   24.41  {
   24.42 -    if ( !test_and_set_bit(port, &p->shared_info->evtchn_exception[0]) )
   24.43 -        evtchn_set_pending(p, port);
   24.44 +    if ( !test_and_set_bit(port, &d->shared_info->evtchn_exception[0]) )
   24.45 +        evtchn_set_pending(d, port);
   24.46  }
   24.47  
   24.48  /*
   24.49   * send_guest_virq:
   24.50 - *  @p:        Domain to which virtual IRQ should be sent
   24.51 + *  @d:        Domain to which virtual IRQ should be sent
   24.52   *  @virq:     Virtual IRQ number (VIRQ_*)
   24.53   */
   24.54 -static inline void send_guest_virq(struct domain *p, int virq)
   24.55 +static inline void send_guest_virq(struct domain *d, int virq)
   24.56  {
   24.57 -    evtchn_set_pending(p, p->virq_to_evtchn[virq]);
   24.58 +    evtchn_set_pending(d, d->virq_to_evtchn[virq]);
   24.59  }
   24.60  
   24.61  /*
   24.62   * send_guest_pirq:
   24.63 - *  @p:        Domain to which physical IRQ should be sent
   24.64 + *  @d:        Domain to which physical IRQ should be sent
   24.65   *  @pirq:     Physical IRQ number
   24.66   */
   24.67 -static inline void send_guest_pirq(struct domain *p, int pirq)
   24.68 +static inline void send_guest_pirq(struct domain *d, int pirq)
   24.69  {
   24.70 -    evtchn_set_pending(p, p->pirq_to_evtchn[pirq]);
   24.71 +    evtchn_set_pending(d, d->pirq_to_evtchn[pirq]);
   24.72  }
   24.73  
   24.74  #define event_pending(_d)                                     \
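evtchn_set_pending() above layers three bits -- the per-port pending bit, the per-port mask, and a per-32-port selector -- and kicks the VCPU only on a real transition. A hedged sketch of the matching consumer side, roughly what a guest upcall would do; the field names come from the shared_info_t uses above, but the scan loop and the do_event handler are illustrative, not part of this changeset:

/* Illustrative guest-side consumer of the protocol above: clear the VCPU
 * upcall flag first, then walk the selector bits to find 32-port groups
 * with work, then the pending-and-unmasked ports within each group. */
void demux_events(shared_info_t *s)
{
    unsigned int group, port;

    s->vcpu_data[0].evtchn_upcall_pending = 0;

    for ( group = 0; group < 32; group++ )
    {
        if ( !test_and_clear_bit(group, &s->evtchn_pending_sel) )
            continue;
        for ( port = group * 32; port < (group + 1) * 32; port++ )
            if ( test_bit(port, &s->evtchn_pending[0]) &&
                 !test_bit(port, &s->evtchn_mask[0]) )
                do_event(port);   /* hypothetical per-port handler */
    }
}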
    25.1 --- a/xen/include/xen/interrupt.h	Fri Jun 18 14:46:29 2004 +0000
    25.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.3 @@ -1,38 +0,0 @@
    25.4 -#ifndef _LINUX_INTERRUPT_H
    25.5 -#define _LINUX_INTERRUPT_H
    25.6 -
    25.7 -#include <xen/config.h>
    25.8 -#include <xen/lib.h>
    25.9 -#include <xen/smp.h>
   25.10 -#include <xen/cache.h>
   25.11 -
   25.12 -#include <asm/bitops.h>
   25.13 -#include <asm/atomic.h>
   25.14 -#include <asm/ptrace.h>
   25.15 -
   25.16 -struct irqaction
   25.17 -{
   25.18 -    void (*handler)(int, void *, struct pt_regs *);
   25.19 -    const char *name;
   25.20 -    void *dev_id;
   25.21 -};
   25.22 -
   25.23 -#include <asm/hardirq.h>
   25.24 -
   25.25 -enum
   25.26 -{
   25.27 -    AC_TIMER_SOFTIRQ=0,
   25.28 -    NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ,
   25.29 -    SCHEDULE_SOFTIRQ, /* NB. This must come last or do_softirq() will break! */
   25.30 -    NR_SOFTIRQS
   25.31 -};
   25.32 -
   25.33 -typedef void (*softirq_handler)(void);
   25.34 -
   25.35 -asmlinkage void do_softirq(void);
   25.36 -extern void open_softirq(int nr, softirq_handler handler);
   25.37 -#define __cpu_raise_softirq(cpu, nr) set_bit(nr, &softirq_pending(cpu))
   25.38 -extern void FASTCALL(cpu_raise_softirq(unsigned int cpu, unsigned int nr));
   25.39 -extern void FASTCALL(raise_softirq(unsigned int nr));
   25.40 -
   25.41 -#endif
    26.1 --- a/xen/include/xen/irq.h	Fri Jun 18 14:46:29 2004 +0000
    26.2 +++ b/xen/include/xen/irq.h	Sat Jun 19 16:48:34 2004 +0000
    26.3 @@ -4,6 +4,14 @@
    26.4  #include <xen/config.h>
    26.5  #include <xen/spinlock.h>
    26.6  #include <asm/ptrace.h>
    26.7 +#include <asm/hardirq.h>
    26.8 +
    26.9 +struct irqaction
   26.10 +{
   26.11 +    void (*handler)(int, void *, struct pt_regs *);
   26.12 +    const char *name;
   26.13 +    void *dev_id;
   26.14 +};
   26.15  
   26.16  /*
   26.17   * IRQ line status.
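struct irqaction moves here from the deleted xen/include/xen/interrupt.h. A minimal sketch of filling one in, using the handler signature from the struct above and the codebase's labeled-initializer style; the serial_rx_intr name is an assumption, and the registration step is elided since it is not part of this hunk:

/* Illustrative only: a handler matching the irqaction signature above. */
static void serial_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
{
    /* ... service the device ... */
}

static struct irqaction serial_irqaction = {
    handler: serial_rx_intr,
    name:    "serial",
    dev_id:  NULL
};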
    27.1 --- a/xen/include/xen/sched.h	Fri Jun 18 14:46:29 2004 +0000
    27.2 +++ b/xen/include/xen/sched.h	Sat Jun 19 16:48:34 2004 +0000
    27.3 @@ -1,5 +1,5 @@
    27.4 -#ifndef _LINUX_SCHED_H
    27.5 -#define _LINUX_SCHED_H
    27.6 +#ifndef __SCHED_H__
    27.7 +#define __SCHED_H__
    27.8  
    27.9  #include <xen/config.h>
   27.10  #include <xen/types.h>
   27.11 @@ -46,8 +46,8 @@ typedef struct event_channel_st
   27.12      } u;
   27.13  } event_channel_t;
   27.14  
   27.15 -int  init_event_channels(struct domain *p);
   27.16 -void destroy_event_channels(struct domain *p);
   27.17 +int  init_event_channels(struct domain *d);
   27.18 +void destroy_event_channels(struct domain *d);
   27.19  
   27.20  struct domain 
   27.21  {
   27.22 @@ -85,7 +85,9 @@ struct domain
   27.23       * From here on things can be added and shuffled without special attention
   27.24       */
   27.25  
   27.26 -    domid_t domain;
   27.27 +    domid_t  domain;
   27.28 +    char     name[MAX_DOMAIN_NAME];
   27.29 +    s_time_t create_time;
   27.30  
   27.31      spinlock_t       page_list_lock;
   27.32      struct list_head page_list;
   27.33 @@ -94,26 +96,19 @@ struct domain
   27.34  
   27.35      /* Scheduling. */
   27.36      struct list_head run_list;
   27.37 -    int              has_cpu;
   27.38      int              stop_code;     /* stop code from OS (if DF_STOPPED). */
   27.39 -    int              cpupinned;     /* true if pinned to curent CPU */
   27.40      s_time_t         lastschd;      /* time this domain was last scheduled */
   27.41      s_time_t         lastdeschd;    /* time this domain was last descheduled */
   27.42      s_time_t         cpu_time;      /* total CPU time received till now */
   27.43      s_time_t         wokenup;       /* time domain got woken up */
   27.44      struct ac_timer  timer;         /* one-shot timer for timeout values */
   27.45 -
   27.46      s_time_t         min_slice;     /* minimum time before reschedule */
   27.47 -
   27.48 -    void *sched_priv;               /* scheduler-specific data */
   27.49 +    void            *sched_priv;    /* scheduler-specific data */
   27.50  
   27.51      struct mm_struct mm;
   27.52  
   27.53      mm_segment_t addr_limit;
   27.54  
   27.55 -    char name[MAX_DOMAIN_NAME];
   27.56 -    s_time_t create_time;
   27.57 -
   27.58      struct thread_struct thread;
   27.59      struct domain *next_list, *next_hash;
   27.60  
   27.61 @@ -156,7 +151,6 @@ struct domain
   27.62  {                                \
   27.63      processor:   0,              \
   27.64      domain:      IDLE_DOMAIN_ID, \
   27.65 -    has_cpu:     0,              \
   27.66      mm:          IDLE0_MM,       \
   27.67      addr_limit:  KERNEL_DS,      \
   27.68      thread:      INIT_THREAD,    \
   27.69 @@ -172,7 +166,7 @@ extern struct domain *idle_task[NR_CPUS]
   27.70  
   27.71  #include <xen/slab.h>
   27.72  
   27.73 -void free_domain_struct(struct domain *p);
   27.74 +void free_domain_struct(struct domain *d);
   27.75  struct domain *alloc_domain_struct();
   27.76  
   27.77  #define DOMAIN_DESTRUCTED (1<<31) /* assumes atomic_t is >= 32 bits */
   27.78 @@ -186,13 +180,13 @@ static inline int get_domain(struct doma
   27.79    
   27.80  extern struct domain *do_createdomain(
   27.81      domid_t dom_id, unsigned int cpu);
   27.82 -extern int construct_dom0(struct domain *p, 
   27.83 +extern int construct_dom0(struct domain *d, 
   27.84                            unsigned long alloc_start,
   27.85                            unsigned long alloc_end,
   27.86                            char *image_start, unsigned long image_len, 
   27.87                            char *initrd_start, unsigned long initrd_len,
   27.88                            char *cmdline);
   27.89 -extern int final_setup_guestos(struct domain *p, dom0_builddomain_t *);
   27.90 +extern int final_setup_guestos(struct domain *d, dom0_builddomain_t *);
   27.91  
   27.92  struct domain *find_domain_by_id(domid_t dom);
   27.93  struct domain *find_last_domain(void);
   27.94 @@ -202,7 +196,7 @@ extern void domain_crash(void);
   27.95  extern void domain_suspend(u8 reason);
   27.96  
   27.97  /* arch/process.c */
   27.98 -void new_thread(struct domain *p,
   27.99 +void new_thread(struct domain *d,
  27.100                  unsigned long start_pc,
  27.101                  unsigned long start_stack,
  27.102                  unsigned long start_info);
  27.103 @@ -218,14 +212,14 @@ extern spinlock_t schedule_lock[NR_CPUS]
  27.104  #define set_current_state(_s) do { current->state = (_s); } while (0)
  27.105  void scheduler_init(void);
  27.106  void schedulers_start(void);
  27.107 -void sched_add_domain(struct domain *p);
  27.108 -void sched_rem_domain(struct domain *p);
  27.109 +void sched_add_domain(struct domain *d);
  27.110 +void sched_rem_domain(struct domain *d);
  27.111  long sched_ctl(struct sched_ctl_cmd *);
  27.112  long sched_adjdom(struct sched_adjdom_cmd *);
  27.113  int  sched_id();
  27.114  void init_idle_task(void);
  27.115 -int domain_wakeup(struct domain *p);
  27.116 -void __domain_pause(struct domain *p);
  27.117 +void domain_wake(struct domain *d);
  27.118 +void domain_sleep(struct domain *d);
  27.119  
  27.120  void __enter_scheduler(void);
  27.121  
  27.122 @@ -259,61 +253,65 @@ extern struct domain *task_list;
  27.123  #define DF_PRIVILEGED   5 /* Is this domain privileged?                     */
  27.124  #define DF_CONSOLEWRITEBUG 6 /* Has this domain used the obsolete console?  */
  27.125  #define DF_PHYSDEV      7 /* May this domain do IO to physical devices?     */
  27.126 -
  27.127  #define DF_BLOCKED      8 /* Domain is blocked waiting for an event.        */
  27.128 -#define DF_CONTROLPAUSE 9 /* Domain is paused by control software.          */
  27.129 +#define DF_STOPPED      9 /* Domain is stopped by control software.         */
  27.130  #define DF_SUSPENDED   10 /* Guest suspended its execution for some reason. */
  27.131  #define DF_CRASHED     11 /* Domain crashed inside Xen, cannot continue.    */
  27.132  #define DF_DYING       12 /* Death rattle.                                  */
  27.133 +#define DF_RUNNING     13 /* Currently running on a CPU.                    */
  27.134 +#define DF_CPUPINNED   14 /* Disables auto-migration.                       */
  27.135  
  27.136 -static inline int domain_runnable(struct domain *p)
  27.137 +static inline int domain_runnable(struct domain *d)
  27.138  {
  27.139 -    return ( (atomic_read(&p->pausecnt) == 0) &&
  27.140 -             !(p->flags & ((1<<DF_BLOCKED)|(1<<DF_CONTROLPAUSE)|
  27.141 -                           (1<<DF_SUSPENDED)|(1<<DF_CRASHED)|(1<<DF_DYING))) );
  27.142 +    return ( (atomic_read(&d->pausecnt) == 0) &&
  27.143 +             !(d->flags & ((1<<DF_BLOCKED)|(1<<DF_STOPPED)|
  27.144 +                           (1<<DF_SUSPENDED)|(1<<DF_CRASHED))) );
  27.145  }
  27.146  
  27.147 -/* Returns TRUE if the domain was actually unblocked and woken. */
  27.148 -static inline int domain_unblock(struct domain *d)
  27.149 +static inline void domain_pause(struct domain *d)
  27.150 +{
  27.151 +    ASSERT(d != current);
  27.152 +    atomic_inc(&d->pausecnt);
  27.153 +    domain_sleep(d);
  27.154 +}
  27.155 +
  27.156 +static inline void domain_unpause(struct domain *d)
  27.157  {
  27.158 +    ASSERT(d != current);
  27.159 +    if ( atomic_dec_and_test(&d->pausecnt) )
  27.160 +        domain_wake(d);
  27.161 +}
  27.162 +
  27.163 +static inline void domain_unblock(struct domain *d)
  27.164 +{
  27.165 +    ASSERT(d != current);
  27.166      if ( test_and_clear_bit(DF_BLOCKED, &d->flags) )
  27.167 -        return domain_wakeup(d);
  27.168 -    return 0;
  27.169 +        domain_wake(d);
  27.170  }
  27.171  
  27.172  static inline void domain_unsuspend(struct domain *d)
  27.173  {
  27.174 +    ASSERT(d != current);
  27.175      if ( test_and_clear_bit(DF_SUSPENDED, &d->flags) )
  27.176 -        (void)domain_wakeup(d);
  27.177 -}
  27.178 -
  27.179 -static inline void domain_controller_pause(struct domain *d)
  27.180 -{
  27.181 -    if ( !test_and_set_bit(DF_CONTROLPAUSE, &d->flags) )
  27.182 -        __domain_pause(d);
  27.183 +        domain_wake(d);
  27.184  }
  27.185  
  27.186 -static inline void domain_controller_unpause(struct domain *d)
  27.187 +static inline void domain_stop(struct domain *d)
  27.188  {
  27.189 -    if ( test_and_clear_bit(DF_CONTROLPAUSE, &d->flags) )
  27.190 -        (void)domain_wakeup(d);
  27.191 +    ASSERT(d != current);
  27.192 +    if ( !test_and_set_bit(DF_STOPPED, &d->flags) )
  27.193 +        domain_sleep(d);
  27.194  }
  27.195  
  27.196 -static inline void domain_pause(struct domain *d)
  27.197 +static inline void domain_start(struct domain *d)
  27.198  {
  27.199 -    if ( d == current ) BUG();
  27.200 -    atomic_inc(&d->pausecnt);
  27.201 -    __domain_pause(d);
  27.202 -}
  27.203 -
  27.204 -static inline void domain_unpause(struct domain *d)
  27.205 -{
  27.206 -    if ( atomic_dec_and_test(&d->pausecnt) )
  27.207 -        (void)domain_wakeup(d);
  27.208 +    ASSERT(d != current);
  27.209 +    if ( test_and_clear_bit(DF_STOPPED, &d->flags) )
  27.210 +        domain_wake(d);
  27.211  }
  27.212  
  27.213  
  27.214 -#define IS_PRIV(_p) (test_bit(DF_PRIVILEGED, &(_p)->flags))
  27.215 -#define IS_CAPABLE_PHYSDEV(_p) (test_bit(DF_PHYSDEV, &(_p)->flags))
  27.216 +#define IS_PRIV(_d) (test_bit(DF_PRIVILEGED, &(_d)->flags))
  27.217 +#define IS_CAPABLE_PHYSDEV(_d) (test_bit(DF_PHYSDEV, &(_d)->flags))
  27.218  
  27.219 -#endif /*_LINUX_SCHED_H */
  27.220 +#endif /* __SCHED_H__ */
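The pause/unpause pair above is now a bare reference count (pausecnt) plus domain_sleep()/domain_wake(), while stop/start is a single DF_STOPPED bit guarded by test_and_{set,clear}_bit, so it is idempotent; all four assert d != current. A sketch of the intended pairing from a control path -- find_domain_by_id comes from the declarations above, but the surrounding sequence and function name are illustrative:

/* Illustrative control-path usage of the primitives above: pauses must be
 * balanced, since pausecnt is a refcount; the domain is only woken again
 * when the count returns to zero. */
void inspect_domain(domid_t dom)
{
    struct domain *d = find_domain_by_id(dom);
    if ( d == NULL )
        return;
    domain_pause(d);          /* take it off the run queue */
    /* ... safely examine d's state here ... */
    domain_unpause(d);        /* wakes it only when pausecnt hits zero */
}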
    28.1 --- a/xen/include/xen/smp.h	Fri Jun 18 14:46:29 2004 +0000
    28.2 +++ b/xen/include/xen/smp.h	Sat Jun 19 16:48:34 2004 +0000
    28.3 @@ -25,7 +25,6 @@ extern void smp_send_stop(void);
    28.4  extern void FASTCALL(smp_send_event_check_mask(unsigned long cpu_mask));
    28.5  #define smp_send_event_check_cpu(_cpu) smp_send_event_check_mask(1<<(_cpu))
    28.6  
    28.7 -
    28.8  /*
    28.9   * Boot processor call to load the other CPU's
   28.10   */
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/xen/include/xen/softirq.h	Sat Jun 19 16:48:34 2004 +0000
    29.3 @@ -0,0 +1,34 @@
    29.4 +#ifndef __XEN_SOFTIRQ_H__
    29.5 +#define __XEN_SOFTIRQ_H__
    29.6 +
    29.7 +#include <xen/config.h>
    29.8 +#include <xen/lib.h>
    29.9 +#include <xen/smp.h>
   29.10 +#include <asm/bitops.h>
   29.11 +#include <asm/hardirq.h>
   29.12 +
   29.13 +enum
   29.14 +{
   29.15 +    AC_TIMER_SOFTIRQ=0,
   29.16 +    NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ,
   29.17 +    SCHEDULE_SOFTIRQ, /* NB. This must come last or do_softirq() will break! */
   29.18 +    NR_SOFTIRQS
   29.19 +};
   29.20 +
   29.21 +typedef void (*softirq_handler)(void);
   29.22 +
   29.23 +asmlinkage void do_softirq(void);
   29.24 +extern void open_softirq(int nr, softirq_handler handler);
   29.25 +
   29.26 +static inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
   29.27 +{
   29.28 +    if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
   29.29 +        smp_send_event_check_cpu(cpu);
   29.30 +}
   29.31 +
   29.32 +static inline void raise_softirq(unsigned int nr)
   29.33 +{
   29.34 +    set_bit(nr, &softirq_pending(smp_processor_id()));
   29.35 +}
   29.36 +
   29.37 +#endif /* __XEN_SOFTIRQ_H__ */
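With cpu_raise_softirq() and raise_softirq() now inlined here (replacing the out-of-line versions removed from xen/common/softirq.c), the typical flow is: register a handler once at init with open_softirq(), raise the softirq from interrupt context, and let do_softirq() run it on the IRQ exit path. A minimal sketch against the AC_TIMER_SOFTIRQ slot declared above; the handler body and function names are illustrative:

/* Illustrative use of the interface above. */
static void my_timer_softirq(void)
{
    /* ... run expired timers for this CPU ... */
}

void my_init(void)
{
    open_softirq(AC_TIMER_SOFTIRQ, my_timer_softirq);
}

/* From an interrupt handler: raise_softirq() marks work pending on the
 * local CPU; cpu_raise_softirq() additionally sends an event-check IPI,
 * but only when the bit was not already set. */
void my_timer_interrupt(void)
{
    raise_softirq(AC_TIMER_SOFTIRQ);
}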