ia64/xen-unstable

changeset 1486:55825bcb964e

bitkeeper revision 1.970 (40d07ec6txkdBLgbvygm7O1rxKCY1A)

Cleanups allowed by the new ngio world.
author kaf24@scramble.cl.cam.ac.uk
date Wed Jun 16 17:09:26 2004 +0000 (2004-06-16)
parents 44512070eb7b
children 4f79516ed019
files .rootkeys xen/arch/x86/idle0_task.c xen/arch/x86/irq.c xen/arch/x86/setup.c xen/arch/x86/time.c xen/common/domain.c xen/common/kernel.c xen/common/lib.c xen/common/sched_bvt.c xen/common/schedule.c xen/common/softirq.c xen/common/timer.c xen/include/asm-x86/processor.h xen/include/xen/interrupt.h xen/include/xen/lib.h xen/include/xen/sched.h xen/include/xen/time.h xen/include/xen/timer.h xen/include/xen/tqueue.h
line diff
     1.1 --- a/.rootkeys	Wed Jun 16 15:59:23 2004 +0000
     1.2 +++ b/.rootkeys	Wed Jun 16 17:09:26 2004 +0000
     1.3 @@ -342,7 +342,6 @@ 405b8599xI_PoEr3zZoJ2on-jdn7iw xen/commo
     1.4  3ddb79bdB9RNMnkQnUyZ5C9hhMSQQw xen/common/slab.c
     1.5  3ddb79bd0gVQYmL2zvuJnldvD0AGxQ xen/common/softirq.c
     1.6  3e7f358awXBC3Vw-wFRwPw18qL1khg xen/common/string.c
     1.7 -3ddb79bdQqFHtHRGEO2dsxGgo6eAhw xen/common/timer.c
     1.8  403a3edbejm33XLTGMuinKEwQBrOIg xen/common/trace.c
     1.9  3ddb79bd3zgV33PHdt-cgh3sxcb1hw xen/common/vsprintf.c
    1.10  3ddb79c0ppNeJtjC4va8j41ADCnchA xen/drivers/Makefile
    1.11 @@ -500,9 +499,7 @@ 3ddb79c09xbS-xxfKxuV3JETIhBzmg xen/inclu
    1.12  3ddb79c2iIcESrDAB8samy_yAh6olQ xen/include/xen/spinlock.h
    1.13  3e7f358aMtFMUVvN_Zjg5qvEJIqEBA xen/include/xen/string.h
    1.14  3ddb79c0BnA20PbgmuMPSGIBljNRQw xen/include/xen/time.h
    1.15 -3ddb79c2HFkXuRxi1CriJtSFmY6Ybw xen/include/xen/timer.h
    1.16  3ddb79c2_m8lT9jDKse_tePj7zcnNQ xen/include/xen/timex.h
    1.17 -3ddb79c2e2C14HkndNEJlYwXaPrF5A xen/include/xen/tqueue.h
    1.18  403a3edbG9K5uZjuY19_LORbQGmFbA xen/include/xen/trace.h
    1.19  3ddb79c1-kVvF8cVa0k3ZHDdBMj01Q xen/include/xen/types.h
    1.20  3e8827bdaqPeZAWGVOwswgY9bWSx4g xen/include/xen/version.h
     2.1 --- a/xen/arch/x86/idle0_task.c	Wed Jun 16 15:59:23 2004 +0000
     2.2 +++ b/xen/arch/x86/idle0_task.c	Wed Jun 16 17:09:26 2004 +0000
     2.3 @@ -11,5 +11,4 @@ struct task_struct idle0_task = IDLE0_TA
     2.4   * section. Since TSS's are completely CPU-local, we want them
     2.5   * on exact cacheline boundaries, to eliminate cacheline ping-pong.
     2.6   */ 
     2.7 -struct tss_struct init_tss[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = INIT_TSS };
     2.8 -
     2.9 +struct tss_struct init_tss[NR_CPUS] __cacheline_aligned;
     3.1 --- a/xen/arch/x86/irq.c	Wed Jun 16 15:59:23 2004 +0000
     3.2 +++ b/xen/arch/x86/irq.c	Wed Jun 16 17:09:26 2004 +0000
     3.3 @@ -179,7 +179,7 @@ static inline void wait_on_irq(int cpu)
     3.4           * already executing in one..
     3.5           */
     3.6          if (!irqs_running())
     3.7 -            if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
     3.8 +            if (local_bh_count(cpu))
     3.9                  break;
    3.10  
    3.11          /* Duh, we have to loop. Release the lock to avoid deadlocks */
    3.12 @@ -193,7 +193,7 @@ static inline void wait_on_irq(int cpu)
    3.13                  continue;
    3.14              if (global_irq_lock)
    3.15                  continue;
    3.16 -            if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
    3.17 +            if (!local_bh_count(cpu))
    3.18                  continue;
    3.19              if (!test_and_set_bit(0,&global_irq_lock))
    3.20                  break;
     4.1 --- a/xen/arch/x86/setup.c	Wed Jun 16 15:59:23 2004 +0000
     4.2 +++ b/xen/arch/x86/setup.c	Wed Jun 16 17:09:26 2004 +0000
     4.3 @@ -257,6 +257,9 @@ void __init cpu_init(void)
     4.4          panic("CPU#%d already initialized!!!\n", nr);
     4.5      printk("Initializing CPU#%d\n", nr);
     4.6  
     4.7 +    t->bitmap = INVALID_IO_BITMAP_OFFSET;
     4.8 +    memset(t->io_bitmap, ~0, sizeof(t->io_bitmap));
     4.9 +
    4.10      /* Set up GDT and IDT. */
    4.11      SET_GDT_ENTRIES(current, DEFAULT_GDT_ENTRIES);
    4.12      SET_GDT_ADDRESS(current, DEFAULT_GDT_ADDRESS);
    4.13 @@ -294,31 +297,6 @@ static void __init do_initcalls(void)
    4.14          (*call)();
    4.15  }
    4.16  
    4.17 -/*
    4.18 - * IBM-compatible BIOSes place drive info tables at initial interrupt
    4.19 - * vectors 0x41 and 0x46. These are in the for of 16-bit-mode far ptrs.
    4.20 - */
    4.21 -struct drive_info_struct { unsigned char dummy[32]; } drive_info;
    4.22 -void get_bios_driveinfo(void)
    4.23 -{
    4.24 -    unsigned long seg, off, tab1, tab2;
    4.25 -
    4.26 -    off  = (unsigned long)*(unsigned short *)(4*0x41+0);
    4.27 -    seg  = (unsigned long)*(unsigned short *)(4*0x41+2);
    4.28 -    tab1 = (seg<<4) + off;
    4.29 -    
    4.30 -    off  = (unsigned long)*(unsigned short *)(4*0x46+0);
    4.31 -    seg  = (unsigned long)*(unsigned short *)(4*0x46+2);
    4.32 -    tab2 = (seg<<4) + off;
    4.33 -
    4.34 -    printk("Reading BIOS drive-info tables at 0x%05lx and 0x%05lx\n", 
    4.35 -           tab1, tab2);
    4.36 -
    4.37 -    memcpy(drive_info.dummy+ 0, (char *)tab1, 16);
    4.38 -    memcpy(drive_info.dummy+16, (char *)tab2, 16);
    4.39 -}
    4.40 -
    4.41 -
    4.42  unsigned long pci_mem_start = 0x10000000;
    4.43  
    4.44  void __init start_of_day(void)
    4.45 @@ -326,8 +304,6 @@ void __init start_of_day(void)
    4.46      extern void trap_init(void);
    4.47      extern void init_IRQ(void);
    4.48      extern void time_init(void);
    4.49 -    extern void timer_bh(void);
    4.50 -    extern void init_timervecs(void);
    4.51      extern void ac_timer_init(void);
    4.52      extern void initialize_keytable(); 
    4.53      extern void initialize_keyboard(void);
    4.54 @@ -348,12 +324,6 @@ void __init start_of_day(void)
    4.55      if ( opt_watchdog ) 
    4.56          nmi_watchdog = NMI_LOCAL_APIC;
    4.57  
    4.58 -    /*
    4.59 -     * We do this early, but tables are in the lowest 1MB (usually
    4.60 -     * 0xfe000-0xfffff). Therefore they're unlikely to ever get clobbered.
    4.61 -     */
    4.62 -    get_bios_driveinfo();
    4.63 -
    4.64      /* Tell the PCI layer not to allocate too close to the RAM area.. */
    4.65      low_mem_size = ((max_page << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
    4.66      if ( low_mem_size > pci_mem_start ) pci_mem_start = low_mem_size;
    4.67 @@ -380,14 +350,11 @@ void __init start_of_day(void)
    4.68      if ( smp_found_config ) 
    4.69          get_smp_config();
    4.70  #endif
    4.71 -    domain_init();
    4.72      scheduler_init();	
    4.73      trap_init();
    4.74      init_IRQ();  /* installs simple interrupt wrappers. Starts HZ clock. */
    4.75      time_init(); /* installs software handler for HZ clock. */
    4.76      softirq_init();
    4.77 -    init_timervecs();
    4.78 -    init_bh(TIMER_BH, timer_bh);
    4.79      init_apic_mappings(); /* make APICs addressable in our pagetables. */
    4.80  
    4.81  #ifndef CONFIG_SMP    
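
Note: the TSS cleanup spans idle0_task.c, setup.c and processor.h. init_tss[] is now left zero-initialised instead of being built from a field-by-field INIT_TSS macro, and cpu_init() fixes up the only two fields that matter at boot. A minimal sketch of the resulting pattern, using only the names visible in the cpu_init() hunk above (struct tss_struct, init_tss, INVALID_IO_BITMAP_OFFSET, io_bitmap):

    /* Sketch only: per-CPU TSS initialised at boot rather than via a
     * positional INIT_TSS brace initialiser. */
    struct tss_struct *t = &init_tss[smp_processor_id()];

    t->bitmap = INVALID_IO_BITMAP_OFFSET;            /* no I/O bitmap in use   */
    memset(t->io_bitmap, ~0, sizeof(t->io_bitmap));  /* deny all ports if used */

Zero-filled BSS plus this two-line fix-up avoids keeping a brace initialiser in sync with every layout change to struct tss_struct, which is exactly what the INIT_TSS macros deleted from processor.h further down had to do.
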
     5.1 --- a/xen/arch/x86/time.c	Wed Jun 16 15:59:23 2004 +0000
     5.2 +++ b/xen/arch/x86/time.c	Wed Jun 16 17:09:26 2004 +0000
     5.3 @@ -6,11 +6,6 @@
     5.4   *
     5.5   *        File: i386/time.c
     5.6   *      Author: Rolf Neugebar & Keir Fraser
     5.7 - * 
     5.8 - * Environment: Xen Hypervisor
     5.9 - * Description: modified version of Linux' time.c
    5.10 - *              implements system and wall clock time.
    5.11 - *              based on freebsd's implementation.
    5.12   */
    5.13  
    5.14  /*
    5.15 @@ -37,15 +32,13 @@
    5.16  #include <asm/fixmap.h>
    5.17  #include <asm/mc146818rtc.h>
    5.18  
    5.19 -extern rwlock_t xtime_lock;
    5.20 -extern unsigned long wall_jiffies;
    5.21 -
    5.22  /* GLOBAL */
    5.23  unsigned long cpu_khz;  /* Detected as we calibrate the TSC */
    5.24  unsigned long ticks_per_usec; /* TSC ticks per microsecond. */
    5.25  spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
    5.26  int timer_ack = 0;
    5.27  int do_timer_lists_from_pit = 0;
    5.28 +unsigned long volatile jiffies;
    5.29  
    5.30  /* PRIVATE */
    5.31  static unsigned int    rdtsc_bitshift;  /* Which 32 bits of TSC do we use?   */
    5.32 @@ -54,12 +47,14 @@ static u32             st_scale_f;      
    5.33  static u32             st_scale_i;      /* Cycles -> ns, integer part        */
    5.34  static u32             tsc_irq;         /* CPU0's TSC at last 'time update'  */
    5.35  static s_time_t        stime_irq;       /* System time at last 'time update' */
    5.36 +static unsigned long   wc_sec, wc_usec; /* UTC time at last 'time update'.   */
    5.37 +static rwlock_t        time_lock = RW_LOCK_UNLOCKED;
    5.38  
    5.39  static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
    5.40  {
    5.41      u64 full_tsc;
    5.42  
    5.43 -    write_lock(&xtime_lock);
    5.44 +    write_lock(&time_lock);
    5.45  
    5.46  #ifdef CONFIG_X86_IO_APIC
    5.47      if ( timer_ack ) 
    5.48 @@ -80,13 +75,21 @@ static void timer_interrupt(int irq, voi
    5.49      rdtscll(full_tsc);
    5.50      tsc_irq = (u32)(full_tsc >> rdtsc_bitshift);
    5.51  
    5.52 -    /* Updates xtime (wallclock time). */
    5.53 -    do_timer(regs);
    5.54 +    /* Update jiffies counter. */
    5.55 +    (*(unsigned long *)&jiffies)++;
    5.56 +
    5.57 +    /* Update wall time. */
    5.58 +    wc_usec += 1000000/HZ;
    5.59 +    if ( wc_usec >= 1000000 )
    5.60 +    {
    5.61 +        wc_usec -= 1000000;
    5.62 +        wc_sec++;
    5.63 +    }
    5.64  
    5.65      /* Updates system time (nanoseconds since boot). */
    5.66      stime_irq += MILLISECS(1000/HZ);
    5.67  
    5.68 -    write_unlock(&xtime_lock);
    5.69 +    write_unlock(&time_lock);
    5.70  
    5.71      /* Rough hack to allow accurate timers to sort-of-work with no APIC. */
    5.72      if ( do_timer_lists_from_pit )
    5.73 @@ -253,7 +256,7 @@ s_time_t get_s_time(void)
    5.74      s_time_t now;
    5.75      unsigned long flags;
    5.76  
    5.77 -    read_lock_irqsave(&xtime_lock, flags);
    5.78 +    read_lock_irqsave(&time_lock, flags);
    5.79  
    5.80      now = stime_irq + get_time_delta();
    5.81  
    5.82 @@ -265,7 +268,7 @@ s_time_t get_s_time(void)
    5.83          prev_now = now;
    5.84      }
    5.85  
    5.86 -    read_unlock_irqrestore(&xtime_lock, flags);
    5.87 +    read_unlock_irqrestore(&time_lock, flags);
    5.88  
    5.89      return now; 
    5.90  }
    5.91 @@ -275,7 +278,7 @@ void update_dom_time(shared_info_t *si)
    5.92  {
    5.93      unsigned long flags;
    5.94  
    5.95 -    read_lock_irqsave(&xtime_lock, flags);
    5.96 +    read_lock_irqsave(&time_lock, flags);
    5.97  
    5.98      si->time_version1++;
    5.99      wmb();
   5.100 @@ -284,19 +287,13 @@ void update_dom_time(shared_info_t *si)
   5.101      si->tsc_timestamp.tsc_bitshift = rdtsc_bitshift;
   5.102      si->tsc_timestamp.tsc_bits     = tsc_irq;
   5.103      si->system_time    = stime_irq;
   5.104 -    si->wc_sec         = xtime.tv_sec;
   5.105 -    si->wc_usec        = xtime.tv_usec;
   5.106 -    si->wc_usec       += (jiffies - wall_jiffies) * (1000000 / HZ);
   5.107 -    while ( si->wc_usec >= 1000000 )
   5.108 -    {
   5.109 -        si->wc_usec -= 1000000;
   5.110 -        si->wc_sec++;
   5.111 -    }
   5.112 +    si->wc_sec         = wc_sec;
   5.113 +    si->wc_usec        = wc_usec;
   5.114  
   5.115      wmb();
   5.116      si->time_version2++;
   5.117  
   5.118 -    read_unlock_irqrestore(&xtime_lock, flags);
   5.119 +    read_unlock_irqrestore(&time_lock, flags);
   5.120  }
   5.121  
   5.122  
   5.123 @@ -306,23 +303,21 @@ void do_settime(unsigned long secs, unsi
   5.124      s64 delta;
   5.125      long _usecs = (long)usecs;
   5.126  
   5.127 -    write_lock_irq(&xtime_lock);
   5.128 +    write_lock_irq(&time_lock);
   5.129  
   5.130      delta = (s64)(stime_irq - system_time_base);
   5.131  
   5.132 -	_usecs += (long)(delta/1000);
   5.133 -	_usecs -= (jiffies - wall_jiffies) * (1000000 / HZ);
   5.134 -
   5.135 -	while ( _usecs < 0 ) 
   5.136 +    _usecs += (long)(delta/1000);
   5.137 +    while ( _usecs >= 1000000 ) 
   5.138      {
   5.139 -		_usecs += 1000000;
   5.140 -		secs--;
   5.141 -	}
   5.142 +        _usecs -= 1000000;
   5.143 +        secs++;
   5.144 +    }
   5.145  
   5.146 -    xtime.tv_sec  = secs;
   5.147 -    xtime.tv_usec = _usecs;
   5.148 +    wc_sec  = secs;
   5.149 +    wc_usec = _usecs;
   5.150  
   5.151 -    write_unlock_irq(&xtime_lock);
   5.152 +    write_unlock_irq(&time_lock);
   5.153  
   5.154      update_dom_time(current->shared_info);
   5.155  }
   5.156 @@ -350,17 +345,13 @@ int __init init_xen_time()
   5.157      tsc_irq   = (u32)(full_tsc >> rdtsc_bitshift);
   5.158  
   5.159      /* Wallclock time starts as the initial RTC time. */
   5.160 -    xtime.tv_sec  = get_cmos_time();
   5.161 +    wc_sec  = get_cmos_time();
   5.162  
   5.163      printk("Time init:\n");
   5.164 -    printk(".... System Time: %lldns\n", 
   5.165 -           NOW());
   5.166 -    printk(".... cpu_freq:    %08X:%08X\n", 
   5.167 -           (u32)(cpu_freq>>32), (u32)cpu_freq);
   5.168 -    printk(".... scale:       %08X:%08X\n", 
   5.169 -           (u32)(scale>>32), (u32)scale);
   5.170 -    printk(".... Wall Clock:  %lds %ldus\n", 
   5.171 -           xtime.tv_sec, xtime.tv_usec);
   5.172 +    printk(".... System Time: %lldns\n", NOW());
   5.173 +    printk(".... cpu_freq:    %08X:%08X\n", (u32)(cpu_freq>>32),(u32)cpu_freq);
   5.174 +    printk(".... scale:       %08X:%08X\n", (u32)(scale>>32),(u32)scale);
   5.175 +    printk(".... Wall Clock:  %lds %ldus\n", wc_sec, wc_usec);
   5.176  
   5.177      return 0;
   5.178  }
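
Note: time.c now keeps its own jiffies counter and wallclock (wc_sec/wc_usec) under a private time_lock, and update_dom_time() publishes them to the guest with a version-counter handshake: time_version1 is bumped, the fields are written, then time_version2 is bumped, each step fenced with wmb(). A minimal sketch of the matching consumer, assuming a guest reading the same shared_info fields; the retry loop and the rmb() placement are illustrative assumptions, not code from this changeset:

    /* Illustrative reader for the versioned time record.  The writer bumps
     * time_version1 before and time_version2 after its field writes, so a
     * reader samples version2 first, version1 last, and retries on mismatch. */
    unsigned long v1, v2, sec, usec;
    s_time_t      sys;

    do {
        v2 = si->time_version2;
        rmb();                       /* opening counter before the fields    */
        sec  = si->wc_sec;
        usec = si->wc_usec;
        sys  = si->system_time;
        rmb();                       /* fields before the closing counter    */
        v1 = si->time_version1;
    } while ( v1 != v2 );            /* an update overlapped: go round again */

A reader that observes version1 == version2 around its copy knows no update was in flight, so the snapshot of wc_sec/wc_usec/system_time is consistent without taking any lock.
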
     6.1 --- a/xen/common/domain.c	Wed Jun 16 15:59:23 2004 +0000
     6.2 +++ b/xen/common/domain.c	Wed Jun 16 17:09:26 2004 +0000
     6.3 @@ -1057,9 +1057,3 @@ int construct_dom0(struct task_struct *p
     6.4  
     6.5      return 0;
     6.6  }
     6.7 -
     6.8 -
     6.9 -void __init domain_init(void)
    6.10 -{
    6.11 -    printk("Initialising domains\n");
    6.12 -}
     7.1 --- a/xen/common/kernel.c	Wed Jun 16 15:59:23 2004 +0000
     7.2 +++ b/xen/common/kernel.c	Wed Jun 16 17:09:26 2004 +0000
     7.3 @@ -49,8 +49,6 @@ unsigned int opt_ser_baud = 0;
     7.4  unsigned char opt_com1[30] = "", opt_com2[30] = "";
     7.5  /* opt_dom0_mem: Kilobytes of memory allocated to domain 0. */
     7.6  unsigned int opt_dom0_mem = 16000;
     7.7 -/* opt_ifname: Name of physical network interface to use. */
     7.8 -unsigned char opt_ifname[10] = "eth0";
     7.9  /* opt_noht: If true, Hyperthreading is ignored. */
    7.10  int opt_noht=0;
    7.11  /* opt_noacpi: If true, ACPI tables are not parsed. */
    7.12 @@ -80,7 +78,7 @@ char opt_leveltrigger[30] = "", opt_edge
    7.13  
    7.14  static struct {
    7.15      unsigned char *name;
    7.16 -    enum { OPT_IP, OPT_STR, OPT_UINT, OPT_BOOL } type;
    7.17 +    enum { OPT_STR, OPT_UINT, OPT_BOOL } type;
    7.18      void *var;
    7.19  } opts[] = {
    7.20      { "console",           OPT_STR,  &opt_console },
    7.21 @@ -88,7 +86,6 @@ static struct {
    7.22      { "com1",              OPT_STR,  &opt_com1 },
    7.23      { "com2",              OPT_STR,  &opt_com2 },
    7.24      { "dom0_mem",          OPT_UINT, &opt_dom0_mem }, 
    7.25 -    { "ifname",            OPT_STR,  &opt_ifname },
    7.26      { "noht",              OPT_BOOL, &opt_noht },
    7.27      { "noacpi",            OPT_BOOL, &opt_noacpi },
    7.28      { "nosmp",             OPT_BOOL, &opt_nosmp },
    7.29 @@ -137,10 +134,6 @@ void cmain(unsigned long magic, multiboo
    7.30                  if ( strcmp(opts[i].name, cmdline ) != 0 ) continue;
    7.31                  switch ( opts[i].type )
    7.32                  {
    7.33 -                case OPT_IP:
    7.34 -                    if ( opt != NULL )
    7.35 -                        *(unsigned long *)opts[i].var = str_to_quad(opt);
    7.36 -                    break;
    7.37                  case OPT_STR:
    7.38                      if ( opt != NULL )
    7.39                          strcpy(opts[i].var, opt);
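
Note: with OPT_IP and the ifname option gone, the boot-parameter table recognises only OPT_STR, OPT_UINT and OPT_BOOL entries, and cmain() dispatches on the type as in the switch above. Adding a flag is one table row plus a variable; a hedged sketch, where the name "myflag" and the variable opt_myflag are invented purely for illustration:

    /* Hypothetical new boolean boot parameter. */
    int opt_myflag = 0;

    /* ... plus one extra row in the opts[] table shown above ... */
    { "myflag",            OPT_BOOL, &opt_myflag },

The OPT_BOOL case of the switch (not part of this hunk) is what then sets the variable when "myflag" appears on the Xen command line.
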
     8.1 --- a/xen/common/lib.c	Wed Jun 16 15:59:23 2004 +0000
     8.2 +++ b/xen/common/lib.c	Wed Jun 16 17:09:26 2004 +0000
     8.3 @@ -31,26 +31,6 @@ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,        
     8.4  _L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L};      /* 240-255 */
     8.5  
     8.6  
     8.7 -unsigned long str_to_quad(unsigned char *s)
     8.8 -{
     8.9 -    unsigned long quad = 0;
    8.10 -    do {
    8.11 -        quad <<= 8;
    8.12 -        quad  |= simple_strtol(s, (char **)&s, 10);
    8.13 -    }  
    8.14 -    while ( *s++ == '.' );
    8.15 -    return quad;
    8.16 -}
    8.17 -
    8.18 -
    8.19 -unsigned char *quad_to_str(unsigned long q, unsigned char *s)
    8.20 -{
    8.21 -    sprintf(s, "%ld.%ld.%ld.%ld", 
    8.22 -            (q>>24)&255, (q>>16)&255, (q>>8)&255, (q>>0)&255);
    8.23 -    return s;
    8.24 -}
    8.25 -   
    8.26 -
    8.27  /* a couple of 64 bit operations ported from freebsd */
    8.28  
    8.29  /*-
    8.30 @@ -409,63 +389,3 @@ u64
    8.31  }
    8.32  
    8.33  #endif /* BITS_PER_LONG == 32 */
    8.34 -
    8.35 -
    8.36 -/* HASH/RANDOMISATION FUNCTION
    8.37 - * Based on lookup2.c, by Bob Jenkins, December 1996, Public Domain.
    8.38 - * You can use this free for any purpose.  It has no warranty.
    8.39 - * See http://burlteburtle.net/bob/hash/evahash.html 
    8.40 - */
    8.41 -
    8.42 -#define mix(a,b,c)                                      \
    8.43 -    do {                                                \
    8.44 -        a -= b; a -= c; a ^= (c>>13);                   \
    8.45 -        b -= c; b -= a; b ^= (a<< 8);                   \
    8.46 -        c -= a; c -= b; c ^= ((b&0xffffffff)>>13);      \
    8.47 -        a -= b; a -= c; a ^= ((c&0xffffffff)>>12);      \
    8.48 -        b -= c; b -= a; b = (b ^ (a<<16)) & 0xffffffff; \
    8.49 -        c -= a; c -= b; c = (c ^ (b>> 5)) & 0xffffffff; \
    8.50 -        a -= b; a -= c; a = (a ^ (c>> 3)) & 0xffffffff; \
    8.51 -        b -= c; b -= a; b = (b ^ (a<<10)) & 0xffffffff; \
    8.52 -        c -= a; c -= b; c = (c ^ (b>>15)) & 0xffffffff; \
    8.53 -    } while ( 0 )
    8.54 -
    8.55 -u32 hash(unsigned char *k, unsigned long len)
    8.56 -{
    8.57 -    u32 a, b, c, l;
    8.58 -
    8.59 -    l = len;
    8.60 -    a = b = 0x9e3779b9;  /* the golden ratio; an arbitrary value */
    8.61 -    c = 0xa5a5a5a5;      /* another arbitrary value (KAF, 13/5/03) */
    8.62 -
    8.63 -    while ( l >= 12 )
    8.64 -    {
    8.65 -        a += (k[0] + ((u32)k[1]<<8) + ((u32)k[2]<<16)  + ((u32)k[3]<<24));
    8.66 -        b += (k[4] + ((u32)k[5]<<8) + ((u32)k[6]<<16)  + ((u32)k[7]<<24));
    8.67 -        c += (k[8] + ((u32)k[9]<<8) + ((u32)k[10]<<16) + ((u32)k[11]<<24));
    8.68 -        mix(a,b,c);
    8.69 -        k += 12; l -= 12;
    8.70 -    }
    8.71 -
    8.72 -    c += len;
    8.73 -    switch ( l )
    8.74 -    {
    8.75 -    case 11: c+=((u32)k[10]<<24);
    8.76 -    case 10: c+=((u32)k[9]<<16);
    8.77 -    case 9 : c+=((u32)k[8]<<8);
    8.78 -        /* the first byte of c is reserved for the length */
    8.79 -    case 8 : b+=((u32)k[7]<<24);
    8.80 -    case 7 : b+=((u32)k[6]<<16);
    8.81 -    case 6 : b+=((u32)k[5]<<8);
    8.82 -    case 5 : b+=k[4];
    8.83 -    case 4 : a+=((u32)k[3]<<24);
    8.84 -    case 3 : a+=((u32)k[2]<<16);
    8.85 -    case 2 : a+=((u32)k[1]<<8);
    8.86 -    case 1 : a+=k[0];
    8.87 -        /* case 0: nothing left to add */
    8.88 -    }
    8.89 -
    8.90 -    mix(a,b,c);
    8.91 -
    8.92 -    return c;
    8.93 -}
     9.1 --- a/xen/common/sched_bvt.c	Wed Jun 16 15:59:23 2004 +0000
     9.2 +++ b/xen/common/sched_bvt.c	Wed Jun 16 17:09:26 2004 +0000
     9.3 @@ -23,7 +23,6 @@
     9.4  #include <xen/time.h>
     9.5  #include <xen/ac_timer.h>
     9.6  #include <xen/interrupt.h>
     9.7 -#include <xen/timer.h>
     9.8  #include <xen/perfc.h>
     9.9  #include <xen/sched-if.h>
    9.10  #include <xen/slab.h>
    10.1 --- a/xen/common/schedule.c	Wed Jun 16 15:59:23 2004 +0000
    10.2 +++ b/xen/common/schedule.c	Wed Jun 16 17:09:26 2004 +0000
    10.3 @@ -23,7 +23,6 @@
    10.4  #include <xen/time.h>
    10.5  #include <xen/ac_timer.h>
    10.6  #include <xen/interrupt.h>
    10.7 -#include <xen/timer.h>
    10.8  #include <xen/perfc.h>
    10.9  #include <xen/sched-if.h>
   10.10  #include <hypervisor-ifs/sched_ctl.h>
   10.11 @@ -583,8 +582,6 @@ void __init scheduler_init(void)
   10.12  {
   10.13      int i;
   10.14  
   10.15 -    printk("Initialising schedulers\n");
   10.16 -
   10.17      for ( i = 0; i < NR_CPUS; i++ )
   10.18      {
   10.19          INIT_LIST_HEAD(&schedule_data[i].runqueue);
   10.20 @@ -613,9 +610,8 @@ void __init scheduler_init(void)
   10.21  
   10.22      for ( i = 0; schedulers[i] != NULL; i++ )
   10.23      {
   10.24 -        ops = *schedulers[i]; /* fetch operations structure */
   10.25 -
   10.26 -        if(strcmp(ops.opt_name, opt_sched) == 0)
   10.27 +        ops = *schedulers[i];
   10.28 +        if ( strcmp(ops.opt_name, opt_sched) == 0 )
   10.29              break;
   10.30      }
   10.31      
   10.32 @@ -623,9 +619,6 @@ void __init scheduler_init(void)
   10.33          printk("Could not find scheduler: %s\n", opt_sched);
   10.34  
   10.35      printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
   10.36 -    
   10.37 -    if ( ops.do_schedule == NULL)
   10.38 -        panic("Chosen scheduler has NULL do_schedule!");
   10.39  
   10.40      if ( SCHED_OP(init_scheduler) < 0 )
   10.41          panic("Initialising scheduler failed!");
   10.42 @@ -637,8 +630,6 @@ void __init scheduler_init(void)
   10.43   */
   10.44  void schedulers_start(void) 
   10.45  {   
   10.46 -    printk("Start schedulers\n");
   10.47 -
   10.48      s_timer_fn(0);
   10.49      smp_call_function((void *)s_timer_fn, NULL, 1, 1);
   10.50  
    11.1 --- a/xen/common/softirq.c	Wed Jun 16 15:59:23 2004 +0000
    11.2 +++ b/xen/common/softirq.c	Wed Jun 16 17:09:26 2004 +0000
    11.3 @@ -15,7 +15,6 @@
    11.4  #include <xen/sched.h>
    11.5  #include <xen/interrupt.h>
    11.6  #include <xen/init.h>
    11.7 -#include <xen/tqueue.h>
    11.8  
    11.9  irq_cpustat_t irq_stat[NR_CPUS];
   11.10  
   11.11 @@ -196,85 +195,8 @@ void tasklet_kill(struct tasklet_struct 
   11.12      clear_bit(TASKLET_STATE_SCHED, &t->state);
   11.13  }
   11.14  
   11.15 -
   11.16 -
   11.17 -/* Old style BHs */
   11.18 -
   11.19 -static void (*bh_base[32])(void);
   11.20 -struct tasklet_struct bh_task_vec[32];
   11.21 -
   11.22 -spinlock_t global_bh_lock = SPIN_LOCK_UNLOCKED;
   11.23 -
   11.24 -static void bh_action(unsigned long nr)
   11.25 -{
   11.26 -    int cpu = smp_processor_id();
   11.27 -
   11.28 -    if ( !spin_trylock(&global_bh_lock) )
   11.29 -        goto resched;
   11.30 -
   11.31 -    if ( !hardirq_trylock(cpu) )
   11.32 -        goto resched_unlock;
   11.33 -
   11.34 -    if ( likely(bh_base[nr] != NULL) )
   11.35 -        bh_base[nr]();
   11.36 -
   11.37 -    hardirq_endlock(cpu);
   11.38 -    spin_unlock(&global_bh_lock);
   11.39 -    return;
   11.40 -
   11.41 - resched_unlock:
   11.42 -    spin_unlock(&global_bh_lock);
   11.43 - resched:
   11.44 -    mark_bh(nr);
   11.45 -}
   11.46 -
   11.47 -void init_bh(int nr, void (*routine)(void))
   11.48 -{
   11.49 -    bh_base[nr] = routine;
   11.50 -    mb();
   11.51 -}
   11.52 -
   11.53 -void remove_bh(int nr)
   11.54 -{
   11.55 -    tasklet_kill(bh_task_vec+nr);
   11.56 -    bh_base[nr] = NULL;
   11.57 -}
   11.58 -
   11.59  void __init softirq_init()
   11.60  {
   11.61 -    int i;
   11.62 -
   11.63 -    for ( i = 0; i < 32; i++)
   11.64 -        tasklet_init(bh_task_vec+i, bh_action, i);
   11.65 -
   11.66      open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
   11.67      open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
   11.68  }
   11.69 -
   11.70 -void __run_task_queue(task_queue *list)
   11.71 -{
   11.72 -    struct list_head  head, *next;
   11.73 -    unsigned long     flags;
   11.74 -    void              (*f) (void *);
   11.75 -    struct tq_struct *p;
   11.76 -    void             *data;
   11.77 -
   11.78 -    spin_lock_irqsave(&tqueue_lock, flags);
   11.79 -    list_add(&head, list);
   11.80 -    list_del_init(list);
   11.81 -    spin_unlock_irqrestore(&tqueue_lock, flags);
   11.82 -
   11.83 -    next = head.next;
   11.84 -    while ( next != &head )
   11.85 -    {
   11.86 -        p = list_entry(next, struct tq_struct, list);
   11.87 -        next = next->next;
   11.88 -        f = p->routine;
   11.89 -        data = p->data;
   11.90 -        wmb();
   11.91 -        p->sync = 0;
   11.92 -        if ( likely(f != NULL) )
   11.93 -            f(data);
   11.94 -    }
   11.95 -}
   11.96 -
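
Note: softirq.c loses the old-style BH layer (bh_action, init_bh, mark_bh) and the task-queue runner; softirq_init() now only opens the two tasklet softirqs, and deferred work is expected to use tasklets or a dedicated softirq directly. A sketch of the two replacement idioms, built only from the interfaces visible elsewhere in this changeset (DECLARE_TASKLET, tasklet_schedule, open_softirq, raise_softirq); the handler names and MY_SOFTIRQ are invented:

    /* (1) Tasklet: roughly what init_bh()/mark_bh() used to provide. */
    static void my_deferred_work(unsigned long data);        /* invented */
    DECLARE_TASKLET(my_tasklet, my_deferred_work, 0);

    tasklet_schedule(&my_tasklet);       /* e.g. from an interrupt handler */

    /* (2) Dedicated softirq: add a slot to the enum in interrupt.h,
     *     register a handler once at init, raise it when there is work. */
    open_softirq(MY_SOFTIRQ, my_softirq_action, NULL);        /* invented */
    raise_softirq(MY_SOFTIRQ);

my_softirq_action would have the void (*action)(struct softirq_action *) signature declared in interrupt.h.
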
    12.1 --- a/xen/common/timer.c	Wed Jun 16 15:59:23 2004 +0000
    12.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.3 @@ -1,338 +0,0 @@
    12.4 -/*
    12.5 - *  linux/kernel/timer.c
    12.6 - *
    12.7 - *  Kernel internal timers, kernel timekeeping, basic process system calls
    12.8 - *
    12.9 - *  Copyright (C) 1991, 1992  Linus Torvalds
   12.10 - *
   12.11 - *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
   12.12 - *
   12.13 - *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
   12.14 - *              "A Kernel Model for Precision Timekeeping" by Dave Mills
   12.15 - *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
   12.16 - *              serialize accesses to xtime/lost_ticks).
   12.17 - *                              Copyright (C) 1998  Andrea Arcangeli
   12.18 - *  1999-03-10  Improved NTP compatibility by Ulrich Windl
   12.19 - */
   12.20 -
   12.21 -#include <xen/config.h>
   12.22 -#include <xen/mm.h>
   12.23 -#include <xen/time.h>
   12.24 -#include <xen/timer.h>
   12.25 -#include <xen/timex.h>
   12.26 -#include <xen/tqueue.h>
   12.27 -#include <xen/delay.h>
   12.28 -#include <xen/interrupt.h>
   12.29 -
   12.30 -#include <xen/event.h>
   12.31 -
   12.32 -#include <asm/uaccess.h>
   12.33 -
   12.34 -struct timeval xtime __attribute__ ((aligned (16)));
   12.35 -unsigned long volatile jiffies;
   12.36 -
   12.37 -/*
   12.38 - * Event timer code
   12.39 - */
   12.40 -#define TVN_BITS 6
   12.41 -#define TVR_BITS 8
   12.42 -#define TVN_SIZE (1 << TVN_BITS)
   12.43 -#define TVR_SIZE (1 << TVR_BITS)
   12.44 -#define TVN_MASK (TVN_SIZE - 1)
   12.45 -#define TVR_MASK (TVR_SIZE - 1)
   12.46 -
   12.47 -struct timer_vec {
   12.48 -    int index;
   12.49 -    struct list_head vec[TVN_SIZE];
   12.50 -};
   12.51 -
   12.52 -struct timer_vec_root {
   12.53 -    int index;
   12.54 -    struct list_head vec[TVR_SIZE];
   12.55 -};
   12.56 -
   12.57 -static struct timer_vec tv5;
   12.58 -static struct timer_vec tv4;
   12.59 -static struct timer_vec tv3;
   12.60 -static struct timer_vec tv2;
   12.61 -static struct timer_vec_root tv1;
   12.62 -
   12.63 -static struct timer_vec * const tvecs[] = {
   12.64 -    (struct timer_vec *)&tv1, &tv2, &tv3, &tv4, &tv5
   12.65 -};
   12.66 -
   12.67 -#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
   12.68 -
   12.69 -void init_timervecs (void)
   12.70 -{
   12.71 -    int i;
   12.72 -
   12.73 -    for (i = 0; i < TVN_SIZE; i++) {
   12.74 -        INIT_LIST_HEAD(tv5.vec + i);
   12.75 -        INIT_LIST_HEAD(tv4.vec + i);
   12.76 -        INIT_LIST_HEAD(tv3.vec + i);
   12.77 -        INIT_LIST_HEAD(tv2.vec + i);
   12.78 -    }
   12.79 -    for (i = 0; i < TVR_SIZE; i++)
   12.80 -        INIT_LIST_HEAD(tv1.vec + i);
   12.81 -}
   12.82 -
   12.83 -static unsigned long timer_jiffies;
   12.84 -
   12.85 -static inline void internal_add_timer(struct timer_list *timer)
   12.86 -{
   12.87 -    /*
   12.88 -     * must be cli-ed when calling this
   12.89 -     */
   12.90 -    unsigned long expires = timer->expires;
   12.91 -    unsigned long idx = expires - timer_jiffies;
   12.92 -    struct list_head * vec;
   12.93 -
   12.94 -    if (idx < TVR_SIZE) {
   12.95 -        int i = expires & TVR_MASK;
   12.96 -        vec = tv1.vec + i;
   12.97 -    } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
   12.98 -        int i = (expires >> TVR_BITS) & TVN_MASK;
   12.99 -        vec = tv2.vec + i;
  12.100 -    } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
  12.101 -        int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
  12.102 -        vec =  tv3.vec + i;
  12.103 -    } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
  12.104 -        int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
  12.105 -        vec = tv4.vec + i;
  12.106 -    } else if ((signed long) idx < 0) {
  12.107 -        /* can happen if you add a timer with expires == jiffies,
  12.108 -		 * or you set a timer to go off in the past
  12.109 -		 */
  12.110 -        vec = tv1.vec + tv1.index;
  12.111 -    } else if (idx <= 0xffffffffUL) {
  12.112 -        int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
  12.113 -        vec = tv5.vec + i;
  12.114 -    } else {
  12.115 -        /* Can only get here on architectures with 64-bit jiffies */
  12.116 -        INIT_LIST_HEAD(&timer->list);
  12.117 -        return;
  12.118 -    }
  12.119 -    /*
  12.120 -	 * Timers are FIFO!
  12.121 -	 */
  12.122 -    list_add(&timer->list, vec->prev);
  12.123 -}
  12.124 -
  12.125 -/* Initialize both explicitly - let's try to have them in the same cache line */
  12.126 -spinlock_t timerlist_lock = SPIN_LOCK_UNLOCKED;
  12.127 -
  12.128 -#ifdef CONFIG_SMP
  12.129 -volatile struct timer_list * volatile running_timer;
  12.130 -#define timer_enter(t) do { running_timer = t; mb(); } while (0)
  12.131 -#define timer_exit() do { running_timer = NULL; } while (0)
  12.132 -#define timer_is_running(t) (running_timer == t)
  12.133 -#define timer_synchronize(t) while (timer_is_running(t)) barrier()
  12.134 -#else
  12.135 -#define timer_enter(t)		do { } while (0)
  12.136 -#define timer_exit()		do { } while (0)
  12.137 -#endif
  12.138 -
  12.139 -void add_timer(struct timer_list *timer)
  12.140 -{
  12.141 -    unsigned long flags;
  12.142 -
  12.143 -    spin_lock_irqsave(&timerlist_lock, flags);
  12.144 -    if (timer_pending(timer))
  12.145 -        goto bug;
  12.146 -    internal_add_timer(timer);
  12.147 -    spin_unlock_irqrestore(&timerlist_lock, flags);
  12.148 -    return;
  12.149 - bug:
  12.150 -    spin_unlock_irqrestore(&timerlist_lock, flags);
  12.151 -    printk("bug: kernel timer added twice at %p.\n",
  12.152 -           __builtin_return_address(0));
  12.153 -}
  12.154 -
  12.155 -static inline int detach_timer (struct timer_list *timer)
  12.156 -{
  12.157 -    if (!timer_pending(timer))
  12.158 -        return 0;
  12.159 -    list_del(&timer->list);
  12.160 -    return 1;
  12.161 -}
  12.162 -
  12.163 -int mod_timer(struct timer_list *timer, unsigned long expires)
  12.164 -{
  12.165 -    int ret;
  12.166 -    unsigned long flags;
  12.167 -
  12.168 -    spin_lock_irqsave(&timerlist_lock, flags);
  12.169 -    timer->expires = expires;
  12.170 -    ret = detach_timer(timer);
  12.171 -    internal_add_timer(timer);
  12.172 -    spin_unlock_irqrestore(&timerlist_lock, flags);
  12.173 -    return ret;
  12.174 -}
  12.175 -
  12.176 -int del_timer(struct timer_list * timer)
  12.177 -{
  12.178 -    int ret;
  12.179 -    unsigned long flags;
  12.180 -
  12.181 -    spin_lock_irqsave(&timerlist_lock, flags);
  12.182 -    ret = detach_timer(timer);
  12.183 -    timer->list.next = timer->list.prev = NULL;
  12.184 -    spin_unlock_irqrestore(&timerlist_lock, flags);
  12.185 -    return ret;
  12.186 -}
  12.187 -
  12.188 -#ifdef CONFIG_SMP
  12.189 -void sync_timers(void)
  12.190 -{
  12.191 -    spin_unlock_wait(&global_bh_lock);
  12.192 -}
  12.193 -
  12.194 -/*
  12.195 - * SMP specific function to delete periodic timer.
  12.196 - * Caller must disable by some means restarting the timer
  12.197 - * for new. Upon exit the timer is not queued and handler is not running
  12.198 - * on any CPU. It returns number of times, which timer was deleted
  12.199 - * (for reference counting).
  12.200 - */
  12.201 -
  12.202 -int del_timer_sync(struct timer_list * timer)
  12.203 -{
  12.204 -    int ret = 0;
  12.205 -
  12.206 -    for (;;) {
  12.207 -        unsigned long flags;
  12.208 -        int running;
  12.209 -
  12.210 -        spin_lock_irqsave(&timerlist_lock, flags);
  12.211 -        ret += detach_timer(timer);
  12.212 -        timer->list.next = timer->list.prev = 0;
  12.213 -        running = timer_is_running(timer);
  12.214 -        spin_unlock_irqrestore(&timerlist_lock, flags);
  12.215 -
  12.216 -        if (!running)
  12.217 -            break;
  12.218 -
  12.219 -        timer_synchronize(timer);
  12.220 -    }
  12.221 -
  12.222 -    return ret;
  12.223 -}
  12.224 -#endif
  12.225 -
  12.226 -
  12.227 -static inline void cascade_timers(struct timer_vec *tv)
  12.228 -{
  12.229 -    /* cascade all the timers from tv up one level */
  12.230 -    struct list_head *head, *curr, *next;
  12.231 -
  12.232 -    head = tv->vec + tv->index;
  12.233 -    curr = head->next;
  12.234 -    /*
  12.235 -     * We are removing _all_ timers from the list, so we don't  have to
  12.236 -     * detach them individually, just clear the list afterwards.
  12.237 -	 */
  12.238 -    while (curr != head) {
  12.239 -        struct timer_list *tmp;
  12.240 -
  12.241 -        tmp = list_entry(curr, struct timer_list, list);
  12.242 -        next = curr->next;
  12.243 -        list_del(curr); /* not needed */
  12.244 -        internal_add_timer(tmp);
  12.245 -        curr = next;
  12.246 -    }
  12.247 -    INIT_LIST_HEAD(head);
  12.248 -    tv->index = (tv->index + 1) & TVN_MASK;
  12.249 -}
  12.250 -
  12.251 -static inline void run_timer_list(void)
  12.252 -{
  12.253 -    spin_lock_irq(&timerlist_lock);
  12.254 -    while ((long)(jiffies - timer_jiffies) >= 0) {
  12.255 -        struct list_head *head, *curr;
  12.256 -        if (!tv1.index) {
  12.257 -            int n = 1;
  12.258 -            do {
  12.259 -                cascade_timers(tvecs[n]);
  12.260 -            } while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
  12.261 -        }
  12.262 -    repeat:
  12.263 -        head = tv1.vec + tv1.index;
  12.264 -        curr = head->next;
  12.265 -        if (curr != head) {
  12.266 -            struct timer_list *timer;
  12.267 -            void (*fn)(unsigned long);
  12.268 -            unsigned long data;
  12.269 -
  12.270 -            timer = list_entry(curr, struct timer_list, list);
  12.271 -            fn = timer->function;
  12.272 -            data= timer->data;
  12.273 -
  12.274 -            detach_timer(timer);
  12.275 -            timer->list.next = timer->list.prev = NULL;
  12.276 -            timer_enter(timer);
  12.277 -            spin_unlock_irq(&timerlist_lock);
  12.278 -            fn(data);
  12.279 -            spin_lock_irq(&timerlist_lock);
  12.280 -            timer_exit();
  12.281 -            goto repeat;
  12.282 -        }
  12.283 -        ++timer_jiffies; 
  12.284 -        tv1.index = (tv1.index + 1) & TVR_MASK;
  12.285 -    }
  12.286 -    spin_unlock_irq(&timerlist_lock);
  12.287 -}
  12.288 -
  12.289 -spinlock_t tqueue_lock = SPIN_LOCK_UNLOCKED;
  12.290 -
  12.291 -static void update_wall_time(unsigned long ticks)
  12.292 -{
  12.293 -    do {
  12.294 -        ticks--;
  12.295 -        xtime.tv_usec += 1000000/HZ;
  12.296 -    } while (ticks);
  12.297 -
  12.298 -    if (xtime.tv_usec >= 1000000) {
  12.299 -        xtime.tv_usec -= 1000000;
  12.300 -        xtime.tv_sec++;
  12.301 -    }
  12.302 -}
  12.303 -
  12.304 -/* jiffies at the most recent update of wall time */
  12.305 -unsigned long wall_jiffies;
  12.306 -
  12.307 -/*
  12.308 - * This spinlock protect us from races in SMP while playing with xtime. -arca
  12.309 - */
  12.310 -rwlock_t xtime_lock = RW_LOCK_UNLOCKED;
  12.311 -
  12.312 -static inline void update_times(void)
  12.313 -{
  12.314 -    unsigned long ticks;
  12.315 -
  12.316 -    /*
  12.317 -     * update_times() is run from the raw timer_bh handler so we
  12.318 -     * just know that the irqs are locally enabled and so we don't
  12.319 -     * need to save/restore the flags of the local CPU here. -arca
  12.320 -     */
  12.321 -    write_lock_irq(&xtime_lock);
  12.322 -
  12.323 -    ticks = jiffies - wall_jiffies;
  12.324 -    if (ticks) {
  12.325 -        wall_jiffies += ticks;
  12.326 -        update_wall_time(ticks);
  12.327 -    }
  12.328 -    write_unlock_irq(&xtime_lock);
  12.329 -}
  12.330 -
  12.331 -void timer_bh(void)
  12.332 -{
  12.333 -    update_times();
  12.334 -    run_timer_list();
  12.335 -}
  12.336 -
  12.337 -void do_timer(struct pt_regs *regs)
  12.338 -{
  12.339 -    (*(unsigned long *)&jiffies)++;
  12.340 -    mark_bh(TIMER_BH);
  12.341 -}
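
Note: common/timer.c disappears entirely, taking the Linux cascading timer wheel, xtime, wall_jiffies and do_timer() with it; Xen's remaining timer users are left with the ac_timer layer (still initialised in start_of_day() above). For reference, the bucket selection the removed internal_add_timer() performed reduces to the small stand-alone function below (constants copied from the deleted code; the past-expiry and 64-bit-jiffies special cases are omitted):

    #include <stdio.h>

    #define TVR_BITS 8   /* tv1: 256 slots         */
    #define TVN_BITS 6   /* tv2..tv5: 64 slots each */

    /* Which level of the old wheel a timer 'idx' ticks in the future hit. */
    static int wheel_level(unsigned long idx)
    {
        if ( idx < (1UL << TVR_BITS) )                  return 1;  /* tv1 */
        if ( idx < (1UL << (TVR_BITS + TVN_BITS)) )     return 2;  /* tv2 */
        if ( idx < (1UL << (TVR_BITS + 2*TVN_BITS)) )   return 3;  /* tv3 */
        if ( idx < (1UL << (TVR_BITS + 3*TVN_BITS)) )   return 4;  /* tv4 */
        return 5;                                                  /* tv5 */
    }

    int main(void)
    {
        /* A timer 1000 ticks out lands in tv2 (256 <= 1000 < 16384) and is
         * cascaded down into tv1 as the wheel turns. */
        printf("%d\n", wheel_level(1000));
        return 0;
    }
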
    13.1 --- a/xen/include/asm-x86/processor.h	Wed Jun 16 15:59:23 2004 +0000
    13.2 +++ b/xen/include/asm-x86/processor.h	Wed Jun 16 17:09:26 2004 +0000
    13.3 @@ -340,33 +340,10 @@ long set_fast_trap(struct task_struct *p
    13.4  	{ {0} }			/* io permissions */		\
    13.5  }
    13.6  
    13.7 -#define INIT_TSS  {						\
    13.8 -	0,0, /* back_link, __blh */				\
    13.9 -	0, /* esp0 */						\
   13.10 -	0, 0, /* ss0 */						\
   13.11 -	0,0,0,0,0,0, /* stack1, stack2 */			\
   13.12 -	0, /* cr3 */						\
   13.13 -	0,0, /* eip,eflags */					\
   13.14 -	0,0,0,0, /* eax,ecx,edx,ebx */				\
   13.15 -	0,0,0,0, /* esp,ebp,esi,edi */				\
   13.16 -	0,0,0,0,0,0, /* es,cs,ss */				\
   13.17 -	0,0,0,0,0,0, /* ds,fs,gs */				\
   13.18 -	0,0, /* ldt */						\
   13.19 -	0, INVALID_IO_BITMAP_OFFSET, /* tace, bitmap */		\
   13.20 -	{ [0 ... IO_BITMAP_SIZE] = ~0UL }, /* ioperm */         \
   13.21 -}
   13.22 -
   13.23  #elif defined(__x86_64__)
   13.24  
   13.25  #define INIT_THREAD { 0 }
   13.26  
   13.27 -#define INIT_TSS {                                              \
   13.28 -	0,0,                                                    \
   13.29 -	0,0,0,0,{0},0,0,                                        \
   13.30 -	0, INVALID_IO_BITMAP_OFFSET,                            \
   13.31 -	{ [0 ... IO_BITMAP_SIZE] = ~0UL }                       \
   13.32 -}
   13.33 -
   13.34  #endif /* __x86_64__ */
   13.35  
   13.36  struct mm_struct {
    14.1 --- a/xen/include/xen/interrupt.h	Wed Jun 16 15:59:23 2004 +0000
    14.2 +++ b/xen/include/xen/interrupt.h	Wed Jun 16 17:09:26 2004 +0000
    14.3 @@ -1,4 +1,3 @@
    14.4 -/* interrupt.h */
    14.5  #ifndef _LINUX_INTERRUPT_H
    14.6  #define _LINUX_INTERRUPT_H
    14.7  
    14.8 @@ -11,39 +10,24 @@
    14.9  #include <asm/atomic.h>
   14.10  #include <asm/ptrace.h>
   14.11  
   14.12 -/* For 2.6.x compatibility */
   14.13 -typedef void irqreturn_t;
   14.14 -#define IRQ_NONE
   14.15 -#define IRQ_HANDLED
   14.16 -#define IRQ_RETVAL(x)
   14.17 -
   14.18  struct irqaction {
   14.19 -	void (*handler)(int, void *, struct pt_regs *);
   14.20 -	unsigned long flags;
   14.21 -	unsigned long mask;
   14.22 -	const char *name;
   14.23 -	void *dev_id;
   14.24 -	struct irqaction *next;
   14.25 -};
   14.26 -
   14.27 -enum {
   14.28 -	TIMER_BH = 0,
   14.29 -	SCSI_BH
   14.30 +    void (*handler)(int, void *, struct pt_regs *);
   14.31 +    unsigned long flags;
   14.32 +    unsigned long mask;
   14.33 +    const char *name;
   14.34 +    void *dev_id;
   14.35 +    struct irqaction *next;
   14.36  };
   14.37  
   14.38  #include <asm/hardirq.h>
   14.39  #include <asm/softirq.h>
   14.40  
   14.41 -
   14.42  enum
   14.43  {
   14.44 -	HI_SOFTIRQ=0,
   14.45 -	NET_RX_SOFTIRQ,
   14.46 -	AC_TIMER_SOFTIRQ,
   14.47 -	TASKLET_SOFTIRQ,
   14.48 -        BLKDEV_RESPONSE_SOFTIRQ,
   14.49 -        NET_TX_SOFTIRQ,
   14.50 -        NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ
   14.51 +    HI_SOFTIRQ=0,
   14.52 +    AC_TIMER_SOFTIRQ,
   14.53 +    TASKLET_SOFTIRQ,
   14.54 +    NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ
   14.55  };
   14.56  
   14.57  /* softirq mask and active fields moved to irq_cpustat_t in
   14.58 @@ -52,8 +36,8 @@ enum
   14.59  
   14.60  struct softirq_action
   14.61  {
   14.62 -	void	(*action)(struct softirq_action *);
   14.63 -	void	*data;
   14.64 +    void (*action)(struct softirq_action *);
   14.65 +    void *data;
   14.66  };
   14.67  
   14.68  asmlinkage void do_softirq(void);
   14.69 @@ -63,8 +47,6 @@ extern void softirq_init(void);
   14.70  extern void FASTCALL(cpu_raise_softirq(unsigned int cpu, unsigned int nr));
   14.71  extern void FASTCALL(raise_softirq(unsigned int nr));
   14.72  
   14.73 -
   14.74 -
   14.75  /* Tasklets --- multithreaded analogue of BHs.
   14.76  
   14.77     Main feature differing them of generic softirqs: tasklet
   14.78 @@ -87,11 +69,11 @@ extern void FASTCALL(raise_softirq(unsig
   14.79  
   14.80  struct tasklet_struct
   14.81  {
   14.82 -	struct tasklet_struct *next;
   14.83 -	unsigned long state;
   14.84 -	atomic_t count;
   14.85 -	void (*func)(unsigned long);
   14.86 -	unsigned long data;
   14.87 +    struct tasklet_struct *next;
   14.88 +    unsigned long state;
   14.89 +    atomic_t count;
   14.90 +    void (*func)(unsigned long);
   14.91 +    unsigned long data;
   14.92  };
   14.93  
   14.94  #define DECLARE_TASKLET(name, func, data) \
   14.95 @@ -103,13 +85,13 @@ struct tasklet_struct name = { NULL, 0, 
   14.96  
   14.97  enum
   14.98  {
   14.99 -	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
  14.100 -	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
  14.101 +    TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
  14.102 +    TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
  14.103  };
  14.104  
  14.105  struct tasklet_head
  14.106  {
  14.107 -	struct tasklet_struct *list;
  14.108 +    struct tasklet_struct *list;
  14.109  } __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
  14.110  
  14.111  extern struct tasklet_head tasklet_vec[NR_CPUS];
  14.112 @@ -118,18 +100,18 @@ extern struct tasklet_head tasklet_hi_ve
  14.113  #ifdef CONFIG_SMP
  14.114  static inline int tasklet_trylock(struct tasklet_struct *t)
  14.115  {
  14.116 -	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
  14.117 +    return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
  14.118  }
  14.119  
  14.120  static inline void tasklet_unlock(struct tasklet_struct *t)
  14.121  {
  14.122 -	smp_mb__before_clear_bit(); 
  14.123 -	clear_bit(TASKLET_STATE_RUN, &(t)->state);
  14.124 +    smp_mb__before_clear_bit(); 
  14.125 +    clear_bit(TASKLET_STATE_RUN, &(t)->state);
  14.126  }
  14.127  
  14.128  static inline void tasklet_unlock_wait(struct tasklet_struct *t)
  14.129  {
  14.130 -	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
  14.131 +    while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
  14.132  }
  14.133  #else
  14.134  #define tasklet_trylock(t) 1
  14.135 @@ -141,46 +123,46 @@ extern void FASTCALL(__tasklet_schedule(
  14.136  
  14.137  static inline void tasklet_schedule(struct tasklet_struct *t)
  14.138  {
  14.139 -	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
  14.140 -		__tasklet_schedule(t);
  14.141 +    if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
  14.142 +        __tasklet_schedule(t);
  14.143  }
  14.144  
  14.145  extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));
  14.146  
  14.147  static inline void tasklet_hi_schedule(struct tasklet_struct *t)
  14.148  {
  14.149 -	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
  14.150 -		__tasklet_hi_schedule(t);
  14.151 +    if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
  14.152 +        __tasklet_hi_schedule(t);
  14.153  }
  14.154  
  14.155  
  14.156  static inline void tasklet_disable_nosync(struct tasklet_struct *t)
  14.157  {
  14.158 -	atomic_inc(&t->count);
  14.159 -	smp_mb__after_atomic_inc();
  14.160 +    atomic_inc(&t->count);
  14.161 +    smp_mb__after_atomic_inc();
  14.162  }
  14.163  
  14.164  static inline void tasklet_disable(struct tasklet_struct *t)
  14.165  {
  14.166 -	tasklet_disable_nosync(t);
  14.167 -	tasklet_unlock_wait(t);
  14.168 -	smp_mb();
  14.169 +    tasklet_disable_nosync(t);
  14.170 +    tasklet_unlock_wait(t);
  14.171 +    smp_mb();
  14.172  }
  14.173  
  14.174  static inline void tasklet_enable(struct tasklet_struct *t)
  14.175  {
  14.176 -	smp_mb__before_atomic_dec();
  14.177 -	if (atomic_dec_and_test(&t->count) &&
  14.178 -	    test_bit(TASKLET_STATE_SCHED, &t->state))
  14.179 -		__tasklet_schedule(t);
  14.180 +    smp_mb__before_atomic_dec();
  14.181 +    if (atomic_dec_and_test(&t->count) &&
  14.182 +        test_bit(TASKLET_STATE_SCHED, &t->state))
  14.183 +        __tasklet_schedule(t);
  14.184  }
  14.185  
  14.186  static inline void tasklet_hi_enable(struct tasklet_struct *t)
  14.187  {
  14.188 -	smp_mb__before_atomic_dec();
  14.189 -	if (atomic_dec_and_test(&t->count) &&
  14.190 -	    test_bit(TASKLET_STATE_SCHED, &t->state))
  14.191 -		__tasklet_hi_schedule(t);
  14.192 +    smp_mb__before_atomic_dec();
  14.193 +    if (atomic_dec_and_test(&t->count) &&
  14.194 +        test_bit(TASKLET_STATE_SCHED, &t->state))
  14.195 +        __tasklet_hi_schedule(t);
  14.196  }
  14.197  
  14.198  extern void tasklet_kill(struct tasklet_struct *t);
  14.199 @@ -205,23 +187,6 @@ static void name (unsigned long dummy) \
  14.200  
  14.201  #endif /* CONFIG_SMP */
  14.202  
  14.203 -
  14.204 -/* Old BH definitions */
  14.205 -
  14.206 -extern struct tasklet_struct bh_task_vec[];
  14.207 -
  14.208 -/* It is exported _ONLY_ for wait_on_irq(). */
  14.209 -extern spinlock_t global_bh_lock;
  14.210 -
  14.211 -static inline void mark_bh(int nr)
  14.212 -{
  14.213 -	tasklet_hi_schedule(bh_task_vec+nr);
  14.214 -}
  14.215 -
  14.216 -extern void init_bh(int nr, void (*routine)(void));
  14.217 -extern void remove_bh(int nr);
  14.218 -
  14.219 -
  14.220  /*
  14.221   * Autoprobing for irqs:
  14.222   *
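
Note: interrupt.h is reflowed to the 4-space style used elsewhere in Xen and loses the 2.6 compatibility stubs, the BH remnants (bh_task_vec, global_bh_lock, mark_bh, init_bh, remove_bh), the TIMER_BH/SCSI_BH numbers, and the NET_RX/NET_TX/BLKDEV_RESPONSE softirq slots. The tasklet primitives themselves are unchanged; one idiom worth calling out, sketched here with an invented tasklet name, is the disable/enable pair used to fence a handler out while its data is torn down:

    /* Sketch: keep my_tasklet's handler from running while its state is
     * modified.  tasklet_disable() returns only once the handler is not
     * executing on any CPU; tasklet_enable() lets a pending run proceed. */
    tasklet_disable(&my_tasklet);
    /* ... safely update or free the state the handler touches ... */
    tasklet_enable(&my_tasklet);
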
    15.1 --- a/xen/include/xen/lib.h	Wed Jun 16 15:59:23 2004 +0000
    15.2 +++ b/xen/include/xen/lib.h	Wed Jun 16 17:09:26 2004 +0000
    15.3 @@ -3,6 +3,7 @@
    15.4  
    15.5  #include <stdarg.h>
    15.6  #include <xen/types.h>
    15.7 +#include <xen/string.h>
    15.8  
    15.9  #ifndef NDEBUG
   15.10  #define ASSERT(_p) if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , __LINE__, __FILE__); *(int*)0=0; }
   15.11 @@ -13,29 +14,9 @@
   15.12  #define reserve_bootmem(_p,_l) \
   15.13  printk("Memory Reservation 0x%lx, %lu bytes\n", (_p), (_l))
   15.14  
   15.15 -/* lib.c */
   15.16 -#include <xen/string.h>
   15.17 -
   15.18 -/* JWS - pulled over linux string library ({asm,linux}/string.h)
   15.19 -int memcmp(const void * cs,const void * ct,size_t count);
   15.20 -void * memcpy(void * dest,const void *src,size_t count);
   15.21 -int strncmp(const char * cs,const char * ct,size_t count);
   15.22 -int strcmp(const char * cs,const char * ct);
   15.23 -char * strcpy(char * dest,const char *src);
   15.24 -char * strncpy(char * dest,const char *src,size_t count);
   15.25 -void * memset(void * s,int c,size_t count);
   15.26 -size_t strnlen(const char * s, size_t count);
   15.27 -size_t strlen(const char * s);
   15.28 -char * strchr(const char *,int);
   15.29 -char * strstr(const char * s1,const char * s2);
   15.30 -*/
   15.31 -
   15.32 -unsigned long str_to_quad(unsigned char *s);
   15.33 -unsigned char *quad_to_str(unsigned long q, unsigned char *s);
   15.34 -
   15.35  /* kernel.c */
   15.36  #define printk printf
   15.37 -void printf (const char *format, ...);
   15.38 +void printf(const char *format, ...);
   15.39  void panic(const char *format, ...);
   15.40  
   15.41  /* vsprintf.c */
   15.42 @@ -50,7 +31,4 @@ long simple_strtol(const char *cp,char *
   15.43  unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base);
   15.44  long long simple_strtoll(const char *cp,char **endp,unsigned int base);
   15.45  
   15.46 -/* Produce a 32-bit hash from a key string 'k' of length 'len' bytes. */
   15.47 -u32 hash(unsigned char *k, unsigned long len);
   15.48 -
   15.49  #endif /* __LIB_H__ */
    16.1 --- a/xen/include/xen/sched.h	Wed Jun 16 15:59:23 2004 +0000
    16.2 +++ b/xen/include/xen/sched.h	Wed Jun 16 17:09:26 2004 +0000
    16.3 @@ -24,12 +24,8 @@
    16.4  extern unsigned long volatile jiffies;
    16.5  extern rwlock_t tasklist_lock;
    16.6  
    16.7 -extern struct timeval xtime;
    16.8 -
    16.9  #include <xen/spinlock.h>
   16.10  
   16.11 -extern struct mm_struct init_mm;
   16.12 -
   16.13  #define _HYP_EVENT_NEED_RESCHED 0
   16.14  #define _HYP_EVENT_DIE          1
   16.15  
    17.1 --- a/xen/include/xen/time.h	Wed Jun 16 15:59:23 2004 +0000
    17.2 +++ b/xen/include/xen/time.h	Wed Jun 16 17:09:26 2004 +0000
    17.3 @@ -27,14 +27,10 @@
    17.4  #ifndef __XEN_TIME_H__
    17.5  #define __XEN_TIME_H__
    17.6  
    17.7 -#include <asm/ptrace.h>  /* XXX Only used for do_timer which should be moved */
    17.8  #include <asm/time.h>    /* pull in architecture specific time definition */
    17.9  #include <xen/types.h>
   17.10  #include <hypervisor-ifs/hypervisor-if.h>
   17.11  
   17.12 -/*
   17.13 - * Init time
   17.14 - */
   17.15  extern int init_xen_time();
   17.16  
   17.17  
   17.18 @@ -49,22 +45,13 @@ extern int init_xen_time();
   17.19  
   17.20  s_time_t get_s_time(void);
   17.21  
   17.22 -#define NOW()				((s_time_t)get_s_time())
   17.23 -#define SECONDS(_s)			(((s_time_t)(_s))  * 1000000000ULL )
   17.24 -#define MILLISECS(_ms)		(((s_time_t)(_ms)) * 1000000ULL )
   17.25 -#define MICROSECS(_us)		(((s_time_t)(_us)) * 1000ULL )
   17.26 -#define Time_Max			((s_time_t) 0x7fffffffffffffffLL)
   17.27 -#define FOREVER				Time_Max
   17.28 +#define NOW()           ((s_time_t)get_s_time())
   17.29 +#define SECONDS(_s)     (((s_time_t)(_s))  * 1000000000ULL )
   17.30 +#define MILLISECS(_ms)  (((s_time_t)(_ms)) * 1000000ULL )
   17.31 +#define MICROSECS(_us)  (((s_time_t)(_us)) * 1000ULL )
   17.32  
   17.33 -/* Wall Clock Time */
   17.34 -struct timeval {
   17.35 -    long            tv_sec;         /* seconds */
   17.36 -    long            tv_usec;        /* microseconds */
   17.37 -};
   17.38 -  
   17.39  extern void update_dom_time(shared_info_t *si);
   17.40  extern void do_settime(unsigned long secs, unsigned long usecs, 
   17.41                         u64 system_time_base);
   17.42 -extern void do_timer(struct pt_regs *regs);
   17.43  
   17.44  #endif /* __XEN_TIME_H__ */
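
Note: xen/time.h drops the private struct timeval, do_timer() and the unused Time_Max/FOREVER macros; what remains is get_s_time()/NOW() plus the unit helpers, now aligned consistently. A small usage sketch (the deadline variable is invented) of expressing a relative timeout with what is left:

    /* Sketch: a 5 ms deadline expressed in system time (ns since boot). */
    s_time_t deadline = NOW() + MILLISECS(5);

    if ( NOW() >= deadline )
    {
        /* ... the timeout has expired ... */
    }
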
    18.1 --- a/xen/include/xen/timer.h	Wed Jun 16 15:59:23 2004 +0000
    18.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.3 @@ -1,81 +0,0 @@
    18.4 -#ifndef _LINUX_TIMER_H
    18.5 -#define _LINUX_TIMER_H
    18.6 -
    18.7 -#include <xen/config.h>
    18.8 -#include <xen/list.h>
    18.9 -
   18.10 -/*
   18.11 - * In Linux 2.4, static timers have been removed from the kernel.
   18.12 - * Timers may be dynamically created and destroyed, and should be initialized
   18.13 - * by a call to init_timer() upon creation.
   18.14 - *
   18.15 - * The "data" field enables use of a common timeout function for several
   18.16 - * timeouts. You can use this field to distinguish between the different
   18.17 - * invocations.
   18.18 - *
   18.19 - * RN: Unlike the Linux timers, which are executed at the periodic timer
   18.20 - *     interrupt, in Xen, the timer list is only checked "occasionally", thus
   18.21 - *     its accuracy might be somewhat worse than under Linux. However, the
   18.22 - *     hypervisor should be purely event-driven and, in fact, in the current
   18.23 - *     implementation, timers are only used for watchdog purpose at a very
   18.24 - *     coarse granularity anyway. Thus this is not a problem.
   18.25 - */
   18.26 -struct timer_list {
   18.27 -	struct list_head list;
   18.28 -	unsigned long expires;		/* jiffies */
   18.29 -	unsigned long data;
   18.30 -	void (*function)(unsigned long);
   18.31 -};
   18.32 -
   18.33 -extern void add_timer(struct timer_list * timer);
   18.34 -extern int del_timer(struct timer_list * timer);
   18.35 -
   18.36 -#ifdef CONFIG_SMP
   18.37 -extern int del_timer_sync(struct timer_list * timer);
   18.38 -extern void sync_timers(void);
   18.39 -#else
   18.40 -#define del_timer_sync(t)	del_timer(t)
   18.41 -#define sync_timers()		do { } while (0)
   18.42 -#endif
   18.43 -
   18.44 -/*
   18.45 - * mod_timer is a more efficient way to update the expire field of an
   18.46 - * active timer (if the timer is inactive it will be activated)
   18.47 - * mod_timer(a,b) is equivalent to del_timer(a); a->expires = b; add_timer(a).
   18.48 - * If the timer is known to be not pending (ie, in the handler), mod_timer
   18.49 - * is less efficient than a->expires = b; add_timer(a).
   18.50 - */
   18.51 -int mod_timer(struct timer_list *timer, unsigned long expires);
   18.52 -
   18.53 -extern void it_real_fn(unsigned long);
   18.54 -
   18.55 -static inline void init_timer(struct timer_list * timer)
   18.56 -{
   18.57 -	timer->list.next = timer->list.prev = NULL;
   18.58 -}
   18.59 -
   18.60 -static inline int timer_pending (const struct timer_list * timer)
   18.61 -{
   18.62 -	return timer->list.next != NULL;
   18.63 -}
   18.64 -
   18.65 -/*
   18.66 - *	These inlines deal with timer wrapping correctly. You are 
   18.67 - *	strongly encouraged to use them
   18.68 - *	1. Because people otherwise forget
   18.69 - *	2. Because if the timer wrap changes in future you wont have to
   18.70 - *	   alter your driver code.
   18.71 - *
   18.72 - * time_after(a,b) returns true if the time a is after time b.
   18.73 - *
   18.74 - * Do this with "<0" and ">=0" to only test the sign of the result. A
   18.75 - * good compiler would generate better code (and a really good compiler
   18.76 - * wouldn't care). Gcc is currently neither.
   18.77 - */
   18.78 -#define time_after(a,b)		((long)(b) - (long)(a) < 0)
   18.79 -#define time_before(a,b)	time_after(b,a)
   18.80 -
   18.81 -#define time_after_eq(a,b)	((long)(a) - (long)(b) >= 0)
   18.82 -#define time_before_eq(a,b)	time_after_eq(b,a)
   18.83 -
   18.84 -#endif
    19.1 --- a/xen/include/xen/tqueue.h	Wed Jun 16 15:59:23 2004 +0000
    19.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.3 @@ -1,125 +0,0 @@
    19.4 -/*
    19.5 - * tqueue.h --- task queue handling for Linux.
    19.6 - *
    19.7 - * Mostly based on a proposed bottom-half replacement code written by
    19.8 - * Kai Petzke, wpp@marie.physik.tu-berlin.de.
    19.9 - *
   19.10 - * Modified for use in the Linux kernel by Theodore Ts'o,
   19.11 - * tytso@mit.edu.  Any bugs are my fault, not Kai's.
   19.12 - *
   19.13 - * The original comment follows below.
   19.14 - */
   19.15 -
   19.16 -#ifndef _LINUX_TQUEUE_H
   19.17 -#define _LINUX_TQUEUE_H
   19.18 -
   19.19 -#include <xen/spinlock.h>
   19.20 -#include <xen/list.h>
   19.21 -#include <asm/bitops.h>
   19.22 -#include <asm/system.h>
   19.23 -
   19.24 -/*
   19.25 - * New proposed "bottom half" handlers:
   19.26 - * (C) 1994 Kai Petzke, wpp@marie.physik.tu-berlin.de
   19.27 - *
   19.28 - * Advantages:
   19.29 - * - Bottom halfs are implemented as a linked list.  You can have as many
   19.30 - *   of them, as you want.
   19.31 - * - No more scanning of a bit field is required upon call of a bottom half.
   19.32 - * - Support for chained bottom half lists.  The run_task_queue() function can be
   19.33 - *   used as a bottom half handler.  This is for example useful for bottom
   19.34 - *   halfs, which want to be delayed until the next clock tick.
   19.35 - *
   19.36 - * Notes:
   19.37 - * - Bottom halfs are called in the reverse order that they were linked into
   19.38 - *   the list.
   19.39 - */
   19.40 -
   19.41 -struct tq_struct {
   19.42 -	struct list_head list;		/* linked list of active bh's */
   19.43 -	unsigned long sync;		/* must be initialized to zero */
   19.44 -	void (*routine)(void *);	/* function to call */
   19.45 -	void *data;			/* argument to function */
   19.46 -};
   19.47 -
   19.48 -/*
   19.49 - * Emit code to initialise a tq_struct's routine and data pointers
   19.50 - */
   19.51 -#define PREPARE_TQUEUE(_tq, _routine, _data)			\
   19.52 -	do {							\
   19.53 -		(_tq)->routine = _routine;			\
   19.54 -		(_tq)->data = _data;				\
   19.55 -	} while (0)
   19.56 -
   19.57 -/*
   19.58 - * Emit code to initialise all of a tq_struct
   19.59 - */
   19.60 -#define INIT_TQUEUE(_tq, _routine, _data)			\
   19.61 -	do {							\
   19.62 -		INIT_LIST_HEAD(&(_tq)->list);			\
   19.63 -		(_tq)->sync = 0;				\
   19.64 -		PREPARE_TQUEUE((_tq), (_routine), (_data));	\
   19.65 -	} while (0)
   19.66 -
   19.67 -typedef struct list_head task_queue;
   19.68 -
   19.69 -#define DECLARE_TASK_QUEUE(q)	LIST_HEAD(q)
   19.70 -#define TQ_ACTIVE(q)		(!list_empty(&q))
   19.71 -
   19.72 -extern task_queue tq_disk;
   19.73 -
   19.74 -/*
   19.75 - * To implement your own list of active bottom halfs, use the following
   19.76 - * two definitions:
   19.77 - *
   19.78 - * DECLARE_TASK_QUEUE(my_tqueue);
   19.79 - * struct tq_struct my_task = {
   19.80 - * 	routine: (void (*)(void *)) my_routine,
   19.81 - *	data: &my_data
   19.82 - * };
   19.83 - *
   19.84 - * To activate a bottom half on a list, use:
   19.85 - *
   19.86 - *	queue_task(&my_task, &my_tqueue);
   19.87 - *
   19.88 - * To later run the queued tasks use
   19.89 - *
   19.90 - *	run_task_queue(&my_tqueue);
   19.91 - *
   19.92 - * This allows you to do deferred processing.  For example, you could
   19.93 - * have a task queue called tq_timer, which is executed within the timer
   19.94 - * interrupt.
   19.95 - */
   19.96 -
   19.97 -extern spinlock_t tqueue_lock;
   19.98 -
   19.99 -/*
  19.100 - * Queue a task on a tq.  Return non-zero if it was successfully
  19.101 - * added.
  19.102 - */
  19.103 -static inline int queue_task(struct tq_struct *bh_pointer, task_queue *bh_list)
  19.104 -{
  19.105 -	int ret = 0;
  19.106 -	if (!test_and_set_bit(0,&bh_pointer->sync)) {
  19.107 -		unsigned long flags;
  19.108 -		spin_lock_irqsave(&tqueue_lock, flags);
  19.109 -		list_add_tail(&bh_pointer->list, bh_list);
  19.110 -		spin_unlock_irqrestore(&tqueue_lock, flags);
  19.111 -		ret = 1;
  19.112 -	}
  19.113 -	return ret;
  19.114 -}
  19.115 -
  19.116 -/*
  19.117 - * Call all "bottom halfs" on a given list.
  19.118 - */
  19.119 -
  19.120 -extern void __run_task_queue(task_queue *list);
  19.121 -
  19.122 -static inline void run_task_queue(task_queue *list)
  19.123 -{
  19.124 -	if (TQ_ACTIVE(*list))
  19.125 -		__run_task_queue(list);
  19.126 -}
  19.127 -
  19.128 -#endif /* _LINUX_TQUEUE_H */