ia64/xen-unstable

changeset 807:6ef75cc014dc

bitkeeper revision 1.496 (3f841becdre1gQrbAv8C5pF-O_M1fg)

sched.h, pgalloc.h, timer.c, schedule.c, domain.c, smp.c:
Various cleanups. Nothing major.
author kaf24@scramble.cl.cam.ac.uk
date Wed Oct 08 14:15:08 2003 +0000 (2003-10-08)
parents ac55fdc43f08
children 3f26e93f5e01
files xen/arch/i386/smp.c xen/common/domain.c xen/common/schedule.c xen/common/timer.c xen/include/asm-i386/pgalloc.h xen/include/xeno/sched.h
line diff
     1.1 --- a/xen/arch/i386/smp.c	Wed Oct 08 13:42:44 2003 +0000
     1.2 +++ b/xen/arch/i386/smp.c	Wed Oct 08 14:15:08 2003 +0000
     1.3 @@ -55,54 +55,8 @@
     1.4   *	7AP.	We do not assume writes to the LVT deassering IRQs
     1.5   *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
     1.6   *	9AP.	We do not use mixed mode
     1.7 - *
     1.8 - *	Pentium
     1.9 - *		There is a marginal case where REP MOVS on 100MHz SMP
    1.10 - *	machines with B stepping processors can fail. XXX should provide
    1.11 - *	an L1cache=Writethrough or L1cache=off option.
    1.12 - *
    1.13 - *		B stepping CPUs may hang. There are hardware work arounds
    1.14 - *	for this. We warn about it in case your board doesnt have the work
    1.15 - *	arounds. Basically thats so I can tell anyone with a B stepping
    1.16 - *	CPU and SMP problems "tough".
    1.17 - *
    1.18 - *	Specific items [From Pentium Processor Specification Update]
    1.19 - *
    1.20 - *	1AP.	Linux doesn't use remote read
    1.21 - *	2AP.	Linux doesn't trust APIC errors
    1.22 - *	3AP.	We work around this
    1.23 - *	4AP.	Linux never generated 3 interrupts of the same priority
    1.24 - *		to cause a lost local interrupt.
    1.25 - *	5AP.	Remote read is never used
    1.26 - *	6AP.	not affected - worked around in hardware
    1.27 - *	7AP.	not affected - worked around in hardware
    1.28 - *	8AP.	worked around in hardware - we get explicit CS errors if not
    1.29 - *	9AP.	only 'noapic' mode affected. Might generate spurious
    1.30 - *		interrupts, we log only the first one and count the
    1.31 - *		rest silently.
    1.32 - *	10AP.	not affected - worked around in hardware
    1.33 - *	11AP.	Linux reads the APIC between writes to avoid this, as per
    1.34 - *		the documentation. Make sure you preserve this as it affects
    1.35 - *		the C stepping chips too.
    1.36 - *	12AP.	not affected - worked around in hardware
    1.37 - *	13AP.	not affected - worked around in hardware
    1.38 - *	14AP.	we always deassert INIT during bootup
    1.39 - *	15AP.	not affected - worked around in hardware
    1.40 - *	16AP.	not affected - worked around in hardware
    1.41 - *	17AP.	not affected - worked around in hardware
    1.42 - *	18AP.	not affected - worked around in hardware
    1.43 - *	19AP.	not affected - worked around in BIOS
    1.44 - *
    1.45 - *	If this sounds worrying believe me these bugs are either ___RARE___,
    1.46 - *	or are signal timing bugs worked around in hardware and there's
    1.47 - *	about nothing of note with C stepping upwards.
    1.48   */
    1.49  
    1.50 -/* The 'big kernel lock' */
    1.51 -spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
    1.52 -
    1.53 -struct tlb_state cpu_tlbstate[NR_CPUS] = {[0 ... NR_CPUS-1] = { 0 }};
    1.54 -
    1.55  /*
    1.56   * the following functions deal with sending IPIs between CPUs.
    1.57   *
    1.58 @@ -245,6 +199,14 @@ static inline void send_IPI_all(int vect
    1.59  }
    1.60  
    1.61  /*
    1.62 + * ********* XEN NOTICE **********
     1.63 + * I've left the following comments lying around as they look like they might
    1.64 + * be useful to get multiprocessor guest OSes going. However, I suspect the
    1.65 + * issues we face will be quite different so I've ripped out all the
    1.66 + * TLBSTATE logic (I didn't understand it anyway :-). These comments do
    1.67 + * not apply to Xen, therefore! -- Keir (8th Oct 2003).
    1.68 + */
    1.69 +/*
    1.70   *	Smarter SMP flushing macros. 
    1.71   *		c/o Linus Torvalds.
    1.72   *
    1.73 @@ -252,28 +214,6 @@ static inline void send_IPI_all(int vect
    1.74   *	writing to user space from interrupts. (Its not allowed anyway).
    1.75   *
    1.76   *	Optimizations Manfred Spraul <manfred@colorfullife.com>
    1.77 - */
    1.78 -
    1.79 -static volatile unsigned long flush_cpumask;
    1.80 -#if 0
    1.81 -static struct mm_struct * flush_mm;
    1.82 -static unsigned long flush_va;
    1.83 -#endif
    1.84 -static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
    1.85 -#define FLUSH_ALL	0xffffffff
    1.86 -
    1.87 -/*
    1.88 - * We cannot call mmdrop() because we are in interrupt context, 
    1.89 - * instead update mm.cpu_vm_mask.
    1.90 - */
    1.91 -static void inline leave_mm (unsigned long cpu)
    1.92 -{
    1.93 -    if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
    1.94 -        BUG();
    1.95 -    clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
    1.96 -}
    1.97 -
    1.98 -/*
    1.99   *
   1.100   * The flush IPI assumes that a thread switch happens in this order:
   1.101   * [cpu0: the cpu that switches]
   1.102 @@ -310,15 +250,17 @@ static void inline leave_mm (unsigned lo
   1.103   *
   1.104   * The good news is that cpu_tlbstate is local to each cpu, no
   1.105   * write/read ordering problems.
   1.106 - */
   1.107 -
   1.108 -/*
   1.109 + *
   1.110   * TLB flush IPI:
   1.111   *
   1.112   * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
   1.113   * 2) Leave the mm if we are in the lazy tlb mode.
   1.114   */
   1.115  
   1.116 +static volatile unsigned long flush_cpumask;
   1.117 +static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
   1.118 +#define FLUSH_ALL	0xffffffff
   1.119 +
   1.120  asmlinkage void smp_invalidate_interrupt (void)
   1.121  {
   1.122      unsigned long cpu = smp_processor_id();
   1.123 @@ -326,19 +268,8 @@ asmlinkage void smp_invalidate_interrupt
   1.124      if (!test_bit(cpu, &flush_cpumask))
   1.125          return;
   1.126  
   1.127 -#if 0		 
   1.128 -    if (flush_mm == cpu_tlbstate[cpu].active_mm) {
   1.129 -        if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
   1.130 -            if (flush_va == FLUSH_ALL)
   1.131 -#endif
   1.132 -                local_flush_tlb();
   1.133 -#if 0
   1.134 -            else
   1.135 -                __flush_tlb_one(flush_va);
   1.136 -        } else
   1.137 -            leave_mm(cpu);
   1.138 -    }
   1.139 -#endif
   1.140 +    local_flush_tlb();
   1.141 +
   1.142      ack_APIC_irq();
   1.143      clear_bit(cpu, &flush_cpumask);
   1.144  }
   1.145 @@ -354,11 +285,7 @@ void flush_tlb_others(unsigned long cpum
   1.146  	
   1.147  static inline void do_flush_tlb_all_local(void)
   1.148  {
   1.149 -    unsigned long cpu = smp_processor_id();
   1.150 -
   1.151      __flush_tlb_all();
   1.152 -    if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
   1.153 -        leave_mm(cpu);
   1.154  }
   1.155  
   1.156  static void flush_tlb_all_ipi(void* info)
     2.1 --- a/xen/common/domain.c	Wed Oct 08 13:42:44 2003 +0000
     2.2 +++ b/xen/common/domain.c	Wed Oct 08 14:15:08 2003 +0000
     2.3 @@ -71,7 +71,6 @@ struct task_struct *do_newdomain(unsigne
     2.4      SET_GDT_ADDRESS(p, DEFAULT_GDT_ADDRESS);
     2.5  
     2.6      p->addr_limit = USER_DS;
     2.7 -    p->active_mm  = &p->mm;
     2.8  
     2.9      /*
    2.10       * We're basically forcing default RPLs to 1, so that our "what privilege
     3.1 --- a/xen/common/schedule.c	Wed Oct 08 13:42:44 2003 +0000
     3.2 +++ b/xen/common/schedule.c	Wed Oct 08 14:15:08 2003 +0000
     3.3 @@ -470,7 +470,6 @@ asmlinkage void __enter_scheduler(void)
     3.4      if ( unlikely(prev == next) )
     3.5      {
     3.6          /* We won't go through the normal tail, so do this by hand */
     3.7 -        prev->policy &= ~SCHED_YIELD;
     3.8          update_dom_time(prev->shared_info);
     3.9          return;
    3.10      }
    3.11 @@ -491,8 +490,7 @@ asmlinkage void __enter_scheduler(void)
    3.12  
    3.13      switch_to(prev, next);
    3.14      
    3.15 -    prev->policy &= ~SCHED_YIELD;
    3.16 -    if ( prev->state == TASK_DYING ) 
    3.17 +    if ( unlikely(prev->state == TASK_DYING) ) 
    3.18          put_task_struct(prev);
    3.19  
    3.20      update_dom_time(next->shared_info);
     4.1 --- a/xen/common/timer.c	Wed Oct 08 13:42:44 2003 +0000
     4.2 +++ b/xen/common/timer.c	Wed Oct 08 14:15:08 2003 +0000
     4.3 @@ -506,37 +506,6 @@ static void update_wall_time(unsigned lo
     4.4  	}
     4.5  }
     4.6  
     4.7 -static inline void do_process_times(struct task_struct *p,
     4.8 -	unsigned long user, unsigned long system)
     4.9 -{
    4.10 -}
    4.11 -
    4.12 -
    4.13 -void update_one_process(struct task_struct *p, unsigned long user,
    4.14 -			unsigned long system, int cpu)
    4.15 -{
    4.16 -	do_process_times(p, user, system);
    4.17 -}	
    4.18 -
    4.19 -/*
    4.20 - * Called from the timer interrupt handler to charge one tick to the current 
    4.21 - * process.  user_tick is 1 if the tick is user time, 0 for system.
    4.22 - */
    4.23 -void update_process_times(int user_tick)
    4.24 -{
    4.25 -    struct task_struct *p = current;
    4.26 -    int cpu = smp_processor_id(), system = user_tick ^ 1;
    4.27 -    
    4.28 -    update_one_process(p, user_tick, system, cpu);
    4.29 -    
    4.30 -    if ( --p->counter <= 0 )
    4.31 -    {
    4.32 -        p->counter = 0;
    4.33 -        set_bit(_HYP_EVENT_NEED_RESCHED, &p->hyp_events);
    4.34 -    }
    4.35 -}
    4.36 -
    4.37 -
    4.38  /* jiffies at the most recent update of wall time */
    4.39  unsigned long wall_jiffies;
    4.40  
    4.41 @@ -580,12 +549,8 @@ void timer_bh(void)
    4.42  
    4.43  void do_timer(struct pt_regs *regs)
    4.44  {
    4.45 -
    4.46      (*(unsigned long *)&jiffies)++;
    4.47  
    4.48 -    if ( !using_apic_timer )
    4.49 -        update_process_times(user_mode(regs));
    4.50 -
    4.51      mark_bh(TIMER_BH);
    4.52      if (TQ_ACTIVE(tq_timer))
    4.53          mark_bh(TQUEUE_BH);
     5.1 --- a/xen/include/asm-i386/pgalloc.h	Wed Oct 08 13:42:44 2003 +0000
     5.2 +++ b/xen/include/asm-i386/pgalloc.h	Wed Oct 08 14:15:08 2003 +0000
     5.3 @@ -70,17 +70,6 @@ static inline void flush_tlb_cpu(unsigne
     5.4          flush_tlb_others(1<<cpu);
     5.5  }
     5.6  
     5.7 -#define TLBSTATE_OK	1
     5.8 -#define TLBSTATE_LAZY	2
     5.9 -
    5.10 -struct tlb_state
    5.11 -{
    5.12 -	struct mm_struct *active_mm;
    5.13 -	int state;
    5.14 -};
    5.15 -extern struct tlb_state cpu_tlbstate[NR_CPUS];
    5.16 -
    5.17 -
    5.18  #endif
    5.19  
    5.20  static inline void flush_tlb_pgtables(struct mm_struct *mm,
     6.1 --- a/xen/include/xeno/sched.h	Wed Oct 08 13:42:44 2003 +0000
     6.2 +++ b/xen/include/xeno/sched.h	Wed Oct 08 14:15:08 2003 +0000
     6.3 @@ -26,7 +26,6 @@ extern rwlock_t tasklist_lock;
     6.4  #include <xeno/spinlock.h>
     6.5  
     6.6  struct mm_struct {
     6.7 -    unsigned long cpu_vm_mask;
     6.8      /*
     6.9       * Every domain has a L1 pagetable of its own. Per-domain mappings
    6.10       * are put in this table (eg. the current GDT is mapped here).
    6.11 @@ -48,7 +47,6 @@ struct mm_struct {
    6.12  extern struct mm_struct init_mm;
    6.13  #define IDLE0_MM                                                    \
    6.14  {                                                                   \
    6.15 -    cpu_vm_mask: 0,                                                 \
    6.16      perdomain_pt: 0,                                                \
    6.17      pagetable:   mk_pagetable(__pa(idle_pg_table))                  \
    6.18  }
    6.19 @@ -105,13 +103,9 @@ struct task_struct
    6.20      unsigned int max_pages;     /* max number of pages that can be possesed */
    6.21  
    6.22      /* scheduling */
    6.23 -    struct list_head run_list;      /* the run list  */
    6.24 +    struct list_head run_list;
    6.25      int              has_cpu;
    6.26 -    int              policy;
    6.27 -    int              counter;
    6.28      
    6.29 -    struct ac_timer blt;            /* blocked timeout */
    6.30 -
    6.31      s_time_t lastschd;              /* time this domain was last scheduled */
    6.32      s_time_t cpu_time;              /* total CPU time received till now */
    6.33      s_time_t wokenup;               /* time domain got woken up */
    6.34 @@ -154,12 +148,6 @@ struct task_struct
    6.35  
    6.36      char name[MAX_DOMAIN_NAME];
    6.37  
    6.38 -    /*
    6.39 -     * active_mm stays for now. It's entangled in the tricky TLB flushing
    6.40 -     * stuff which I haven't addressed yet. It stays until I'm man enough
    6.41 -     * to venture in.
    6.42 -     */
    6.43 -    struct mm_struct *active_mm;
    6.44      struct thread_struct thread;
    6.45      struct task_struct *prev_task, *next_task, *next_hash;
    6.46      
    6.47 @@ -190,8 +178,6 @@ struct task_struct
    6.48  #define TASK_SUSPENDED          8
    6.49  #define TASK_DYING              16
    6.50  
    6.51 -#define SCHED_YIELD             0x10
    6.52 -
    6.53  #include <asm/uaccess.h> /* for KERNEL_DS */
    6.54  
    6.55  #define IDLE0_TASK(_t)           \
    6.56 @@ -204,7 +190,6 @@ struct task_struct
    6.57      avt:         0xffffffff,     \
    6.58      mm:          IDLE0_MM,       \
    6.59      addr_limit:  KERNEL_DS,      \
    6.60 -    active_mm:   &idle0_task.mm, \
    6.61      thread:      INIT_THREAD,    \
    6.62      prev_task:   &(_t),          \
    6.63      next_task:   &(_t)           \