ia64/xen-unstable

changeset 3532:51052c8b6456

bitkeeper revision 1.1159.212.38 (41f6537aX7dfqsdH6-jWzX24faBDtQ)

manual merge.
author kaf24@scramble.cl.cam.ac.uk
date Tue Jan 25 14:11:06 2005 +0000 (2005-01-25)
parents e90c7778ab94 dee91b44a753
children c6f1bab39d4f
files Makefile tools/Makefile xen/Makefile xen/Rules.mk xen/arch/x86/Makefile xen/arch/x86/domain.c xen/arch/x86/mpparse.c xen/arch/x86/pci-pc.c xen/arch/x86/pdb-stub.c xen/common/physdev.c xen/common/sched_atropos.c xen/common/sched_bvt.c xen/common/sched_rrobin.c xen/common/slab.c xen/drivers/pci/pci.c xen/include/xen/list.h xen/include/xen/pci.h
line diff
     1.1 --- a/Makefile	Tue Jan 25 13:05:49 2005 +0000
     1.2 +++ b/Makefile	Tue Jan 25 14:11:06 2005 +0000
     1.3 @@ -10,7 +10,7 @@ INSTALL_DIR	:= $(INSTALL) -d -m0755
     1.4  INSTALL_DATA	:= $(INSTALL) -m0644
     1.5  INSTALL_PROG	:= $(INSTALL) -m0755
     1.6  
     1.7 -KERNELS ?= linux-2.6-xen0 linux-2.6-xenU
     1.8 +KERNELS ?= linux-2.6-xen0 linux-2.6-xenU  linux-2.4-xen0 linux-2.4-xenU netbsd-2.0-xenU
     1.9  # linux-2.4-xen0 linux-2.4-xenU netbsd-2.0-xenU
    1.10  # You may use wildcards in the above e.g. KERNELS=*2.4*
    1.11  
     3.1 --- a/xen/Makefile	Tue Jan 25 13:05:49 2005 +0000
     3.2 +++ b/xen/Makefile	Tue Jan 25 14:11:06 2005 +0000
     3.3 @@ -49,9 +49,9 @@ clean:
     3.4  	$(MAKE) -C drivers
     3.5  	$(MAKE) -C arch/$(TARGET_ARCH)
     3.6  
     3.7 -# Blow away kernel.o because build info is stored statically within it.
     3.8  delete-unfresh-files:
     3.9 -	rm -f include/xen/banner.h include/xen/compile.h common/kernel.o
    3.10 +	rm -f include/xen/banner.h include/xen/compile.h
    3.11 +	$(MAKE) -C arch/$(TARGET_ARCH) delete-unfresh-files
    3.12  
    3.13  # compile.h contains dynamic build info. Rebuilt on every 'make' invocation.
    3.14  include/xen/compile.h: LANG=C
     4.1 --- a/xen/Rules.mk	Tue Jan 25 13:05:49 2005 +0000
     4.2 +++ b/xen/Rules.mk	Tue Jan 25 14:11:06 2005 +0000
     4.3 @@ -22,6 +22,7 @@ HDRS    += $(wildcard $(BASEDIR)/include
     4.4  HDRS    += $(wildcard $(BASEDIR)/include/asm-$(TARGET_ARCH)/*.h)
     4.5  HDRS    += $(wildcard $(BASEDIR)/include/asm-$(TARGET_ARCH)/$(TARGET_SUBARCH)/*.h)
     4.6  # compile.h is always regenerated, but other files shouldn't be rebuilt
     4.7 +HDRS    := $(subst $(BASEDIR)/include/xen/banner.h,,$(HDRS))
     4.8  HDRS    := $(subst $(BASEDIR)/include/xen/compile.h,,$(HDRS))
     4.9  
    4.10  C_SRCS  := $(wildcard *.c)
     5.1 --- a/xen/arch/x86/Makefile	Tue Jan 25 13:05:49 2005 +0000
     5.2 +++ b/xen/arch/x86/Makefile	Tue Jan 25 14:11:06 2005 +0000
     5.3 @@ -41,4 +41,8 @@ clean:
     5.4  	rm -f x86_32/*.o x86_32/*~ x86_32/core
     5.5  	rm -f x86_64/*.o x86_64/*~ x86_64/core
     5.6  
     5.7 -.PHONY: default clean
     5.8 +# setup.o contains bits of compile.h so it must be blown away
     5.9 +delete-unfresh-files:
    5.10 +	rm -f setup.o
    5.11 +
    5.12 +.PHONY: default clean delete-unfresh-files
     6.1 --- a/xen/arch/x86/domain.c	Tue Jan 25 13:05:49 2005 +0000
     6.2 +++ b/xen/arch/x86/domain.c	Tue Jan 25 14:11:06 2005 +0000
     6.3 @@ -206,13 +206,11 @@ void machine_halt(void)
     6.4  void dump_pageframe_info(struct domain *d)
     6.5  {
     6.6      struct pfn_info *page;
     6.7 -    struct list_head *ent;
     6.8  
     6.9      if ( d->tot_pages < 10 )
    6.10      {
    6.11 -        list_for_each ( ent, &d->page_list )
    6.12 +        list_for_each_entry ( page, &d->page_list, list )
    6.13          {
    6.14 -            page = list_entry(ent, struct pfn_info, list);
    6.15              printk("Page %08x: caf=%08x, taf=%08x\n",
    6.16                     page_to_phys(page), page->count_info,
    6.17                     page->u.inuse.type_info);
     7.1 --- a/xen/arch/x86/mpparse.c	Tue Jan 25 13:05:49 2005 +0000
     7.2 +++ b/xen/arch/x86/mpparse.c	Tue Jan 25 14:11:06 2005 +0000
     7.3 @@ -1232,7 +1232,6 @@ void __init mp_config_acpi_legacy_irqs (
     7.4  
     7.5  void __init mp_parse_prt (void)
     7.6  {
     7.7 -	struct list_head	*node = NULL;
     7.8  	struct acpi_prt_entry	*entry = NULL;
     7.9  	int			ioapic = -1;
    7.10  	int			ioapic_pin = 0;
    7.11 @@ -1245,9 +1244,7 @@ void __init mp_parse_prt (void)
    7.12  	 * Parsing through the PCI Interrupt Routing Table (PRT) and program
    7.13  	 * routing for all entries.
    7.14  	 */
    7.15 -	list_for_each(node, &acpi_prt.entries) {
    7.16 -		entry = list_entry(node, struct acpi_prt_entry, node);
    7.17 -
    7.18 +	list_for_each_entry(entry, &acpi_prt.entries, node) {
    7.19  		/* Need to get irq for dynamic entry */
    7.20  		if (entry->link.handle) {
    7.21  			irq = acpi_pci_link_get_irq(entry->link.handle, entry->link.index, &edge_level, &active_high_low);
     8.1 --- a/xen/arch/x86/pci-pc.c	Tue Jan 25 13:05:49 2005 +0000
     8.2 +++ b/xen/arch/x86/pci-pc.c	Tue Jan 25 14:11:06 2005 +0000
     8.3 @@ -1372,11 +1372,9 @@ void __devinit  pcibios_fixup_bus(struct
     8.4  
     8.5  struct pci_bus * __devinit pcibios_scan_root(int busnum)
     8.6  {
     8.7 -	struct list_head *list;
     8.8  	struct pci_bus *bus;
     8.9  
    8.10 -	list_for_each(list, &pci_root_buses) {
    8.11 -		bus = pci_bus_b(list);
    8.12 +	pci_for_each_bus(bus) {
    8.13  		if (bus->number == busnum) {
    8.14  			/* Already scanned */
    8.15  			return bus;
     9.1 --- a/xen/arch/x86/pdb-stub.c	Tue Jan 25 13:05:49 2005 +0000
     9.2 +++ b/xen/arch/x86/pdb-stub.c	Tue Jan 25 14:11:06 2005 +0000
     9.3 @@ -778,12 +778,10 @@ void pdb_bkpt_add (unsigned long cr3, un
     9.4  struct pdb_breakpoint* pdb_bkpt_search (unsigned long cr3, 
     9.5  					unsigned long address)
     9.6  {
     9.7 -    struct list_head *list_entry;
     9.8      struct pdb_breakpoint *bkpt;
     9.9  
    9.10 -    list_for_each(list_entry, &breakpoints.list)
    9.11 +    list_for_each_entry ( bkpt, &breakpoints.list, list )
    9.12      {
    9.13 -        bkpt = list_entry(list_entry, struct pdb_breakpoint, list);
    9.14  	if ( bkpt->cr3 == cr3 && bkpt->address == address )
    9.15              return bkpt;
    9.16      }
    9.17 @@ -797,12 +795,10 @@ struct pdb_breakpoint* pdb_bkpt_search (
    9.18   */
    9.19  int pdb_bkpt_remove (unsigned long cr3, unsigned long address)
    9.20  {
    9.21 -    struct list_head *list_entry;
    9.22      struct pdb_breakpoint *bkpt;
    9.23  
    9.24 -    list_for_each(list_entry, &breakpoints.list)
    9.25 +    list_for_each_entry ( bkpt, &breakpoints.list, list )
    9.26      {
    9.27 -        bkpt = list_entry(list_entry, struct pdb_breakpoint, list);
    9.28  	if ( bkpt->cr3 == cr3 && bkpt->address == address )
    9.29  	{
    9.30              list_del(&bkpt->list);
    10.1 --- a/xen/common/physdev.c	Tue Jan 25 13:05:49 2005 +0000
    10.2 +++ b/xen/common/physdev.c	Tue Jan 25 14:11:06 2005 +0000
    10.3 @@ -73,11 +73,9 @@ typedef struct _phys_dev_st {
    10.4  static phys_dev_t *find_pdev(struct domain *p, struct pci_dev *dev)
    10.5  {
    10.6      phys_dev_t *t, *res = NULL;
    10.7 -    struct list_head *tmp;
    10.8  
    10.9 -    list_for_each(tmp, &p->pcidev_list)
   10.10 +    list_for_each_entry ( t, &p->pcidev_list, node )
   10.11      {
   10.12 -        t = list_entry(tmp,  phys_dev_t, node);
   10.13          if ( dev == t->dev )
   10.14          {
   10.15              res = t;
   10.16 @@ -230,17 +228,16 @@ int physdev_pci_access_modify(
   10.17  int domain_iomem_in_pfn(struct domain *p, unsigned long pfn)
   10.18  {
   10.19      int ret = 0;
   10.20 -    struct list_head *l;
   10.21 +    phys_dev_t *phys_dev;
   10.22  
   10.23      VERBOSE_INFO("Checking if physdev-capable domain %u needs access to "
   10.24                   "pfn %08lx\n", p->id, pfn);
   10.25      
   10.26      spin_lock(&p->pcidev_lock);
   10.27  
   10.28 -    list_for_each(l, &p->pcidev_list)
   10.29 +    list_for_each_entry ( phys_dev, &p->pcidev_list, node )
   10.30      {
   10.31          int i;
   10.32 -        phys_dev_t *phys_dev = list_entry(l, phys_dev_t, node);
   10.33          struct pci_dev *pci_dev = phys_dev->dev;
   10.34  
   10.35          for ( i = 0; (i < DEVICE_COUNT_RESOURCE) && (ret == 0); i++ )
   10.36 @@ -635,15 +632,11 @@ static long pci_cfgreg_write(int bus, in
   10.37  static long pci_probe_root_buses(u32 *busmask)
   10.38  {
   10.39      phys_dev_t *pdev;
   10.40 -    struct list_head *tmp;
   10.41  
   10.42      memset(busmask, 0, 256/8);
   10.43  
   10.44 -    list_for_each ( tmp, &current->domain->pcidev_list )
   10.45 -    {
   10.46 -        pdev = list_entry(tmp, phys_dev_t, node);
   10.47 +    list_for_each_entry ( pdev, &current->pcidev_list, node )
   10.48          set_bit(pdev->dev->bus->number, busmask);
   10.49 -    }
   10.50  
   10.51      return 0;
   10.52  }
    11.1 --- a/xen/common/sched_atropos.c	Tue Jan 25 13:05:49 2005 +0000
    11.2 +++ b/xen/common/sched_atropos.c	Tue Jan 25 14:11:06 2005 +0000
    11.3 @@ -1,6 +1,6 @@
    11.4  /*
    11.5 - *	atropos.c
    11.6 - *	---------
    11.7 + * atropos.c
    11.8 + * ---------
    11.9   *
   11.10   * Copyright (c) 1994 University of Cambridge Computer Laboratory.
   11.11   * This is part of Nemesis; consult your contract for terms and conditions.
   11.12 @@ -98,8 +98,9 @@ static inline int __task_on_runqueue(str
   11.13  static int q_len(struct list_head *q) 
   11.14  {
   11.15      int i = 0;
   11.16 -    struct list_head *tmp;
   11.17 -    list_for_each(tmp, q) i++;
   11.18 +    struct at_dom_info *tmp;
   11.19 +    list_for_each_entry ( tmp, q, waitq )
   11.20 +        i++;
   11.21      return i;
   11.22  }
   11.23  
   11.24 @@ -129,60 +130,39 @@ static inline struct domain *waitq_el(st
   11.25   */
   11.26  static void requeue(struct domain *sdom)
   11.27  {
   11.28 -    struct at_dom_info *inf = DOM_INFO(sdom);
   11.29 -    struct list_head *prev;
   11.30 -    struct list_head *next;
   11.31 +    struct at_dom_info *i, *inf = DOM_INFO(sdom);
   11.32  
   11.33 -
   11.34 -    if(!domain_runnable(sdom)) return;
   11.35 +    if ( !domain_runnable(sdom) )
   11.36 +        return;
   11.37      
   11.38 -    if(inf->state == ATROPOS_TASK_WAIT ||
   11.39 -        inf->state == ATROPOS_TASK_UNBLOCKED)
   11.40 +    if ( (inf->state == ATROPOS_TASK_WAIT) ||
   11.41 +         (inf->state == ATROPOS_TASK_UNBLOCKED) )
   11.42      {
   11.43 -        prev = WAITQ(sdom->processor);
   11.44 -
   11.45 -        list_for_each(next, WAITQ(sdom->processor))
   11.46 +        list_for_each_entry ( i, WAITQ(sdom->processor), waitq )
   11.47          {
   11.48 -            struct at_dom_info *i = 
   11.49 -                list_entry(next, struct at_dom_info, waitq);
   11.50              if ( i->deadline > inf->deadline )
   11.51              {
   11.52 -                __list_add(&inf->waitq, prev, next);
   11.53 +                __list_add(&inf->waitq, i->waitq.prev, &i->waitq);
   11.54                  break;
   11.55              }
   11.56 -
   11.57 -            prev = next;
   11.58          }
   11.59  
   11.60 -        /* put the domain on the end of the list if it hasn't been put
   11.61 -         * elsewhere */
   11.62 -        if ( next == WAITQ(sdom->processor) )
   11.63 +        if ( &i->waitq == WAITQ(sdom->processor) )
   11.64              list_add_tail(&inf->waitq, WAITQ(sdom->processor));
   11.65      }
   11.66      else if ( domain_runnable(sdom) )
   11.67      {
   11.68 -        /* insert into ordered run queue */
   11.69 -        
   11.70 -        prev = RUNQ(sdom->processor);
   11.71 -
   11.72 -        list_for_each(next, RUNQ(sdom->processor))
   11.73 +        list_for_each_entry ( i, RUNQ(sdom->processor), run_list )
   11.74          {
   11.75 -            struct at_dom_info *p = list_entry(next, struct at_dom_info,
   11.76 -                                               run_list);
   11.77 -
   11.78 -            if( p->deadline > inf->deadline || is_idle_task(p->owner) )
   11.79 +            if ( (i->deadline > inf->deadline) || is_idle_task(i->owner) )
   11.80              {
   11.81 -                __list_add(&inf->run_list, prev, next);
   11.82 +                __list_add(&inf->run_list, i->run_list.prev, &i->run_list);
   11.83                  break;
   11.84              }
   11.85 -
   11.86 -            prev = next;
   11.87          }
   11.88  
   11.89 -        if ( next == RUNQ(sdom->processor) )
   11.90 +        if ( &i->waitq == RUNQ(sdom->processor) )
   11.91              list_add_tail(&inf->run_list, RUNQ(sdom->processor));
   11.92 -        
   11.93 -    
   11.94      }
   11.95      /* silently ignore tasks in other states like BLOCKED, DYING, STOPPED, etc
   11.96       * - they shouldn't be on any queue */
   11.97 @@ -194,7 +174,7 @@ static int at_alloc_task(struct domain *
   11.98      ASSERT(p != NULL);
   11.99      
  11.100      p->sched_priv = xmem_cache_alloc(dom_info_cache);
  11.101 -    if( p->sched_priv == NULL )
  11.102 +    if ( p->sched_priv == NULL )
  11.103          return -1;
  11.104      
  11.105      return 0;
  11.106 @@ -294,26 +274,26 @@ static void unblock(struct domain *sdom)
  11.107      {
  11.108          /* Long blocking case */
  11.109  
  11.110 -	    /* The sdom has passed its deadline since it was blocked. 
  11.111 -	       Give it its new deadline based on the latency value. */
  11.112 -	    inf->prevddln = time;
  11.113 +        /* The sdom has passed its deadline since it was blocked. 
  11.114 +           Give it its new deadline based on the latency value. */
  11.115 +        inf->prevddln = time;
  11.116  
  11.117          /* Scale the scheduling parameters as requested by the latency hint. */
  11.118 -	    inf->deadline = time + inf->latency;
  11.119 +        inf->deadline = time + inf->latency;
  11.120          inf->slice = inf->nat_slice / ( inf->nat_period / inf->latency );
  11.121          inf->period = inf->latency;
  11.122 -	    inf->remain = inf->slice;
  11.123 +        inf->remain = inf->slice;
  11.124      }
  11.125      else 
  11.126      {
  11.127          /* Short blocking case */
  11.128  
  11.129 -	    /* We leave REMAIN intact, but put this domain on the WAIT
  11.130 -	        queue marked as recently unblocked.  It will be given
  11.131 -	        priority over other domains on the wait queue until while
  11.132 -	        REMAIN>0 in a generous attempt to help it make up for its
  11.133 -	        own foolishness. */
  11.134 -	    if(inf->remain > 0)
  11.135 +        /* We leave REMAIN intact, but put this domain on the WAIT
  11.136 +           queue marked as recently unblocked.  It will be given
  11.137 +           priority over other domains on the wait queue until while
  11.138 +           REMAIN>0 in a generous attempt to help it make up for its
  11.139 +           own foolishness. */
  11.140 +        if(inf->remain > 0)
  11.141              inf->state = ATROPOS_TASK_UNBLOCKED;
  11.142          else
  11.143              inf->state = ATROPOS_TASK_WAIT;
  11.144 @@ -349,10 +329,10 @@ static void block(struct domain* sdom)
  11.145   */
  11.146  task_slice_t ksched_scheduler(s_time_t time)
  11.147  {
  11.148 -    struct domain	*cur_sdom = current;  /* Current sdom           */
  11.149 -    s_time_t     newtime;
  11.150 -    s_time_t      ranfor;	        /* How long the domain ran      */
  11.151 -    struct domain	*sdom;	        /* tmp. scheduling domain	*/
  11.152 +    struct domain *cur_sdom = current;  /* Current sdom           */
  11.153 +    s_time_t       newtime;
  11.154 +    s_time_t       ranfor;              /* How long the domain ran      */
  11.155 +    struct domain *sdom;                /* tmp. scheduling domain       */
  11.156      int cpu = cur_sdom->processor;      /* current CPU                  */
  11.157      struct at_dom_info *cur_info;
  11.158      static unsigned long waitq_rrobin = 0;
  11.159 @@ -367,7 +347,7 @@ task_slice_t ksched_scheduler(s_time_t t
  11.160      /* If we were spinning in the idle loop, there is no current
  11.161       * domain to deschedule. */
  11.162      if (is_idle_task(cur_sdom))
  11.163 -	goto deschedule_done;
  11.164 +        goto deschedule_done;
  11.165  
  11.166      /*****************************
  11.167       * 
  11.168 @@ -375,7 +355,7 @@ task_slice_t ksched_scheduler(s_time_t t
  11.169       *
  11.170       ****************************/
  11.171  
  11.172 -   /* Record the time the domain was preempted and for how long it
  11.173 +    /* Record the time the domain was preempted and for how long it
  11.174         ran.  Work out if the domain is going to be blocked to save
  11.175         some pointless queue shuffling */
  11.176      cur_sdom->lastdeschd = time;
  11.177 @@ -388,26 +368,26 @@ task_slice_t ksched_scheduler(s_time_t t
  11.178           (cur_info->state == ATROPOS_TASK_UNBLOCKED) )
  11.179      {
  11.180  
  11.181 -	    /* In this block, we are doing accounting for an sdom which has 
  11.182 -	        been running in contracted time.  Note that this could now happen
  11.183 -	        even if the domain is on the wait queue (i.e. if it blocked) */
  11.184 +        /* In this block, we are doing accounting for an sdom which has 
  11.185 +           been running in contracted time.  Note that this could now happen
  11.186 +           even if the domain is on the wait queue (i.e. if it blocked) */
  11.187  
  11.188 -	    /* Deduct guaranteed time from the domain */
  11.189 -	    cur_info->remain  -= ranfor;
  11.190 +        /* Deduct guaranteed time from the domain */
  11.191 +        cur_info->remain  -= ranfor;
  11.192  
  11.193 -	    /* If guaranteed time has run out... */
  11.194 -	    if ( cur_info->remain <= 0 )
  11.195 +        /* If guaranteed time has run out... */
  11.196 +        if ( cur_info->remain <= 0 )
  11.197          {
  11.198 -	        /* Move domain to correct position in WAIT queue */
  11.199 +            /* Move domain to correct position in WAIT queue */
  11.200              /* XXX sdom_unblocked doesn't need this since it is 
  11.201                 already in the correct place. */
  11.202 -	        cur_info->state = ATROPOS_TASK_WAIT;
  11.203 -	    }
  11.204 +            cur_info->state = ATROPOS_TASK_WAIT;
  11.205 +        }
  11.206      }
  11.207  
  11.208      requeue(cur_sdom);
  11.209  
  11.210 -deschedule_done:
  11.211 + deschedule_done:
  11.212      /*****************************
  11.213       * 
  11.214       * We have now successfully descheduled the current sdom.
  11.215 @@ -424,10 +404,10 @@ deschedule_done:
  11.216       ****************************/
  11.217      
  11.218      while(!list_empty(WAITQ(cpu)) && 
  11.219 -	    DOM_INFO(sdom = waitq_el(WAITQ(cpu)->next))->deadline <= time ) 
  11.220 +          DOM_INFO(sdom = waitq_el(WAITQ(cpu)->next))->deadline <= time ) 
  11.221      {
  11.222  
  11.223 -	    struct at_dom_info *inf = DOM_INFO(sdom);
  11.224 +        struct at_dom_info *inf = DOM_INFO(sdom);
  11.225          dequeue(sdom);
  11.226          
  11.227          if ( inf->period != inf->nat_period )
  11.228 @@ -444,22 +424,22 @@ deschedule_done:
  11.229              }
  11.230          }
  11.231  
  11.232 -	    /* Domain begins a new period and receives a slice of CPU 
  11.233 -	     * If this domain has been blocking then throw away the
  11.234 -	     * rest of it's remain - it can't be trusted */
  11.235 -	    if (inf->remain > 0) 
  11.236 -	        inf->remain = inf->slice;
  11.237 +        /* Domain begins a new period and receives a slice of CPU 
  11.238 +         * If this domain has been blocking then throw away the
  11.239 +         * rest of it's remain - it can't be trusted */
  11.240 +        if (inf->remain > 0) 
  11.241 +            inf->remain = inf->slice;
  11.242          else 
  11.243 -	        inf->remain += inf->slice;
  11.244 +            inf->remain += inf->slice;
  11.245  
  11.246 -	    inf->prevddln = inf->deadline;
  11.247 -	    inf->deadline += inf->period;
  11.248 +        inf->prevddln = inf->deadline;
  11.249 +        inf->deadline += inf->period;
  11.250  
  11.251          if ( inf->remain <= 0 )
  11.252              inf->state = ATROPOS_TASK_WAIT;
  11.253  
  11.254 -	    /* Place on the appropriate queue */
  11.255 -	    requeue(sdom);
  11.256 +        /* Place on the appropriate queue */
  11.257 +        requeue(sdom);
  11.258      }
  11.259  
  11.260      /*****************************
  11.261 @@ -484,30 +464,27 @@ deschedule_done:
  11.262       * queue */
  11.263      if (cur_sdom->id == IDLE_DOMAIN_ID && !list_empty(WAITQ(cpu)))
  11.264      {
  11.265 -        struct list_head *item;
  11.266 +        struct at_dom_info *inf;
  11.267 +
  11.268 +        /* Try running a domain on the WAIT queue - this part of the
  11.269 +           scheduler isn't particularly efficient but then again, we
  11.270 +           don't have any guaranteed domains to worry about. */
  11.271  
  11.272 -	    /* Try running a domain on the WAIT queue - this part of the
  11.273 -	        scheduler isn't particularly efficient but then again, we
  11.274 -	        don't have any guaranteed domains to worry about. */
  11.275 -	
  11.276 -	    /* See if there are any unblocked domains on the WAIT
  11.277 -	        queue who we can give preferential treatment to. */
  11.278 +        /* See if there are any unblocked domains on the WAIT
  11.279 +           queue who we can give preferential treatment to. */
  11.280          
  11.281 -        list_for_each(item, WAITQ(cpu))
  11.282 +        list_for_each_entry ( inf, WAITQ(cpu), waitq )
  11.283          {
  11.284 -            struct at_dom_info *inf =
  11.285 -                list_entry(item, struct at_dom_info, waitq);
  11.286 -
  11.287              sdom = inf->owner;
  11.288              
  11.289 -	        if (inf->state == ATROPOS_TASK_UNBLOCKED) 
  11.290 +            if (inf->state == ATROPOS_TASK_UNBLOCKED) 
  11.291              { 
  11.292 -		        cur_sdom = sdom;
  11.293 -    		    cur_info  = inf;
  11.294 -	    	    newtime  = time + inf->remain;
  11.295 -		        goto found;
  11.296 -	        }
  11.297 -	    }
  11.298 +                cur_sdom = sdom;
  11.299 +                cur_info  = inf;
  11.300 +                newtime  = time + inf->remain;
  11.301 +                goto found;
  11.302 +            }
  11.303 +        }
  11.304  
  11.305          /* init values needed to approximate round-robin for slack time */
  11.306          i = 0;
  11.307 @@ -515,14 +492,11 @@ deschedule_done:
  11.308              waitq_rrobin = 0;
  11.309          
  11.310          
  11.311 -	    /* Last chance: pick a domain on the wait queue with the XTRA
  11.312 -	        flag set.  The NEXT_OPTM field is used to cheaply achieve
  11.313 -	        an approximation of round-robin order */
  11.314 -        list_for_each(item, WAITQ(cpu))
  11.315 +        /* Last chance: pick a domain on the wait queue with the XTRA
  11.316 +           flag set.  The NEXT_OPTM field is used to cheaply achieve
  11.317 +           an approximation of round-robin order */
  11.318 +        list_for_each_entry ( inf, WAITQ(cpu), waitq )
  11.319          {
  11.320 -            struct at_dom_info *inf =
  11.321 -                list_entry(item, struct at_dom_info, waitq);
  11.322 -            
  11.323              sdom = inf->owner;
  11.324              
  11.325              if (inf->xtratime && i >= waitq_rrobin) 
  11.326 @@ -538,7 +512,7 @@ deschedule_done:
  11.327          }
  11.328      }
  11.329  
  11.330 -    found:
  11.331 + found:
  11.332      /**********************
  11.333       * 
  11.334       * We now have to work out the time when we next need to
  11.335 @@ -554,7 +528,7 @@ deschedule_done:
  11.336      /* exhausted its time, cut short the time allocation */
  11.337      if (!list_empty(WAITQ(cpu)))
  11.338      {
  11.339 -	    newtime = MIN(newtime,
  11.340 +        newtime = MIN(newtime,
  11.341                        DOM_INFO(waitq_el(WAITQ(cpu)->next))->deadline);
  11.342      }
  11.343  
  11.344 @@ -603,44 +577,44 @@ static void at_dump_runq_el(struct domai
  11.345  /* dump relevant per-cpu state for a run queue dump */
  11.346  static void at_dump_cpu_state(int cpu)
  11.347  {
  11.348 -    struct list_head *list, *queue;
  11.349 +    struct list_head *queue;
  11.350      int loop = 0;
  11.351      struct at_dom_info *d_inf;
  11.352      struct domain *d;
  11.353  
  11.354      queue = RUNQ(cpu);
  11.355      printk("\nRUNQUEUE rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
  11.356 -    (unsigned long) queue->next, (unsigned long) queue->prev);
  11.357 +           (unsigned long) queue->next, (unsigned long) queue->prev);
  11.358  
  11.359 -    list_for_each ( list, queue )
  11.360 +    list_for_each_entry ( d_inf, queue, run_list )
  11.361      {
  11.362 -        d_inf = list_entry(list, struct at_dom_info, run_list);
  11.363          d = d_inf->owner;
  11.364          printk("%3d: %d has=%c ", loop++, d->id, 
  11.365 -                                    test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
  11.366 +               test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
  11.367          at_dump_runq_el(d);
  11.368          printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
  11.369          printk("         l: %lx n: %lx  p: %lx\n",
  11.370 -                        (unsigned long)list, (unsigned long)list->next,
  11.371 -                        (unsigned long)list->prev);
  11.372 +               (unsigned long)&d_inf->run_list,
  11.373 +               (unsigned long)d_inf->run_list.next,
  11.374 +               (unsigned long)d_inf->run_list.prev);
  11.375      }
  11.376  
  11.377  
  11.378      queue = WAITQ(cpu);
  11.379      printk("\nWAITQUEUE rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
  11.380 -    (unsigned long) queue->next, (unsigned long) queue->prev);
  11.381 +           (unsigned long) queue->next, (unsigned long) queue->prev);
  11.382  
  11.383 -    list_for_each ( list, queue )
  11.384 +    list_for_each_entry ( d_inf, queue, waitq )
  11.385      {
  11.386 -        d_inf = list_entry(list, struct at_dom_info, waitq);
  11.387          d = d_inf->owner;
  11.388          printk("%3d: %d has=%c ", loop++, d->id, 
  11.389 -                                    test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
  11.390 +               test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
  11.391          at_dump_runq_el(d);
  11.392          printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
  11.393          printk("         l: %lx n: %lx  p: %lx\n",
  11.394 -                        (unsigned long)list, (unsigned long)list->next,
  11.395 -                        (unsigned long)list->prev);
  11.396 +               (unsigned long)&d_inf->waitq,
  11.397 +               (unsigned long)d_inf->waitq.next,
  11.398 +               (unsigned long)d_inf->waitq.prev);
  11.399      }
  11.400         
  11.401  }
    13.1 --- a/xen/common/sched_rrobin.c	Tue Jan 25 13:05:49 2005 +0000
    13.2 +++ b/xen/common/sched_rrobin.c	Tue Jan 25 14:11:06 2005 +0000
    13.3 @@ -187,7 +187,7 @@ static void rr_dump_domain(struct domain
    13.4  
    13.5  static void rr_dump_cpu_state(int i)
    13.6  {
    13.7 -    struct list_head *list, *queue;
    13.8 +    struct list_head *queue;
    13.9      int loop = 0;
   13.10      struct rrobin_dom_info *d_inf;
   13.11  
   13.12 @@ -199,10 +199,9 @@ static void rr_dump_cpu_state(int i)
   13.13      d_inf = list_entry(queue, struct rrobin_dom_info, run_list);
   13.14      rr_dump_domain(d_inf->domain);
   13.15   
   13.16 -    list_for_each ( list, queue )
   13.17 +    list_for_each_entry ( d_inf, queue, run_list )
   13.18      {
   13.19          printk("%3d: ",loop++);
   13.20 -        d_inf = list_entry(list, struct rrobin_dom_info, run_list);
   13.21          rr_dump_domain(d_inf->domain);
   13.22      }
   13.23  }
    14.1 --- a/xen/common/slab.c	Tue Jan 25 13:05:49 2005 +0000
    14.2 +++ b/xen/common/slab.c	Tue Jan 25 14:11:06 2005 +0000
    14.3 @@ -774,11 +774,9 @@ xmem_cache_create (const char *name, siz
    14.4      /* Need the semaphore to access the chain. */
    14.5      down(&cache_chain_sem);
    14.6      {
    14.7 -        struct list_head *p;
    14.8 +	xmem_cache_t *pc;
    14.9  
   14.10 -        list_for_each(p, &cache_chain) {
   14.11 -            xmem_cache_t *pc = list_entry(p, xmem_cache_t, next);
   14.12 -
   14.13 +        list_for_each_entry(pc, &cache_chain, next) {
   14.14              /* The name field is constant - no lock needed. */
   14.15              if (!strcmp(pc->name, name))
   14.16                  BUG();
   14.17 @@ -802,14 +800,14 @@ xmem_cache_create (const char *name, siz
   14.18   */
   14.19  static int is_chained_xmem_cache(xmem_cache_t * cachep)
   14.20  {
   14.21 -    struct list_head *p;
   14.22 +    xmem_cache_t *pc;
   14.23      int ret = 0;
   14.24      unsigned long spin_flags;
   14.25  
   14.26      /* Find the cache in the chain of caches. */
   14.27      down(&cache_chain_sem);
   14.28 -    list_for_each(p, &cache_chain) {
   14.29 -        if (p == &cachep->next) {
   14.30 +    list_for_each_entry(pc, &cache_chain, next) {
   14.31 +        if (pc == &cachep) {
   14.32              ret = 1;
   14.33              break;
   14.34          }
   14.35 @@ -1765,7 +1763,6 @@ void dump_slabinfo()
   14.36      p = &cache_cache.next;
   14.37      do {
   14.38          xmem_cache_t	*cachep;
   14.39 -        struct list_head *q;
   14.40          slab_t		*slabp;
   14.41          unsigned long	active_objs;
   14.42          unsigned long	num_objs;
   14.43 @@ -1776,22 +1773,19 @@ void dump_slabinfo()
   14.44          spin_lock_irq(&cachep->spinlock);
   14.45          active_objs = 0;
   14.46          num_slabs = 0;
   14.47 -        list_for_each(q,&cachep->slabs_full) {
   14.48 -            slabp = list_entry(q, slab_t, list);
   14.49 +        list_for_each_entry(slabp, &cachep->slabs_full, list) {
   14.50              if (slabp->inuse != cachep->num)
   14.51                  BUG();
   14.52              active_objs += cachep->num;
   14.53              active_slabs++;
   14.54          }
   14.55 -        list_for_each(q,&cachep->slabs_partial) {
   14.56 -            slabp = list_entry(q, slab_t, list);
   14.57 +        list_for_each_entry(slabp, &cachep->slabs_partial, list) {
   14.58              if (slabp->inuse == cachep->num || !slabp->inuse)
   14.59                  BUG();
   14.60              active_objs += slabp->inuse;
   14.61              active_slabs++;
   14.62          }
   14.63 -        list_for_each(q,&cachep->slabs_free) {
   14.64 -            slabp = list_entry(q, slab_t, list);
   14.65 +        list_for_each_entry(slabp, &cachep->slabs_free, list) {
   14.66              if (slabp->inuse)
   14.67                  BUG();
   14.68              num_slabs++;
    15.1 --- a/xen/drivers/pci/pci.c	Tue Jan 25 13:05:49 2005 +0000
    15.2 +++ b/xen/drivers/pci/pci.c	Tue Jan 25 14:11:06 2005 +0000
    15.3 @@ -1565,15 +1565,15 @@ static int pci_pm_resume_device(struct p
    15.4  
    15.5  static int pci_pm_save_state_bus(struct pci_bus *bus, u32 state)
    15.6  {
    15.7 -	struct list_head *list;
    15.8 +	struct pci_bus *i; struct pci_dev *d;
    15.9  	int error = 0;
   15.10  
   15.11 -	list_for_each(list, &bus->children) {
   15.12 -		error = pci_pm_save_state_bus(pci_bus_b(list),state);
   15.13 +	list_for_each_entry(i, &bus->children, node) {
   15.14 +		error = pci_pm_save_state_bus(i, state);
   15.15  		if (error) return error;
   15.16  	}
   15.17 -	list_for_each(list, &bus->devices) {
   15.18 -		error = pci_pm_save_state_device(pci_dev_b(list),state);
   15.19 +	list_for_each_entry(d, &bus->devices, bus_list) {
   15.20 +		error = pci_pm_save_state_device(d, state);
   15.21  		if (error) return error;
   15.22  	}
   15.23  	return 0;
   15.24 @@ -1581,40 +1581,38 @@ static int pci_pm_save_state_bus(struct 
   15.25  
   15.26  static int pci_pm_suspend_bus(struct pci_bus *bus, u32 state)
   15.27  {
   15.28 -	struct list_head *list;
   15.29 +	struct pci_bus *i; struct pci_dev *d;
   15.30  
   15.31  	/* Walk the bus children list */
   15.32 -	list_for_each(list, &bus->children) 
   15.33 -		pci_pm_suspend_bus(pci_bus_b(list),state);
   15.34 +	list_for_each_entry(i, &bus->children, node) 
   15.35 +		pci_pm_suspend_bus(i, state);
   15.36  
   15.37  	/* Walk the device children list */
   15.38 -	list_for_each(list, &bus->devices)
   15.39 -		pci_pm_suspend_device(pci_dev_b(list),state);
   15.40 +	list_for_each_entry(d, &bus->devices, bus_list)
   15.41 +		pci_pm_suspend_device(d, state);
   15.42  	return 0;
   15.43  }
   15.44  
   15.45  static int pci_pm_resume_bus(struct pci_bus *bus)
   15.46  {
   15.47 -	struct list_head *list;
   15.48 +	struct pci_bus *i; struct pci_dev *d;
   15.49  
   15.50  	/* Walk the device children list */
   15.51 -	list_for_each(list, &bus->devices)
   15.52 -		pci_pm_resume_device(pci_dev_b(list));
   15.53 +	list_for_each_entry(d, &bus->devices, bus_list)
   15.54 +		pci_pm_resume_device(d);
   15.55  
   15.56  	/* And then walk the bus children */
   15.57 -	list_for_each(list, &bus->children)
   15.58 -		pci_pm_resume_bus(pci_bus_b(list));
   15.59 +	list_for_each_entry(i, &bus->children, node)
   15.60 +		pci_pm_resume_bus(i);
   15.63  
   15.64  static int pci_pm_save_state(u32 state)
   15.65  {
   15.66 -	struct list_head *list;
   15.67  	struct pci_bus *bus;
   15.68  	int error = 0;
   15.69  
   15.70 -	list_for_each(list, &pci_root_buses) {
   15.71 -		bus = pci_bus_b(list);
   15.72 +	list_for_each_entry(bus, &pci_root_buses, node) {
   15.73  		error = pci_pm_save_state_bus(bus,state);
   15.74  		if (!error)
   15.75  			error = pci_pm_save_state_device(bus->self,state);
   15.76 @@ -1624,11 +1622,9 @@ static int pci_pm_save_state(u32 state)
   15.77  
   15.78  static int pci_pm_suspend(u32 state)
   15.79  {
   15.80 -	struct list_head *list;
   15.81  	struct pci_bus *bus;
   15.82  
   15.83 -	list_for_each(list, &pci_root_buses) {
   15.84 -		bus = pci_bus_b(list);
   15.85 +	list_for_each_entry(bus, &pci_root_buses, node) {
   15.86  		pci_pm_suspend_bus(bus,state);
   15.87  		pci_pm_suspend_device(bus->self,state);
   15.88  	}
   15.89 @@ -1637,11 +1633,9 @@ static int pci_pm_suspend(u32 state)
   15.90  
   15.91  int pci_pm_resume(void)
   15.92  {
   15.93 -	struct list_head *list;
   15.94  	struct pci_bus *bus;
   15.95  
   15.96 -	list_for_each(list, &pci_root_buses) {
   15.97 -		bus = pci_bus_b(list);
   15.98 +	list_for_each_entry(bus, &pci_root_buses, node) {
   15.99  		pci_pm_resume_device(bus->self);
  15.100  		pci_pm_resume_bus(bus);
  15.101  	}
    16.1 --- a/xen/include/xen/list.h	Tue Jan 25 13:05:49 2005 +0000
    16.2 +++ b/xen/include/xen/list.h	Tue Jan 25 14:11:06 2005 +0000
    16.3 @@ -161,8 +161,6 @@ static __inline__ void list_splice(struc
    16.4  	for (pos = (head)->next, n = pos->next; pos != (head); \
    16.5  		pos = n, n = pos->next)
    16.6  
    16.7 -#endif
    16.8 -
    16.9  /**
   16.10   * list_for_each_entry	-	iterate over list of given type
   16.11   * @pos:	the type * to use as a loop counter.
   16.12 @@ -175,3 +173,6 @@ static __inline__ void list_splice(struc
   16.13  	     &pos->member != (head); 					\
   16.14  	     pos = list_entry(pos->member.next, typeof(*pos), member),	\
   16.15  		     prefetch(pos->member.next))
   16.16 +
   16.17 +#endif /* _LINUX_LIST_H */
   16.18 +
    17.1 --- a/xen/include/xen/pci.h	Tue Jan 25 13:05:49 2005 +0000
    17.2 +++ b/xen/include/xen/pci.h	Tue Jan 25 14:11:06 2005 +0000
    17.3 @@ -358,7 +358,7 @@ enum pci_mmap_state {
    17.4  	for(dev = pci_dev_g(pci_devices.prev); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.prev))
    17.5  
    17.6  #define pci_for_each_bus(bus) \
    17.7 -for(bus = pci_bus_b(pci_root_buses.next); bus != pci_bus_b(&pci_root_buses); bus = pci_bus_b(bus->node.next))
    17.8 +	list_for_each_entry(bus, &pci_root_buses, node)
    17.9  
   17.10  /*
   17.11   * The pci_dev structure is used to describe both PCI and ISAPnP devices.