ia64/xen-unstable

changeset 446:553dadf99bbc

bitkeeper revision 1.227 (3ec23adaE9CVdAKPJKPvT1rqaIUvLA)

processor.h, xen_block.c, schedule.c, network.c, domain.c, dom0_ops.c:
Renamed free_task_struct to the more descriptive put_task_struct. Fixed several places where put_task_struct wasn't being called after 'find_domain_by_id'.
author kaf24@scramble.cl.cam.ac.uk
date Wed May 14 12:47:22 2003 +0000 (2003-05-14)
parents 53f6ba7cee5d
children 7aea3ea7b542
files xen/common/dom0_ops.c xen/common/domain.c xen/common/network.c xen/common/schedule.c xen/drivers/block/xen_block.c xen/include/asm-i386/processor.h
line diff
     1.1 --- a/xen/common/dom0_ops.c	Tue May 13 12:04:22 2003 +0000
     1.2 +++ b/xen/common/dom0_ops.c	Wed May 14 12:47:22 2003 +0000
     1.3 @@ -31,7 +31,7 @@ static unsigned int get_domnr(void)
     1.4          domnr = (domnr+1) & ((1<<20)-1);
     1.5          if ( (p = find_domain_by_id(domnr)) == NULL )
     1.6              return domnr;
     1.7 -        free_task_struct(p);
     1.8 +        put_task_struct(p);
     1.9      }
    1.10  
    1.11      return 0;
    1.12 @@ -79,10 +79,9 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    1.13      case DOM0_BUILDDOMAIN:
    1.14      {
    1.15          struct task_struct * p = find_domain_by_id(op.u.meminfo.domain);
    1.16 -        if ( (ret = final_setup_guestos(p, &op.u.meminfo)) != 0 )
    1.17 -            break;
    1.18 -        ret = p->domain;
    1.19 -        free_task_struct(p);
    1.20 +        if ( (ret = final_setup_guestos(p, &op.u.meminfo)) == 0 )
    1.21 +            ret = p->domain;
    1.22 +        put_task_struct(p);
    1.23      }
    1.24      break;
    1.25  
    1.26 @@ -90,18 +89,19 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    1.27      {
    1.28          struct task_struct * p = find_domain_by_id(op.u.meminfo.domain);
    1.29          ret = -EINVAL;
    1.30 -        if ( (p == NULL) || !(p->flags & PF_CONSTRUCTED) )
    1.31 -            break;
    1.32 -        wake_up(p);
    1.33 -        reschedule(p);
    1.34 -        ret = p->domain;
    1.35 -        free_task_struct(p);
    1.36 +        if ( (p != NULL) && (p->flags & PF_CONSTRUCTED) )
    1.37 +        {
    1.38 +            wake_up(p);
    1.39 +            reschedule(p);
    1.40 +            ret = p->domain;
    1.41 +        }
    1.42 +        put_task_struct(p);
    1.43      }
    1.44      break;
    1.45  
    1.46      case DOM0_STOPDOMAIN:
    1.47      {
    1.48 -      ret = stop_other_domain (op.u.meminfo.domain);
    1.49 +        ret = stop_other_domain (op.u.meminfo.domain);
    1.50      }
    1.51      break;
    1.52  
    1.53 @@ -123,8 +123,8 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    1.54              goto exit_create;
    1.55  
    1.56  	if (op.u.newdomain.name[0]) {
    1.57 -	  strncpy (p -> name, op.u.newdomain.name, MAX_DOMAIN_NAME);
    1.58 -	  p -> name[MAX_DOMAIN_NAME - 1] = 0;
    1.59 +            strncpy (p -> name, op.u.newdomain.name, MAX_DOMAIN_NAME);
    1.60 +            p -> name[MAX_DOMAIN_NAME - 1] = 0;
    1.61  	}
    1.62  
    1.63          ret = alloc_new_dom_mem(p, op.u.newdomain.memory_kb);
    1.64 @@ -180,7 +180,6 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    1.65          unsigned long  warpl   = op.u.adjustdom.warpl;
    1.66          unsigned long  warpu   = op.u.adjustdom.warpu;
    1.67          
    1.68 -
    1.69          if ( dom == IDLE_DOMAIN_ID )
    1.70          {
    1.71              ret = -EPERM;
    1.72 @@ -211,34 +210,35 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    1.73  
    1.74      case DOM0_GETDOMAININFO:
    1.75      { 
    1.76 -      struct task_struct *p;
    1.77 -      u_long flags;
    1.78 +        struct task_struct *p;
    1.79 +        u_long flags;
    1.80  
    1.81 -      p = idle0_task.next_task;
    1.82 -      read_lock_irqsave (&tasklist_lock, flags);
    1.83 -      do {
    1.84 -        if ((!is_idle_task (p)) && (p -> domain >= op.u.getdominfo.domain)) {
    1.85 -          break;
    1.86 -        }
    1.87 -      } while ((p = p -> next_task) != &idle0_task);
    1.88 +        p = idle0_task.next_task;
    1.89 +        read_lock_irqsave (&tasklist_lock, flags);
    1.90 +        do {
    1.91 +            if ((!is_idle_task (p)) && (p -> domain >= op.u.getdominfo.domain))
    1.92 +                break;
    1.93 +        } while ((p = p -> next_task) != &idle0_task);
    1.94  
    1.95 -      if (p == &idle0_task) {
    1.96          ret = -ESRCH;
    1.97 -      } else {
    1.98 -        op.u.getdominfo.domain = p -> domain;
    1.99 -        strcpy (op.u.getdominfo.name, p -> name);
   1.100 -        op.u.getdominfo.processor = p -> processor;
   1.101 -        op.u.getdominfo.has_cpu = p -> has_cpu;
   1.102 -        op.u.getdominfo.state = p -> state;
   1.103 -        op.u.getdominfo.hyp_events = p -> hyp_events;
   1.104 -        op.u.getdominfo.mcu_advance = p -> mcu_advance;
   1.105 -        op.u.getdominfo.pg_head = list_entry(p->pg_head.next,
   1.106 -                                             struct pfn_info, list) - frame_table;
   1.107 -        op.u.getdominfo.tot_pages = p -> tot_pages;
   1.108 -      }
   1.109 -      read_unlock_irqrestore (&tasklist_lock, flags);
   1.110 -      copy_to_user(u_dom0_op, &op, sizeof(op));
   1.111 -      break;
   1.112 +        if ( p != &idle0_task ) 
   1.113 +        {
   1.114 +            op.u.getdominfo.domain      = p->domain;
   1.115 +            strcpy (op.u.getdominfo.name, p->name);
   1.116 +            op.u.getdominfo.processor   = p->processor;
   1.117 +            op.u.getdominfo.has_cpu     = p->has_cpu;
   1.118 +            op.u.getdominfo.state       = p->state;
   1.119 +            op.u.getdominfo.hyp_events  = p->hyp_events;
   1.120 +            op.u.getdominfo.mcu_advance = p->mcu_advance;
   1.121 +            op.u.getdominfo.pg_head     = 
   1.122 +                list_entry(p->pg_head.next, struct pfn_info, list) -
   1.123 +                frame_table;
   1.124 +            op.u.getdominfo.tot_pages   = p->tot_pages;
   1.125 +        }
   1.126 +
   1.127 +        read_unlock_irqrestore(&tasklist_lock, flags);
   1.128 +        copy_to_user(u_dom0_op, &op, sizeof(op));
   1.129 +        break;
   1.130      }
   1.131  
   1.132      default:
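
The dom0_ops.c hunks above all follow the same shape: find_domain_by_id() hands back a task_struct with its reference count raised, so every exit path must drop that reference with put_task_struct(). Below is a minimal, stand-alone sketch of that lookup/use/release pattern. The names mirror the Xen code, but the domain table, flag value and stubbed-out wake_up()/reschedule() calls are simplified stand-ins for illustration only, not the hypervisor's real data structures.

/* Minimal userspace model of the lookup/use/release discipline enforced
 * above. Names mirror the Xen code; the table and helpers are stand-ins. */
#include <errno.h>
#include <stdio.h>

#define PF_CONSTRUCTED 0x1          /* illustrative flag value */

struct task_struct {
    unsigned int  domain;
    unsigned long flags;
    int           refcnt;
};

static struct task_struct dom_table[] = {
    { .domain = 1, .flags = PF_CONSTRUCTED, .refcnt = 1 },
    { .domain = 2, .flags = 0,              .refcnt = 1 },
};

/* The lookup takes a reference; the caller owns it and must drop it. */
static struct task_struct *find_domain_by_id(unsigned int dom)
{
    for (size_t i = 0; i < sizeof dom_table / sizeof dom_table[0]; i++)
        if (dom_table[i].domain == dom) {
            dom_table[i].refcnt++;
            return &dom_table[i];
        }
    return NULL;
}

static void put_task_struct(struct task_struct *p)
{
    if (--p->refcnt == 0)
        printf("domain %u released\n", p->domain);
}

/* Shaped like the reworked DOM0_STARTDOMAIN case: the "not constructed"
 * check no longer breaks out early, so the reference taken by the lookup
 * is dropped on every path. (A missing domain returns -ESRCH here just to
 * keep the sketch simple.) */
static long start_domain(unsigned int dom)
{
    struct task_struct *p = find_domain_by_id(dom);
    long ret = -EINVAL;

    if (p == NULL)
        return -ESRCH;

    if (p->flags & PF_CONSTRUCTED) {
        /* wake_up(p); reschedule(p); in the real code */
        ret = p->domain;
    }
    put_task_struct(p);             /* always pairs with find_domain_by_id() */
    return ret;
}

int main(void)
{
    printf("start dom 1 -> %ld\n", start_domain(1));
    printf("start dom 2 -> %ld\n", start_domain(2));
    printf("start dom 9 -> %ld\n", start_domain(9));
    return 0;
}
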
     2.1 --- a/xen/common/domain.c	Tue May 13 12:04:22 2003 +0000
     2.2 +++ b/xen/common/domain.c	Wed May 14 12:47:22 2003 +0000
     2.3 @@ -139,7 +139,7 @@ void __kill_domain(struct task_struct *p
     2.4      }
     2.5      else
     2.6      {
     2.7 -        free_task_struct(p);
     2.8 +        put_task_struct(p);
     2.9      }
    2.10  }
    2.11  
    2.12 @@ -173,32 +173,33 @@ long kill_other_domain(unsigned int dom,
    2.13          guest_event_notify(cpu_mask);
    2.14      }
    2.15  
    2.16 -    free_task_struct(p);
    2.17 +    put_task_struct(p);
    2.18      return 0;
    2.19  }
    2.20  
    2.21  void stop_domain(void)
    2.22  {
    2.23 -  current -> state = TASK_SUSPENDED;
    2.24 -  clear_bit(_HYP_EVENT_STOP, &(current->hyp_events));
    2.25 -  schedule ();
    2.26 +    current -> state = TASK_SUSPENDED;
    2.27 +    clear_bit(_HYP_EVENT_STOP, &(current->hyp_events));
    2.28 +    schedule ();
    2.29  }
    2.30  
    2.31  long stop_other_domain(unsigned int dom)
    2.32  {
    2.33 -  unsigned long cpu_mask;
    2.34 -  struct task_struct *p;
    2.35 -
    2.36 -  p = find_domain_by_id (dom);
    2.37 -  if ( p == NULL) return -ESRCH;
    2.38 -
    2.39 -  if ( p -> state != TASK_SUSPENDED )
    2.40 +    unsigned long cpu_mask;
    2.41 +    struct task_struct *p;
    2.42 +    
    2.43 +    p = find_domain_by_id (dom);
    2.44 +    if ( p == NULL) return -ESRCH;
    2.45 +    
    2.46 +    if ( p->state != TASK_SUSPENDED )
    2.47      {
    2.48 -      cpu_mask = mark_hyp_event(p, _HYP_EVENT_STOP);
    2.49 -      hyp_event_notify(cpu_mask);
    2.50 +        cpu_mask = mark_hyp_event(p, _HYP_EVENT_STOP);
    2.51 +        hyp_event_notify(cpu_mask);
    2.52      }
    2.53 -
    2.54 -  return 0;
    2.55 +    
    2.56 +    put_task_struct(p);
    2.57 +    return 0;
    2.58  }
    2.59  
    2.60  unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
     3.1 --- a/xen/common/network.c	Tue May 13 12:04:22 2003 +0000
     3.2 +++ b/xen/common/network.c	Wed May 14 12:47:22 2003 +0000
     3.3 @@ -149,7 +149,7 @@ net_vif_t *create_net_vif(int domain)
     3.4      if ( new_vif != NULL )
     3.5          kmem_cache_free(net_vif_cache, new_vif);
     3.6      if ( p != NULL )
     3.7 -        free_task_struct(p);
     3.8 +        put_task_struct(p);
     3.9      return NULL;
    3.10  }
    3.11  
    3.12 @@ -190,7 +190,7 @@ void destroy_net_vif(net_vif_t *vif)
    3.13      spin_unlock_irqrestore(&p->page_lock, flags);
    3.14  
    3.15      kmem_cache_free(net_vif_cache, vif);
    3.16 -    free_task_struct(p);
    3.17 +    put_task_struct(p);
    3.18  }
    3.19  
    3.20  void unlink_net_vif(net_vif_t *vif)
    3.21 @@ -232,7 +232,7 @@ void vif_query(vif_query_t *vq)
    3.22  
    3.23      copy_to_user(vq->buf, buf, strlen(buf) + 1);
    3.24      
    3.25 -    free_task_struct(p);
    3.26 +    put_task_struct(p);
    3.27  }
    3.28          
    3.29  /* ----[ Net Rule Functions ]-----------------------------------------------*/
     4.1 --- a/xen/common/schedule.c	Tue May 13 12:04:22 2003 +0000
     4.2 +++ b/xen/common/schedule.c	Wed May 14 12:47:22 2003 +0000
     4.3 @@ -477,7 +477,7 @@ asmlinkage void schedule(void)
     4.4      
     4.5      prev->policy &= ~SCHED_YIELD;
     4.6      if ( prev->state == TASK_DYING ) 
     4.7 -        free_task_struct(prev);
     4.8 +        put_task_struct(prev);
     4.9  
    4.10   same_process:
    4.11      /* update the domains notion of time  */
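
One way to read the schedule.c hunk: teardown can drop its own reference while the dying domain is still the running task, so the put_task_struct(prev) above, issued once the scheduler has switched away from a TASK_DYING task, is what finally triggers release_task(). A toy model of that hand-off, with simplified names and no real scheduler, might look like this:

/* Toy model of the deferred release: the CPU running a dying task holds a
 * reference until schedule() switches away, so the scheduler's put can be
 * the one that frees the task. Simplified names; not the real Xen code. */
#include <stdio.h>
#include <stdlib.h>

enum state { TASK_RUNNING, TASK_DYING };

struct task_struct {
    int        refcnt;
    enum state state;
};

static void put_task_struct(struct task_struct *p)
{
    if (--p->refcnt == 0) {
        printf("release_task()\n");
        free(p);
    }
}

int main(void)
{
    struct task_struct *prev = malloc(sizeof *prev);
    if (prev == NULL)
        return 1;
    prev->refcnt = 2;               /* e.g. the domain list + the running CPU */
    prev->state  = TASK_RUNNING;

    /* Kill path: mark the task dying and drop one reference ... */
    prev->state = TASK_DYING;
    put_task_struct(prev);          /* 2 -> 1, nothing freed yet */

    /* ... later, schedule() has picked another task; as in the hunk above,
     * the dying prev is only released now, by the final put. */
    if (prev->state == TASK_DYING)
        put_task_struct(prev);      /* 1 -> 0, release_task() runs */

    return 0;
}
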
     5.1 --- a/xen/drivers/block/xen_block.c	Tue May 13 12:04:22 2003 +0000
     5.2 +++ b/xen/drivers/block/xen_block.c	Wed May 14 12:47:22 2003 +0000
     5.3 @@ -129,7 +129,7 @@ static void remove_from_blkdev_list(stru
     5.4      {
     5.5          list_del(&p->blkdev_list);
     5.6          p->blkdev_list.next = NULL;
     5.7 -        free_task_struct(p);
     5.8 +        put_task_struct(p);
     5.9      }
    5.10      spin_unlock_irqrestore(&io_schedule_list_lock, flags);
    5.11  }
    5.12 @@ -169,7 +169,7 @@ static void io_schedule(unsigned long un
    5.13          remove_from_blkdev_list(p);
    5.14          if ( do_block_io_op_domain(p, BATCH_PER_DOMAIN) )
    5.15              add_to_blkdev_list_tail(p);
    5.16 -        free_task_struct(p);
    5.17 +        put_task_struct(p);
    5.18      }
    5.19  
    5.20      /* Push the batch through to disc. */
    5.21 @@ -219,7 +219,7 @@ static void end_block_io_op(struct buffe
    5.22      {
    5.23          make_response(pending_req->domain, pending_req->id,
    5.24                        pending_req->operation, pending_req->status);
    5.25 -        free_task_struct(pending_req->domain);
    5.26 +        put_task_struct(pending_req->domain);
    5.27          spin_lock_irqsave(&pend_prod_lock, flags);
    5.28          pending_ring[pending_prod] = pending_req - pending_reqs;
    5.29          PENDREQ_IDX_INC(pending_prod);
    5.30 @@ -768,7 +768,7 @@ void unlink_blkdev_info(struct task_stru
    5.31      {
    5.32          list_del(&p->blkdev_list);
    5.33          p->blkdev_list.next = (void *)0xdeadbeef; /* prevent reinsertion */
    5.34 -        free_task_struct(p);
    5.35 +        put_task_struct(p);
    5.36      }
    5.37      spin_unlock_irqrestore(&io_schedule_list_lock, flags);
    5.38  }
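
In xen_block.c the reference being dropped belongs to the block I/O scheduling list itself: queueing a domain pins its task_struct (presumably via get_task_struct() in add_to_blkdev_list_tail(), which is outside this diff), and each removal path above now pairs that with put_task_struct(). A compressed model of "the queue owns a reference", with the list reduced to a flag and the locking omitted, is sketched below; it is illustrative only, not the real xen_block.c structures.

/* "The queue owns a reference": queueing pins the task, removal unpins it. */
#include <stdio.h>
#include <stdlib.h>

struct task_struct {
    int refcnt;
    int on_blkdev_list;             /* stands in for blkdev_list.next != NULL */
};

static void put_task_struct(struct task_struct *p)
{
    if (--p->refcnt == 0) {
        printf("release_task()\n");
        free(p);
    }
}

static void add_to_blkdev_list_tail(struct task_struct *p)
{
    if (p->on_blkdev_list)
        return;
    p->refcnt++;                    /* the list now holds a reference */
    p->on_blkdev_list = 1;
}

static void remove_from_blkdev_list(struct task_struct *p)
{
    if (!p->on_blkdev_list)
        return;
    p->on_blkdev_list = 0;
    put_task_struct(p);             /* drop the list's reference */
}

int main(void)
{
    struct task_struct *p = malloc(sizeof *p);
    if (p == NULL)
        return 1;
    p->refcnt = 1;                  /* creation reference */
    p->on_blkdev_list = 0;

    add_to_blkdev_list_tail(p);     /* 1 -> 2 */
    put_task_struct(p);             /* creator drops its reference: 2 -> 1 */
    remove_from_blkdev_list(p);     /* 1 -> 0, released */
    return 0;
}
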
     6.1 --- a/xen/include/asm-i386/processor.h	Tue May 13 12:04:22 2003 +0000
     6.2 +++ b/xen/include/asm-i386/processor.h	Wed May 14 12:47:22 2003 +0000
     6.3 @@ -432,7 +432,7 @@ unsigned long get_wchan(struct task_stru
     6.4  #define THREAD_SIZE (2*PAGE_SIZE)
     6.5  #define alloc_task_struct()  \
     6.6    ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
     6.7 -#define free_task_struct(_p) \
     6.8 +#define put_task_struct(_p) \
     6.9    if ( atomic_dec_and_test(&(_p)->refcnt) ) release_task(_p)
    6.10  #define get_task_struct(_p)  \
    6.11    atomic_inc(&(_p)->refcnt)
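
The processor.h hunk shows the semantics behind all the calls above: get_task_struct() bumps the reference count and put_task_struct() drops it, calling release_task() on the transition to zero. For reference, here is the same pair written as a stand-alone C11 sketch, with the put wrapped in the conventional do { ... } while (0) guard for statement-like macros; the changeset itself keeps the bare if form shown above, and release_task() here is only a stub.

/* Illustrative only: the pattern the renamed macros implement. */
#include <stdatomic.h>
#include <stdio.h>

struct task_struct {
    atomic_int refcnt;
};

static void release_task(struct task_struct *p)
{
    printf("release_task(%p)\n", (void *)p);
}

#define get_task_struct(_p) \
    atomic_fetch_add(&(_p)->refcnt, 1)

#define put_task_struct(_p)                                   \
    do {                                                      \
        if (atomic_fetch_sub(&(_p)->refcnt, 1) == 1)          \
            release_task(_p);                                 \
    } while (0)

int main(void)
{
    struct task_struct t = { .refcnt = 1 };   /* creation reference */

    get_task_struct(&t);   /* e.g. what find_domain_by_id() does internally */
    put_task_struct(&t);   /* caller is done with the lookup */
    put_task_struct(&t);   /* final put -> release_task()    */
    return 0;
}
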