ia64/xen-unstable
changeset 1123:b5f640dd2086
bitkeeper revision 1.746.1.1 (403c8c65qu1dJWHVajQ4wyGMSQjo1Q)
Many files:
Clean up task list in Xen. Fix a bug in xentrace for some glibc versions.
author    kaf24@scramble.cl.cam.ac.uk
date      Wed Feb 25 11:52:05 2004 +0000 (2004-02-25)
parents   205bdb40d5e6
children  edb575655020
files     tools/xentrace/xentrace.c xen/arch/i386/pdb-stub.c xen/common/dom0_ops.c xen/common/domain.c xen/common/keyhandler.c xen/common/schedule.c xen/drivers/block/xen_block.c xen/drivers/block/xen_vbd.c xen/include/xeno/sched.h
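
Taken together, the diffs below replace Xen's circular, doubly-linked task list (threaded through prev_task/next_task and anchored at idle0_task) with a singly-linked, NULL-terminated task_list kept in ascending dom_id order, walked via a new for_each_domain() macro. Idle tasks are no longer kept on the list, so call sites can drop their is_idle_task() checks. A side-by-side sketch of the two traversal idioms, using the changeset's own identifiers (do_something() stands in for each call site's loop body):

    struct task_struct *p;

    /* Before: circular list, idle tasks included. */
    p = &idle0_task;
    while ( (p = p->next_task) != &idle0_task )
        if ( !is_idle_task(p) )
            do_something(p);

    /* After: NULL-terminated, dom_id-ordered list of real domains only. */
    for_each_domain ( p )
        do_something(p);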
line diff
--- a/tools/xentrace/xentrace.c	Tue Feb 24 15:18:52 2004 +0000
+++ b/tools/xentrace/xentrace.c	Wed Feb 25 11:52:05 2004 +0000
@@ -407,8 +407,7 @@ int main(int argc, char **argv)
 {
     int ret;
     FILE *logfile = stdout;
-
-    const struct sigaction act = { .sa_handler = close_handler };
+    struct sigaction act;
 
     opts.outfile = 0;
     opts.num_cpus = 1;
@@ -421,6 +420,7 @@ int main(int argc, char **argv)
     logfile = fopen(opts.outfile, "w");
 
     /* ensure that if we get a signal, we'll do cleanup, then exit */
+    act.sa_handler = close_handler;
     sigaction(SIGHUP, &act, 0);
     sigaction(SIGTERM, &act, 0);
     sigaction(SIGINT, &act, 0);
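
The commit message says only that the old code broke on "some glibc versions"; a plausible cause is that glibc can define sa_handler as a macro naming a member of a nested union inside struct sigaction, which makes the designated initialiser above fail to compile. A minimal sketch of a more defensive variant follows; the zeroing and sigemptyset() are additions for illustration (the patch itself only assigns sa_handler, leaving sa_mask and sa_flags uninitialised), and install_close_handler() is a hypothetical helper name:

    #include <signal.h>
    #include <string.h>
    #include <unistd.h>

    static void close_handler(int sig)   /* stand-in for xentrace's handler */
    {
        (void)sig;
        _exit(0);                         /* the real handler cleans up first */
    }

    static void install_close_handler(void)
    {
        struct sigaction act;
        memset(&act, 0, sizeof(act));     /* zero sa_flags and padding */
        act.sa_handler = close_handler;
        sigemptyset(&act.sa_mask);        /* block nothing extra in the handler */
        sigaction(SIGHUP,  &act, NULL);
        sigaction(SIGTERM, &act, NULL);
        sigaction(SIGINT,  &act, NULL);
    }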
--- a/xen/arch/i386/pdb-stub.c	Tue Feb 24 15:18:52 2004 +0000
+++ b/xen/arch/i386/pdb-stub.c	Wed Feb 25 11:52:05 2004 +0000
@@ -86,14 +86,14 @@ pdb_process_query (char *ptr)
     }
     else if (strcmp(ptr, "fThreadInfo") == 0)
     {
-        struct task_struct *p = &idle0_task;
+        struct task_struct *p;
         u_long flags;
         int count = 0, buf_idx = 0;
 
         read_lock_irqsave (&tasklist_lock, flags);
 
         pdb_out_buffer[buf_idx++] = 'm';
-        while ( (p = p->next_task) != &idle0_task )
+        for_each_domain ( p )
         {
             domid_t domain = p->domain + PDB_DOMAIN_OFFSET;
 
--- a/xen/common/dom0_ops.c	Tue Feb 24 15:18:52 2004 +0000
+++ b/xen/common/dom0_ops.c	Wed Feb 25 11:52:05 2004 +0000
@@ -152,7 +152,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
     {
         domid_t dom = op->u.destroydomain.domain;
         int force = op->u.destroydomain.force;
-        ret = (dom == IDLE_DOMAIN_ID) ? -EPERM : kill_other_domain(dom, force);
+        ret = kill_other_domain(dom, force);
     }
     break;
 
@@ -210,9 +210,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
         unsigned long warpl = op->u.adjustdom.warpl;
         unsigned long warpu = op->u.adjustdom.warpu;
 
-        ret = -EPERM;
-        if ( dom != IDLE_DOMAIN_ID )
-            ret = sched_adjdom(dom, mcu_adv, warp, warpl, warpu);
+        ret = sched_adjdom(dom, mcu_adv, warp, warpl, warpu);
     }
     break;
 
@@ -256,18 +254,19 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
 
     case DOM0_GETDOMAININFO:
     {
-        struct task_struct *p = &idle0_task;
+        struct task_struct *p;
         u_long flags;
         int i;
 
         read_lock_irqsave (&tasklist_lock, flags);
 
-        while ( (p = p->next_task) != &idle0_task )
-            if ( !is_idle_task(p) &&
-                 (p->domain >= op->u.getdomaininfo.domain) )
+        for_each_domain ( p )
+        {
+            if ( p->domain >= op->u.getdomaininfo.domain )
                 break;
+        }
 
-        if ( p == &idle0_task )
+        if ( p == NULL )
         {
             ret = -ESRCH;
         }
--- a/xen/common/domain.c	Tue Feb 24 15:18:52 2004 +0000
+++ b/xen/common/domain.c	Wed Feb 25 11:52:05 2004 +0000
@@ -30,17 +30,16 @@
 /* Both these structures are protected by the tasklist_lock. */
 rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;
 struct task_struct *task_hash[TASK_HASH_SIZE];
+struct task_struct *task_list;
 
 struct task_struct *do_createdomain(domid_t dom_id, unsigned int cpu)
 {
-    int retval;
     char buf[100];
-    struct task_struct *p = NULL;
+    struct task_struct *p, **pp;
     unsigned long flags;
 
-    retval = -ENOMEM;
-    p = alloc_task_struct();
-    if ( p == NULL ) return NULL;
+    if ( (p = alloc_task_struct()) == NULL )
+        return NULL;
     memset(p, 0, sizeof(*p));
 
     atomic_set(&p->refcnt, 1);
@@ -48,37 +47,51 @@ struct task_struct *do_createdomain(domi
     p->domain = dom_id;
     p->processor = cpu;
 
-    /* We use a large intermediate to avoid overflow in sprintf. */
-    sprintf(buf, "Domain-%llu", dom_id);
-    strncpy(p->name, buf, MAX_DOMAIN_NAME);
-    p->name[MAX_DOMAIN_NAME-1] = '\0';
+    memcpy(&p->thread, &idle0_task.thread, sizeof(p->thread));
+
+    if ( p->domain != IDLE_DOMAIN_ID )
+    {
+        /* We use a large intermediate to avoid overflow in sprintf. */
+        sprintf(buf, "Domain-%llu", dom_id);
+        strncpy(p->name, buf, MAX_DOMAIN_NAME);
+        p->name[MAX_DOMAIN_NAME-1] = '\0';
 
-    spin_lock_init(&p->blk_ring_lock);
-    spin_lock_init(&p->event_channel_lock);
+        spin_lock_init(&p->blk_ring_lock);
+        spin_lock_init(&p->event_channel_lock);
+
+        p->addr_limit = USER_DS;
+
+        spin_lock_init(&p->page_list_lock);
+        INIT_LIST_HEAD(&p->page_list);
+        p->max_pages = p->tot_pages = 0;
 
-    p->shared_info = (void *)get_free_page(GFP_KERNEL);
-    memset(p->shared_info, 0, PAGE_SIZE);
-    SHARE_PFN_WITH_DOMAIN(virt_to_page(p->shared_info), p);
-
-    p->mm.perdomain_pt = (l1_pgentry_t *)get_free_page(GFP_KERNEL);
-    memset(p->mm.perdomain_pt, 0, PAGE_SIZE);
-
-    init_blkdev_info(p);
-
-    p->addr_limit = USER_DS;
+        p->shared_info = (void *)get_free_page(GFP_KERNEL);
+        memset(p->shared_info, 0, PAGE_SIZE);
+        SHARE_PFN_WITH_DOMAIN(virt_to_page(p->shared_info), p);
+
+        p->mm.perdomain_pt = (l1_pgentry_t *)get_free_page(GFP_KERNEL);
+        memset(p->mm.perdomain_pt, 0, PAGE_SIZE);
+
+        init_blkdev_info(p);
+
+        write_lock_irqsave(&tasklist_lock, flags);
+        pp = &task_list; /* NB. task_list is maintained in order of dom_id. */
+        for ( pp = &task_list; *pp != NULL; pp = &(*pp)->next_list )
+            if ( (*pp)->domain > p->domain )
+                break;
+        p->next_list = *pp;
+        *pp = p;
+        p->next_hash = task_hash[TASK_HASH(dom_id)];
+        task_hash[TASK_HASH(dom_id)] = p;
+        write_unlock_irqrestore(&tasklist_lock, flags);
+    }
+    else
+    {
+        sprintf(p->name, "Idle-%d", cpu);
+    }
 
     sched_add_domain(p);
 
-    spin_lock_init(&p->page_list_lock);
-    INIT_LIST_HEAD(&p->page_list);
-    p->max_pages = p->tot_pages = 0;
-
-    write_lock_irqsave(&tasklist_lock, flags);
-    SET_LINKS(p);
-    p->next_hash = task_hash[TASK_HASH(dom_id)];
-    task_hash[TASK_HASH(dom_id)] = p;
-    write_unlock_irqrestore(&tasklist_lock, flags);
-
     return p;
 }
 
@@ -141,9 +154,13 @@ void __kill_domain(struct task_struct *p
      * holds a reference to the domain being queried. Take care!
      */
     write_lock_irqsave(&tasklist_lock, flags);
-    REMOVE_LINKS(p);
-    pp = &task_hash[TASK_HASH(p->domain)];
-    while ( *pp != p ) *pp = (*pp)->next_hash;
+    pp = &task_list; /* Delete from task_list. */
+    while ( *pp != p )
+        *pp = (*pp)->next_list;
+    *pp = p->next_list;
+    pp = &task_hash[TASK_HASH(p->domain)]; /* Delete from task_hash. */
+    while ( *pp != p )
+        *pp = (*pp)->next_hash;
     *pp = p->next_hash;
     write_unlock_irqrestore(&tasklist_lock, flags);
 
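
do_createdomain() now splices each new domain into task_list in ascending dom_id order, and __kill_domain() unlinks it again, both by driving a pointer-to-a-pointer cursor so that inserting or deleting at the head needs no special case. A self-contained sketch of the technique on a simplified node type (node, insert_sorted, delete_node and the demo main are illustrative names, not Xen code):

    #include <stdio.h>

    struct node {
        unsigned long id;
        struct node  *next;
    };

    /* Insert n into a NULL-terminated list kept in ascending id order. */
    static void insert_sorted(struct node **head, struct node *n)
    {
        struct node **pp;
        for ( pp = head; *pp != NULL; pp = &(*pp)->next )
            if ( (*pp)->id > n->id )
                break;
        n->next = *pp;            /* splice in before the first larger id */
        *pp = n;
    }

    /* Unlink n. Note the cursor advances with pp = &(*pp)->next; writing
     * through it instead would splice earlier nodes out of the list. */
    static void delete_node(struct node **head, struct node *n)
    {
        struct node **pp = head;
        while ( *pp != n )
            pp = &(*pp)->next;
        *pp = n->next;
    }

    int main(void)
    {
        struct node a = { 2, NULL }, b = { 0, NULL }, c = { 1, NULL };
        struct node *head = NULL, *p;

        insert_sorted(&head, &a);
        insert_sorted(&head, &b);
        insert_sorted(&head, &c);
        delete_node(&head, &c);

        for ( p = head; p != NULL; p = p->next )
            printf("%lu\n", p->id);   /* prints 0, then 2 */
        return 0;
    }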
--- a/xen/common/keyhandler.c	Tue Feb 24 15:18:52 2004 +0000
+++ b/xen/common/keyhandler.c	Wed Feb 25 11:52:05 2004 +0000
@@ -108,21 +108,18 @@ void do_task_queues(u_char key, void *de
 
     read_lock_irqsave(&tasklist_lock, flags);
 
-    p = &idle0_task;
-    do {
+    for_each_domain ( p )
+    {
         printk("Xen: DOM %llu, CPU %d [has=%c], state = %s, "
                "hyp_events = %08x\n",
                p->domain, p->processor, p->has_cpu ? 'T':'F',
                task_states[p->state], p->hyp_events);
         s = p->shared_info;
-        if( !is_idle_task(p) )
-        {
-            printk("Guest: events = %08lx, events_mask = %08lx\n",
-                   s->events, s->events_mask);
-            printk("Notifying guest...\n");
-            cpu_mask |= mark_guest_event(p, _EVENT_DEBUG);
-        }
-    } while ( (p = p->next_task) != &idle0_task );
+        printk("Guest: events = %08lx, events_mask = %08lx\n",
+               s->events, s->events_mask);
+        printk("Notifying guest...\n");
+        cpu_mask |= mark_guest_event(p, _EVENT_DEBUG);
+    }
 
     read_unlock_irqrestore(&tasklist_lock, flags);
 
--- a/xen/common/schedule.c	Tue Feb 24 15:18:52 2004 +0000
+++ b/xen/common/schedule.c	Wed Feb 25 11:52:05 2004 +0000
@@ -479,15 +479,14 @@ asmlinkage void __enter_scheduler(void)
     {
         u_long t_flags;
         write_lock_irqsave(&tasklist_lock, t_flags);
-        p = &idle0_task;
-        do {
-            if ( (p->processor == cpu) && !is_idle_task(p) )
+        for_each_domain ( p )
+        {
+            if ( p->processor == cpu )
             {
                 p->evt -= 0xe0000000;
                 p->avt -= 0xe0000000;
             }
         }
-        while ( (p = p->next_task) != &idle0_task );
         write_unlock_irqrestore(&tasklist_lock, t_flags);
         schedule_data[cpu].svt -= 0xe0000000;
     }
--- a/xen/drivers/block/xen_block.c	Tue Feb 24 15:18:52 2004 +0000
+++ b/xen/drivers/block/xen_block.c	Wed Feb 25 11:52:05 2004 +0000
@@ -569,22 +569,16 @@ static void dump_blockq(u_char key, void
            NR_PENDING_REQS, pending_prod, pending_cons);
 
     read_lock_irqsave(&tasklist_lock, flags);
-    p = &idle0_task;
-    do {
-        if ( !is_idle_task(p) )
-        {
-            printk("Domain: %llu\n", p->domain);
-            blk_ring = p->blk_ring_base;
-
-            printk(" req_prod:0x%08x, req_cons:0x%08x resp_prod:0x%08x/"
-                   "0x%08x on_list=%d\n",
-                   blk_ring->req_prod, p->blk_req_cons,
-                   blk_ring->resp_prod, p->blk_resp_prod,
-                   __on_blkdev_list(p));
-        }
-        p = p->next_task;
-    }
-    while ( (p = p->next_task) != &idle0_task );
+    for_each_domain ( p )
+    {
+        printk("Domain: %llu\n", p->domain);
+        blk_ring = p->blk_ring_base;
+        printk(" req_prod:0x%08x, req_cons:0x%08x resp_prod:0x%08x/"
+               "0x%08x on_list=%d\n",
+               blk_ring->req_prod, p->blk_req_cons,
+               blk_ring->resp_prod, p->blk_resp_prod,
+               __on_blkdev_list(p));
+    }
     read_unlock_irqrestore(&tasklist_lock, flags);
 
     for ( i = 0; i < MAX_PENDING_REQS; i++ )
--- a/xen/drivers/block/xen_vbd.c	Tue Feb 24 15:18:52 2004 +0000
+++ b/xen/drivers/block/xen_vbd.c	Wed Feb 25 11:52:05 2004 +0000
@@ -557,16 +557,12 @@ long vbd_probe(vbd_probe_t *probe)
     if ( probe->domain == VBD_PROBE_ALL )
     {
        read_lock_irqsave(&tasklist_lock, flags);
-        p = &idle0_task;
-        while ( (p = p->next_task) != &idle0_task )
+        for_each_domain ( p )
         {
-            if ( !is_idle_task(p) )
+            if ( (ret = vbd_probe_devices(&probe->xdi, p)) != 0 )
             {
-                if( (ret = vbd_probe_devices(&probe->xdi, p)) != 0 )
-                {
-                    read_unlock_irqrestore(&tasklist_lock, flags);
-                    goto out;
-                }
+                read_unlock_irqrestore(&tasklist_lock, flags);
+                goto out;
             }
         }
         read_unlock_irqrestore(&tasklist_lock, flags);
--- a/xen/include/xeno/sched.h	Tue Feb 24 15:18:52 2004 +0000
+++ b/xen/include/xeno/sched.h	Wed Feb 25 11:52:05 2004 +0000
@@ -140,8 +140,8 @@ struct task_struct
     char name[MAX_DOMAIN_NAME];
 
     struct thread_struct thread;
-    struct task_struct *prev_task, *next_task, *next_hash;
-
+    struct task_struct *next_list, *next_hash;
+
     /* Event channel information. */
     event_channel_t *event_channel;
     unsigned int max_event_channel;
@@ -182,8 +182,6 @@ struct task_struct
     mm:          IDLE0_MM,      \
     addr_limit:  KERNEL_DS,     \
     thread:      INIT_THREAD,   \
-    prev_task:   &(_t),         \
-    next_task:   &(_t),         \
     flags:       1<<PF_IDLETASK \
 }
 
@@ -284,22 +282,14 @@ void continue_cpu_idle_loop(void);
 
 void continue_nonidle_task(void);
 
-/* This hash table is protected by the tasklist_lock. */
+/* This task_hash and task_list are protected by the tasklist_lock. */
 #define TASK_HASH_SIZE 256
 #define TASK_HASH(_id) ((int)(_id)&(TASK_HASH_SIZE-1))
 extern struct task_struct *task_hash[TASK_HASH_SIZE];
-
-#define REMOVE_LINKS(p) do {                        \
-    (p)->next_task->prev_task = (p)->prev_task;     \
-    (p)->prev_task->next_task = (p)->next_task;     \
-    } while (0)
+extern struct task_struct *task_list;
 
-#define SET_LINKS(p) do {                           \
-    (p)->next_task = &idle0_task;                   \
-    (p)->prev_task = idle0_task.prev_task;          \
-    idle0_task.prev_task->next_task = (p);          \
-    idle0_task.prev_task = (p);                     \
-    } while (0)
+#define for_each_domain(_p) \
+    for ( (_p) = task_list; (_p) != NULL; (_p) = (_p)->next_list )
 
 extern void update_process_times(int user);
 
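
for_each_domain() walks task_list, which the header comment above now documents as protected by tasklist_lock, so callers must hold that lock across the traversal, as every converted call site in this changeset does. Because the loop terminates with its cursor at NULL rather than back at idle0_task, an exhausted search is detected with p == NULL, which is what the DOM0_GETDOMAININFO hunk relies on. A minimal usage sketch in the style of the call sites above (the printk body is illustrative):

    struct task_struct *p;
    u_long flags;

    read_lock_irqsave(&tasklist_lock, flags);
    for_each_domain ( p )             /* idle domains never appear here */
        printk("DOM %llu on CPU %d\n", p->domain, p->processor);
    read_unlock_irqrestore(&tasklist_lock, flags);
    /* After a completed (unbroken) loop, p == NULL. */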