ia64/xen-unstable: xen-2.4.16/common/domain.c @ 86:4a10fe9b20ec

BitKeeper revision 1.15 (3e24a984iRiWWcgfKCxu2p5q3YbxXw)

Many files:
    First half of support for per-domain GDTs and LDTs

author    kaf24@labyrinth.cl.cam.ac.uk
date      Wed Jan 15 00:21:24 2003 +0000
parents   c3e6a52cd801
children  336647fd8f40 f7ff141acc2a a8063692097a
#include <xeno/config.h>
#include <xeno/init.h>
#include <xeno/lib.h>
#include <xeno/errno.h>
#include <xeno/sched.h>
#include <xeno/mm.h>
#include <xeno/skbuff.h>
#include <xeno/interrupt.h>
#include <xeno/delay.h>
#include <xeno/event.h>
#include <xeno/dom0_ops.h>
#include <asm/io.h>
#include <asm/domain_page.h>

rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;

schedule_data_t schedule_data[NR_CPUS];
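
/*
 * Place task @p on its CPU's runqueue if it is not already there, marking
 * it TASK_RUNNING. Returns 1 if the task was woken, 0 if it was already
 * queued. The per-CPU scheduler lock is held across the runqueue update.
 */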
int wake_up(struct task_struct *p)
{
    unsigned long flags;
    int ret = 0;
    spin_lock_irqsave(&schedule_data[p->processor].lock, flags);
    if ( __task_on_runqueue(p) ) goto out;
    p->state = TASK_RUNNING;
    __add_to_runqueue(p);
    ret = 1;

 out:
    spin_unlock_irqrestore(&schedule_data[p->processor].lock, flags);
    return ret;
}
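
/*
 * Allocate and minimally initialise a task_struct for a new domain: zeroed
 * struct, a fresh shared_info page with the block and network rings placed
 * immediately after it, default GDT settings, and linkage into the global
 * task list. Returns NULL if the task_struct allocation fails.
 */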
struct task_struct *do_newdomain(void)
{
    int retval;
    struct task_struct *p = NULL;
    unsigned long flags;

    retval = -ENOMEM;
    p = alloc_task_struct();
    if ( !p ) goto newdomain_out;
    memset(p, 0, sizeof(*p));
    p->shared_info = (void *)get_free_page(GFP_KERNEL);
    memset(p->shared_info, 0, PAGE_SIZE);

    SET_GDT_ENTRIES(p, DEFAULT_GDT_ENTRIES);
    SET_GDT_ADDRESS(p, DEFAULT_GDT_ADDRESS);

    p->addr_limit = USER_DS;
    p->state = TASK_UNINTERRUPTIBLE;
    p->active_mm = &p->mm;
    p->num_net_vifs = 0;

    /*
     * KAF: Passing in newdomain struct to this function is gross!
     * Therefore, for now we just allocate the single blk_ring
     * before the multiple net_rings :-)
     */
    p->blk_ring_base = (blk_ring_t *)(p->shared_info + 1);
    p->net_ring_base = (net_ring_t *)(p->blk_ring_base + 1);
    p->pg_head = p->tot_pages = 0;
    write_lock_irqsave(&tasklist_lock, flags);
    SET_LINKS(p);
    write_unlock_irqrestore(&tasklist_lock, flags);

 newdomain_out:
    return p;
}
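
/*
 * Prod CPU @p->processor into rescheduling so that @p can run there. If @p
 * already holds a CPU, or the target CPU is running a non-idle task, this
 * is a no-op; otherwise the idle task is flagged with NEED_RESCHED and, on
 * SMP, a remote CPU is sent an event-check IPI.
 */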
void reschedule(struct task_struct *p)
{
    int cpu = p->processor;
    struct task_struct *curr;
    unsigned long flags;

    if ( p->has_cpu ) return;

    spin_lock_irqsave(&schedule_data[cpu].lock, flags);
    curr = schedule_data[cpu].curr;
    if ( is_idle_task(curr) )
    {
        set_bit(_HYP_EVENT_NEED_RESCHED, &curr->hyp_events);
        spin_unlock_irqrestore(&schedule_data[cpu].lock, flags);
#ifdef CONFIG_SMP
        if ( cpu != smp_processor_id() ) smp_send_event_check_cpu(cpu);
#endif
    }
    else
    {
        spin_unlock_irqrestore(&schedule_data[cpu].lock, flags);
    }
}
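
/*
 * schedule_timeout() support. A one-shot timer is armed with the caller's
 * task pointer as payload; when it fires, process_timeout() wakes that
 * task, bounding how long the caller stays off the runqueue.
 */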
static void process_timeout(unsigned long __data)
{
    struct task_struct *p = (struct task_struct *)__data;
    wake_up(p);
}

long schedule_timeout(long timeout)
{
    struct timer_list timer;
    unsigned long expire;

    switch ( timeout )
    {
    case MAX_SCHEDULE_TIMEOUT:
        /*
         * This special case is for the caller's convenience, nothing more.
         * MAX_SCHEDULE_TIMEOUT could be one of the negative values, but we
         * prefer to return a valid offset (>= 0) so the caller can do
         * whatever it wants with the return value.
         */
        schedule();
        goto out;
    default:
        /*
         * Another bit of paranoia. The return value will be 0, since no
         * kernel code is supposed to check for a negative return from
         * schedule_timeout() (it should never happen anyway). The printk()
         * simply tells you that something has gone wrong, and where.
         */
        if ( timeout < 0 )
        {
            printk(KERN_ERR "schedule_timeout: wrong timeout "
                   "value %lx from %p\n", timeout,
                   __builtin_return_address(0));
            current->state = TASK_RUNNING;
            goto out;
        }
    }

    expire = timeout + jiffies;

    init_timer(&timer);
    timer.expires  = expire;
    timer.data     = (unsigned long)current;
    timer.function = process_timeout;

    add_timer(&timer);
    schedule();
    del_timer_sync(&timer);

    timeout = expire - jiffies;

 out:
    return timeout < 0 ? 0 : timeout;
}
long do_yield(void)
{
    current->state = TASK_INTERRUPTIBLE;
    schedule();
    return 0;
}
/*
 * Get a pointer to the specified domain. Consider replacing this
 * with a hash lookup later.
 *
 * Also, kill_other_domain() should call this instead of scanning on its own.
 */
struct task_struct *find_domain_by_id(unsigned int dom)
{
    struct task_struct *p = &idle0_task;

    read_lock_irq(&tasklist_lock);
    do {
        if ( p->domain == dom )
        {
            read_unlock_irq(&tasklist_lock);
            return p;
        }
    } while ( (p = p->next_task) != &idle0_task );
    read_unlock_irq(&tasklist_lock);

    return NULL;
}
void kill_domain_with_errmsg(const char *err)
{
    printk("DOM%d FATAL ERROR: %s\n",
           current->domain, err);
    kill_domain();
}


/* Kill the currently executing domain. */
void kill_domain(void)
{
    if ( current->domain == 0 )
    {
        extern void machine_restart(char *);
        printk("Domain 0 killed: rebooting machine!\n");
        machine_restart(0);
    }

    printk("Killing domain %d\n", current->domain);
    current->state = TASK_DYING;
    schedule();
    BUG(); /* never get here */
}
long kill_other_domain(unsigned int dom)
{
    struct task_struct *p = &idle0_task;
    unsigned long cpu_mask = 0;
    long ret = -ESRCH;

    read_lock_irq(&tasklist_lock);
    do {
        if ( p->domain == dom )
        {
            cpu_mask = mark_guest_event(p, _EVENT_DIE);
            ret = 0;
            break;
        }
    }
    while ( (p = p->next_task) != &idle0_task );
    read_unlock_irq(&tasklist_lock);

    hyp_event_notify(cpu_mask);

    return ret;
}
/* Release resources belonging to task @p. */
void release_task(struct task_struct *p)
{
    ASSERT(!__task_on_runqueue(p));
    ASSERT(p->state == TASK_DYING);
    ASSERT(!p->has_cpu);
    write_lock_irq(&tasklist_lock);
    REMOVE_LINKS(p);
    write_unlock_irq(&tasklist_lock);

    /*
     * Safe! Only queue skbuffs with tasklist_lock held.
     * Only access shared_info with tasklist_lock held.
     * And free_task_struct() only releases if refcnt == 0.
     */
    while ( p->num_net_vifs )
    {
        destroy_net_vif(p);
    }
    if ( p->mm.perdomain_pt ) free_page((unsigned long)p->mm.perdomain_pt);
    free_page((unsigned long)p->shared_info);
    free_task_struct(p);
}
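
/*
 * Main scheduler entry point. Simple round-robin: when the outgoing task's
 * counter expires it is given a fresh quantum and moved to the back of the
 * runqueue, then the first non-idle task on this CPU's runqueue is picked.
 * A task left in TASK_DYING state has its resources released once we have
 * switched away from it.
 */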
asmlinkage void schedule(void)
{
    struct task_struct *prev, *next;
    struct list_head *tmp;
    int this_cpu;

 need_resched_back:
    prev = current;
    this_cpu = prev->processor;

    spin_lock_irq(&schedule_data[this_cpu].lock);

    ASSERT(!in_interrupt());
    ASSERT(__task_on_runqueue(prev));

    if ( !prev->counter )
    {
        prev->counter = 2;
        __move_last_runqueue(prev);
    }

    switch ( prev->state )
    {
    case TASK_INTERRUPTIBLE:
        if ( signal_pending(prev) )
        {
            prev->state = TASK_RUNNING;
            break;
        }
    default:
        __del_from_runqueue(prev);
    case TASK_RUNNING:;
    }
    clear_bit(_HYP_EVENT_NEED_RESCHED, &prev->hyp_events);

    /* Round-robin, skipping idle where possible. */
    next = NULL;
    list_for_each(tmp, &schedule_data[smp_processor_id()].runqueue) {
        next = list_entry(tmp, struct task_struct, run_list);
        if ( next->domain != IDLE_DOMAIN_ID ) break;
    }

    prev->has_cpu = 0;
    next->has_cpu = 1;

    schedule_data[this_cpu].prev = prev;
    schedule_data[this_cpu].curr = next;

    spin_unlock_irq(&schedule_data[this_cpu].lock);

    if ( unlikely(prev == next) )
    {
        /* We won't go through the normal tail, so do this by hand. */
        prev->policy &= ~SCHED_YIELD;
        goto same_process;
    }

    prepare_to_switch();
    switch_to(prev, next);
    prev = schedule_data[this_cpu].prev;

    prev->policy &= ~SCHED_YIELD;
    if ( prev->state == TASK_DYING ) release_task(prev);

 same_process:
    if ( test_bit(_HYP_EVENT_NEED_RESCHED, &current->hyp_events) )
        goto need_resched_back;
    return;
}
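
/*
 * Carve @kbytes of memory out of the free page list for domain @p. Each
 * page is removed from free_list, tagged with the domain ID in its
 * pfn_info flags, and threaded into a circular chain through frame_table
 * via the next/prev frame indices, with p->pg_head pointing at the first
 * frame. Returns 0 on success, nonzero if not enough free pages remain.
 */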
static unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
{
    struct list_head *temp;
    struct pfn_info *pf, *pf_head;
    unsigned int alloc_pfns;
    unsigned int req_pages;

    /* How many pages do we need to allocate? */
    req_pages = kbytes >> (PAGE_SHIFT - 10);

    /* Is there enough memory to serve the request? */
    if ( req_pages > free_pfns )
        return -1;

    /* Allocate pages and build a thread through frame_table. */
    temp = free_list.next;
    printk("bd240 debug: DOM%d requesting %d pages\n", p->domain, req_pages);

    /* Allocate the first page. */
    pf = list_entry(temp, struct pfn_info, list);
    pf->flags |= p->domain;
    temp = temp->next;
    list_del(&pf->list);
    pf->next = pf->prev = p->pg_head = (pf - frame_table);
    free_pfns--;
    pf_head = pf;

    /* Allocate the rest. */
    for ( alloc_pfns = req_pages - 1; alloc_pfns; alloc_pfns-- )
    {
        pf = list_entry(temp, struct pfn_info, list);
        pf->flags |= p->domain;
        temp = temp->next;
        list_del(&pf->list);

        pf->next = p->pg_head;
        pf->prev = pf_head->prev;
        (frame_table + pf_head->prev)->next = (pf - frame_table);
        pf_head->prev = (pf - frame_table);

        free_pfns--;
    }

    p->tot_pages = req_pages;

    return 0;
}
/*
 * Initial load map:
 *  start_address:
 *      OS image
 *      ....
 *  stack_start:
 *  start_info:
 *      <one page>
 *  page tables:
 *      <enough pages>
 *  end_address:
 *  shared_info:
 *      <one page>
 */
#define MB_PER_DOMAIN 16
#include <asm/msr.h>
#include <xeno/multiboot.h>
extern int nr_mods;
extern module_t *mod;
extern unsigned char *cmdline;
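
/*
 * Build an initial execution environment for a new guest OS, laid out as
 * described above: allocate its memory, construct its L2/L1 page tables
 * (page-table pages mapped read-only), map the shared_info page (plus the
 * frame table for DOM0), copy in the OS image, fill in the start_info
 * page, attach virtual network interfaces, and finally create the guest's
 * first thread with new_thread().
 */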
int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
{
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
#define ALLOC_FRAME_FROM_DOMAIN() (alloc_address -= PAGE_SIZE)
    char *src, *dst;
    int i, dom = p->domain;
    unsigned long start_address, phys_l1tab, phys_l2tab;
    unsigned long cur_address, end_address, alloc_address, vaddr;
    unsigned long virt_load_address, virt_stack_address, virt_shinfo_address;
    unsigned long virt_ftable_start_addr = 0, virt_ftable_end_addr;
    unsigned long ft_mapping = (unsigned long)frame_table;
    unsigned int ft_size = 0;
    start_info_t *virt_startinfo_address;
    unsigned long long time;
    l2_pgentry_t *l2tab;
    l1_pgentry_t *l1tab = NULL;
    struct pfn_info *page = NULL;
    net_ring_t *net_ring;
    net_vif_t *net_vif;

    if ( strncmp(__va(mod[0].mod_start), "XenoGues", 8) )
    {
        printk("DOM%d: Invalid guest OS image\n", dom);
        return -1;
    }

    virt_load_address = *(unsigned long *)__va(mod[0].mod_start + 8);
    if ( (virt_load_address & (PAGE_SIZE-1)) )
    {
        printk("DOM%d: Guest OS load address not page-aligned (%08lx)\n",
               dom, virt_load_address);
        return -1;
    }

    if ( alloc_new_dom_mem(p, params->memory_kb) ) return -ENOMEM;

    /*
     * Temporary: the *_address variables will have to be reimplemented
     * another way, as we can no longer expect a contiguous address space.
     */
    start_address = p->pg_head << PAGE_SHIFT;
    alloc_address = end_address = start_address + (p->tot_pages << PAGE_SHIFT);
    /* start_address += (dom * MB_PER_DOMAIN) << 20; */ /* MB -> bytes */
    /* alloc_address = end_address = start_address + (MB_PER_DOMAIN << 20); */

    if ( (mod[nr_mods-1].mod_end-mod[0].mod_start) >
         ((end_address-start_address)>>1) )
    {
        printk("DOM%d: Guest OS image is too large\n"
               "       (%luMB is greater than %luMB limit for a\n"
               "        %luMB address space)\n",
               dom, (mod[nr_mods-1].mod_end-mod[0].mod_start)>>20,
               (end_address-start_address)>>21,
               (end_address-start_address)>>20);
        /* XXX should free domain memory here XXX */
        return -1;
    }
    /* Set up initial mappings. */
    printk("DOM%d: Mapping physmem %08lx -> %08lx (%luMB)\n", dom,
           start_address, end_address, (end_address-start_address)>>20);
    printk("DOM%d: Guest OS virtual load address is %08lx\n", dom,
           virt_load_address);

    /*
     * WARNING: The new domain must have its 'processor' field
     * filled in by now !!
     */
    phys_l2tab = ALLOC_FRAME_FROM_DOMAIN();
    l2tab = map_domain_mem(phys_l2tab);
    memcpy(l2tab, idle_pg_table[p->processor], PAGE_SIZE);
    l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry(__pa(p->mm.perdomain_pt) | __PAGE_HYPERVISOR);
    memset(l2tab, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE*sizeof(l2_pgentry_t));
    p->mm.pagetable = mk_pagetable(phys_l2tab);

    /*
     * NB. The upper limit on this loop does one extra page. This is to
     * make sure a pte exists when we want to map the shared_info struct.
     */
    /*
     * bd240: not just one extra page, but one plus the number of pages
     * required for the frame_table when domain 0 is being built. This ugly
     * loop condition will change once domain building is moved out of the
     * hypervisor.
     */
    if ( dom == 0 )
        ft_size = frame_table_size;

    phys_l2tab += l2_table_offset(virt_load_address)*sizeof(l2_pgentry_t);
    for ( cur_address = start_address;
          cur_address != (end_address + PAGE_SIZE + ft_size);
          cur_address += PAGE_SIZE )
    {
        if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
        {
            phys_l1tab = ALLOC_FRAME_FROM_DOMAIN();
            l2tab = map_domain_mem(phys_l2tab);
            *l2tab = mk_l2_pgentry(phys_l1tab|L2_PROT);
            phys_l2tab += sizeof(l2_pgentry_t);
            l1tab = map_domain_mem(phys_l1tab);
            clear_page(l1tab);
            l1tab += l1_table_offset(
                virt_load_address + cur_address - start_address);
        }
        *l1tab++ = mk_l1_pgentry(cur_address|L1_PROT);

        /* The new domain doesn't own the shared_info page or frame_table. */
        if ( cur_address < end_address )
        {
            page = frame_table + (cur_address >> PAGE_SHIFT);
            page->flags = dom | PGT_writeable_page;
            page->type_count = page->tot_count = 1;
        }
    }
    /* Pages that are part of page tables must be read-only. */
    vaddr = virt_load_address + alloc_address - start_address;
    phys_l2tab = pagetable_val(p->mm.pagetable) +
        (l2_table_offset(vaddr) * sizeof(l2_pgentry_t));
    l2tab = map_domain_mem(phys_l2tab);
    phys_l1tab = l2_pgentry_to_phys(*l2tab) +
        (l1_table_offset(vaddr) * sizeof(l1_pgentry_t));
    phys_l2tab += sizeof(l2_pgentry_t);
    l1tab = map_domain_mem(phys_l1tab);
    for ( cur_address = alloc_address;
          cur_address != end_address;
          cur_address += PAGE_SIZE )
    {
        /* Clear _PAGE_RW; avoid the unsequenced '*l1tab++ = f(*l1tab)'. */
        *l1tab = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
        l1tab++;
        if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
        {
            l2tab = map_domain_mem(phys_l2tab);
            phys_l1tab = l2_pgentry_to_phys(*l2tab);
            phys_l2tab += sizeof(l2_pgentry_t);
            l1tab = map_domain_mem(phys_l1tab);
        }
        page = frame_table + (cur_address >> PAGE_SHIFT);
        page->flags = dom | PGT_l1_page_table;
        page->tot_count++;
    }
    page->flags = dom | PGT_l2_page_table;
    /* Map in the shared info structure. */
    virt_shinfo_address = end_address - start_address + virt_load_address;
    phys_l2tab = pagetable_val(p->mm.pagetable) +
        (l2_table_offset(virt_shinfo_address) * sizeof(l2_pgentry_t));
    l2tab = map_domain_mem(phys_l2tab);
    phys_l1tab = l2_pgentry_to_phys(*l2tab) +
        (l1_table_offset(virt_shinfo_address) * sizeof(l1_pgentry_t));
    l1tab = map_domain_mem(phys_l1tab);
    *l1tab = mk_l1_pgentry(__pa(p->shared_info)|L1_PROT);

    /* Set up the shared info area. */
    rdtscll(time);
    p->shared_info->wall_time    = time;
    p->shared_info->domain_time  = time;
    p->shared_info->ticks_per_ms = ticks_per_usec * 1000;

    /* For DOM0, set up a mapping of the frame table. */
    if ( dom == 0 )
    {
        virt_ftable_start_addr = virt_shinfo_address + PAGE_SIZE;
        virt_ftable_end_addr = virt_ftable_start_addr + frame_table_size;
        for ( cur_address = virt_ftable_start_addr;
              cur_address < virt_ftable_end_addr;
              cur_address += PAGE_SIZE )
        {
            phys_l2tab = pagetable_val(p->mm.pagetable) +
                (l2_table_offset(cur_address) * sizeof(l2_pgentry_t));
            l2tab = map_domain_mem(phys_l2tab);
            phys_l1tab = l2_pgentry_to_phys(*l2tab) +
                (l1_table_offset(cur_address) * sizeof(l1_pgentry_t));
            l1tab = map_domain_mem(phys_l1tab);
            *l1tab = mk_l1_pgentry(__pa(ft_mapping)|L1_PROT);
            ft_mapping += PAGE_SIZE;
        }
    }
    virt_startinfo_address = (start_info_t *)
        (alloc_address - start_address - PAGE_SIZE + virt_load_address);
    virt_stack_address = (unsigned long)virt_startinfo_address;

    /* Install the new page tables. */
    __cli();
    __asm__ __volatile__ (
        "mov %%eax,%%cr3" : : "a" (pagetable_val(p->mm.pagetable)));

    /* Copy the guest OS image. */
    src = (char *)__va(mod[0].mod_start + 12);
    dst = (char *)virt_load_address;
    while ( src < (char *)__va(mod[nr_mods-1].mod_end) ) *dst++ = *src++;

    /* Set up the start info area. */
    memset(virt_startinfo_address, 0, sizeof(*virt_startinfo_address));
    virt_startinfo_address->nr_pages = (end_address-start_address)>>PAGE_SHIFT;
    virt_startinfo_address->shared_info =
        (shared_info_t *)virt_shinfo_address;
    virt_startinfo_address->pt_base =
        end_address - PAGE_SIZE - start_address + virt_load_address;
    virt_startinfo_address->phys_base = start_address;
    /* NB. The next field will be NULL if dom != 0. */
    virt_startinfo_address->frame_table = virt_ftable_start_addr;

    /* Add virtual network interfaces and point to them in startinfo. */
    while ( params->num_vifs-- > 0 )
    {
        net_vif = create_net_vif(dom);
        net_ring = net_vif->net_ring;
        if ( !net_ring ) panic("no network ring!\n");
    }
    /* XXX SMH: horrible hack to convert hypervisor VAs in SHIP to guest VAs. */
#define SHIP2GUEST(_x) (virt_shinfo_address | (((unsigned long)(_x)) & 0xFFF))

    virt_startinfo_address->net_rings =
        (net_ring_t *)SHIP2GUEST(p->net_ring_base);
    virt_startinfo_address->num_net_rings = p->num_net_vifs;

    /* Add the block I/O interface. */
    virt_startinfo_address->blk_ring =
        (blk_ring_t *)SHIP2GUEST(p->blk_ring_base);

    /* We tell the OS about any modules we were given. */
    if ( nr_mods > 1 )
    {
        virt_startinfo_address->mod_start =
            (mod[1].mod_start-mod[0].mod_start-12) + virt_load_address;
        virt_startinfo_address->mod_len =
            mod[nr_mods-1].mod_end - mod[1].mod_start;
    }

    dst = virt_startinfo_address->cmd_line;
    if ( mod[0].string )
    {
        char *modline = (char *)__va(mod[0].string);
        for ( i = 0; i < 255; i++ )
        {
            if ( modline[i] == '\0' ) break;
            *dst++ = modline[i];
        }
    }
    *dst = '\0';
    if ( opt_nfsroot )
    {
        unsigned char boot[150];
        unsigned char ipbase[20], nfsserv[20], gateway[20], netmask[20];
        unsigned char nfsroot[70];
        snprintf(nfsroot, sizeof(nfsroot), opt_nfsroot, dom);
        snprintf(boot, sizeof(boot),
                 " root=/dev/nfs ip=%s:%s:%s:%s::eth0:off nfsroot=%s",
                 quad_to_str(opt_ipbase + dom, ipbase),
                 quad_to_str(opt_nfsserv, nfsserv),
                 quad_to_str(opt_gateway, gateway),
                 quad_to_str(opt_netmask, netmask),
                 nfsroot);
        strcpy(dst, boot);
    }

    /* Reinstate the caller's page tables. */
    __asm__ __volatile__ (
        "mov %%eax,%%cr3" : : "a" (pagetable_val(current->mm.pagetable)));
    __sti();

    new_thread(p,
               (unsigned long)virt_load_address,
               (unsigned long)virt_stack_address,
               (unsigned long)virt_startinfo_address);

    return 0;
}
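
/*
 * One-time scheduler initialisation: give every CPU an empty runqueue, an
 * initialised scheduler lock, and idle0_task as its initial prev/curr task.
 */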
void __init domain_init(void)
{
    int i;
    for ( i = 0; i < NR_CPUS; i++ )
    {
        INIT_LIST_HEAD(&schedule_data[i].runqueue);
        spin_lock_init(&schedule_data[i].lock);
        schedule_data[i].prev = &idle0_task;
        schedule_data[i].curr = &idle0_task;
    }
}
#if 0
    unsigned long s = (mod[0].mod_start        + (PAGE_SIZE-1)) & PAGE_MASK;
    unsigned long e = (mod[nr_mods-1].mod_end  + (PAGE_SIZE-1)) & PAGE_MASK;
    while ( s != e )
    {
        free_pages((unsigned long)__va(s), 0);
        s += PAGE_SIZE;
    }
#endif