ia64/xen-unstable

changeset 1505:ead91151a0e6

bitkeeper revision 1.982 (40d300456_XUbFFOMxRh4MjyB7AfJA)

Hacked the scheduler interfaces in Xen.
We now have synchronous pause.
Suspend/death VIRQs have gone away; replaced by dom-controller msgs.
Xen no longer knows about PS/2 keyboard/mouse; DOM0 can go straight
at them.
author kaf24@scramble.cl.cam.ac.uk
date Fri Jun 18 14:46:29 2004 +0000 (2004-06-18)
parents cd6f5625b3d7
children 729cac1fb14e
files .rootkeys extras/mini-os/h/hypervisor.h linux-2.4.26-xen-sparse/arch/xen/kernel/setup.c linux-2.4.26-xen-sparse/include/asm-xen/hypervisor.h linux-2.4.26-xen-sparse/include/asm-xen/keyboard.h linux-2.4.26-xen-sparse/mkbuildtree tools/xc/lib/xc_domain.c tools/xc/lib/xc_linux_build.c tools/xc/lib/xc_netbsd_build.c tools/xc/lib/xc_private.c tools/xend/lib/domain_controller.h tools/xend/lib/utils.c xen/arch/x86/entry.S xen/arch/x86/i387.c xen/arch/x86/idle0_task.c xen/arch/x86/irq.c xen/arch/x86/mm.c xen/arch/x86/pdb-stub.c xen/arch/x86/process.c xen/arch/x86/setup.c xen/arch/x86/smpboot.c xen/arch/x86/traps.c xen/common/ac_timer.c xen/common/debug.c xen/common/dom0_ops.c xen/common/dom_mem_ops.c xen/common/domain.c xen/common/event.c xen/common/event_channel.c xen/common/kernel.c xen/common/keyhandler.c xen/common/memory.c xen/common/physdev.c xen/common/sched_atropos.c xen/common/sched_bvt.c xen/common/sched_rrobin.c xen/common/schedule.c xen/common/shadow.c xen/common/slab.c xen/common/softirq.c xen/common/trace.c xen/drivers/char/console.c xen/drivers/char/keyboard.c xen/include/asm-x86/hardirq.h xen/include/asm-x86/i387.h xen/include/asm-x86/ldt.h xen/include/asm-x86/pda.h xen/include/asm-x86/processor.h xen/include/asm-x86/softirq.h xen/include/asm-x86/x86_32/current.h xen/include/asm-x86/x86_64/current.h xen/include/asm-x86/x86_64/ldt.h xen/include/hypervisor-ifs/dom0_ops.h xen/include/hypervisor-ifs/hypervisor-if.h xen/include/hypervisor-ifs/kbd.h xen/include/xen/config.h xen/include/xen/console.h xen/include/xen/event.h xen/include/xen/interrupt.h xen/include/xen/irq.h xen/include/xen/irq_cpustat.h xen/include/xen/mm.h xen/include/xen/sched-if.h xen/include/xen/sched.h xen/include/xen/shadow.h xen/include/xen/spinlock.h
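
Two renames run through the whole diff: SCHEDOP_stop becomes SCHEDOP_suspend (the operation and its stop reason are packed into a single hypercall argument), and the old VIRQ_STOP/VIRQ_DIE handlers become receivers for domain-controller messages. A minimal sketch of the argument packing used by the HYPERVISOR_shutdown/reboot/suspend wrappers in the hunks below; the constant values here are assumptions for illustration, only the names appear in this diff:

    /* Sketch: sched_op packs the operation in the low bits and the
     * reason code above SCHEDOP_reasonshift. Values are assumed. */
    #include <stdio.h>

    #define SCHEDOP_suspend      2   /* assumed; renamed from SCHEDOP_stop */
    #define SCHEDOP_reasonshift  8   /* assumed */
    #define STOPCODE_reboot      1   /* assumed */

    int main(void)
    {
        unsigned long arg =
            SCHEDOP_suspend | (STOPCODE_reboot << SCHEDOP_reasonshift);
        printf("sched_op argument = %#lx\n", arg);  /* 0x102 with these values */
        return 0;
    }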
line diff
     1.1 --- a/.rootkeys	Thu Jun 17 16:33:33 2004 +0000
     1.2 +++ b/.rootkeys	Fri Jun 18 14:46:29 2004 +0000
     1.3 @@ -115,7 +115,6 @@ 3e5a4e67YtcyDLQsShhCfQwPSELfvA linux-2.4
     1.4  3e5a4e677VBavzM1UZIEcH1B-RlXMA linux-2.4.26-xen-sparse/include/asm-xen/hypervisor.h
     1.5  4060044fVx7-tokvNLKBf_6qBB4lqQ linux-2.4.26-xen-sparse/include/asm-xen/io.h
     1.6  3e5a4e673p7PEOyHFm3nHkYX6HQYBg linux-2.4.26-xen-sparse/include/asm-xen/irq.h
     1.7 -3ead095db_LRUXnxaqs0dA1DWhPoQQ linux-2.4.26-xen-sparse/include/asm-xen/keyboard.h
     1.8  3e5a4e678ddsQOpbSiRdy1GRcDc9WA linux-2.4.26-xen-sparse/include/asm-xen/mmu_context.h
     1.9  40d06e5b2YWInUX1Xv9amVANwd_2Xg linux-2.4.26-xen-sparse/include/asm-xen/module.h
    1.10  3f8707e7ZmZ6TxyX0ZUEfvhA2Pb_xQ linux-2.4.26-xen-sparse/include/asm-xen/msr.h
    1.11 @@ -325,7 +324,6 @@ 3fa152581E5KhrAtqZef2Sr5NKTz4w xen/commo
    1.12  3ddb79bdLX_P6iB7ILiblRLWvebapg xen/common/dom0_ops.c
    1.13  3e6377e4i0c9GtKN65e99OtRbw3AZw xen/common/dom_mem_ops.c
    1.14  3ddb79bdYO5D8Av12NHqPeSviav7cg xen/common/domain.c
    1.15 -3ddb79bdeyutmaXEfpQvvxj7eQ0fCw xen/common/event.c
    1.16  3fba5b96H0khoxNiKbjdi0inpXV-Pw xen/common/event_channel.c
    1.17  3ddb79bd9drcFPVxd4w2GPOIjLlXpA xen/common/kernel.c
    1.18  3e4cd9d8LAAghUY0hNIK72uc2ch_Nw xen/common/keyhandler.c
    1.19 @@ -351,7 +349,6 @@ 40715b2bDxNCz5LFV8FAXihmYJZFUQ xen/drive
    1.20  40715b2cNVOegtvyft_AHFKJYRprfA xen/drivers/acpi/tables.c
    1.21  3e4a8cb7alzQCDKS7MlioPoHBKYkdQ xen/drivers/char/Makefile
    1.22  4049e6bfNSIq7s7OV-Bd69QD0RpR2Q xen/drivers/char/console.c
    1.23 -3e4a8cb7WmiYdC-ASGiCSG_CL8vsqg xen/drivers/char/keyboard.c
    1.24  3e4a8cb7nMChlro4wvOBo76n__iCFA xen/drivers/char/serial.c
    1.25  3ddb79beUWngyIhMHgyPtuTem4o4JA xen/drivers/pci/Makefile
    1.26  3ddb79beU9td0Mnm0VUMklerBa37qQ xen/drivers/pci/compat.c
    1.27 @@ -433,7 +430,6 @@ 40cf1596bim9F9DNdV75klgRSZ6Y2A xen/inclu
    1.28  3ddb79c2plf7ciNgoNjU-RsbUzawsw xen/include/asm-x86/rwlock.h
    1.29  3ddb79c3Hgbb2g8CyWLMCK-6_ZVQSQ xen/include/asm-x86/smp.h
    1.30  3ddb79c3jn8ALV_S9W5aeTYUQRKBpg xen/include/asm-x86/smpboot.h
    1.31 -3ddb79c3e9DCEoR-WzNxcOQDzLu7BQ xen/include/asm-x86/softirq.h
    1.32  3ddb79c3NiyQE2vQnyGiaBnNjBO1rA xen/include/asm-x86/spinlock.h
    1.33  3e7f358aG11EvMI9VJ4_9hD4LUO7rQ xen/include/asm-x86/string.h
    1.34  3ddb79c3ezddh34MdelJpa5tNR00Dw xen/include/asm-x86/system.h
    1.35 @@ -454,7 +450,6 @@ 404f1bc7IwU-qnH8mJeVu0YsNGMrcw xen/inclu
    1.36  3ddb79c2PMeWTK86y4C3F4MzHw4A1g xen/include/hypervisor-ifs/dom0_ops.h
    1.37  403cd194j2pyLqXD8FJ-ukvZzkPenw xen/include/hypervisor-ifs/event_channel.h
    1.38  3ddb79c25UE59iu4JJcbRalx95mvcg xen/include/hypervisor-ifs/hypervisor-if.h
    1.39 -3ead095dE_VF-QA88rl_5cWYRWtRVQ xen/include/hypervisor-ifs/kbd.h
    1.40  4051db79512nOCGweabrFWO2M2h5ng xen/include/hypervisor-ifs/physdev.h
    1.41  40589968wmhPmV5-ENbBYmMjnedgKw xen/include/hypervisor-ifs/sched_ctl.h
    1.42  404f3d2eR2Owk-ZcGOx9ULGHg3nrww xen/include/hypervisor-ifs/trace.h
     2.1 --- a/extras/mini-os/h/hypervisor.h	Thu Jun 17 16:33:33 2004 +0000
     2.2 +++ b/extras/mini-os/h/hypervisor.h	Fri Jun 18 14:46:29 2004 +0000
     2.3 @@ -148,7 +148,7 @@ static inline int HYPERVISOR_shutdown(vo
     2.4      __asm__ __volatile__ (
     2.5          TRAP_INSTR
     2.6          : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
     2.7 -        "b" (SCHEDOP_stop | (STOPCODE_shutdown << SCHEDOP_reasonshift))
     2.8 +        "b" (SCHEDOP_suspend | (STOPCODE_shutdown << SCHEDOP_reasonshift))
     2.9          : "memory" );
    2.10  
    2.11      return ret;
    2.12 @@ -160,7 +160,7 @@ static inline int HYPERVISOR_reboot(void
    2.13      __asm__ __volatile__ (
    2.14          TRAP_INSTR
    2.15          : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
    2.16 -        "b" (SCHEDOP_stop | (STOPCODE_reboot << SCHEDOP_reasonshift))
    2.17 +        "b" (SCHEDOP_suspend | (STOPCODE_reboot << SCHEDOP_reasonshift))
    2.18          : "memory" );
    2.19  
    2.20      return ret;
    2.21 @@ -173,7 +173,7 @@ static inline int HYPERVISOR_suspend(uns
    2.22      __asm__ __volatile__ (
    2.23          TRAP_INSTR
    2.24          : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
    2.25 -        "b" (SCHEDOP_stop | (STOPCODE_suspend << SCHEDOP_reasonshift)), 
    2.26 +        "b" (SCHEDOP_suspend | (STOPCODE_suspend << SCHEDOP_reasonshift)), 
    2.27          "S" (srec) : "memory" );
    2.28  
    2.29      return ret;
    2.30 @@ -268,17 +268,6 @@ static __inline__ int HYPERVISOR_multica
    2.31      return ret;
    2.32  }
    2.33  
    2.34 -static __inline__ long HYPERVISOR_kbd_op(unsigned char op, unsigned char val)
    2.35 -{
    2.36 -    int ret;
    2.37 -    __asm__ __volatile__ (
    2.38 -        TRAP_INSTR
    2.39 -        : "=a" (ret) : "0" (__HYPERVISOR_kbd_op),
    2.40 -        "b" (op), "c" (val) : "memory" );
    2.41 -
    2.42 -    return ret;
    2.43 -}
    2.44 -
    2.45  static __inline__ int HYPERVISOR_update_va_mapping(
    2.46      unsigned long page_nr, unsigned long new_val, unsigned long flags)
    2.47  {
     3.1 --- a/linux-2.4.26-xen-sparse/arch/xen/kernel/setup.c	Thu Jun 17 16:33:33 2004 +0000
     3.2 +++ b/linux-2.4.26-xen-sparse/arch/xen/kernel/setup.c	Fri Jun 18 14:46:29 2004 +0000
     3.3 @@ -62,9 +62,6 @@ unsigned long *phys_to_machine_mapping;
     3.4  multicall_entry_t multicall_list[8];
     3.5  int nr_multicall_ents = 0;
     3.6  
     3.7 -/* used so we treat multiple stop requests as a single one */
     3.8 -int suspending = 0;
     3.9 -
    3.10  /*
    3.11   * Machine setup..
    3.12   */
    3.13 @@ -1149,23 +1146,20 @@ void __init cpu_init (void)
    3.14   * Time-to-die callback handling.
    3.15   */
    3.16  
    3.17 -/* Dynamically-mapped IRQ. */
    3.18 -static int die_irq;
    3.19 -
    3.20 -static void die_interrupt(int irq, void *unused, struct pt_regs *regs)
    3.21 +static void shutdown_handler(ctrl_msg_t *msg, unsigned long id)
    3.22  {
    3.23      extern void ctrl_alt_del(void);
    3.24 +    ctrl_if_send_response(msg);
    3.25      ctrl_alt_del();
    3.26  }
    3.27  
    3.28 -static int __init setup_die_event(void)
    3.29 +static int __init setup_shutdown_event(void)
    3.30  {
    3.31 -    die_irq = bind_virq_to_irq(VIRQ_DIE);
    3.32 -    (void)request_irq(die_irq, die_interrupt, 0, "die", NULL);
     3.33 +    ctrl_if_register_receiver(CMSG_SHUTDOWN, shutdown_handler, 0);
    3.34      return 0;
    3.35  }
    3.36  
    3.37 -__initcall(setup_die_event);
    3.38 +__initcall(setup_shutdown_event);
    3.39  
    3.40  
    3.41  /******************************************************************************
    3.42 @@ -1174,7 +1168,10 @@ static int __init setup_die_event(void)
    3.43  
    3.44  #include <asm/suspend.h>
    3.45  
    3.46 -static void stop_task(void *unused)
    3.47 +/* Treat multiple suspend requests as a single one. */
    3.48 +static int suspending;
    3.49 +
    3.50 +static void suspend_task(void *unused)
    3.51  {
    3.52      /* Hmmm... a cleaner interface to suspend/resume blkdevs would be nice. */
    3.53      extern void blkdev_suspend(void);
    3.54 @@ -1295,29 +1292,28 @@ static void stop_task(void *unused)
    3.55          free_page((unsigned long)suspend_record);
    3.56  }
    3.57  
    3.58 -static struct tq_struct stop_tq;
    3.59 +static struct tq_struct suspend_tq;
    3.60  
    3.61 -/* Dynamically-mapped IRQ. */
    3.62 -static int stop_irq;
    3.63 -
    3.64 -static void stop_interrupt(int irq, void *unused, struct pt_regs *regs)
    3.65 +static void suspend_handler(ctrl_msg_t *msg, unsigned long id)
    3.66  {
    3.67 -    if (!suspending)
    3.68 +    if ( !suspending )
    3.69      {
    3.70  	suspending = 1;
    3.71 -	stop_tq.routine = stop_task;
    3.72 -	schedule_task(&stop_tq);	
    3.73 +	suspend_tq.routine = suspend_task;
    3.74 +	schedule_task(&suspend_tq);	
    3.75      }
    3.76      else
    3.77 -	printk(KERN_ALERT"Ignore queued stop request\n");
    3.78 +    {
    3.79 +	printk(KERN_ALERT"Ignore queued suspend request\n");
    3.80 +    }
    3.81 +
    3.82 +    ctrl_if_send_response(msg);
    3.83  }
    3.84  
    3.85 -static int __init setup_stop_event(void)
    3.86 +static int __init setup_suspend_event(void)
    3.87  {
    3.88 -    stop_irq = bind_virq_to_irq(VIRQ_STOP);
    3.89 -    (void)request_irq(stop_irq, stop_interrupt, 0, "stop", NULL);
    3.90 +    ctrl_if_register_receiver(CMSG_SUSPEND, suspend_handler, 0);
    3.91      return 0;
    3.92  }
    3.93  
    3.94 -__initcall(setup_stop_event);
    3.95 -
    3.96 +__initcall(setup_suspend_event);
     4.1 --- a/linux-2.4.26-xen-sparse/include/asm-xen/hypervisor.h	Thu Jun 17 16:33:33 2004 +0000
     4.2 +++ b/linux-2.4.26-xen-sparse/include/asm-xen/hypervisor.h	Fri Jun 18 14:46:29 2004 +0000
     4.3 @@ -248,7 +248,7 @@ static inline int HYPERVISOR_shutdown(vo
     4.4      __asm__ __volatile__ (
     4.5          TRAP_INSTR
     4.6          : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
     4.7 -        "b" (SCHEDOP_stop | (STOPCODE_shutdown << SCHEDOP_reasonshift))
     4.8 +        "b" (SCHEDOP_suspend | (STOPCODE_shutdown << SCHEDOP_reasonshift))
     4.9          : "memory" );
    4.10  
    4.11      return ret;
    4.12 @@ -260,7 +260,7 @@ static inline int HYPERVISOR_reboot(void
    4.13      __asm__ __volatile__ (
    4.14          TRAP_INSTR
    4.15          : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
    4.16 -        "b" (SCHEDOP_stop | (STOPCODE_reboot << SCHEDOP_reasonshift))
    4.17 +        "b" (SCHEDOP_suspend | (STOPCODE_reboot << SCHEDOP_reasonshift))
    4.18          : "memory" );
    4.19  
    4.20      return ret;
    4.21 @@ -273,7 +273,7 @@ static inline int HYPERVISOR_suspend(uns
    4.22      __asm__ __volatile__ (
    4.23          TRAP_INSTR
    4.24          : "=a" (ret) : "0" (__HYPERVISOR_sched_op),
    4.25 -        "b" (SCHEDOP_stop | (STOPCODE_suspend << SCHEDOP_reasonshift)), 
    4.26 +        "b" (SCHEDOP_suspend | (STOPCODE_suspend << SCHEDOP_reasonshift)), 
    4.27          "S" (srec) : "memory" );
    4.28  
    4.29      return ret;
    4.30 @@ -373,17 +373,6 @@ static inline int HYPERVISOR_multicall(v
    4.31      return ret;
    4.32  }
    4.33  
    4.34 -static inline long HYPERVISOR_kbd_op(unsigned char op, unsigned char val)
    4.35 -{
    4.36 -    int ret;
    4.37 -    __asm__ __volatile__ (
    4.38 -        TRAP_INSTR
    4.39 -        : "=a" (ret) : "0" (__HYPERVISOR_kbd_op),
    4.40 -        "b" (op), "c" (val) : "memory" );
    4.41 -
    4.42 -    return ret;
    4.43 -}
    4.44 -
    4.45  static inline int HYPERVISOR_update_va_mapping(
    4.46      unsigned long page_nr, pte_t new_val, unsigned long flags)
    4.47  {
     5.1 --- a/linux-2.4.26-xen-sparse/include/asm-xen/keyboard.h	Thu Jun 17 16:33:33 2004 +0000
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,100 +0,0 @@
     5.4 -/* Portions copyright (c) 2003 James Scott, Intel Research Cambridge */
     5.5 -/*
     5.6 - * Talks to hypervisor to get PS/2 keyboard and mouse events, and send keyboard
     5.7 - * and mouse commands
     5.8 - */
     5.9 -
    5.10 -/*  Based on:
    5.11 - *  linux/include/asm-i386/keyboard.h
    5.12 - *
    5.13 - *  Created 3 Nov 1996 by Geert Uytterhoeven
    5.14 - */
    5.15 -
    5.16 -#ifndef _XEN_KEYBOARD_H
    5.17 -#define _XEN_KEYBOARD_H
    5.18 -
    5.19 -#ifdef __KERNEL__
    5.20 -
    5.21 -#include <linux/kernel.h>
    5.22 -#include <linux/ioport.h>
    5.23 -#include <linux/kd.h>
    5.24 -#include <linux/pm.h>
    5.25 -#include <asm/io.h>
    5.26 -
    5.27 -extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
    5.28 -extern int pckbd_getkeycode(unsigned int scancode);
    5.29 -extern int pckbd_translate(unsigned char scancode, unsigned char *keycode,
    5.30 -			   char raw_mode);
    5.31 -extern char pckbd_unexpected_up(unsigned char keycode);
    5.32 -extern void pckbd_leds(unsigned char leds);
    5.33 -extern void pckbd_init_hw(void);
    5.34 -extern int pckbd_pm_resume(struct pm_dev *, pm_request_t, void *);
    5.35 -
    5.36 -extern pm_callback pm_kbd_request_override;
    5.37 -extern unsigned char pckbd_sysrq_xlate[128];
    5.38 -
    5.39 -#define kbd_setkeycode		pckbd_setkeycode
    5.40 -#define kbd_getkeycode		pckbd_getkeycode
    5.41 -#define kbd_translate		pckbd_translate
    5.42 -#define kbd_unexpected_up	pckbd_unexpected_up
    5.43 -#define kbd_leds		pckbd_leds
    5.44 -#define kbd_init_hw		pckbd_init_hw
    5.45 -#define kbd_sysrq_xlate		pckbd_sysrq_xlate
    5.46 -
    5.47 -#define SYSRQ_KEY 0x54
    5.48 -
    5.49 -
    5.50 -/* THIS SECTION TALKS TO XEN TO DO PS2 SUPPORT */
    5.51 -#include <asm/hypervisor-ifs/kbd.h>
    5.52 -#include <asm/hypervisor-ifs/hypervisor-if.h>
    5.53 -
    5.54 -#define kbd_controller_present xen_kbd_controller_present
    5.55 -
    5.56 -static inline int xen_kbd_controller_present ()
    5.57 -{
    5.58 -    return start_info.flags & SIF_INITDOMAIN;
    5.59 -}
    5.60 -
    5.61 -/* resource allocation */
    5.62 -#define kbd_request_region()     \
    5.63 -    do { } while (0)
    5.64 -#define kbd_request_irq(handler)                    \
    5.65 -    do {                                            \
    5.66 -        int irq = bind_virq_to_irq(VIRQ_PS2);       \
    5.67 -        request_irq(irq, handler, 0, "ps/2", NULL); \
    5.68 -    } while ( 0 )
    5.69 -
    5.70 -/* could implement these with command to xen to filter mouse stuff... */
    5.71 -#define aux_request_irq(hand, dev_id) 0
    5.72 -#define aux_free_irq(dev_id) do { } while(0)
    5.73 -
    5.74 -/* Some stoneage hardware needs delays after some operations.  */
    5.75 -#define kbd_pause() do { } while(0)
    5.76 -
    5.77 -static unsigned char kbd_current_scancode = 0;
    5.78 -
    5.79 -static unsigned char kbd_read_input(void) 
    5.80 -{
    5.81 -  return kbd_current_scancode;
    5.82 -}
    5.83 -
    5.84 -static unsigned char kbd_read_status(void) 
    5.85 -{
    5.86 -  long res;
    5.87 -  res = HYPERVISOR_kbd_op(KBD_OP_READ,0);
    5.88 -  if ( res<0 ) 
    5.89 -  {
    5.90 -    kbd_current_scancode = 0;
    5.91 -    return 0; /* error with our request - wrong domain? */
    5.92 -  }
    5.93 -  kbd_current_scancode = KBD_CODE_SCANCODE(res);
    5.94 -  return KBD_CODE_STATUS(res);
    5.95 -}
    5.96 -
    5.97 -
    5.98 -#define kbd_write_output(val)  HYPERVISOR_kbd_op(KBD_OP_WRITEOUTPUT, val);
    5.99 -#define kbd_write_command(val) HYPERVISOR_kbd_op(KBD_OP_WRITECOMMAND, val);
   5.100 -
   5.101 -
   5.102 -#endif /* __KERNEL__ */
   5.103 -#endif /* _XEN_KEYBOARD_H */
     6.1 --- a/linux-2.4.26-xen-sparse/mkbuildtree	Thu Jun 17 16:33:33 2004 +0000
     6.2 +++ b/linux-2.4.26-xen-sparse/mkbuildtree	Fri Jun 18 14:46:29 2004 +0000
     6.3 @@ -146,6 +146,7 @@ ln -sf ../asm-i386/ioctl.h
     6.4  ln -sf ../asm-i386/ioctls.h
     6.5  ln -sf ../asm-i386/ipcbuf.h
     6.6  ln -sf ../asm-i386/ipc.h 
     6.7 +ln -sf ../asm-i386/keyboard.h
     6.8  ln -sf ../asm-i386/kmap_types.h
     6.9  ln -sf ../asm-i386/ldt.h 
    6.10  ln -sf ../asm-i386/linux_logo.h
     7.1 --- a/tools/xc/lib/xc_domain.c	Thu Jun 17 16:33:33 2004 +0000
     7.2 +++ b/tools/xc/lib/xc_domain.c	Fri Jun 18 14:46:29 2004 +0000
     7.3 @@ -46,7 +46,6 @@ int xc_domain_stop(int xc_handle,
     7.4      dom0_op_t op;
     7.5      op.cmd = DOM0_STOPDOMAIN;
     7.6      op.u.stopdomain.domain = (domid_t)domid;
     7.7 -    op.u.stopdomain.sync = 0; /* async */
     7.8      return do_dom0_op(xc_handle, &op);
     7.9  }    
    7.10  
    7.11 @@ -97,7 +96,8 @@ int xc_domain_getinfo(int xc_handle,
    7.12          info->has_cpu =
    7.13              (op.u.getdomaininfo.flags&DOMFLAGS_STATEMASK) == DOMSTATE_RUNNING;
    7.14          info->stopped = 
    7.15 -            (op.u.getdomaininfo.flags&DOMFLAGS_STATEMASK) == DOMSTATE_STOPPED;
    7.16 +            (op.u.getdomaininfo.flags&DOMFLAGS_STATEMASK)
    7.17 +            == DOMSTATE_SUSPENDED;
    7.18  
    7.19          info->nr_pages = op.u.getdomaininfo.tot_pages;
    7.20          info->max_memkb = op.u.getdomaininfo.max_pages<<(PAGE_SHIFT-10);
     8.1 --- a/tools/xc/lib/xc_linux_build.c	Thu Jun 17 16:33:33 2004 +0000
     8.2 +++ b/tools/xc/lib/xc_linux_build.c	Fri Jun 18 14:46:29 2004 +0000
     8.3 @@ -436,7 +436,7 @@ int xc_linux_build(int xc_handle,
     8.4          PERROR("Could not get info on domain");
     8.5          goto error_out;
     8.6      }
     8.7 -    if ( ((op.u.getdomaininfo.flags&DOMFLAGS_STATEMASK) != DOMSTATE_STOPPED) ||
     8.8 +    if ( ((op.u.getdomaininfo.flags&DOMFLAGS_STATEMASK) != DOMSTATE_PAUSED) ||
     8.9           (ctxt->pt_base != 0) )
    8.10      {
    8.11          ERROR("Domain is already constructed");
     9.1 --- a/tools/xc/lib/xc_netbsd_build.c	Thu Jun 17 16:33:33 2004 +0000
     9.2 +++ b/tools/xc/lib/xc_netbsd_build.c	Fri Jun 18 14:46:29 2004 +0000
     9.3 @@ -258,7 +258,7 @@ int xc_netbsd_build(int xc_handle,
     9.4          PERROR("Could not get info on domain");
     9.5          goto error_out;
     9.6      }
     9.7 -    if ( ((op.u.getdomaininfo.flags&DOMFLAGS_STATEMASK) != DOMSTATE_STOPPED) ||
     9.8 +    if ( ((op.u.getdomaininfo.flags&DOMFLAGS_STATEMASK) != DOMSTATE_PAUSED) ||
     9.9           (op.u.getdomaininfo.ctxt->pt_base != 0) )
    9.10      {
    9.11          ERROR("Domain is already constructed");
    10.1 --- a/tools/xc/lib/xc_private.c	Thu Jun 17 16:33:33 2004 +0000
    10.2 +++ b/tools/xc/lib/xc_private.c	Fri Jun 18 14:46:29 2004 +0000
    10.3 @@ -206,43 +206,10 @@ int finish_mmu_updates(int xc_handle, mm
    10.4  int xc_domain_stop_sync( int xc_handle, domid_t domid,
    10.5                           dom0_op_t *op, full_execution_context_t *ctxt)
    10.6  {
    10.7 -    int i;
    10.8 -
    10.9 -    printf("Sleep:");
   10.10 -
   10.11 -    for( i = 0; ; i++ )
   10.12 -    {    
   10.13 -
   10.14 -        op->cmd = DOM0_STOPDOMAIN;
   10.15 -        op->u.stopdomain.domain = (domid_t)domid;
   10.16 -        op->u.stopdomain.sync = 1;
   10.17 -        do_dom0_op(xc_handle, op);
   10.18 -        /* can't trust return code due to sync stop hack :-(( */
   10.19 -
   10.20 -        op->cmd = DOM0_GETDOMAININFO;
   10.21 -        op->u.getdomaininfo.domain = (domid_t)domid;
   10.22 -        op->u.getdomaininfo.ctxt = ctxt;
   10.23 -        if ( (do_dom0_op(xc_handle, op) < 0) || 
   10.24 -             ((u32)op->u.getdomaininfo.domain != domid) )
   10.25 -        {
   10.26 -            PERROR("Could not get info on domain");
   10.27 -            goto out;
   10.28 -        }
   10.29 -
   10.30 -        if ( (op->u.getdomaininfo.flags & DOMFLAGS_STATEMASK) == 
   10.31 -             DOMSTATE_STOPPED )
   10.32 -        {
   10.33 -            printf("Domain %u stopped\n",domid);
   10.34 -            return 0;
   10.35 -        }
   10.36 - 
   10.37 -        printf(".");
   10.38 -    }
   10.39 -
   10.40 -    printf("\n");
   10.41 -
   10.42 - out:
   10.43 -    return -1;    
   10.44 +    op->cmd = DOM0_STOPDOMAIN;
   10.45 +    op->u.stopdomain.domain = (domid_t)domid;
   10.46 +    do_dom0_op(xc_handle, op);
   10.47 +    return 0;
   10.48  }
   10.49  
   10.50  long long  xc_domain_get_cpu_usage( int xc_handle, domid_t domid )
    11.1 --- a/tools/xend/lib/domain_controller.h	Thu Jun 17 16:33:33 2004 +0000
    11.2 +++ b/tools/xend/lib/domain_controller.h	Fri Jun 18 14:46:29 2004 +0000
    11.3 @@ -29,7 +29,7 @@ typedef struct {
    11.4  
    11.5  
    11.6  /*
    11.7 - * Stop codes for SCHEDOP_stop. These are opaque to Xen but interpreted by
    11.8 + * Stop codes for SCHEDOP_suspend. These are opaque to Xen but interpreted by
    11.9   * control software to determine appropriate action.
   11.10   */
   11.11  
   11.12 @@ -69,6 +69,8 @@ typedef struct {
   11.13  #define CMSG_BLKIF_FE       2  /* Block-device frontend   */
   11.14  #define CMSG_NETIF_BE       3  /* Network-device backend  */
   11.15  #define CMSG_NETIF_FE       4  /* Network-device frontend */
   11.16 +#define CMSG_SUSPEND        5  /* Suspend messages        */
   11.17 +#define CMSG_SHUTDOWN       6  /* Shutdown messages       */
   11.18  
   11.19  
   11.20  /******************************************************************************
   11.21 @@ -515,4 +517,27 @@ typedef struct {
   11.22      u32        status;        /*  0: NETIF_DRIVER_STATUS_??? */
   11.23  } PACKED netif_be_driver_status_changed_t; /* 4 bytes */
   11.24  
   11.25 +
   11.26 +/******************************************************************************
   11.27 + * SUSPEND DEFINITIONS
   11.28 + */
   11.29 +
   11.30 +/*
    11.31 + * Subtypes for suspend messages.
   11.32 + */
   11.33 +/* None. */
   11.34 +
   11.35 +
   11.36 +/******************************************************************************
    11.37 + * SHUTDOWN DEFINITIONS
   11.38 + */
   11.39 +
   11.40 +/*
    11.41 + * Subtypes for shutdown messages.
   11.42 + */
   11.43 +#define CMSG_SHUTDOWN_HALT      0   /* Shutdown and halt (don't die). */
   11.44 +#define CMSG_SHUTDOWN_POWEROFF  1   /* 'Poweroff' => clean death.     */
   11.45 +#define CMSG_SHUTDOWN_REBOOT    2   /* Shutdown and restart.          */
   11.46 +
   11.47 +
   11.48  #endif /* __DOMAIN_CONTROLLER_H__ */
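
The new CMSG_SUSPEND/CMSG_SHUTDOWN classes above are what the guest-side receivers in the setup.c hunk bind to. For orientation, a hedged sketch of the controller side: building a suspend request, assuming the 64-byte ctrl_msg_t layout (type/subtype/id/length plus payload) declared earlier in this header; xend_send_to_domain() is a hypothetical transport helper, not something this diff defines:

    #include <string.h>

    /* Hypothetical transport helper; the real ring plumbing lives in
     * tools/xend/lib/utils.c. */
    extern void xend_send_to_domain(unsigned int dom, ctrl_msg_t *msg);

    static void request_suspend(unsigned int dom)
    {
        ctrl_msg_t msg;
        memset(&msg, 0, sizeof(msg));
        msg.type    = CMSG_SUSPEND;  /* message class defined above */
        msg.subtype = 0;             /* suspend defines no subtypes */
        msg.length  = 0;             /* no payload */
        xend_send_to_domain(dom, &msg);
        /* The guest acks with ctrl_if_send_response() -- see the
         * setup.c hunk above. */
    }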
    12.1 --- a/tools/xend/lib/utils.c	Thu Jun 17 16:33:33 2004 +0000
    12.2 +++ b/tools/xend/lib/utils.c	Fri Jun 18 14:46:29 2004 +0000
    12.3 @@ -687,29 +687,30 @@ static int xup_connect(xu_port_object *x
    12.4                         int local_port, int remote_port){
     12.5      // From our perspective rx = producer, tx = consumer.
    12.6      int err = 0;
    12.7 -    printf("%s> dom=%u %d:%d\n", __FUNCTION__, dom, local_port, remote_port);
    12.8 +    printf("%s> dom=%u %d:%d\n", __FUNCTION__, (unsigned int)dom, 
    12.9 +           local_port, remote_port);
   12.10  
   12.11      // Consumer = tx.
   12.12      //xup->interface->tx_resp_prod = 0;
   12.13      //xup->interface->tx_req_prod = 0;
   12.14      xup->tx_resp_prod = xup->interface->tx_resp_prod;
   12.15      xup->tx_req_cons = xup->interface->tx_resp_prod;
   12.16 -    printf("%s> tx: %p %p : %p %p\n", __FUNCTION__,
   12.17 -           xup->interface->tx_resp_prod,
   12.18 -           xup->tx_resp_prod,
   12.19 -           xup->tx_req_cons,
   12.20 -           xup->interface->tx_req_prod);
   12.21 +    printf("%s> tx: %u %u : %u %u\n", __FUNCTION__,
   12.22 +           (unsigned int)xup->interface->tx_resp_prod,
   12.23 +           (unsigned int)xup->tx_resp_prod,
   12.24 +           (unsigned int)xup->tx_req_cons,
   12.25 +           (unsigned int)xup->interface->tx_req_prod);
   12.26  
   12.27      // Producer = rx.
   12.28      //xup->interface->rx_req_prod  = 0;
   12.29      //xup->interface->rx_resp_prod = 0;
   12.30      xup->rx_req_prod  = xup->interface->rx_req_prod;
   12.31      xup->rx_resp_cons = xup->interface->rx_resp_prod;
   12.32 -    printf("%s> rx: %p %p : %p %p\n", __FUNCTION__,
   12.33 -           xup->rx_resp_cons,
   12.34 -           xup->interface->rx_resp_prod,
   12.35 -           xup->interface->rx_req_prod,
   12.36 -           xup->rx_req_prod);
   12.37 +    printf("%s> rx: %u %u : %u %u\n", __FUNCTION__,
   12.38 +           (unsigned int)xup->rx_resp_cons,
   12.39 +           (unsigned int)xup->interface->rx_resp_prod,
   12.40 +           (unsigned int)xup->interface->rx_req_prod,
   12.41 +           (unsigned int)xup->rx_req_prod);
   12.42  
   12.43      xup->remote_dom   = dom;
   12.44      xup->local_port   = local_port;
    13.1 --- a/xen/arch/x86/entry.S	Thu Jun 17 16:33:33 2004 +0000
    13.2 +++ b/xen/arch/x86/entry.S	Fri Jun 18 14:46:29 2004 +0000
    13.3 @@ -101,9 +101,8 @@ EFLAGS		= 0x38
    13.4  OLDESP		= 0x3C
    13.5  OLDSS		= 0x40
    13.6  
    13.7 -/* Offsets in task_struct */
    13.8 +/* Offsets in domain structure */
    13.9  PROCESSOR       =  0
   13.10 -HYP_EVENTS      =  2
   13.11  SHARED_INFO     =  4
   13.12  EVENT_SEL       =  8
   13.13  EVENT_ADDR      = 12
   13.14 @@ -292,7 +291,7 @@ 6:      pushl %eax
   13.15  /* No special register assumptions */
   13.16  failsafe_callback:
   13.17          GET_CURRENT(%ebx)
   13.18 -        movzwl PROCESSOR(%ebx),%eax
   13.19 +        movl PROCESSOR(%ebx),%eax
   13.20          shl  $4,%eax
   13.21          lea  guest_trap_bounce(%eax),%edx
   13.22          movl FAILSAFE_ADDR(%ebx),%eax
   13.23 @@ -358,13 +357,10 @@ test_all_events:
   13.24          notl %ecx
   13.25          cli                             # tests must not race interrupts
   13.26  /*test_softirqs:*/  
   13.27 -        movzwl PROCESSOR(%ebx),%eax
   13.28 +        movl PROCESSOR(%ebx),%eax
   13.29          shl  $6,%eax                    # sizeof(irq_cpustat) == 64
   13.30          test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
   13.31          jnz  process_softirqs
   13.32 -/*test_hyp_events:*/
   13.33 -        testw %cx, HYP_EVENTS(%ebx)
   13.34 -        jnz  process_hyp_events
   13.35  /*test_guest_events:*/
   13.36          movl SHARED_INFO(%ebx),%eax
   13.37          testb $0xFF,UPCALL_MASK(%eax)
   13.38 @@ -373,7 +369,7 @@ test_all_events:
   13.39          jz   restore_all_guest
   13.40          movb $1,UPCALL_MASK(%eax)       # Upcalls are masked during delivery
   13.41  /*process_guest_events:*/
   13.42 -        movzwl PROCESSOR(%ebx),%edx
   13.43 +        movl PROCESSOR(%ebx),%edx
   13.44          shl  $4,%edx                    # sizeof(guest_trap_bounce) == 16
   13.45          lea  guest_trap_bounce(%edx),%edx
   13.46          movl EVENT_ADDR(%ebx),%eax
   13.47 @@ -388,13 +384,7 @@ process_softirqs:
   13.48          sti       
   13.49          call SYMBOL_NAME(do_softirq)
   13.50          jmp  test_all_events
   13.51 -        
   13.52 -        ALIGN
   13.53 -process_hyp_events:
   13.54 -        sti
   13.55 -        call SYMBOL_NAME(do_hyp_events)
   13.56 -        jmp  test_all_events
   13.57 -        
   13.58 +                
   13.59  /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:         */
   13.60  /*   {EIP, CS, EFLAGS, [ESP, SS]}                                     */
   13.61  /* %edx == guest_trap_bounce, %ebx == task_struct                     */
   13.62 @@ -404,7 +394,7 @@ create_bounce_frame:
   13.63          test $2,%cl
   13.64          jz   1f /* jump if returning to an existing ring-1 activation */
   13.65          /* obtain ss/esp from TSS -- no current ring-1 activations */
   13.66 -        movzwl PROCESSOR(%ebx),%eax
   13.67 +        movl PROCESSOR(%ebx),%eax
   13.68          /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
   13.69          movl %eax, %ecx
   13.70          shll $7, %ecx
   13.71 @@ -467,18 +457,18 @@ FAULT12:movl %eax,8(%esi)
   13.72  crash_domain_fixup1:
   13.73          subl  $4,%esp
   13.74          SAVE_ALL
   13.75 -        jmp   crash_domain
   13.76 +        jmp   domain_crash
   13.77  crash_domain_fixup2:
   13.78          addl  $4,%esp                     
   13.79  crash_domain_fixup3:
   13.80          pushl %ss
   13.81          popl  %ds
   13.82 -        jmp   crash_domain
   13.83 +        jmp   domain_crash
   13.84  .previous
   13.85  
   13.86          ALIGN
   13.87  process_guest_exception_and_events:        
   13.88 -        movzwl PROCESSOR(%ebx),%eax
   13.89 +        movl PROCESSOR(%ebx),%eax
   13.90          shl  $4,%eax
   13.91          lea  guest_trap_bounce(%eax),%edx
   13.92          testb $~0,GTB_FLAGS(%edx)
   13.93 @@ -723,7 +713,7 @@ ENTRY(hypervisor_call_table)
   13.94          .long SYMBOL_NAME(do_set_fast_trap)      /* 15 */
   13.95          .long SYMBOL_NAME(do_dom_mem_op)
   13.96          .long SYMBOL_NAME(do_multicall)
   13.97 -        .long SYMBOL_NAME(do_kbd_op)
   13.98 +        .long SYMBOL_NAME(do_ni_syscall)                    # do_kbd_op
   13.99          .long SYMBOL_NAME(do_update_va_mapping)
  13.100          .long SYMBOL_NAME(do_set_timer_op)       /* 20 */
  13.101          .long SYMBOL_NAME(do_event_channel_op)
    14.1 --- a/xen/arch/x86/i387.c	Thu Jun 17 16:33:33 2004 +0000
    14.2 +++ b/xen/arch/x86/i387.c	Fri Jun 18 14:46:29 2004 +0000
    14.3 @@ -17,10 +17,10 @@ void init_fpu(void)
    14.4  {
    14.5      __asm__("fninit");
    14.6      if ( cpu_has_xmm ) load_mxcsr(0x1f80);
    14.7 -    set_bit(PF_DONEFPUINIT, &current->flags);
    14.8 +    set_bit(DF_DONEFPUINIT, &current->flags);
    14.9  }
   14.10  
   14.11 -static inline void __save_init_fpu( struct task_struct *tsk )
   14.12 +static inline void __save_init_fpu( struct domain *tsk )
   14.13  {
   14.14      if ( cpu_has_fxsr ) {
   14.15          asm volatile( "fxsave %0 ; fnclex"
   14.16 @@ -29,22 +29,22 @@ static inline void __save_init_fpu( stru
   14.17          asm volatile( "fnsave %0 ; fwait"
   14.18                        : "=m" (tsk->thread.i387) );
   14.19      }
   14.20 -    clear_bit(PF_USEDFPU, &tsk->flags);
   14.21 +    clear_bit(DF_USEDFPU, &tsk->flags);
   14.22  }
   14.23  
   14.24 -void save_init_fpu( struct task_struct *tsk )
   14.25 +void save_init_fpu( struct domain *tsk )
   14.26  {
   14.27      /*
   14.28       * The guest OS may have set the 'virtual STTS' flag.
   14.29       * This causes us to set the real flag, so we'll need
   14.30       * to temporarily clear it while saving f-p state.
   14.31       */
   14.32 -    if ( test_bit(PF_GUEST_STTS, &tsk->flags) ) clts();
   14.33 +    if ( test_bit(DF_GUEST_STTS, &tsk->flags) ) clts();
   14.34      __save_init_fpu(tsk);
   14.35      stts();
   14.36  }
   14.37  
   14.38 -void restore_fpu( struct task_struct *tsk )
   14.39 +void restore_fpu( struct domain *tsk )
   14.40  {
   14.41      if ( cpu_has_fxsr ) {
   14.42          asm volatile( "fxrstor %0"
    15.1 --- a/xen/arch/x86/idle0_task.c	Thu Jun 17 16:33:33 2004 +0000
    15.2 +++ b/xen/arch/x86/idle0_task.c	Fri Jun 18 14:46:29 2004 +0000
    15.3 @@ -2,7 +2,7 @@
    15.4  #include <xen/sched.h>
    15.5  #include <asm/desc.h>
    15.6  
    15.7 -struct task_struct idle0_task = IDLE0_TASK(idle0_task);
    15.8 +struct domain idle0_task = IDLE0_TASK(idle0_task);
    15.9  
   15.10  /*
   15.11   * per-CPU TSS segments. Threads are completely 'soft' on Linux,
    16.1 --- a/xen/arch/x86/irq.c	Thu Jun 17 16:33:33 2004 +0000
    16.2 +++ b/xen/arch/x86/irq.c	Fri Jun 18 14:46:29 2004 +0000
    16.3 @@ -185,14 +185,14 @@ typedef struct {
    16.4      u8 nr_guests;
    16.5      u8 in_flight;
    16.6      u8 shareable;
    16.7 -    struct task_struct *guest[IRQ_MAX_GUESTS];
    16.8 +    struct domain *guest[IRQ_MAX_GUESTS];
    16.9  } irq_guest_action_t;
   16.10  
   16.11  static void __do_IRQ_guest(int irq)
   16.12  {
   16.13      irq_desc_t         *desc = &irq_desc[irq];
   16.14      irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
   16.15 -    struct task_struct *p;
   16.16 +    struct domain *p;
   16.17      int                 i;
   16.18  
   16.19      for ( i = 0; i < action->nr_guests; i++ )
   16.20 @@ -204,7 +204,7 @@ static void __do_IRQ_guest(int irq)
   16.21      }
   16.22  }
   16.23  
   16.24 -int pirq_guest_unmask(struct task_struct *p)
   16.25 +int pirq_guest_unmask(struct domain *p)
   16.26  {
   16.27      irq_desc_t    *desc;
   16.28      int            i, j, pirq;
   16.29 @@ -231,7 +231,7 @@ int pirq_guest_unmask(struct task_struct
   16.30      return 0;
   16.31  }
   16.32  
   16.33 -int pirq_guest_bind(struct task_struct *p, int irq, int will_share)
   16.34 +int pirq_guest_bind(struct domain *p, int irq, int will_share)
   16.35  {
   16.36      irq_desc_t         *desc = &irq_desc[irq];
   16.37      irq_guest_action_t *action;
   16.38 @@ -299,7 +299,7 @@ int pirq_guest_bind(struct task_struct *
   16.39      return rc;
   16.40  }
   16.41  
   16.42 -int pirq_guest_unbind(struct task_struct *p, int irq)
   16.43 +int pirq_guest_unbind(struct domain *p, int irq)
   16.44  {
   16.45      irq_desc_t         *desc = &irq_desc[irq];
   16.46      irq_guest_action_t *action;
    17.1 --- a/xen/arch/x86/mm.c	Thu Jun 17 16:33:33 2004 +0000
    17.2 +++ b/xen/arch/x86/mm.c	Fri Jun 18 14:46:29 2004 +0000
    17.3 @@ -212,7 +212,7 @@ int check_descriptor(unsigned long a, un
    17.4  }
    17.5  
    17.6  
    17.7 -long set_gdt(struct task_struct *p, 
    17.8 +long set_gdt(struct domain *p, 
    17.9               unsigned long *frames,
   17.10               unsigned int entries)
   17.11  {
    18.1 --- a/xen/arch/x86/pdb-stub.c	Thu Jun 17 16:33:33 2004 +0000
    18.2 +++ b/xen/arch/x86/pdb-stub.c	Fri Jun 18 14:46:29 2004 +0000
    18.3 @@ -100,7 +100,7 @@ pdb_process_query (char *ptr)
    18.4      else if (strcmp(ptr, "fThreadInfo") == 0)
    18.5      {
    18.6  #ifdef PDB_PAST
    18.7 -        struct task_struct *p;
    18.8 +        struct domain *p;
    18.9          u_long flags;
   18.10  #endif /* PDB_PAST */
   18.11  
   18.12 @@ -197,11 +197,11 @@ pdb_process_query (char *ptr)
   18.13  #ifdef PDB_PAST
   18.14          int thread = 0;
   18.15  	char message[16];
   18.16 -	struct task_struct *p;
   18.17 +	struct domain *p;
   18.18  
   18.19  	p = find_domain_by_id(pdb_ctx[pdb_level].info);
   18.20  	strncpy (message, p->name, 16);
   18.21 -	put_task_struct(p);
   18.22 +	put_domain(p);
   18.23  
   18.24  	ptr += 16;
   18.25          if (hexToInt (&ptr, &thread))
   18.26 @@ -327,7 +327,7 @@ pdb_process_command (char *ptr, struct p
   18.27      {
   18.28          if (pdb_ctx.domain == -1)                        /* pdb context: xen */
   18.29  	{
   18.30 -	    struct task_struct *p;
   18.31 +	    struct domain *p;
   18.32  
   18.33  	    p = &idle0_task;
   18.34  	    if (p->mm.shadow_mode)
   18.35 @@ -337,7 +337,7 @@ pdb_process_command (char *ptr, struct p
   18.36  	}
   18.37  	else if (pdb_ctx.process == -1)             /* pdb context: guest os */
   18.38  	{
   18.39 -	    struct task_struct *p;
   18.40 +	    struct domain *p;
   18.41  
   18.42  	    if (pdb_ctx.domain == -2)
   18.43  	    {
   18.44 @@ -358,11 +358,11 @@ pdb_process_command (char *ptr, struct p
   18.45  	        pdb_ctx.ptbr = pagetable_val(p->mm.shadow_table);
   18.46  	    else
   18.47  	        pdb_ctx.ptbr = pagetable_val(p->mm.pagetable);
   18.48 -	    put_task_struct(p);
   18.49 +	    put_domain(p);
   18.50  	}
   18.51  	else                                         /* pdb context: process */
   18.52  	{
   18.53 -	    struct task_struct *p;
   18.54 +	    struct domain *p;
   18.55  	    unsigned long domain_ptbr;
   18.56  
   18.57  	    p = find_domain_by_id(pdb_ctx.domain);
   18.58 @@ -378,7 +378,7 @@ pdb_process_command (char *ptr, struct p
   18.59  	        domain_ptbr = pagetable_val(p->mm.shadow_table);
   18.60  	    else
   18.61  	        domain_ptbr = pagetable_val(p->mm.pagetable);
   18.62 -	    put_task_struct(p);
   18.63 +	    put_domain(p);
   18.64  
   18.65  	    pdb_ctx.ptbr = domain_ptbr;
   18.66  	    /*pdb_ctx.ptbr=pdb_linux_pid_ptbr(domain_ptbr, pdb_ctx.process);*/
   18.67 @@ -603,13 +603,13 @@ pdb_process_command (char *ptr, struct p
   18.68  	    {
   18.69  	        case PDB_LVL_XEN:
   18.70  		{
   18.71 -		    struct task_struct *p;
   18.72 +		    struct domain *p;
   18.73  		    id -= PDB_ID_OFFSET;
   18.74  		    if ( (p = find_domain_by_id(id)) == NULL)
   18.75  		        strcpy (pdb_out_buffer, "E00");
   18.76  		    else
   18.77  		        strcpy (pdb_out_buffer, "OK");
   18.78 -		    put_task_struct(p);
   18.79 +		    put_domain(p);
   18.80  
   18.81  		    pdb_level = PDB_LVL_GUESTOS;
   18.82  		    pdb_ctx[pdb_level].ctrl = id;
   18.83 @@ -986,11 +986,11 @@ int pdb_change_values_one_page(u_char *b
   18.84  	}
   18.85  	else
   18.86  	{
   18.87 -	    struct task_struct *p = find_domain_by_id(0);
   18.88 +	    struct domain *p = find_domain_by_id(0);
   18.89  	    printk ("pdb error: cr3: 0x%lx    dom0cr3:  0x%lx\n",  cr3,
   18.90  		    p->mm.shadow_mode ? pagetable_val(p->mm.shadow_table)
   18.91  		    : pagetable_val(p->mm.pagetable));
   18.92 -	    put_task_struct(p);
   18.93 +	    put_domain(p);
   18.94  	    printk ("pdb error: L2:0x%p (0x%lx)\n", 
   18.95  		    l2_table, l2_pgentry_val(*l2_table));
   18.96  	}
    19.1 --- a/xen/arch/x86/process.c	Thu Jun 17 16:33:33 2004 +0000
    19.2 +++ b/xen/arch/x86/process.c	Fri Jun 18 14:46:29 2004 +0000
    19.3 @@ -51,9 +51,10 @@ void enable_hlt(void)
    19.4   */
    19.5  static void default_idle(void)
    19.6  {
    19.7 -    if (!hlt_counter) {
    19.8 +    if ( hlt_counter == 0 )
    19.9 +    {
   19.10          __cli();
   19.11 -        if (!current->hyp_events && !softirq_pending(smp_processor_id()))
   19.12 +        if ( !softirq_pending(smp_processor_id()) )
   19.13              safe_halt();
   19.14          else
   19.15              __sti();
   19.16 @@ -66,9 +67,8 @@ void continue_cpu_idle_loop(void)
   19.17      for ( ; ; )
   19.18      {
   19.19          irq_stat[cpu].idle_timestamp = jiffies;
   19.20 -        while (!current->hyp_events && !softirq_pending(cpu))
   19.21 +        while ( !softirq_pending(cpu) )
   19.22              default_idle();
   19.23 -        do_hyp_events();
   19.24          do_softirq();
   19.25      }
   19.26  }
   19.27 @@ -77,7 +77,7 @@ void startup_cpu_idle_loop(void)
   19.28  {
   19.29      /* Just some sanity to ensure that the scheduler is set up okay. */
   19.30      ASSERT(current->domain == IDLE_DOMAIN_ID);
   19.31 -    (void)wake_up(current);
   19.32 +    domain_controller_unpause(current);
   19.33      __enter_scheduler();
   19.34  
   19.35      /*
   19.36 @@ -193,7 +193,7 @@ void machine_power_off(void)
   19.37      machine_restart(0);
   19.38  }
   19.39  
   19.40 -void new_thread(struct task_struct *p,
   19.41 +void new_thread(struct domain *p,
   19.42                  unsigned long start_pc,
   19.43                  unsigned long start_stack,
   19.44                  unsigned long start_info)
   19.45 @@ -231,7 +231,7 @@ void new_thread(struct task_struct *p,
   19.46  			:"r" (thread->debugreg[register]))
   19.47  
   19.48  
   19.49 -void switch_to(struct task_struct *prev_p, struct task_struct *next_p)
   19.50 +void switch_to(struct domain *prev_p, struct domain *next_p)
   19.51  {
   19.52      struct thread_struct *next = &next_p->thread;
   19.53      struct tss_struct *tss = init_tss + smp_processor_id();
    20.1 --- a/xen/arch/x86/setup.c	Thu Jun 17 16:33:33 2004 +0000
    20.2 +++ b/xen/arch/x86/setup.c	Fri Jun 18 14:46:29 2004 +0000
    20.3 @@ -25,7 +25,7 @@ EXPORT_SYMBOL(mmu_cr4_features);
    20.4  
    20.5  unsigned long wait_init_idle;
    20.6  
    20.7 -struct task_struct *idle_task[NR_CPUS] = { &idle0_task };
    20.8 +struct domain *idle_task[NR_CPUS] = { &idle0_task };
    20.9  
   20.10  #ifdef	CONFIG_ACPI_INTERPRETER
   20.11  int acpi_disabled = 0;
   20.12 @@ -304,8 +304,7 @@ void __init start_of_day(void)
   20.13      extern void trap_init(void);
   20.14      extern void time_init(void);
   20.15      extern void ac_timer_init(void);
   20.16 -    extern void initialize_keytable(); 
   20.17 -    extern void initialize_keyboard(void);
   20.18 +    extern void initialize_keytable();
   20.19      extern int opt_nosmp, opt_watchdog, opt_noacpi, opt_ignorebiostables;
   20.20      extern int do_timer_lists_from_pit;
   20.21      unsigned long low_mem_size;
   20.22 @@ -316,9 +315,7 @@ void __init start_of_day(void)
   20.23      memguard_guard_range(cpu0_stack, PAGE_SIZE);
   20.24  #endif
   20.25  
   20.26 -    open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, 
   20.27 -                 (void *)new_tlbflush_clock_period,
   20.28 -                 NULL);
   20.29 +    open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);
   20.30  
   20.31      if ( opt_watchdog ) 
   20.32          nmi_watchdog = NMI_LOCAL_APIC;
   20.33 @@ -353,7 +350,6 @@ void __init start_of_day(void)
   20.34      init_IRQ();  /* installs simple interrupt wrappers. Starts HZ clock. */
   20.35      trap_init();
   20.36      time_init(); /* installs software handler for HZ clock. */
   20.37 -    softirq_init();
   20.38      init_apic_mappings(); /* make APICs addressable in our pagetables. */
   20.39  
   20.40  #ifndef CONFIG_SMP    
   20.41 @@ -373,10 +369,9 @@ void __init start_of_day(void)
   20.42  
   20.43      __sti();
   20.44  
   20.45 -    initialize_keytable(); /* call back handling for key codes      */
   20.46 +    initialize_keytable(); /* call back handling for key codes */
   20.47  
   20.48      serial_init_stage2();
   20.49 -    initialize_keyboard(); /* setup keyboard (also for debugging)   */
   20.50  
   20.51  #ifdef XEN_DEBUGGER
   20.52      initialize_pdb();      /* pervasive debugger */
    21.1 --- a/xen/arch/x86/smpboot.c	Thu Jun 17 16:33:33 2004 +0000
    21.2 +++ b/xen/arch/x86/smpboot.c	Fri Jun 18 14:46:29 2004 +0000
    21.3 @@ -644,7 +644,7 @@ static void __init do_boot_cpu (int apic
    21.4   * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
    21.5   */
    21.6  {
    21.7 -    struct task_struct *idle;
    21.8 +    struct domain *idle;
    21.9      unsigned long boot_error = 0;
   21.10      int timeout, cpu;
   21.11      unsigned long start_eip, stack;
   21.12 @@ -654,7 +654,7 @@ static void __init do_boot_cpu (int apic
   21.13      if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
   21.14          panic("failed 'createdomain' for CPU %d", cpu);
   21.15  
   21.16 -    set_bit(PF_IDLETASK, &idle->flags);
   21.17 +    set_bit(DF_IDLETASK, &idle->flags);
   21.18  
   21.19      idle->mm.pagetable = mk_pagetable(__pa(idle_pg_table));
   21.20  
    22.1 --- a/xen/arch/x86/traps.c	Thu Jun 17 16:33:33 2004 +0000
    22.2 +++ b/xen/arch/x86/traps.c	Fri Jun 18 14:46:29 2004 +0000
    22.3 @@ -187,7 +187,7 @@ static inline void do_trap(int trapnr, c
    22.4                             struct pt_regs *regs, 
    22.5                             long error_code, int use_error_code)
    22.6  {
    22.7 -    struct task_struct *p = current;
    22.8 +    struct domain *p = current;
    22.9      struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
   22.10      trap_info_t *ti;
   22.11      unsigned long fixup;
   22.12 @@ -248,7 +248,7 @@ DO_ERROR_NOCODE( 0, "divide error", divi
   22.13  
   22.14      asmlinkage void do_int3(struct pt_regs *regs, long error_code)
   22.15  {
   22.16 -    struct task_struct *p = current;
   22.17 +    struct domain *p = current;
   22.18      struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
   22.19      trap_info_t *ti;
   22.20  
   22.21 @@ -314,7 +314,7 @@ asmlinkage void do_page_fault(struct pt_
   22.22      struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
   22.23      trap_info_t *ti;
   22.24      unsigned long off, addr, fixup;
   22.25 -    struct task_struct *p = current;
   22.26 +    struct domain *p = current;
   22.27      extern int map_ldt_shadow_page(unsigned int);
   22.28  
   22.29      __asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : );
   22.30 @@ -400,7 +400,7 @@ asmlinkage void do_page_fault(struct pt_
   22.31  
   22.32  asmlinkage void do_general_protection(struct pt_regs *regs, long error_code)
   22.33  {
   22.34 -    struct task_struct *p = current;
   22.35 +    struct domain *p = current;
   22.36      struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
   22.37      trap_info_t *ti;
   22.38      unsigned long fixup;
   22.39 @@ -523,16 +523,16 @@ asmlinkage void math_state_restore(struc
   22.40      /* Prevent recursion. */
   22.41      clts();
   22.42  
   22.43 -    if ( !test_bit(PF_USEDFPU, &current->flags) )
   22.44 +    if ( !test_bit(DF_USEDFPU, &current->flags) )
   22.45      {
   22.46 -        if ( test_bit(PF_DONEFPUINIT, &current->flags) )
   22.47 +        if ( test_bit(DF_DONEFPUINIT, &current->flags) )
   22.48              restore_fpu(current);
   22.49          else
   22.50              init_fpu();
   22.51 -        set_bit(PF_USEDFPU, &current->flags); /* so we fnsave on switch_to() */
   22.52 +        set_bit(DF_USEDFPU, &current->flags); /* so we fnsave on switch_to() */
   22.53      }
   22.54  
   22.55 -    if ( test_and_clear_bit(PF_GUEST_STTS, &current->flags) )
   22.56 +    if ( test_and_clear_bit(DF_GUEST_STTS, &current->flags) )
   22.57      {
   22.58          struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
   22.59          gtb->flags      = GTBF_TRAP_NOCODE;
   22.60 @@ -545,7 +545,7 @@ asmlinkage void math_state_restore(struc
   22.61  asmlinkage void do_pdb_debug(struct pt_regs *regs, long error_code)
   22.62  {
   22.63      unsigned int condition;
   22.64 -    struct task_struct *tsk = current;
   22.65 +    struct domain *tsk = current;
   22.66      struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
   22.67  
   22.68      __asm__ __volatile__("movl %%db6,%0" : "=r" (condition));
   22.69 @@ -567,7 +567,7 @@ asmlinkage void do_pdb_debug(struct pt_r
   22.70  asmlinkage void do_debug(struct pt_regs *regs, long error_code)
   22.71  {
   22.72      unsigned int condition;
   22.73 -    struct task_struct *tsk = current;
   22.74 +    struct domain *tsk = current;
   22.75      struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
   22.76  
   22.77  #ifdef XEN_DEBUGGER
   22.78 @@ -760,7 +760,7 @@ long do_set_callbacks(unsigned long even
   22.79                        unsigned long failsafe_selector,
   22.80                        unsigned long failsafe_address)
   22.81  {
   22.82 -    struct task_struct *p = current;
   22.83 +    struct domain *p = current;
   22.84  
   22.85      if ( !VALID_CODESEL(event_selector) || !VALID_CODESEL(failsafe_selector) )
   22.86          return -EPERM;
   22.87 @@ -774,7 +774,7 @@ long do_set_callbacks(unsigned long even
   22.88  }
   22.89  
   22.90  
   22.91 -long set_fast_trap(struct task_struct *p, int idx)
   22.92 +long set_fast_trap(struct domain *p, int idx)
   22.93  {
   22.94      trap_info_t *ti;
   22.95  
   22.96 @@ -827,13 +827,13 @@ long do_set_fast_trap(int idx)
   22.97  
   22.98  long do_fpu_taskswitch(void)
   22.99  {
  22.100 -    set_bit(PF_GUEST_STTS, &current->flags);
  22.101 +    set_bit(DF_GUEST_STTS, &current->flags);
  22.102      stts();
  22.103      return 0;
  22.104  }
  22.105  
  22.106  
  22.107 -long set_debugreg(struct task_struct *p, int reg, unsigned long value)
  22.108 +long set_debugreg(struct domain *p, int reg, unsigned long value)
  22.109  {
  22.110      int i;
  22.111  
    23.1 --- a/xen/common/ac_timer.c	Thu Jun 17 16:33:33 2004 +0000
    23.2 +++ b/xen/common/ac_timer.c	Fri Jun 18 14:46:29 2004 +0000
    23.3 @@ -235,7 +235,7 @@ void mod_ac_timer(struct ac_timer *timer
    23.4  }
    23.5  
    23.6  
    23.7 -static void ac_timer_softirq_action(struct softirq_action *a)
    23.8 +static void ac_timer_softirq_action(void)
    23.9  {
   23.10      int              cpu = smp_processor_id();
   23.11      struct ac_timer *t, **heap;
   23.12 @@ -303,7 +303,7 @@ void __init ac_timer_init(void)
   23.13  
   23.14      printk ("ACT: Initialising Accurate timers\n");
   23.15  
   23.16 -    open_softirq(AC_TIMER_SOFTIRQ, ac_timer_softirq_action, NULL);
   23.17 +    open_softirq(AC_TIMER_SOFTIRQ, ac_timer_softirq_action);
   23.18  
   23.19      for ( i = 0; i < smp_num_cpus; i++ )
   23.20      {
    24.1 --- a/xen/common/debug.c	Thu Jun 17 16:33:33 2004 +0000
    24.2 +++ b/xen/common/debug.c	Fri Jun 18 14:46:29 2004 +0000
    24.3 @@ -49,15 +49,11 @@ void pdb_do_debug (dom0_op_t *op)
    24.4      {
    24.5          case 'c' :
    24.6  	{
    24.7 -	    struct task_struct * p = find_domain_by_id(op->u.debug.domain);
    24.8 +	    struct domain *p = find_domain_by_id(op->u.debug.domain);
    24.9  	    if ( p != NULL )
   24.10  	    {
   24.11 -	        if ( test_bit(PF_CONSTRUCTED, &p->flags) )
   24.12 -		{
   24.13 -		    wake_up(p);
   24.14 -		    reschedule(p);
   24.15 -		}
   24.16 -		put_task_struct(p);
   24.17 +                domain_controller_unpause(p);
   24.18 +		put_domain(p);
   24.19  	    }
   24.20  	    else
   24.21  	    {
   24.22 @@ -70,7 +66,7 @@ void pdb_do_debug (dom0_op_t *op)
   24.23              int loop;
   24.24              u_char x;
   24.25  	    unsigned long cr3;
   24.26 -	    struct task_struct *p;
   24.27 +	    struct domain *p;
   24.28  
   24.29  	    p = find_domain_by_id(op->u.debug.domain);
   24.30  	    if (p->mm.shadow_mode)
   24.31 @@ -89,20 +85,17 @@ void pdb_do_debug (dom0_op_t *op)
   24.32                  printk (" %02x", x);
   24.33              }
   24.34              printk ("\n");
   24.35 -	    put_task_struct(p);
   24.36 +	    put_domain(p);
   24.37              break;
   24.38          }
   24.39          case 's' :
   24.40  	{
   24.41 -	    struct task_struct * p = find_domain_by_id(op->u.debug.domain);
   24.42 +	    struct domain * p = find_domain_by_id(op->u.debug.domain);
   24.43  
   24.44  	    if (p != NULL)
   24.45  	    {
   24.46 -	        if (p->state != TASK_STOPPED)
   24.47 -		{
   24.48 -		  send_guest_virq(p, VIRQ_STOP);
   24.49 -		}
   24.50 -		put_task_struct(p);
   24.51 +                domain_controller_pause(p);
   24.52 +		put_domain(p);
   24.53  	    }
   24.54  	    else
   24.55  	    {
    25.1 --- a/xen/common/dom0_ops.c	Thu Jun 17 16:33:33 2004 +0000
    25.2 +++ b/xen/common/dom0_ops.c	Fri Jun 18 14:46:29 2004 +0000
    25.3 @@ -24,7 +24,7 @@
    25.4  #define TRC_DOM0OP_ENTER_BASE  0x00020000
    25.5  #define TRC_DOM0OP_LEAVE_BASE  0x00030000
    25.6  
    25.7 -extern unsigned int alloc_new_dom_mem(struct task_struct *, unsigned int);
    25.8 +extern unsigned int alloc_new_dom_mem(struct domain *, unsigned int);
    25.9  
   25.10  static int msr_cpu_mask;
   25.11  static unsigned long msr_addr;
   25.12 @@ -70,56 +70,49 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   25.13  
   25.14      case DOM0_BUILDDOMAIN:
   25.15      {
   25.16 -        struct task_struct * p = find_domain_by_id(op->u.builddomain.domain);
   25.17 +        struct domain * p = find_domain_by_id(op->u.builddomain.domain);
   25.18          ret = -EINVAL;
   25.19          if ( p != NULL )
   25.20          {
   25.21              ret = final_setup_guestos(p, &op->u.builddomain);
   25.22 -            put_task_struct(p);
   25.23 +            put_domain(p);
   25.24          }
   25.25      }
   25.26      break;
   25.27  
   25.28      case DOM0_STARTDOMAIN:
   25.29      {
   25.30 -        struct task_struct * p = find_domain_by_id(op->u.startdomain.domain);
   25.31 -        ret = -EINVAL;
   25.32 -        if ( p != NULL )
   25.33 +        struct domain *d = find_domain_by_id(op->u.startdomain.domain);
   25.34 +        ret = -ESRCH;
   25.35 +        if ( d != NULL )
   25.36          {
   25.37 -            if ( test_bit(PF_CONSTRUCTED, &p->flags) )
   25.38 +            ret = -EINVAL;
   25.39 +            if ( test_bit(DF_CONSTRUCTED, &d->flags) )
   25.40              {
   25.41 -                wake_up(p);
   25.42 -                reschedule(p);
   25.43 +                domain_controller_unpause(d);
   25.44                  ret = 0;
   25.45              }
   25.46 -            put_task_struct(p);
   25.47 +            put_domain(d);
   25.48          }
   25.49      }
   25.50      break;
   25.51  
   25.52      case DOM0_STOPDOMAIN:
   25.53      {
   25.54 -        ret = stop_other_domain(op->u.stopdomain.domain);
   25.55 - 
   25.56 -        /*
   25.57 -         * This is grim, but helps for live migrate. It's also unsafe
   25.58 -         * in the strict sense as we're not explicitly setting a
   25.59 -         * timeout, but dom0 is bound to have other timers going off to
   25.60 -         * wake us back up. 
   25.61 -         * We go to sleep so that the other domain can stop quicker, hence
   25.62 -         * we have less total down time in a migrate.
   25.63 -         */
   25.64 -        if( ret == 0 && op->u.stopdomain.sync == 1 )
   25.65 +        struct domain *d = find_domain_by_id(op->u.stopdomain.domain);
   25.66 +        ret = -ESRCH;
   25.67 +        if ( d != NULL )
   25.68          {
   25.69 -            extern long do_block( void );
   25.70 -            do_block(); /* Yuk... */
   25.71 +            domain_controller_pause(d);
   25.72 +            put_domain(d);
   25.73 +            ret = 0;
   25.74          }
   25.75      }
   25.76      break;
   25.77  
   25.78      case DOM0_CREATEDOMAIN:
   25.79      {
   25.80 -        struct task_struct *p;
   25.81 +        struct domain *p;
   25.82          static domid_t    domnr = 0;
   25.83          static spinlock_t domnr_lock = SPIN_LOCK_UNLOCKED;
   25.84          unsigned int pro;
   25.85 @@ -137,7 +130,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   25.86  
   25.87              if ( (p = find_domain_by_id(dom)) == NULL )
   25.88                  break;
   25.89 -            put_task_struct(p);
   25.90 +            put_domain(p);
   25.91          }
   25.92  
   25.93          if (op->u.createdomain.cpu == -1 )
   25.94 @@ -158,7 +151,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   25.95          ret = alloc_new_dom_mem(p, op->u.createdomain.memory_kb);
   25.96          if ( ret != 0 ) 
   25.97          {
   25.98 -            __kill_domain(p);
   25.99 +            domain_kill(p);
  25.100              break;
  25.101          }
  25.102  
  25.103 @@ -171,9 +164,18 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  25.104  
  25.105      case DOM0_DESTROYDOMAIN:
  25.106      {
  25.107 -        domid_t dom = op->u.destroydomain.domain;
  25.108 -        int force = op->u.destroydomain.force;
  25.109 -        ret = kill_other_domain(dom, force);
  25.110 +        struct domain *d = find_domain_by_id(op->u.destroydomain.domain);
  25.111 +        ret = -ESRCH;
  25.112 +        if ( d != NULL )
  25.113 +        {
  25.114 +            ret = -EINVAL;
  25.115 +            if ( d != current )
  25.116 +            {
  25.117 +                domain_kill(d);
  25.118 +                put_domain(d);
  25.119 +                ret = 0;
  25.120 +            }
  25.121 +        }
  25.122      }
  25.123      break;
  25.124  
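
The rewritten DOM0_DESTROYDOMAIN case is the template used by nearly every operation in this file: look the domain up (which takes a reference), report -ESRCH if there is no such domain and -EINVAL if it exists but is in the wrong state -- here, refusing self-destruction -- and drop the reference before leaving. A condensed sketch of the idiom; current_domain() is a hypothetical accessor standing in for Xen's current, and the domid_t typedef is assumed:

    #include <errno.h>

    struct domain;
    typedef unsigned int domid_t;                          /* width assumed */

    extern struct domain *find_domain_by_id(domid_t dom);  /* takes a ref */
    extern void put_domain(struct domain *d);               /* drops it */
    extern void domain_kill(struct domain *d);
    extern struct domain *current_domain(void);             /* hypothetical */

    static long destroy_domain_op(domid_t id)
    {
        struct domain *d = find_domain_by_id(id);
        long ret = -ESRCH;          /* no such domain */

        if (d != NULL) {
            ret = -EINVAL;          /* exists, but wrong state/caller */
            if (d != current_domain()) {
                domain_kill(d);
                ret = 0;
            }
            put_domain(d);          /* balance the lookup reference */
        }
        return ret;
    }
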
  25.125 @@ -185,9 +187,8 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  25.126              ret = -EINVAL;
  25.127          else
  25.128          {
  25.129 -            struct task_struct * p = find_domain_by_id(dom);
  25.130 +            struct domain * p = find_domain_by_id(dom);
  25.131              int cpu = op->u.pincpudomain.cpu;
  25.132 -            int we_paused = 0;
  25.133              
  25.134              ret = -ESRCH;
  25.135              
  25.136 @@ -196,30 +197,17 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  25.137                  if ( cpu == -1 )
  25.138                  {
  25.139                      p->cpupinned = 0;
  25.140 -                    ret = 0;
  25.141                  }
  25.142                  else
  25.143                  {
  25.144 -                    /* Pause domain if necessary. */
  25.145 -                    if( !(p->state & TASK_STOPPED) && 
  25.146 -                        !(p->state & TASK_PAUSED) )
  25.147 -                    {
  25.148 -                        sched_pause_sync(p);
  25.149 -                        we_paused = 1;
  25.150 -                    }
  25.151 -                    
  25.152 -                    /* We need a task structure lock here!!! 
  25.153 -                       FIX ME!! */
  25.154 +                    domain_pause(p);
  25.155                      cpu = cpu % smp_num_cpus;
  25.156                      p->processor = cpu;
  25.157 -                    p->cpupinned = 1;
  25.158 -                    
  25.159 -                    if ( we_paused )
  25.160 -                        wake_up(p);
  25.161 -                    
  25.162 -                    ret = 0;
  25.163 +                    p->cpupinned = 1;                    
  25.164 +                    domain_unpause(p);
  25.165                  }
  25.166 -                put_task_struct(p);
  25.167 +                put_domain(p);
  25.168 +                ret = 0;
  25.169              }      
  25.170          }
  25.171      }
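
The CPU-pinning case shows the same interface paying off inside Xen: the old we_paused bookkeeping and its "We need a task structure lock!!!" FIXME collapse into a domain_pause()/domain_unpause() bracket, inside which the target is guaranteed not to be running, so ->processor and ->cpupinned can be rewritten without a race. The shape of the bracket, as a sketch with assumed field names:

    struct dom {
        int processor;    /* CPU this domain runs on */
        int cpupinned;    /* non-zero: must stay on ->processor */
    };

    extern void domain_pause(struct dom *d);   /* target off-CPU on return */
    extern void domain_unpause(struct dom *d);

    static void pin_domain(struct dom *d, int cpu, int nr_cpus)
    {
        domain_pause(d);
        /* Paused: the domain cannot race these writes. */
        d->processor = cpu % nr_cpus;
        d->cpupinned = 1;
        domain_unpause(d);
    }
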
  25.172 @@ -242,7 +230,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  25.173      case DOM0_GETMEMLIST:
  25.174      {
  25.175          int i;
  25.176 -        struct task_struct *p = find_domain_by_id(op->u.getmemlist.domain);
  25.177 +        struct domain *p = find_domain_by_id(op->u.getmemlist.domain);
  25.178          unsigned long max_pfns = op->u.getmemlist.max_pfns;
  25.179          unsigned long pfn;
  25.180          unsigned long *buffer = op->u.getmemlist.buffer;
  25.181 @@ -272,7 +260,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  25.182              op->u.getmemlist.num_pfns = i;
  25.183              copy_to_user(u_dom0_op, op, sizeof(*op));
  25.184              
  25.185 -            put_task_struct(p);
  25.186 +            put_domain(p);
  25.187          }
  25.188      }
  25.189      break;
  25.190 @@ -280,9 +268,9 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  25.191      case DOM0_GETDOMAININFO:
  25.192      { 
  25.193          full_execution_context_t *c;
  25.194 -        struct task_struct       *p;
  25.195 +        struct domain       *p;
  25.196          unsigned long             flags;
  25.197 -        int                       i;
  25.198 +        int                       i, dump_state = 0;
  25.199  
  25.200          read_lock_irqsave(&tasklist_lock, flags);
  25.201  
  25.202 @@ -292,40 +280,42 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  25.203                  break;
  25.204          }
  25.205  
  25.206 -        if ( (p == NULL) || (p->state == TASK_DYING) )
  25.207 +        if ( p == NULL )
  25.208          {
  25.209              ret = -ESRCH;
  25.210              goto gdi_out;
  25.211          }
  25.212          else
  25.213          {
  25.214 -            op->u.getdomaininfo.domain      = p->domain;
  25.215 +            op->u.getdomaininfo.domain = p->domain;
  25.216              strcpy(op->u.getdomaininfo.name, p->name);
  25.217  
  25.218 -            if ( p->state == TASK_RUNNING )
  25.219 +            /* These are kind of in order of 'importance'. */
  25.220 +            if ( test_bit(DF_CRASHED, &p->flags) )
  25.221 +                op->u.getdomaininfo.flags = DOMSTATE_CRASHED;
  25.222 +            else if ( test_bit(DF_SUSPENDED, &p->flags) )
  25.223 +                op->u.getdomaininfo.flags = DOMSTATE_SUSPENDED;
  25.224 +            else if ( test_bit(DF_CONTROLPAUSE, &p->flags) )
  25.225 +                op->u.getdomaininfo.flags = DOMSTATE_PAUSED;
  25.226 +            else if ( test_bit(DF_BLOCKED, &p->flags) )
  25.227 +                op->u.getdomaininfo.flags = DOMSTATE_BLOCKED;
  25.228 +            else
  25.229 +            {
  25.230                  op->u.getdomaininfo.flags = 
  25.231                      p->has_cpu ? DOMSTATE_RUNNING : DOMSTATE_RUNNABLE;
  25.232 -            else if ( (p->state == TASK_INTERRUPTIBLE) || 
  25.233 -                      (p->state == TASK_UNINTERRUPTIBLE) )
  25.234 -                op->u.getdomaininfo.flags = DOMSTATE_BLOCKED;
  25.235 -            else if ( p->state == TASK_PAUSED )
  25.236 -                op->u.getdomaininfo.flags = DOMSTATE_PAUSED;
  25.237 -            else if ( p->state == TASK_CRASHED )
  25.238 -                op->u.getdomaininfo.flags = DOMSTATE_CRASHED;
  25.239 -            else
  25.240 -                op->u.getdomaininfo.flags = DOMSTATE_STOPPED;
  25.241 +                dump_state = 1;
  25.242 +            }
  25.243 +
  25.244              op->u.getdomaininfo.flags |= p->processor << DOMFLAGS_CPUSHIFT;
  25.245              op->u.getdomaininfo.flags |= p->stop_code << DOMFLAGS_GUESTSHIFT;
  25.246  
  25.247 -            op->u.getdomaininfo.hyp_events  = p->hyp_events;
  25.248              op->u.getdomaininfo.tot_pages   = p->tot_pages;
  25.249              op->u.getdomaininfo.max_pages   = p->max_pages;
  25.250              op->u.getdomaininfo.cpu_time    = p->cpu_time;
  25.251              op->u.getdomaininfo.shared_info_frame = 
  25.252                  __pa(p->shared_info) >> PAGE_SHIFT;
  25.253  
  25.254 -            if ( (p->state == TASK_STOPPED) &&
  25.255 -                 (op->u.getdomaininfo.ctxt != NULL) )
  25.256 +            if ( dump_state && (op->u.getdomaininfo.ctxt != NULL) )
  25.257              {
  25.258                  if ( (c = kmalloc(sizeof(*c), GFP_KERNEL)) == NULL )
  25.259                  {
  25.260 @@ -338,7 +328,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  25.261                  memcpy(&c->cpu_ctxt, 
  25.262                         &p->shared_info->execution_context,
  25.263                         sizeof(p->shared_info->execution_context));
  25.264 -                if ( test_bit(PF_DONEFPUINIT, &p->flags) )
  25.265 +                if ( test_bit(DF_DONEFPUINIT, &p->flags) )
  25.266                      c->flags |= ECF_I387_VALID;
  25.267                  memcpy(&c->fpu_ctxt,
  25.268                         &p->thread.i387,
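
GETDOMAININFO now reports everything through one packed flags word: a DOMSTATE_* value in the low bits, chosen by priority (crashed over suspended over paused over blocked over running/runnable), plus the processor number at DOMFLAGS_CPUSHIFT and the guest's stop code at DOMFLAGS_GUESTSHIFT. A hedged decoder for the tools side -- the shift and mask constants below are placeholders; the real definitions live in dom0_ops.h, which this hunk does not show:

    #include <stdio.h>

    /* Placeholder constants -- consult dom0_ops.h for the real ones. */
    #define DOMSTATE_MASK        0x0fUL
    #define DOMFLAGS_CPUSHIFT    8
    #define DOMFLAGS_CPUMASK     0xffUL
    #define DOMFLAGS_GUESTSHIFT  16
    #define DOMFLAGS_GUESTMASK   0xffUL

    static void decode_dominfo_flags(unsigned long flags)
    {
        unsigned long state = flags & DOMSTATE_MASK;
        unsigned long cpu   = (flags >> DOMFLAGS_CPUSHIFT) & DOMFLAGS_CPUMASK;
        unsigned long code  = (flags >> DOMFLAGS_GUESTSHIFT) & DOMFLAGS_GUESTMASK;

        printf("state=%lu cpu=%lu stop_code=%lu\n", state, cpu, code);
    }
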
  25.269 @@ -402,7 +392,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  25.270          struct pfn_info *page;
  25.271          unsigned long pfn = op->u.getpageframeinfo.pfn;
  25.272          domid_t dom = op->u.getpageframeinfo.domain;
  25.273 -        struct task_struct *p;
  25.274 +        struct domain *p;
  25.275  
  25.276          ret = -EINVAL;
  25.277  
  25.278 @@ -440,7 +430,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  25.279              put_page(page);
  25.280          }
  25.281  
  25.282 -        put_task_struct(p);
  25.283 +        put_domain(p);
  25.284  
  25.285          copy_to_user(u_dom0_op, op, sizeof(*op));
  25.286      }
  25.287 @@ -550,13 +540,13 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  25.288  
  25.289      case DOM0_SHADOW_CONTROL:
  25.290      {
  25.291 -        struct task_struct *p; 
  25.292 +        struct domain *p; 
  25.293          ret = -ESRCH;
  25.294          p = find_domain_by_id( op->u.shadow_control.domain );
  25.295          if ( p )
  25.296          {
  25.297              ret = shadow_mode_control(p, &op->u.shadow_control );
  25.298 -            put_task_struct(p);
  25.299 +            put_domain(p);
  25.300              copy_to_user(u_dom0_op, op, sizeof(*op));
  25.301          } 
  25.302      }
  25.303 @@ -573,12 +563,12 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  25.304  
  25.305      case DOM0_SETDOMAINNAME:
  25.306      {
  25.307 -        struct task_struct *p; 
  25.308 +        struct domain *p; 
  25.309          p = find_domain_by_id( op->u.setdomainname.domain );
  25.310          if ( p )
  25.311          {
  25.312              strncpy(p->name, op->u.setdomainname.name, MAX_DOMAIN_NAME);
  25.313 -            put_task_struct(p);
  25.314 +            put_domain(p);
  25.315          }
  25.316          else 
  25.317              ret = -ESRCH;
  25.318 @@ -587,31 +577,31 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  25.319  
  25.320      case DOM0_SETDOMAININITIALMEM:
  25.321      {
  25.322 -        struct task_struct *p; 
  25.323 +        struct domain *p; 
  25.324          ret = -ESRCH;
  25.325          p = find_domain_by_id( op->u.setdomaininitialmem.domain );
  25.326          if ( p )
  25.327          { 
  25.328              /* should only be used *before* domain is built. */
  25.329 -            if ( ! test_bit(PF_CONSTRUCTED, &p->flags) )
  25.330 +            if ( ! test_bit(DF_CONSTRUCTED, &p->flags) )
  25.331                  ret = alloc_new_dom_mem( 
  25.332                      p, op->u.setdomaininitialmem.initial_memkb );
  25.333              else
  25.334                  ret = -EINVAL;
  25.335 -            put_task_struct(p);
  25.336 +            put_domain(p);
  25.337          }
  25.338      }
  25.339      break;
  25.340  
  25.341      case DOM0_SETDOMAINMAXMEM:
  25.342      {
  25.343 -        struct task_struct *p; 
  25.344 +        struct domain *p; 
  25.345          p = find_domain_by_id( op->u.setdomainmaxmem.domain );
  25.346          if ( p )
  25.347          {
  25.348              p->max_pages = 
  25.349                  (op->u.setdomainmaxmem.max_memkb+PAGE_SIZE-1)>> PAGE_SHIFT;
  25.350 -            put_task_struct(p);
  25.351 +            put_domain(p);
  25.352          }
  25.353          else 
  25.354              ret = -ESRCH;
  25.355 @@ -625,7 +615,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  25.356          int num = op->u.getpageframeinfo2.num;
  25.357          domid_t dom = op->u.getpageframeinfo2.domain;
  25.358          unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array;
  25.359 -        struct task_struct *p;
  25.360 +        struct domain *p;
  25.361          unsigned long l_arr[GPF2_BATCH];
  25.362          ret = -ESRCH;
  25.363  
  25.364 @@ -697,7 +687,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
  25.365              n+=j;     
  25.366          }
  25.367  
  25.368 -        put_task_struct(p);
  25.369 +        put_domain(p);
  25.370  
  25.371      }
  25.372      break;
    26.1 --- a/xen/common/dom_mem_ops.c	Thu Jun 17 16:33:33 2004 +0000
    26.2 +++ b/xen/common/dom_mem_ops.c	Fri Jun 18 14:46:29 2004 +0000
    26.3 @@ -15,7 +15,7 @@
    26.4  #include <xen/event.h>
    26.5  #include <asm/domain_page.h>
    26.6  
    26.7 -static long alloc_dom_mem(struct task_struct *p, 
    26.8 +static long alloc_dom_mem(struct domain *p, 
    26.9                            unsigned long      *pages, 
   26.10                            unsigned long       nr_pages)
   26.11  {
   26.12 @@ -49,7 +49,7 @@ static long alloc_dom_mem(struct task_st
   26.13      return i;
   26.14  }
   26.15      
   26.16 -static long free_dom_mem(struct task_struct *p, 
   26.17 +static long free_dom_mem(struct domain *p, 
   26.18                           unsigned long      *pages, 
   26.19                           unsigned long       nr_pages)
   26.20  {
    27.1 --- a/xen/common/domain.c	Thu Jun 17 16:33:33 2004 +0000
    27.2 +++ b/xen/common/domain.c	Fri Jun 18 14:46:29 2004 +0000
    27.3 @@ -42,19 +42,20 @@
    27.4  
    27.5  /* Both these structures are protected by the tasklist_lock. */
    27.6  rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;
    27.7 -struct task_struct *task_hash[TASK_HASH_SIZE];
    27.8 -struct task_struct *task_list;
    27.9 +struct domain *task_hash[TASK_HASH_SIZE];
   27.10 +struct domain *task_list;
   27.11  
   27.12 -struct task_struct *do_createdomain(domid_t dom_id, unsigned int cpu)
   27.13 +struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
   27.14  {
   27.15      char buf[100];
   27.16 -    struct task_struct *p, **pp;
   27.17 +    struct domain *p, **pp;
   27.18      unsigned long flags;
   27.19  
   27.20 -    if ( (p = alloc_task_struct()) == NULL )
   27.21 +    if ( (p = alloc_domain_struct()) == NULL )
   27.22          return NULL;
   27.23  
   27.24      atomic_set(&p->refcnt, 1);
   27.25 +    atomic_set(&p->pausecnt, 0);
   27.26  
   27.27      spin_lock_init(&p->mm.shadow_lock);
   27.28  
   27.29 @@ -68,7 +69,7 @@ struct task_struct *do_createdomain(domi
   27.30      {
   27.31          if ( init_event_channels(p) != 0 )
   27.32          {
   27.33 -            free_task_struct(p);
   27.34 +            free_domain_struct(p);
   27.35              return NULL;
   27.36          }
   27.37          
   27.38 @@ -98,6 +99,8 @@ struct task_struct *do_createdomain(domi
   27.39          spin_lock_init(&p->pcidev_lock);
   27.40          INIT_LIST_HEAD(&p->pcidev_list);
   27.41  
   27.42 +        sched_add_domain(p);
   27.43 +
   27.44          write_lock_irqsave(&tasklist_lock, flags);
   27.45          pp = &task_list; /* NB. task_list is maintained in order of dom_id. */
   27.46          for ( pp = &task_list; *pp != NULL; pp = &(*pp)->next_list )
   27.47 @@ -112,17 +115,17 @@ struct task_struct *do_createdomain(domi
   27.48      else
   27.49      {
   27.50          sprintf(p->name, "Idle-%d", cpu);
   27.51 +        sched_add_domain(p);
   27.52      }
   27.53  
   27.54 -    sched_add_domain(p);
   27.55  
   27.56      return p;
   27.57  }
   27.58  
   27.59  
   27.60 -struct task_struct *find_domain_by_id(domid_t dom)
   27.61 +struct domain *find_domain_by_id(domid_t dom)
   27.62  {
   27.63 -    struct task_struct *p;
   27.64 +    struct domain *p;
   27.65      unsigned long flags;
   27.66  
   27.67      read_lock_irqsave(&tasklist_lock, flags);
   27.68 @@ -131,7 +134,8 @@ struct task_struct *find_domain_by_id(do
   27.69      {
   27.70          if ( p->domain == dom )
   27.71          {
   27.72 -            get_task_struct(p);
   27.73 +            if ( unlikely(!get_domain(p)) )
   27.74 +                p = NULL;
   27.75              break;
   27.76          }
   27.77          p = p->next_hash;
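
find_domain_by_id() can now fail even for a domain still on the hash chain, because get_domain() refuses to hand out a reference once the destructor has claimed the count. A user-space model of that acquire path; DOMAIN_DESTRUCTED is treated as a sentinel bit in the counter, and its value here is assumed rather than taken from the Xen headers:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define DOMAIN_DESTRUCTED (1 << 30)   /* assumed sentinel */

    /* Bump the refcount unless teardown has already claimed it. */
    static bool get_domain_ref(atomic_int *refcnt)
    {
        int old = atomic_load(refcnt);
        do {
            if (old & DOMAIN_DESTRUCTED)
                return false;             /* dying: caller gets NULL */
        } while (!atomic_compare_exchange_weak(refcnt, &old, old + 1));
        return true;
    }
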
   27.78 @@ -143,9 +147,9 @@ struct task_struct *find_domain_by_id(do
   27.79  
   27.80  
   27.81  /* return the most recent domain created */
   27.82 -struct task_struct *find_last_domain(void)
   27.83 +struct domain *find_last_domain(void)
   27.84  {
   27.85 -    struct task_struct *p, *plast;
   27.86 +    struct domain *p, *plast;
   27.87      unsigned long flags;
   27.88  
   27.89      read_lock_irqsave(&tasklist_lock, flags);
   27.90 @@ -157,103 +161,42 @@ struct task_struct *find_last_domain(voi
   27.91              plast = p;
   27.92          p = p->next_list;
   27.93      }
   27.94 -    get_task_struct(plast);
   27.95 +    if ( !get_domain(plast) )
   27.96 +        plast = NULL;
   27.97      read_unlock_irqrestore(&tasklist_lock, flags);
   27.98  
   27.99      return plast;
  27.100  }
  27.101  
  27.102  
  27.103 -void __kill_domain(struct task_struct *p)
  27.104 +void domain_kill(struct domain *d)
  27.105  {
  27.106 -    struct task_struct **pp;
  27.107 -    unsigned long flags;
  27.108 -
  27.109 -    if ( p->domain == 0 )
  27.110 +    domain_pause(d);
  27.111 +    if ( !test_and_set_bit(DF_DYING, &d->flags) )
  27.112      {
  27.113 -        extern void machine_restart(char *);
  27.114 -        printk("Domain 0 killed: rebooting machine!\n");
  27.115 -        machine_restart(0);
  27.116 -    }
  27.117 -
  27.118 -    /* Only allow the domain to be destroyed once. */
  27.119 -    if ( !sched_rem_domain(p) )
  27.120 -        return;
  27.121 -
  27.122 -    DPRINTK("Killing domain %u\n", p->domain);
  27.123 -
  27.124 -    destroy_event_channels(p);
  27.125 -
  27.126 -    /*
  27.127 -     * Note this means that find_domain_by_id may fail, even when the caller
  27.128 -     * holds a reference to the domain being queried. Take care!
  27.129 -     */
  27.130 -    write_lock_irqsave(&tasklist_lock, flags);
  27.131 -    pp = &task_list;                       /* Delete from task_list. */
  27.132 -    while ( *pp != p ) 
  27.133 -        pp = &(*pp)->next_list;
  27.134 -    *pp = p->next_list;
  27.135 -    pp = &task_hash[TASK_HASH(p->domain)]; /* Delete from task_hash. */
  27.136 -    while ( *pp != p ) 
  27.137 -        pp = &(*pp)->next_hash;
  27.138 -    *pp = p->next_hash;
  27.139 -    write_unlock_irqrestore(&tasklist_lock, flags);
  27.140 -
  27.141 -    if ( p == current )
  27.142 -    {
  27.143 -        __enter_scheduler();
  27.144 -        BUG(); /* never get here */
  27.145 -    }
  27.146 -    else
  27.147 -    {
  27.148 -        put_task_struct(p);
  27.149 +        sched_rem_domain(d);
  27.150 +        put_domain(d);
  27.151      }
  27.152  }
  27.153  
  27.154  
  27.155 -void kill_domain(void)
  27.156 +void domain_crash(void)
  27.157  {
  27.158 -    __kill_domain(current);
  27.159 -}
  27.160 -
  27.161 -
  27.162 -long kill_other_domain(domid_t dom, int force)
  27.163 -{
  27.164 -    struct task_struct *p;
  27.165 -
  27.166 -    if ( (p = find_domain_by_id(dom)) == NULL )
  27.167 -        return -ESRCH;
  27.168 +    struct domain *d;
  27.169  
  27.170 -    if ( (p->state == TASK_STOPPED) || (p->state == TASK_CRASHED) )
  27.171 -        __kill_domain(p);
  27.172 -    else if ( force )
  27.173 -        send_hyp_event(p, _HYP_EVENT_DIE);
  27.174 -    else
  27.175 -        send_guest_virq(p, VIRQ_DIE);
  27.176 -
  27.177 -    put_task_struct(p);
  27.178 -    return 0;
  27.179 -}
  27.180 -
  27.181 -
  27.182 -void crash_domain(void)
  27.183 -{
  27.184 -    struct task_struct *p;
  27.185 -
  27.186 -    set_current_state(TASK_CRASHED);
  27.187 +    set_bit(DF_CRASHED, &current->flags);
  27.188      
  27.189 -    p = find_domain_by_id(0);
  27.190 -    send_guest_virq(p, VIRQ_DOM_EXC);
  27.191 -    put_task_struct(p);
  27.192 +    d = find_domain_by_id(0);
  27.193 +    send_guest_virq(d, VIRQ_DOM_EXC);
  27.194 +    put_domain(d);
  27.195      
  27.196      __enter_scheduler();
  27.197      BUG();
  27.198  }
  27.199  
  27.200 -
  27.201 -void stop_domain(u8 reason)
  27.202 +void domain_suspend(u8 reason)
  27.203  {
  27.204 -    struct task_struct *p;
  27.205 +    struct domain *d;
  27.206  
  27.207      if ( current->domain == 0 )
  27.208      {
  27.209 @@ -267,34 +210,17 @@ void stop_domain(u8 reason)
  27.210             get_execution_context(), 
  27.211             sizeof(execution_context_t));
  27.212      unlazy_fpu(current);
  27.213 -    wmb(); /* All CPUs must see saved info in state TASK_STOPPED. */
  27.214 -    set_current_state(TASK_STOPPED);
  27.215 +    wmb(); /* All CPUs must see saved info when suspended. */
  27.216 +    set_bit(DF_SUSPENDED, &current->flags);
  27.217  
  27.218 -    p = find_domain_by_id(0);
  27.219 -    send_guest_virq(p, VIRQ_DOM_EXC);
  27.220 -    put_task_struct(p);
  27.221 +    d = find_domain_by_id(0);
  27.222 +    send_guest_virq(d, VIRQ_DOM_EXC);
  27.223 +    put_domain(d);
  27.224  
  27.225      __enter_scheduler();
  27.226  }
  27.227  
  27.228 -long stop_other_domain(domid_t dom)
  27.229 -{
  27.230 -    struct task_struct *p;
  27.231 -    
  27.232 -    if ( dom == 0 )
  27.233 -        return -EINVAL;
  27.234 -
  27.235 -    p = find_domain_by_id(dom);
  27.236 -    if ( p == NULL) return -ESRCH;
  27.237 -    
  27.238 -    if ( p->state != TASK_STOPPED )
  27.239 -        send_guest_virq(p, VIRQ_STOP);
  27.240 -    
  27.241 -    put_task_struct(p);
  27.242 -    return 0;
  27.243 -}
  27.244 -
  27.245 -struct pfn_info *alloc_domain_page(struct task_struct *p)
  27.246 +struct pfn_info *alloc_domain_page(struct domain *p)
  27.247  {
  27.248      struct pfn_info *page = NULL;
  27.249      unsigned long flags, mask, pfn_stamp, cpu_stamp;
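
The TASK_* state machine is gone from this file: dying, crashed and suspended are now independent DF_* bits, domain_crash() and domain_suspend() just set their bit and notify domain 0 via VIRQ_DOM_EXC, and domain_kill() gates its teardown on test_and_set_bit(DF_DYING, ...) so that only the first caller removes the domain from the scheduler and drops the creation-time reference. The run-once guard, sketched with a GCC atomic builtin standing in for Xen's bitops (bit position assumed):

    enum { DF_DYING = 2 };   /* illustrative bit position */

    static void kill_once(unsigned long *flags)
    {
        unsigned long bit = 1UL << DF_DYING;
        unsigned long old = __atomic_fetch_or(flags, bit, __ATOMIC_SEQ_CST);

        if (!(old & bit)) {
            /* First and only killer: this is where the real code calls
             * sched_rem_domain() and put_domain(). */
        }
    }
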
  27.250 @@ -366,7 +292,7 @@ struct pfn_info *alloc_domain_page(struc
  27.251  void free_domain_page(struct pfn_info *page)
  27.252  {
  27.253      unsigned long flags;
  27.254 -    struct task_struct *p = page->u.domain;
  27.255 +    struct domain *p = page->u.domain;
  27.256  
  27.257      ASSERT(!in_irq());
  27.258  
  27.259 @@ -411,7 +337,7 @@ void free_domain_page(struct pfn_info *p
  27.260  }
  27.261  
  27.262  
  27.263 -void free_all_dom_mem(struct task_struct *p)
  27.264 +void free_all_dom_mem(struct domain *p)
  27.265  {
  27.266      struct list_head *ent, zombies;
  27.267      struct pfn_info *page;
  27.268 @@ -501,7 +427,7 @@ void free_all_dom_mem(struct task_struct
  27.269  }
  27.270  
  27.271  
  27.272 -unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
  27.273 +unsigned int alloc_new_dom_mem(struct domain *p, unsigned int kbytes)
  27.274  {
  27.275      unsigned int alloc_pfns, nr_pages;
  27.276      struct pfn_info *page;
  27.277 @@ -541,19 +467,40 @@ unsigned int alloc_new_dom_mem(struct ta
  27.278   
  27.279  
  27.280  /* Release resources belonging to task @p. */
  27.281 -void release_task(struct task_struct *p)
  27.282 +void domain_destruct(struct domain *p)
  27.283  {
  27.284 -    ASSERT(p->state == TASK_DYING);
  27.285 -    ASSERT(!p->has_cpu);
  27.286 +    struct domain **pp;
  27.287 +    unsigned long flags;
  27.288 +
  27.289 +    if ( !test_bit(DF_DYING, &p->flags) )
  27.290 +        BUG();
  27.291 +
  27.292 +    /* May be already destructed, or get_domain() can race us. */
  27.293 +    if ( cmpxchg(&p->refcnt.counter, 0, DOMAIN_DESTRUCTED) != 0 )
  27.294 +        return;
  27.295  
  27.296      DPRINTK("Releasing task %u\n", p->domain);
  27.297  
  27.298 +    /* Delete from task list and task hashtable. */
  27.299 +    write_lock_irqsave(&tasklist_lock, flags);
  27.300 +    pp = &task_list;
  27.301 +    while ( *pp != p ) 
  27.302 +        pp = &(*pp)->next_list;
  27.303 +    *pp = p->next_list;
  27.304 +    pp = &task_hash[TASK_HASH(p->domain)];
  27.305 +    while ( *pp != p ) 
  27.306 +        pp = &(*pp)->next_hash;
  27.307 +    *pp = p->next_hash;
  27.308 +    write_unlock_irqrestore(&tasklist_lock, flags);
  27.309 +
  27.310 +    destroy_event_channels(p);
  27.311 +
  27.312      /* Free all memory associated with this domain. */
  27.313      free_page((unsigned long)p->mm.perdomain_pt);
  27.314      UNSHARE_PFN(virt_to_page(p->shared_info));
  27.315      free_all_dom_mem(p);
  27.316  
  27.317 -    free_task_struct(p);
  27.318 +    free_domain_struct(p);
  27.319  }
  27.320  
  27.321  
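
domain_destruct() closes the race with get_domain(): it proceeds only if it can atomically exchange a refcount of exactly zero for the DOMAIN_DESTRUCTED sentinel. If a concurrent lookup bumped the count first, the cmpxchg fails and teardown falls to whoever drops the last reference. The destructor half of the acquire sketch above, with the same caveat that the sentinel value is assumed:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define DOMAIN_DESTRUCTED (1 << 30)   /* assumed sentinel */

    /* True if this caller won the exclusive right to free the object. */
    static bool claim_destruction(atomic_int *refcnt)
    {
        int zero = 0;
        return atomic_compare_exchange_strong(refcnt, &zero,
                                              DOMAIN_DESTRUCTED);
    }

Note also that delisting from task_list and task_hash has moved from kill time to here, which is exactly why a dying domain can still be found on the chains -- and why get_domain() must be able to fail -- in the window between the two.
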
  27.322 @@ -562,7 +509,7 @@ void release_task(struct task_struct *p)
  27.323   * than domain 0. ie. the domains that are being built by the userspace dom0
  27.324   * domain builder.
  27.325   */
  27.326 -int final_setup_guestos(struct task_struct *p, dom0_builddomain_t *builddomain)
  27.327 +int final_setup_guestos(struct domain *p, dom0_builddomain_t *builddomain)
  27.328  {
  27.329      unsigned long phys_basetab;
  27.330      int i, rc = 0;
  27.331 @@ -571,7 +518,7 @@ int final_setup_guestos(struct task_stru
  27.332      if ( (c = kmalloc(sizeof(*c), GFP_KERNEL)) == NULL )
  27.333          return -ENOMEM;
  27.334  
  27.335 -    if ( test_bit(PF_CONSTRUCTED, &p->flags) )
  27.336 +    if ( test_bit(DF_CONSTRUCTED, &p->flags) )
  27.337      {
  27.338          rc = -EINVAL;
  27.339          goto out;
  27.340 @@ -583,9 +530,9 @@ int final_setup_guestos(struct task_stru
  27.341          goto out;
  27.342      }
  27.343      
  27.344 -    clear_bit(PF_DONEFPUINIT, &p->flags);
  27.345 +    clear_bit(DF_DONEFPUINIT, &p->flags);
  27.346      if ( c->flags & ECF_I387_VALID )
  27.347 -        set_bit(PF_DONEFPUINIT, &p->flags);
  27.348 +        set_bit(DF_DONEFPUINIT, &p->flags);
  27.349      memcpy(&p->shared_info->execution_context,
  27.350             &c->cpu_ctxt,
  27.351             sizeof(p->shared_info->execution_context));
  27.352 @@ -624,7 +571,7 @@ int final_setup_guestos(struct task_stru
  27.353      /* Set up the shared info structure. */
  27.354      update_dom_time(p->shared_info);
  27.355  
  27.356 -    set_bit(PF_CONSTRUCTED, &p->flags);
  27.357 +    set_bit(DF_CONSTRUCTED, &p->flags);
  27.358  
  27.359   out:    
  27.360      if (c) kfree(c);
  27.361 @@ -749,7 +696,7 @@ static int loadelfimage(char *elfbase)
  27.362      return 0;
  27.363  }
  27.364  
  27.365 -int construct_dom0(struct task_struct *p, 
  27.366 +int construct_dom0(struct domain *p, 
  27.367                     unsigned long alloc_start,
  27.368                     unsigned long alloc_end,
  27.369                     char *image_start, unsigned long image_len, 
  27.370 @@ -791,12 +738,12 @@ int construct_dom0(struct task_struct *p
  27.371      /* Machine address of next candidate page-table page. */
  27.372      unsigned long mpt_alloc;
  27.373  
  27.374 -    extern void physdev_init_dom0(struct task_struct *);
  27.375 +    extern void physdev_init_dom0(struct domain *);
  27.376  
  27.377      /* Sanity! */
  27.378      if ( p->domain != 0 ) 
  27.379          BUG();
  27.380 -    if ( test_bit(PF_CONSTRUCTED, &p->flags) ) 
  27.381 +    if ( test_bit(DF_CONSTRUCTED, &p->flags) ) 
  27.382          BUG();
  27.383  
  27.384      printk("*** LOADING DOMAIN 0 ***\n");
  27.385 @@ -1047,7 +994,7 @@ int construct_dom0(struct task_struct *p
  27.386      /* DOM0 gets access to everything. */
  27.387      physdev_init_dom0(p);
  27.388  
  27.389 -    set_bit(PF_CONSTRUCTED, &p->flags);
  27.390 +    set_bit(DF_CONSTRUCTED, &p->flags);
  27.391  
  27.392  #if 0 /* XXXXX DO NOT CHECK IN ENABLED !!! (but useful for testing so leave) */
  27.393      shadow_mode_enable(&p->mm, SHM_test); 
    28.1 --- a/xen/common/event.c	Thu Jun 17 16:33:33 2004 +0000
    28.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.3 @@ -1,29 +0,0 @@
    28.4 -/******************************************************************************
    28.5 - * event.c
    28.6 - * 
    28.7 - * A nice interface for passing per-domain asynchronous events. 
    28.8 - * These events are handled in the hypervisor, prior to return
    28.9 - * to the guest OS.
   28.10 - * 
   28.11 - * Copyright (c) 2002, K A Fraser
   28.12 - */
   28.13 -
   28.14 -#include <xen/config.h>
   28.15 -#include <xen/event.h>
   28.16 -
   28.17 -typedef void (*hyp_event_callback_fn_t)(void);
   28.18 -
   28.19 -/* Ordering must match definitions of _HYP_EVENT_* in xen/sched.h */
   28.20 -static hyp_event_callback_fn_t event_call_fn[] = 
   28.21 -{
   28.22 -    __enter_scheduler,
   28.23 -    kill_domain,
   28.24 -};
   28.25 -
   28.26 -/* Handle outstanding events for the currently-executing domain. */
   28.27 -void do_hyp_events(void)
   28.28 -{
   28.29 -    int nr;
   28.30 -    while ( (nr = ffs(current->hyp_events)) != 0 )
   28.31 -        (event_call_fn[nr-1])();
   28.32 -}
    29.1 --- a/xen/common/event_channel.c	Thu Jun 17 16:33:33 2004 +0000
    29.2 +++ b/xen/common/event_channel.c	Fri Jun 18 14:46:29 2004 +0000
    29.3 @@ -29,7 +29,7 @@
    29.4  #define INIT_EVENT_CHANNELS   16
    29.5  #define MAX_EVENT_CHANNELS  1024
    29.6  
    29.7 -static int get_free_port(struct task_struct *p)
    29.8 +static int get_free_port(struct domain *p)
    29.9  {
   29.10      int max, port;
   29.11      event_channel_t *chn;
   29.12 @@ -69,7 +69,7 @@ static int get_free_port(struct task_str
   29.13  
   29.14  static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
   29.15  {
   29.16 -    struct task_struct *p1, *p2;
   29.17 +    struct domain *p1, *p2;
   29.18      int                 port1 = 0, port2 = 0;
   29.19      domid_t             dom1 = bind->dom1, dom2 = bind->dom2;
   29.20      long                rc = 0;
   29.21 @@ -86,7 +86,7 @@ static long evtchn_bind_interdomain(evtc
   29.22           ((p2 = find_domain_by_id(dom2)) == NULL) )
   29.23      {
   29.24          if ( p1 != NULL )
   29.25 -            put_task_struct(p1);
   29.26 +            put_domain(p1);
   29.27          return -ESRCH;
   29.28      }
   29.29  
   29.30 @@ -134,8 +134,8 @@ static long evtchn_bind_interdomain(evtc
   29.31      if ( p1 != p2 )
   29.32          spin_unlock(&p2->event_channel_lock);
   29.33      
   29.34 -    put_task_struct(p1);
   29.35 -    put_task_struct(p2);
   29.36 +    put_domain(p1);
   29.37 +    put_domain(p2);
   29.38  
   29.39      bind->port1 = port1;
   29.40      bind->port2 = port2;
   29.41 @@ -146,7 +146,7 @@ static long evtchn_bind_interdomain(evtc
   29.42  
   29.43  static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
   29.44  {
   29.45 -    struct task_struct *p = current;
   29.46 +    struct domain *p = current;
   29.47      int virq = bind->virq;
   29.48      int port;
   29.49  
   29.50 @@ -183,7 +183,7 @@ static long evtchn_bind_virq(evtchn_bind
   29.51  
   29.52  static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
   29.53  {
   29.54 -    struct task_struct *p = current;
   29.55 +    struct domain *p = current;
   29.56      int pirq = bind->pirq;
   29.57      int port, rc;
   29.58  
   29.59 @@ -220,9 +220,9 @@ static long evtchn_bind_pirq(evtchn_bind
   29.60  }
   29.61  
   29.62  
   29.63 -static long __evtchn_close(struct task_struct *p1, int port1)
   29.64 +static long __evtchn_close(struct domain *p1, int port1)
   29.65  {
   29.66 -    struct task_struct *p2 = NULL;
   29.67 +    struct domain *p2 = NULL;
   29.68      event_channel_t    *chn1, *chn2;
   29.69      int                 port2;
   29.70      long                rc = 0;
   29.71 @@ -261,7 +261,17 @@ static long __evtchn_close(struct task_s
   29.72          if ( p2 == NULL )
   29.73          {
   29.74              p2 = chn1[port1].u.remote.dom;
   29.75 -            get_task_struct(p2);
   29.76 +
   29.77 +            /* If we unlock p1 then we could lose p2. Must get a reference. */
   29.78 +            if ( unlikely(!get_domain(p2)) )
   29.79 +            {
   29.80 +                /*
   29.81 +                 * Failed to obtain a reference. No matter: p2 must be dying
   29.82 +                 * and so will close this event channel for us.
   29.83 +                 */
   29.84 +                p2 = NULL;
   29.85 +                goto out;
   29.86 +            }
   29.87  
   29.88              if ( p1->domain < p2->domain )
   29.89              {
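
When __evtchn_close() must hold both endpoints' channel locks, it acquires them ordered by domain id (lower id first), the standard discipline for making a two-lock protocol deadlock-free. A standalone model with pthread mutexes; in the real function p1's lock is already held on entry, so it is the re-acquisition that happens in this order:

    #include <pthread.h>

    struct dom {
        unsigned int    id;
        pthread_mutex_t event_channel_lock;
    };

    /* Take two domains' locks in a globally consistent order. */
    static void lock_pair(struct dom *a, struct dom *b)
    {
        if (a == b) {
            pthread_mutex_lock(&a->event_channel_lock);
        } else if (a->id < b->id) {
            pthread_mutex_lock(&a->event_channel_lock);
            pthread_mutex_lock(&b->event_channel_lock);
        } else {
            pthread_mutex_lock(&b->event_channel_lock);
            pthread_mutex_lock(&a->event_channel_lock);
        }
    }
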
   29.90 @@ -279,7 +289,7 @@ static long __evtchn_close(struct task_s
   29.91              rc = -EINVAL;
   29.92              goto out;
   29.93          }
   29.94 -        
   29.95 +    
   29.96          chn2  = p2->event_channel;
   29.97          port2 = chn1[port1].u.remote.port;
   29.98  
   29.99 @@ -307,7 +317,7 @@ static long __evtchn_close(struct task_s
  29.100      {
  29.101          if ( p1 != p2 )
  29.102              spin_unlock(&p2->event_channel_lock);
  29.103 -        put_task_struct(p2);
  29.104 +        put_domain(p2);
  29.105      }
  29.106      
  29.107      spin_unlock(&p1->event_channel_lock);
  29.108 @@ -318,7 +328,7 @@ static long __evtchn_close(struct task_s
  29.109  
  29.110  static long evtchn_close(evtchn_close_t *close)
  29.111  {
  29.112 -    struct task_struct *p;
  29.113 +    struct domain *p;
  29.114      long                rc;
  29.115      domid_t             dom = close->dom;
  29.116  
  29.117 @@ -332,14 +342,14 @@ static long evtchn_close(evtchn_close_t 
  29.118  
  29.119      rc = __evtchn_close(p, close->port);
  29.120  
  29.121 -    put_task_struct(p);
  29.122 +    put_domain(p);
  29.123      return rc;
  29.124  }
  29.125  
  29.126  
  29.127  static long evtchn_send(int lport)
  29.128  {
  29.129 -    struct task_struct *lp = current, *rp;
  29.130 +    struct domain *lp = current, *rp;
  29.131      int                 rport;
  29.132  
  29.133      spin_lock(&lp->event_channel_lock);
  29.134 @@ -355,21 +365,17 @@ static long evtchn_send(int lport)
  29.135      rp    = lp->event_channel[lport].u.remote.dom;
  29.136      rport = lp->event_channel[lport].u.remote.port;
  29.137  
  29.138 -    get_task_struct(rp);
  29.139 +    evtchn_set_pending(rp, rport);
  29.140  
  29.141      spin_unlock(&lp->event_channel_lock);
  29.142  
  29.143 -    evtchn_set_pending(rp, rport);
  29.144 -
  29.145 -    put_task_struct(rp);
  29.146 -
  29.147      return 0;
  29.148  }
  29.149  
  29.150  
  29.151  static long evtchn_status(evtchn_status_t *status)
  29.152  {
  29.153 -    struct task_struct *p;
  29.154 +    struct domain *p;
  29.155      domid_t             dom = status->dom;
  29.156      int                 port = status->port;
  29.157      event_channel_t    *chn;
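
evtchn_send() sheds its reference pair entirely: the remote domain pointer is now dereferenced only while the local channel lock is held, and since closing a channel takes that same lock, the pointer cannot go stale inside the critical section. The shape of the new path, sketched with assumed field names and helpers:

    #include <errno.h>

    struct dom;   /* opaque here */

    struct chan {
        struct dom *remote_dom;
        int         remote_port;
        int         connected;        /* assumed state flag */
    };

    extern void chan_lock(struct dom *d);
    extern void chan_unlock(struct dom *d);
    extern void evtchn_set_pending(struct dom *d, int port);

    static long send_sketch(struct dom *lp, struct chan *channels, int lport)
    {
        long rc = 0;

        chan_lock(lp);
        if (!channels[lport].connected)
            rc = -EINVAL;
        else   /* safe: a close would need the lock we hold */
            evtchn_set_pending(channels[lport].remote_dom,
                               channels[lport].remote_port);
        chan_unlock(lp);
        return rc;
    }
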
  29.158 @@ -420,7 +426,7 @@ static long evtchn_status(evtchn_status_
  29.159  
  29.160   out:
  29.161      spin_unlock(&p->event_channel_lock);
  29.162 -    put_task_struct(p);
  29.163 +    put_domain(p);
  29.164      return rc;
  29.165  }
  29.166  
  29.167 @@ -476,7 +482,7 @@ long do_event_channel_op(evtchn_op_t *uo
  29.168  }
  29.169  
  29.170  
  29.171 -int init_event_channels(struct task_struct *p)
  29.172 +int init_event_channels(struct domain *p)
  29.173  {
  29.174      spin_lock_init(&p->event_channel_lock);
  29.175      p->event_channel = kmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t), 
  29.176 @@ -491,7 +497,7 @@ int init_event_channels(struct task_stru
  29.177  }
  29.178  
  29.179  
  29.180 -void destroy_event_channels(struct task_struct *p)
  29.181 +void destroy_event_channels(struct domain *p)
  29.182  {
  29.183      int i;
  29.184      if ( p->event_channel != NULL )
    30.1 --- a/xen/common/kernel.c	Thu Jun 17 16:33:33 2004 +0000
    30.2 +++ b/xen/common/kernel.c	Fri Jun 18 14:46:29 2004 +0000
    30.3 @@ -28,7 +28,7 @@
    30.4  #include <asm/domain_page.h>
    30.5  #include <hypervisor-ifs/dom0_ops.h>
    30.6  
    30.7 -kmem_cache_t *task_struct_cachep;
    30.8 +kmem_cache_t *domain_struct_cachep;
    30.9  
   30.10  struct e820entry {
   30.11      unsigned long addr_lo, addr_hi;        /* start of memory segment */
   30.12 @@ -103,7 +103,7 @@ static struct {
   30.13  
   30.14  void cmain(unsigned long magic, multiboot_info_t *mbi)
   30.15  {
   30.16 -    struct task_struct *new_dom;
   30.17 +    struct domain *new_dom;
   30.18      unsigned long max_page;
   30.19      unsigned char *cmdline;
   30.20      module_t *mod = (module_t *)__va(mbi->mods_addr);
   30.21 @@ -245,10 +245,10 @@ void cmain(unsigned long magic, multiboo
   30.22      kmem_cache_init();
   30.23      kmem_cache_sizes_init(max_page);
   30.24  
   30.25 -    task_struct_cachep = kmem_cache_create(
   30.26 -        "task_struct_cache", sizeof(struct task_struct),
   30.27 +    domain_struct_cachep = kmem_cache_create(
   30.28 +        "domain_cache", sizeof(struct domain),
   30.29          0, SLAB_HWCACHE_ALIGN, NULL, NULL);
   30.30 -    if ( task_struct_cachep == NULL )
   30.31 +    if ( domain_struct_cachep == NULL )
   30.32          panic("No slab cache for task structs.");
   30.33  
   30.34      start_of_day();
   30.35 @@ -261,7 +261,7 @@ void cmain(unsigned long magic, multiboo
   30.36      if ( new_dom == NULL )
   30.37          panic("Error creating domain 0\n");
   30.38  
   30.39 -    set_bit(PF_PRIVILEGED, &new_dom->flags);
   30.40 +    set_bit(DF_PRIVILEGED, &new_dom->flags);
   30.41  
   30.42      shadow_mode_init();
   30.43  
   30.44 @@ -287,8 +287,8 @@ void cmain(unsigned long magic, multiboo
   30.45  
   30.46      init_trace_bufs();
   30.47  
   30.48 -    wake_up(new_dom);
   30.49 -
   30.50 +    domain_controller_unpause(current);
   30.51 +    domain_controller_unpause(new_dom);
   30.52      startup_cpu_idle_loop();
   30.53  }
   30.54  
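
The cache rename implies that alloc_task_struct()/free_task_struct() became the alloc_domain_struct()/free_domain_struct() used in domain.c above; the wrappers themselves live in a header outside this changeset. Presumably they are thin veneers over the slab cache created in cmain(), something like this sketch:

    /* Assumed wrappers over domain_struct_cachep; not the actual header. */
    static inline struct domain *alloc_domain_struct(void)
    {
        return kmem_cache_alloc(domain_struct_cachep, GFP_KERNEL);
    }

    static inline void free_domain_struct(struct domain *d)
    {
        kmem_cache_free(domain_struct_cachep, d);
    }
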
    31.1 --- a/xen/common/keyhandler.c	Thu Jun 17 16:33:33 2004 +0000
    31.2 +++ b/xen/common/keyhandler.c	Fri Jun 18 14:46:29 2004 +0000
    31.3 @@ -69,16 +69,10 @@ static void halt_machine(u_char key, voi
    31.4      machine_restart(NULL); 
    31.5  }
    31.6  
    31.7 -static void kill_dom0(u_char key, void *dev_id, struct pt_regs *regs) 
    31.8 -{
    31.9 -    printk("'%c' pressed -> gracefully rebooting machine\n", key); 
   31.10 -    kill_other_domain(0, 0);
   31.11 -}
   31.12 -
   31.13  void do_task_queues(u_char key, void *dev_id, struct pt_regs *regs) 
   31.14  {
   31.15      unsigned long       flags;
   31.16 -    struct task_struct *p; 
   31.17 +    struct domain *p; 
   31.18      shared_info_t      *s; 
   31.19      s_time_t            now = NOW();
   31.20  
   31.21 @@ -89,10 +83,8 @@ void do_task_queues(u_char key, void *de
   31.22  
   31.23      for_each_domain ( p )
   31.24      {
   31.25 -        printk("Xen: DOM %u, CPU %d [has=%c], state = ",
   31.26 +        printk("Xen: DOM %u, CPU %d [has=%c]\n",
   31.27                 p->domain, p->processor, p->has_cpu ? 'T':'F'); 
   31.28 -        sched_prn_state(p ->state);
   31.29 -        printk(", hyp_events = %08x\n", p->hyp_events);
   31.30          s = p->shared_info; 
   31.31          printk("Guest: upcall_pend = %02x, upcall_mask = %02x\n", 
   31.32                 s->vcpu_data[0].evtchn_upcall_pending, 
   31.33 @@ -132,8 +124,7 @@ void initialize_keytable(void)
   31.34      add_key_handler('L', reset_sched_histo, "reset sched latency histogram");
   31.35      add_key_handler('q', do_task_queues, "dump task queues + guest state");
   31.36      add_key_handler('r', dump_runq,      "dump run queues");
   31.37 -    add_key_handler('B', kill_dom0,      "reboot machine gracefully"); 
   31.38 -    add_key_handler('R', halt_machine,   "reboot machine ungracefully"); 
   31.39 +    add_key_handler('R', halt_machine,   "reboot machine"); 
   31.40  #ifdef PERF_COUNTERS
   31.41      add_key_handler('p', perfc_printall, "print performance counters"); 
   31.42      add_key_handler('P', perfc_reset,    "reset performance counters"); 
    32.1 --- a/xen/common/memory.c	Thu Jun 17 16:33:33 2004 +0000
    32.2 +++ b/xen/common/memory.c	Fri Jun 18 14:46:29 2004 +0000
    32.3 @@ -151,10 +151,10 @@
    32.4  
    32.5  static int alloc_l2_table(struct pfn_info *page);
    32.6  static int alloc_l1_table(struct pfn_info *page);
    32.7 -static int get_page_from_pagenr(unsigned long page_nr, struct task_struct *p);
    32.8 +static int get_page_from_pagenr(unsigned long page_nr, struct domain *p);
    32.9  static int get_page_and_type_from_pagenr(unsigned long page_nr, 
   32.10                                           u32 type,
   32.11 -                                         struct task_struct *p);
   32.12 +                                         struct domain *p);
   32.13  
   32.14  static void free_l2_table(struct pfn_info *page);
   32.15  static void free_l1_table(struct pfn_info *page);
   32.16 @@ -178,7 +178,7 @@ static struct {
   32.17      unsigned long       deferred_ops;
   32.18      unsigned long       cr0;
   32.19      /* General-Purpose Subject, Page-Table Subject */
   32.20 -    struct task_struct *gps, *pts;
   32.21 +    struct domain *gps, *pts;
   32.22  } percpu_info[NR_CPUS] __cacheline_aligned;
   32.23  
   32.24  /* Determine the current General-Purpose Subject or Page-Table Subject. */
   32.25 @@ -241,7 +241,7 @@ void add_to_domain_alloc_list(unsigned l
   32.26      spin_unlock_irqrestore(&free_list_lock, flags);
   32.27  }
   32.28  
   32.29 -static void __invalidate_shadow_ldt(struct task_struct *p)
   32.30 +static void __invalidate_shadow_ldt(struct domain *p)
   32.31  {
   32.32      int i;
   32.33      unsigned long pfn;
   32.34 @@ -267,7 +267,7 @@ static void __invalidate_shadow_ldt(stru
   32.35  
   32.36  static inline void invalidate_shadow_ldt(void)
   32.37  {
   32.38 -    struct task_struct *p = current;
   32.39 +    struct domain *p = current;
   32.40      if ( p->mm.shadow_ldt_mapcnt != 0 )
   32.41          __invalidate_shadow_ldt(p);
   32.42  }
   32.43 @@ -294,10 +294,10 @@ int alloc_segdesc_page(struct pfn_info *
   32.44  /* Map shadow page at offset @off. */
   32.45  int map_ldt_shadow_page(unsigned int off)
   32.46  {
   32.47 -    struct task_struct *p = current;
   32.48 +    struct domain *p = current;
   32.49      unsigned long l1e;
   32.50  
   32.51 -    if ( unlikely(in_interrupt()) )
   32.52 +    if ( unlikely(in_irq()) )
   32.53          BUG();
   32.54  
   32.55      __get_user(l1e, (unsigned long *)&linear_pg_table[(p->mm.ldt_base >> 
   32.56 @@ -315,7 +315,7 @@ int map_ldt_shadow_page(unsigned int off
   32.57  }
   32.58  
   32.59  
   32.60 -static int get_page_from_pagenr(unsigned long page_nr, struct task_struct *p)
   32.61 +static int get_page_from_pagenr(unsigned long page_nr, struct domain *p)
   32.62  {
   32.63      struct pfn_info *page = &frame_table[page_nr];
   32.64  
   32.65 @@ -337,7 +337,7 @@ static int get_page_from_pagenr(unsigned
   32.66  
   32.67  static int get_page_and_type_from_pagenr(unsigned long page_nr, 
   32.68                                           u32 type,
   32.69 -                                         struct task_struct *p)
   32.70 +                                         struct domain *p)
   32.71  {
   32.72      struct pfn_info *page = &frame_table[page_nr];
   32.73  
   32.74 @@ -412,7 +412,7 @@ static int get_page_from_l1e(l1_pgentry_
   32.75  {
   32.76      unsigned long l1v = l1_pgentry_val(l1e);
   32.77      unsigned long pfn = l1_pgentry_to_pagenr(l1e);
   32.78 -    extern int domain_iomem_in_pfn(struct task_struct *p, unsigned long pfn);
   32.79 +    extern int domain_iomem_in_pfn(struct domain *p, unsigned long pfn);
   32.80  
   32.81      if ( !(l1v & _PAGE_PRESENT) )
   32.82          return 1;
   32.83 @@ -719,7 +719,7 @@ int alloc_page_type(struct pfn_info *pag
   32.84      if ( unlikely(test_and_clear_bit(_PGC_tlb_flush_on_type_change, 
   32.85                                       &page->count_and_flags)) )
   32.86      {
   32.87 -        struct task_struct *p = page->u.domain;
   32.88 +        struct domain *p = page->u.domain;
   32.89          mb(); /* Check zombie status before using domain ptr. */
   32.90          /*
   32.91           * NB. 'p' may no longer be valid by time we dereference it, so
   32.92 @@ -803,7 +803,7 @@ static int do_extended_command(unsigned 
   32.93      unsigned long pfn = ptr >> PAGE_SHIFT;
   32.94      unsigned long old_base_pfn;
   32.95      struct pfn_info *page = &frame_table[pfn];
   32.96 -    struct task_struct *p = current, *q;
   32.97 +    struct domain *p = current, *q;
   32.98      domid_t domid;
   32.99  
  32.100      switch ( cmd )
  32.101 @@ -926,7 +926,7 @@ static int do_extended_command(unsigned 
  32.102          else
  32.103          {
  32.104              if ( percpu_info[cpu].gps != NULL )
  32.105 -                put_task_struct(percpu_info[cpu].gps);
  32.106 +                put_domain(percpu_info[cpu].gps);
  32.107              percpu_info[cpu].gps = find_domain_by_id(domid);
  32.108              percpu_info[cpu].pts = (val & SET_PAGETABLE_SUBJECTDOM) ? 
  32.109                  percpu_info[cpu].gps : NULL;
  32.110 @@ -968,7 +968,7 @@ static int do_extended_command(unsigned 
  32.111  
  32.112      case MMUEXT_RESET_SUBJECTDOM:
  32.113          if ( percpu_info[cpu].gps != NULL )
  32.114 -            put_task_struct(percpu_info[cpu].gps);
  32.115 +            put_domain(percpu_info[cpu].gps);
  32.116          percpu_info[cpu].gps = percpu_info[cpu].pts = NULL;
  32.117          break;
  32.118  
  32.119 @@ -1146,7 +1146,7 @@ int do_mmu_update(mmu_update_t *ureqs, i
  32.120  
  32.121      if ( unlikely(percpu_info[cpu].gps != NULL) )
  32.122      {
  32.123 -        put_task_struct(percpu_info[cpu].gps);
  32.124 +        put_domain(percpu_info[cpu].gps);
  32.125          percpu_info[cpu].gps = percpu_info[cpu].pts = NULL;
  32.126      }
  32.127  
  32.128 @@ -1161,7 +1161,7 @@ int do_update_va_mapping(unsigned long p
  32.129                           unsigned long val, 
  32.130                           unsigned long flags)
  32.131  {
  32.132 -    struct task_struct *p = current;
  32.133 +    struct domain *p = current;
  32.134      int err = 0;
  32.135      unsigned int cpu = p->processor;
  32.136      unsigned long deferred_ops;
  32.137 @@ -1228,7 +1228,7 @@ int do_update_va_mapping_otherdomain(uns
  32.138                                       domid_t domid)
  32.139  {
  32.140      unsigned int cpu = smp_processor_id();
  32.141 -    struct task_struct *p;
  32.142 +    struct domain *p;
  32.143      int rc;
  32.144  
  32.145      if ( unlikely(!IS_PRIV(current)) )
  32.146 @@ -1243,7 +1243,7 @@ int do_update_va_mapping_otherdomain(uns
  32.147  
  32.148      rc = do_update_va_mapping(page_nr, val, flags);
  32.149  
  32.150 -    put_task_struct(p);
  32.151 +    put_domain(p);
  32.152      percpu_info[cpu].gps = NULL;
  32.153  
  32.154      return rc;
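
percpu_info[cpu].gps is a cached, reference-counted domain pointer held across an MMU-update batch: SET_SUBJECTDOM releases any previous subject before taking a new reference through find_domain_by_id(), and both RESET_SUBJECTDOM and the tail of do_mmu_update() release it and clear the pointer. The invariant -- gps is either NULL or holds exactly one reference -- as a sketch with simplified types:

    struct dom;
    extern struct dom *find_domain_by_id(unsigned int id);  /* takes a ref */
    extern void put_domain(struct dom *d);

    struct percpu_mmu_state {
        struct dom *gps;   /* NULL, or exactly one reference held */
    };

    static void set_subject(struct percpu_mmu_state *s, unsigned int domid)
    {
        if (s->gps != NULL)
            put_domain(s->gps);
        s->gps = find_domain_by_id(domid);  /* NULL if lookup fails */
    }

    static void reset_subject(struct percpu_mmu_state *s)
    {
        if (s->gps != NULL)
            put_domain(s->gps);
        s->gps = NULL;
    }
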
    33.1 --- a/xen/common/physdev.c	Thu Jun 17 16:33:33 2004 +0000
    33.2 +++ b/xen/common/physdev.c	Fri Jun 18 14:46:29 2004 +0000
    33.3 @@ -62,13 +62,13 @@ typedef struct _phys_dev_st {
    33.4      int flags;                       /* flags for access etc */
    33.5      struct pci_dev *dev;             /* the device */
    33.6      struct list_head node;           /* link to the list */
    33.7 -    struct task_struct *owner;       /* 'owner of this device' */
    33.8 +    struct domain *owner;       /* 'owner of this device' */
    33.9      int state;                       /* state for various checks */
   33.10  } phys_dev_t;
   33.11  
   33.12  
   33.13  /* Find a device on a per-domain device list. */
   33.14 -static phys_dev_t *find_pdev(struct task_struct *p, struct pci_dev *dev)
   33.15 +static phys_dev_t *find_pdev(struct domain *p, struct pci_dev *dev)
   33.16  {
   33.17      phys_dev_t *t, *res = NULL;
   33.18      struct list_head *tmp;
   33.19 @@ -86,7 +86,7 @@ static phys_dev_t *find_pdev(struct task
   33.20  }
   33.21  
   33.22  /* Add a device to a per-domain device-access list. */
   33.23 -static void add_dev_to_task(struct task_struct *p, 
   33.24 +static void add_dev_to_task(struct domain *p, 
   33.25                              struct pci_dev *dev, int acc)
   33.26  {
   33.27      phys_dev_t *pdev;
   33.28 @@ -124,7 +124,7 @@ static void add_dev_to_task(struct task_
   33.29  int physdev_pci_access_modify(
   33.30      domid_t dom, int bus, int dev, int func, int enable)
   33.31  {
   33.32 -    struct task_struct *p;
   33.33 +    struct domain *p;
   33.34      struct pci_dev *pdev;
   33.35      int i, j, rc = 0;
   33.36   
   33.37 @@ -146,10 +146,10 @@ int physdev_pci_access_modify(
   33.38          return -ESRCH;
   33.39  
   33.40      /* Make the domain privileged. */
   33.41 -    set_bit(PF_PHYSDEV, &p->flags);
   33.42 +    set_bit(DF_PHYSDEV, &p->flags);
   33.43  	/* FIXME: MAW for now make the domain REALLY privileged so that it
   33.44  	 * can run a backend driver (hw access should work OK otherwise) */
   33.45 -	set_bit(PF_PRIVILEGED, &p->flags);
   33.46 +	set_bit(DF_PRIVILEGED, &p->flags);
   33.47  
   33.48      /* Grant write access to the specified device. */
   33.49      if ( (pdev = pci_find_slot(bus, PCI_DEVFN(dev, func))) == NULL )
   33.50 @@ -209,13 +209,13 @@ int physdev_pci_access_modify(
   33.51          /* rights to IO memory regions are checked when the domain maps them */
   33.52      }
   33.53   out:
   33.54 -    put_task_struct(p);
   33.55 +    put_domain(p);
   33.56      return rc;
   33.57  }
   33.58  
   33.59  /* Check if a domain controls a device with IO memory within frame @pfn.
   33.60   * Returns: 1 if the domain should be allowed to map @pfn, 0 otherwise.  */
   33.61 -int domain_iomem_in_pfn(struct task_struct *p, unsigned long pfn)
   33.62 +int domain_iomem_in_pfn(struct domain *p, unsigned long pfn)
   33.63  {
   33.64      int ret = 0;
   33.65      struct list_head *l;
   33.66 @@ -255,7 +255,7 @@ int domain_iomem_in_pfn(struct task_stru
   33.67  }
   33.68  
   33.69  /* check if a domain has general access to a device */
   33.70 -inline static int check_dev_acc (struct task_struct *p,
   33.71 +inline static int check_dev_acc (struct domain *p,
   33.72                                   int bus, int dev, int func,
   33.73                                   phys_dev_t **pdev) 
   33.74  {
   33.75 @@ -720,7 +720,7 @@ int pcidev_dom0_hidden(struct pci_dev *d
   33.76  
   33.77  
   33.78  /* Domain 0 has read access to all devices. */
   33.79 -void physdev_init_dom0(struct task_struct *p)
   33.80 +void physdev_init_dom0(struct domain *p)
   33.81  {
   33.82      struct pci_dev *dev;
   33.83      phys_dev_t *pdev;
   33.84 @@ -747,6 +747,6 @@ void physdev_init_dom0(struct task_struc
   33.85          }
   33.86      }
   33.87  
   33.88 -    set_bit(PF_PHYSDEV, &p->flags);
   33.89 +    set_bit(DF_PHYSDEV, &p->flags);
   33.90  }
   33.91  
    34.1 --- a/xen/common/sched_atropos.c	Thu Jun 17 16:33:33 2004 +0000
    34.2 +++ b/xen/common/sched_atropos.c	Fri Jun 18 14:46:29 2004 +0000
    34.3 @@ -23,6 +23,12 @@
    34.4  #include <hypervisor-ifs/sched_ctl.h>
    34.5  #include <xen/trace.h>
    34.6  
    34.7 +/*
    34.8 + * KAF -- Atropos is broken by the new scheduler interfaces.
    34.9 + * It'll need fixing to get rid of use of ATROPOS_TASK_*
   34.10 + */
   34.11 +#ifdef KAF_KILLED
   34.12 +
   34.13  #define ATROPOS_TASK_UNBLOCKED 16
   34.14  #define ATROPOS_TASK_WAIT      32
   34.15  
   34.16 @@ -34,7 +40,7 @@
   34.17  struct at_dom_info
   34.18  {
   34.19      /* MAW Xen additions */
   34.20 -    struct task_struct *owner; /* the task_struct this data belongs to */
   34.21 +    struct domain *owner;      /* the domain this data belongs to */
   34.22      struct list_head waitq;    /* wait queue                           */
   34.23      int reason;                /* reason domain was last scheduled     */
   34.24  
   34.25 @@ -82,8 +88,8 @@ static int q_len(struct list_head *q)
   34.26  }
   34.27  
   34.28  
   34.29 -/** waitq_el - get the task_struct that owns a wait queue list element */
   34.30 -static inline struct task_struct * waitq_el(struct list_head *l)
   34.31 +/** waitq_el - get the domain that owns a wait queue list element */
   34.32 +static inline struct domain *waitq_el(struct list_head *l)
   34.33  {
   34.34      struct at_dom_info *inf;
   34.35      inf = list_entry(l, struct at_dom_info, waitq);
   34.36 @@ -105,7 +111,7 @@ static inline struct task_struct * waitq
   34.37   * These are scheduled in preference to domains with remain < 0 
   34.38   * in an attempt to improve interactive performance.
   34.39   */
   34.40 -static void requeue(struct task_struct *sdom)
   34.41 +static void requeue(struct domain *sdom)
   34.42  {
   34.43      struct at_dom_info *inf = DOM_INFO(sdom);
   34.44      struct list_head *prev = WAITQ(sdom->processor);
   34.45 @@ -135,14 +141,14 @@ static void requeue(struct task_struct *
   34.46          if ( next == WAITQ(sdom->processor) )
   34.47              list_add_tail(&inf->waitq, WAITQ(sdom->processor));
   34.48      }
   34.49 -    else if ( sdom->state == TASK_RUNNING )
   34.50 +    else if ( domain_runnable(sdom) )
   34.51      {
   34.52          /* insert into ordered run queue */
   34.53          prev = RUNQ(sdom->processor);
   34.54  
   34.55          list_for_each(next, RUNQ(sdom->processor))
   34.56          {
   34.57 -            struct task_struct *p = list_entry(next, struct task_struct,
   34.58 +            struct domain *p = list_entry(next, struct domain,
   34.59                                                 run_list);
   34.60  
   34.61              if( DOM_INFO(p)->deadline > inf->deadline || is_idle_task(p) )
   34.62 @@ -162,7 +168,7 @@ static void requeue(struct task_struct *
   34.63  }
   34.64  
   34.65  /* prepare a task to be added to scheduling */
   34.66 -static void at_add_task(struct task_struct *p)
   34.67 +static void at_add_task(struct domain *p)
   34.68  {
   34.69      s_time_t now = NOW();
   34.70  
   34.71 @@ -205,7 +211,7 @@ static void at_add_task(struct task_stru
   34.72   * dequeue - remove a domain from any queues it is on.
   34.73   * @sdom:    the task to remove
   34.74   */
   34.75 -static void dequeue(struct task_struct *sdom)
   34.76 +static void dequeue(struct domain *sdom)
   34.77  {
   34.78      struct at_dom_info *inf = DOM_INFO(sdom);
   34.79  
   34.80 @@ -244,7 +250,7 @@ static void dequeue(struct task_struct *
   34.81   *  idea is to give better response times to unblocking whilst preserving QoS
   34.82   *  guarantees to other domains.
   34.83   */
   34.84 -static void unblock(struct task_struct *sdom)
   34.85 +static void unblock(struct domain *sdom)
   34.86  {
   34.87      s_time_t time = NOW();
   34.88      struct at_dom_info *inf = DOM_INFO(sdom);
   34.89 @@ -266,8 +272,6 @@ static void unblock(struct task_struct *
   34.90          inf->slice = inf->nat_slice / ( inf->nat_period / inf->latency );
   34.91          inf->period = inf->latency;
   34.92  	inf->remain = inf->slice;
   34.93 -
   34.94 -        sdom->state = TASK_RUNNING;
   34.95      }
   34.96      else
   34.97      {
   34.98 @@ -293,10 +297,10 @@ static void unblock(struct task_struct *
   34.99   */
  34.100  task_slice_t ksched_scheduler(s_time_t time)
  34.101  {
  34.102 -    struct task_struct	*cur_sdom = current;  /* Current sdom           */
  34.103 +    struct domain	*cur_sdom = current;  /* Current sdom           */
  34.104      s_time_t     newtime;
  34.105      s_time_t      ranfor;	        /* How long the domain ran      */
  34.106 -    struct task_struct	*sdom;	        /* tmp. scheduling domain	*/
  34.107 +    struct domain	*sdom;	        /* tmp. scheduling domain	*/
  34.108      int   reason;                       /* reason for reschedule        */
  34.109      int cpu = cur_sdom->processor;      /* current CPU                  */
  34.110      struct at_dom_info *cur_info;
  34.111 @@ -328,8 +332,8 @@ task_slice_t ksched_scheduler(s_time_t t
  34.112  
  34.113      dequeue(cur_sdom);
  34.114  
  34.115 -    if ((cur_sdom->state == TASK_RUNNING) ||
  34.116 -        (cur_sdom->state == ATROPOS_TASK_UNBLOCKED))
  34.117 +    if ( domain_runnable(cur_sdom) || 
  34.118 +         (cur_sdom->state == ATROPOS_TASK_UNBLOCKED) )
  34.119      {
  34.120  
  34.121  	/* In this block, we are doing accounting for an sdom which has 
  34.122 @@ -399,9 +403,7 @@ task_slice_t ksched_scheduler(s_time_t t
  34.123  	inf->prevddln = inf->deadline;
  34.124  	inf->deadline += inf->period;
  34.125  
  34.126 -        if(inf->remain > 0)
  34.127 -            sdom->state = TASK_RUNNING;
  34.128 -        else
  34.129 +        if ( inf->remain <= 0 )
  34.130              sdom->state = ATROPOS_TASK_WAIT;
  34.131  
  34.132  	/* Place on the appropriate queue */
  34.133 @@ -420,7 +422,7 @@ task_slice_t ksched_scheduler(s_time_t t
  34.134      
  34.135      /* we guarantee there's always something on the runqueue */
  34.136      cur_sdom = list_entry(RUNQ(cpu)->next,
  34.137 -                          struct task_struct, run_list);
  34.138 +                          struct domain, run_list);
  34.139  
  34.140      cur_info = DOM_INFO(cur_sdom);
  34.141      newtime = time + cur_info->remain;
  34.142 @@ -550,7 +552,7 @@ static void at_dump_cpu_state(int cpu)
  34.143  }
  34.144  
  34.145  /* print relevant per-domain info for a run queue dump */
  34.146 -static void at_dump_runq_el(struct task_struct *p)
  34.147 +static void at_dump_runq_el(struct domain *p)
  34.148  {
  34.149      printk("lastschd = %llu, xtratime = %d ",
  34.150             p->lastschd, DOM_INFO(p)->xtratime);
  34.151 @@ -558,7 +560,7 @@ static void at_dump_runq_el(struct task_
  34.152  
  34.153  
  34.154  /* set or fetch domain scheduling parameters */
  34.155 -static int at_adjdom(struct task_struct *p, struct sched_adjdom_cmd *cmd)
  34.156 +static int at_adjdom(struct domain *p, struct sched_adjdom_cmd *cmd)
  34.157  {
  34.158      if ( cmd->direction == SCHED_INFO_PUT )
  34.159      {
  34.160 @@ -586,7 +588,7 @@ static int at_adjdom(struct task_struct 
  34.161  
  34.162  
  34.163  /** at_alloc_task - allocate private info for a task */
  34.164 -static int at_alloc_task(struct task_struct *p)
  34.165 +static int at_alloc_task(struct domain *p)
  34.166  {
  34.167      ASSERT(p != NULL);
  34.168  
  34.169 @@ -601,7 +603,7 @@ static int at_alloc_task(struct task_str
  34.170  
  34.171  
  34.172  /* free memory associated with a task */
  34.173 -static void at_free_task(struct task_struct *p)
  34.174 +static void at_free_task(struct domain *p)
  34.175  {
  34.176      kmem_cache_free( dom_info_cache, DOM_INFO(p) );
  34.177  }
  34.178 @@ -627,12 +629,13 @@ static int at_prn_state(int state)
  34.179      return ret;
  34.180  }
  34.181      
  34.182 +#endif /* KAF_KILLED */
  34.183  
  34.184  struct scheduler sched_atropos_def = {
  34.185      .name           = "Atropos Soft Real Time Scheduler",
  34.186      .opt_name       = "atropos",
  34.187      .sched_id       = SCHED_ATROPOS,
  34.188 -
  34.189 +#ifdef KAF_KILLED
  34.190      .init_scheduler = at_init_scheduler,
  34.191      .alloc_task     = at_alloc_task,
  34.192      .add_task       = at_add_task,
  34.193 @@ -643,4 +646,5 @@ struct scheduler sched_atropos_def = {
  34.194      .dump_cpu_state = at_dump_cpu_state,
  34.195      .dump_runq_el   = at_dump_runq_el,
  34.196      .prn_state      = at_prn_state,
  34.197 +#endif /* KAF_KILLED */
  34.198  };
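Note: with KAF_KILLED left undefined, everything between the #ifdef markers above drops out, so Atropos is effectively disabled: only its identity survives. A sketch of what the definition compiles down to in that configuration (assuming the common SCHED_OP() dispatcher tolerates the resulting NULL hooks):

    struct scheduler sched_atropos_def = {
        .name     = "Atropos Soft Real Time Scheduler",
        .opt_name = "atropos",
        .sched_id = SCHED_ATROPOS,
        /* .init_scheduler, .alloc_task, etc. are compiled out; the
         * generic scheduler code must treat the NULL hooks as no-ops. */
    };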
    35.1 --- a/xen/common/sched_bvt.c	Thu Jun 17 16:33:33 2004 +0000
    35.2 +++ b/xen/common/sched_bvt.c	Fri Jun 18 14:46:29 2004 +0000
    35.3 @@ -95,7 +95,7 @@ static void __calc_evt(struct bvt_dom_in
    35.4   *
    35.5   * Returns non-zero on failure.
    35.6   */
    35.7 -int bvt_alloc_task(struct task_struct *p)
    35.8 +int bvt_alloc_task(struct domain *p)
    35.9  {
   35.10      if ( (BVT_INFO(p) = kmem_cache_alloc(dom_info_cache,GFP_KERNEL)) == NULL )
   35.11          return -1;
   35.12 @@ -106,7 +106,7 @@ int bvt_alloc_task(struct task_struct *p
   35.13  /*
   35.14   * Add and remove a domain
   35.15   */
   35.16 -void bvt_add_task(struct task_struct *p) 
   35.17 +void bvt_add_task(struct domain *p) 
   35.18  {
   35.19      struct bvt_dom_info *inf = BVT_INFO(p);
   35.20  
   35.21 @@ -138,14 +138,14 @@ void bvt_add_task(struct task_struct *p)
   35.22   * bvt_free_task - free BVT private structures for a task
   35.23   * @p:             task
   35.24   */
   35.25 -void bvt_free_task(struct task_struct *p)
   35.26 +void bvt_free_task(struct domain *p)
   35.27  {
   35.28      ASSERT( p->sched_priv != NULL );
   35.29      kmem_cache_free( dom_info_cache, p->sched_priv );
   35.30  }
   35.31  
   35.32  
   35.33 -void bvt_wake_up(struct task_struct *p)
   35.34 +void bvt_wake_up(struct domain *p)
   35.35  {
   35.36      struct bvt_dom_info *inf = BVT_INFO(p);
   35.37  
   35.38 @@ -166,7 +166,7 @@ void bvt_wake_up(struct task_struct *p)
   35.39  /* 
   35.40   * Block the currently-executing domain until a pertinent event occurs.
   35.41   */
   35.42 -static void bvt_do_block(struct task_struct *p)
   35.43 +static void bvt_do_block(struct domain *p)
   35.44  {
   35.45      BVT_INFO(p)->warpback = 0; 
   35.46  }
   35.47 @@ -189,7 +189,7 @@ int bvt_ctl(struct sched_ctl_cmd *cmd)
   35.48  }
   35.49  
   35.50  /* Adjust scheduling parameter for a given domain. */
   35.51 -int bvt_adjdom(struct task_struct *p,
   35.52 +int bvt_adjdom(struct domain *p,
   35.53                 struct sched_adjdom_cmd *cmd)
   35.54  {
   35.55      struct bvt_adjdom *params = &cmd->u.bvt;
   35.56 @@ -251,7 +251,7 @@ int bvt_adjdom(struct task_struct *p,
   35.57   */
   35.58  static task_slice_t bvt_do_schedule(s_time_t now)
   35.59  {
   35.60 -    struct task_struct *prev = current, *next = NULL, *next_prime, *p;
   35.61 +    struct domain *prev = current, *next = NULL, *next_prime, *p;
   35.62      struct list_head   *tmp;
   35.63      int                 cpu = prev->processor;
   35.64      s32                 r_time;     /* time for new dom to run */
   35.65 @@ -278,7 +278,7 @@ static task_slice_t bvt_do_schedule(s_ti
   35.66          
   35.67          __del_from_runqueue(prev);
   35.68          
   35.69 -        if ( likely(prev->state == TASK_RUNNING) )
   35.70 +        if ( domain_runnable(prev) )
   35.71              __add_to_runqueue_tail(prev);
   35.72      }
   35.73  
   35.74 @@ -299,7 +299,7 @@ static task_slice_t bvt_do_schedule(s_ti
   35.75  
   35.76      list_for_each ( tmp, &schedule_data[cpu].runqueue )
   35.77      {
   35.78 -        p     = list_entry(tmp, struct task_struct, run_list);
   35.79 +        p     = list_entry(tmp, struct domain, run_list);
   35.80          p_inf = BVT_INFO(p);
   35.81  
   35.82          if ( p_inf->evt < next_evt )
   35.83 @@ -385,7 +385,7 @@ static task_slice_t bvt_do_schedule(s_ti
   35.84  }
   35.85  
   35.86  
   35.87 -static void bvt_dump_runq_el(struct task_struct *p)
   35.88 +static void bvt_dump_runq_el(struct domain *p)
   35.89  {
   35.90      struct bvt_dom_info *inf = BVT_INFO(p);
   35.91      
   35.92 @@ -435,7 +435,7 @@ int bvt_init_scheduler()
   35.93      return 0;
   35.94  }
   35.95  
   35.96 -static void bvt_pause(struct task_struct *p)
   35.97 +static void bvt_pause(struct domain *p)
   35.98  {
   35.99      if( __task_on_runqueue(p) )
  35.100          __del_from_runqueue(p);
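Note: the TASK_RUNNING tests in this file give way to domain_runnable(), whose definition is not part of this diff. A hypothetical sketch of its likely shape, inferred from the DF_BLOCKED flag introduced in schedule.c below (any flag name other than DF_BLOCKED is an assumption for illustration only):

    /* Hypothetical: runnable means neither blocked awaiting an event
     * nor paused by the domain controller. */
    static inline int domain_runnable(struct domain *d)
    {
        return !test_bit(DF_BLOCKED, &d->flags) &&
               !test_bit(DF_CTRLPAUSE, &d->flags);   /* assumed name */
    }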
    36.1 --- a/xen/common/sched_rrobin.c	Thu Jun 17 16:33:33 2004 +0000
    36.2 +++ b/xen/common/sched_rrobin.c	Fri Jun 18 14:46:29 2004 +0000
    36.3 @@ -14,17 +14,17 @@ static s_time_t rr_slice = MILLISECS(10)
    36.4  
    36.5  static task_slice_t rr_do_schedule(s_time_t now)
    36.6  {
    36.7 -    struct task_struct *prev = current;
    36.8 +    struct domain *prev = current;
    36.9      int cpu = current->processor;
   36.10      task_slice_t ret;
   36.11   
   36.12      __del_from_runqueue(prev);
   36.13      
   36.14 -    if ( prev->state == TASK_RUNNING )
   36.15 +    if ( domain_runnable(prev) )
   36.16        __add_to_runqueue_tail(prev);
   36.17      
   36.18      ret.task = list_entry(schedule_data[cpu].runqueue.next,
   36.19 -                    struct task_struct, run_list);
   36.20 +                    struct domain, run_list);
   36.21  
   36.22      ret.time = rr_slice;
   36.23  
   36.24 @@ -50,7 +50,7 @@ static void rr_dump_settings()
   36.25      printk("rr_slice = %llu ", rr_slice);
   36.26  }
   36.27  
   36.28 -static void rr_pause(struct task_struct *p)
   36.29 +static void rr_pause(struct domain *p)
   36.30  {
   36.31      if ( __task_on_runqueue(p) )
   36.32          __del_from_runqueue(p);
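Note: round-robin is the smallest example of the per-scheduler do_schedule contract: return a task_slice_t naming the next domain and how long it may run before s_timer fires. A minimal sketch using only names visible in these hunks:

    /* Sketch of the do_schedule return contract. */
    static task_slice_t example_pick(struct domain *next)
    {
        task_slice_t ret;
        ret.task = next;               /* domain to run next */
        ret.time = MILLISECS(10);      /* slice length, as rr_slice above */
        return ret;
    }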
    37.1 --- a/xen/common/schedule.c	Thu Jun 17 16:33:33 2004 +0000
    37.2 +++ b/xen/common/schedule.c	Fri Jun 18 14:46:29 2004 +0000
    37.3 @@ -98,29 +98,26 @@ static struct ac_timer t_timer[NR_CPUS];
    37.4   */
    37.5  static struct ac_timer fallback_timer[NR_CPUS];
    37.6  
    37.7 -extern kmem_cache_t *task_struct_cachep;
    37.8 +extern kmem_cache_t *domain_struct_cachep;
    37.9  
   37.10 -void free_task_struct(struct task_struct *p)
   37.11 +void free_domain_struct(struct domain *p)
   37.12  {
   37.13      SCHED_OP(free_task, p);
   37.14 -    kmem_cache_free(task_struct_cachep, p);
   37.15 +    kmem_cache_free(domain_struct_cachep, p);
   37.16  }
   37.17  
   37.18 -/**
   37.19 - * alloc_task_struct - allocate a new task_struct and sched private structures
   37.20 - */
   37.21 -struct task_struct *alloc_task_struct(void)
   37.22 +struct domain *alloc_domain_struct(void)
   37.23  {
   37.24 -    struct task_struct *p;
   37.25 +    struct domain *p;
   37.26  
   37.27 -    if ( (p = kmem_cache_alloc(task_struct_cachep,GFP_KERNEL)) == NULL )
   37.28 +    if ( (p = kmem_cache_alloc(domain_struct_cachep,GFP_KERNEL)) == NULL )
   37.29          return NULL;
   37.30      
   37.31      memset(p, 0, sizeof(*p));
   37.32  
   37.33      if ( SCHED_OP(alloc_task, p) < 0 )
   37.34      {
   37.35 -        kmem_cache_free(task_struct_cachep,p);
   37.36 +        kmem_cache_free(domain_struct_cachep,p);
   37.37          return NULL;
   37.38      }
   37.39  
   37.40 @@ -130,9 +127,9 @@ struct task_struct *alloc_task_struct(vo
   37.41  /*
   37.42   * Add and remove a domain
   37.43   */
   37.44 -void sched_add_domain(struct task_struct *p) 
   37.45 +void sched_add_domain(struct domain *p) 
   37.46  {
   37.47 -    p->state = TASK_STOPPED;
   37.48 +    domain_controller_pause(p);
   37.49  
   37.50      if ( p->domain != IDLE_DOMAIN_ID )
   37.51      {
   37.52 @@ -152,26 +149,17 @@ void sched_add_domain(struct task_struct
   37.53      TRACE_3D(TRC_SCHED_DOM_ADD, _HIGH32(p->domain), _LOW32(p->domain), p);
   37.54  }
   37.55  
   37.56 -int sched_rem_domain(struct task_struct *p) 
   37.57 +void sched_rem_domain(struct domain *p) 
   37.58  {
   37.59 -    int x, y = p->state;
   37.60 -    do {
   37.61 -        if ( (x = y) == TASK_DYING ) return 0;
   37.62 -    } while ( (y = cmpxchg(&p->state, x, TASK_DYING)) != x );
   37.63 -
   37.64      rem_ac_timer(&p->timer);
   37.65 -
   37.66      SCHED_OP(rem_task, p);
   37.67 -
   37.68      TRACE_3D(TRC_SCHED_DOM_REM, _HIGH32(p->domain), _LOW32(p->domain), p);
   37.69 -
   37.70 -    return 1;
   37.71  }
   37.72  
   37.73  void init_idle_task(void)
   37.74  {
   37.75      unsigned long flags;
   37.76 -    struct task_struct *p = current;
   37.77 +    struct domain *p = current;
   37.78  
   37.79      if ( SCHED_OP(alloc_task, p) < 0)
   37.80          panic("Failed to allocate scheduler private data for idle task");
   37.81 @@ -179,47 +167,81 @@ void init_idle_task(void)
   37.82  
   37.83      spin_lock_irqsave(&schedule_lock[p->processor], flags);
   37.84      p->has_cpu = 1;
   37.85 -    p->state = TASK_RUNNING;
   37.86      if ( !__task_on_runqueue(p) )
   37.87          __add_to_runqueue_head(p);
   37.88      spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
   37.89  }
   37.90  
   37.91 -void __wake_up(struct task_struct *p)
   37.92 +/* Returns TRUE if the domain was actually woken up. */
   37.93 +int domain_wakeup(struct domain *d)
   37.94  {
   37.95 -    TRACE_3D(TRC_SCHED_WAKE, _HIGH32(p->domain), _LOW32(p->domain), p);
   37.96 -
   37.97 -    ASSERT(p->state != TASK_DYING);
   37.98 +    unsigned long       flags;
   37.99 +    int                 cpu = d->processor, woken_up = 0;
  37.100 +    struct domain      *curr;
  37.101 +    s_time_t            now, min_time;
  37.102  
  37.103 -    if ( unlikely(__task_on_runqueue(p)) )        
  37.104 -        return;
  37.105 -
  37.106 -    p->state = TASK_RUNNING;
  37.107 +    spin_lock_irqsave(&schedule_lock[cpu], flags);
  37.108  
  37.109 -    SCHED_OP(wake_up, p);
  37.110 +    if ( likely(domain_runnable(d)) && likely(!__task_on_runqueue(d)) )
  37.111 +    {
  37.112 +        woken_up = 1;
  37.113  
  37.114 +        TRACE_3D(TRC_SCHED_WAKE, _HIGH32(d->domain), _LOW32(d->domain), d);
  37.115 +        SCHED_OP(wake_up, d);
  37.116  #ifdef WAKEUP_HISTO
  37.117 -    p->wokenup = NOW();
   37.118 +        d->wokenup = NOW();
  37.119  #endif
  37.120 +
  37.121 +        ASSERT(__task_on_runqueue(d));
  37.122 +        ASSERT(!d->has_cpu);
  37.123 +
  37.124 +        now = NOW();
  37.125 +        curr = schedule_data[cpu].curr;
  37.126 +
  37.127 +        /* Currently-running domain should run at least for ctx_allow. */
  37.128 +        min_time = curr->lastschd + curr->min_slice;
  37.129 +
  37.130 +        if ( is_idle_task(curr) || (min_time <= now) )
  37.131 +            cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
  37.132 +        else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
  37.133 +            mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
  37.134 +    }
  37.135 +
  37.136 +    spin_unlock_irqrestore(&schedule_lock[cpu], flags);
  37.137 +
  37.138 +    return woken_up;
  37.139  }
  37.140  
  37.141  
  37.142 -void wake_up(struct task_struct *p)
  37.143 +void __domain_pause(struct domain *d)
  37.144  {
  37.145      unsigned long flags;
  37.146 -    spin_lock_irqsave(&schedule_lock[p->processor], flags);
  37.147 -    __wake_up(p);
  37.148 -    spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
  37.149 +    int           cpu = d->processor;
  37.150 +
  37.151 +    spin_lock_irqsave(&schedule_lock[cpu], flags);
  37.152 +
  37.153 +    if ( d->has_cpu )
  37.154 +        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
  37.155 +    else if ( __task_on_runqueue(d) )
  37.156 +        __del_from_runqueue(d);
  37.157 +
  37.158 +    spin_unlock_irqrestore(&schedule_lock[cpu], flags);
  37.159 +
  37.160 +    /* Synchronous. */
  37.161 +    while ( d->has_cpu )
  37.162 +    {
  37.163 +        smp_mb();
  37.164 +        cpu_relax();
  37.165 +    }
  37.166  }
  37.167  
  37.168 -/* 
  37.169 - * Block the currently-executing domain until a pertinent event occurs.
  37.170 - */
  37.171 +
  37.172 +/* Block the currently-executing domain until a pertinent event occurs. */
  37.173  long do_block(void)
  37.174  {
  37.175      ASSERT(current->domain != IDLE_DOMAIN_ID);
  37.176      current->shared_info->vcpu_data[0].evtchn_upcall_mask = 0;
  37.177 -    current->state = TASK_INTERRUPTIBLE;
  37.178 +    set_bit(DF_BLOCKED, &current->flags);
  37.179      TRACE_2D(TRC_SCHED_BLOCK, current->domain, current);
  37.180      __enter_scheduler();
  37.181      return 0;
  37.182 @@ -258,9 +280,9 @@ long do_sched_op(unsigned long op)
  37.183          break;
  37.184      }
  37.185  
  37.186 -    case SCHEDOP_stop:
  37.187 +    case SCHEDOP_suspend:
  37.188      {
  37.189 -        stop_domain((u8)(op >> SCHEDOP_reasonshift));
  37.190 +        domain_suspend((u8)(op >> SCHEDOP_reasonshift));
  37.191          break;
  37.192      }
  37.193  
  37.194 @@ -271,41 +293,10 @@ long do_sched_op(unsigned long op)
  37.195      return ret;
  37.196  }
  37.197  
  37.198 -
  37.199 -/*
  37.200 - * sched_pause_sync - synchronously pause a domain's execution.
  37.201 - * XXXX This is horribly broken -- here just as a place holder at present,
  37.202 - *                                 do not use.
  37.203 - */
  37.204 -void sched_pause_sync(struct task_struct *p)
  37.205 -{
  37.206 -    unsigned long flags;
  37.207 -    int cpu = p->processor;
  37.208 -
  37.209 -    spin_lock_irqsave(&schedule_lock[cpu], flags);
  37.210 -
  37.211 -    /* If not the current task, we can remove it from scheduling now. */
  37.212 -    if ( schedule_data[cpu].curr != p )
  37.213 -        SCHED_OP(pause, p);
  37.214 -
  37.215 -    p->state = TASK_PAUSED;
  37.216 -    
  37.217 -    spin_unlock_irqrestore(&schedule_lock[cpu], flags);
  37.218 -
  37.219 -    /* Spin until domain is descheduled by its local scheduler. */
  37.220 -    while ( schedule_data[cpu].curr == p )
  37.221 -    {
  37.222 -        send_hyp_event(p, _HYP_EVENT_NEED_RESCHED );
  37.223 -        do_yield();
  37.224 -    }
  37.225 -        
  37.226 -    /* The domain will not be scheduled again until we do a wake_up(). */
  37.227 -}
  37.228 -
  37.229  /* Per-domain one-shot-timer hypercall. */
  37.230  long do_set_timer_op(unsigned long timeout_hi, unsigned long timeout_lo)
  37.231  {
  37.232 -    struct task_struct *p = current;
  37.233 +    struct domain *p = current;
  37.234  
  37.235      rem_ac_timer(&p->timer);
  37.236      
  37.237 @@ -341,7 +332,7 @@ long sched_ctl(struct sched_ctl_cmd *cmd
  37.238  /* Adjust scheduling parameter for a given domain. */
  37.239  long sched_adjdom(struct sched_adjdom_cmd *cmd)
  37.240  {
  37.241 -    struct task_struct *p;    
  37.242 +    struct domain *p;    
  37.243      
  37.244      if ( cmd->sched_id != ops.sched_id )
  37.245          return -EINVAL;
  37.246 @@ -358,72 +349,18 @@ long sched_adjdom(struct sched_adjdom_cm
  37.247  
  37.248      SCHED_OP(adjdom, p, cmd);
  37.249  
  37.250 -    put_task_struct(p); 
  37.251 +    put_domain(p); 
  37.252      return 0;
  37.253  }
  37.254  
  37.255 -/*
  37.256 - * cause a run through the scheduler when appropriate
  37.257 - * Appropriate is:
  37.258 - * - current task is idle task
  37.259 - * - the current task already ran for it's context switch allowance
  37.260 - * Otherwise we do a run through the scheduler after the current tasks 
  37.261 - * context switch allowance is over.
  37.262 - */
  37.263 -unsigned long __reschedule(struct task_struct *p)
  37.264 -{
  37.265 -       int cpu = p->processor;
  37.266 -    struct task_struct *curr;
  37.267 -    s_time_t now, min_time;
  37.268 -
  37.269 -    TRACE_3D(TRC_SCHED_RESCHED, _HIGH32(p->domain), _LOW32(p->domain), p);
  37.270 -
  37.271 -    if ( unlikely(p->has_cpu || !__task_on_runqueue(p)) )
  37.272 -        return 0;
  37.273 -
  37.274 -    now = NOW();
  37.275 -    curr = schedule_data[cpu].curr;
  37.276 -    /* domain should run at least for ctx_allow */
  37.277 -    min_time = curr->lastschd + curr->min_slice;
  37.278 -
  37.279 -    if ( is_idle_task(curr) || (min_time <= now) )
  37.280 -    {
  37.281 -        set_bit(_HYP_EVENT_NEED_RESCHED, &curr->hyp_events);
  37.282 -        return (1 << p->processor);
  37.283 -    }
  37.284 -
  37.285 -    /* current hasn't been running for long enough -> reprogram timer.
  37.286 -     * but don't bother if timer would go off soon anyway */
  37.287 -    if ( schedule_data[cpu].s_timer.expires > min_time + TIME_SLOP )
  37.288 -        mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
  37.289 -
  37.290 -    return SCHED_OP(reschedule, p);
  37.291 -}
  37.292 -
  37.293 -void reschedule(struct task_struct *p)
  37.294 -{
  37.295 -    unsigned long flags, cpu_mask;
  37.296 -
  37.297 -    spin_lock_irqsave(&schedule_lock[p->processor], flags);
  37.298 -    cpu_mask = __reschedule(p);
  37.299 -
  37.300 -    spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
  37.301 -
  37.302 -#ifdef CONFIG_SMP
  37.303 -    cpu_mask &= ~(1 << smp_processor_id());
  37.304 -    if ( cpu_mask != 0 )
  37.305 -        smp_send_event_check_mask(cpu_mask);
  37.306 -#endif
  37.307 -}
  37.308 -
  37.309  /* 
  37.310   * The main function
  37.311   * - deschedule the current domain (scheduler independent).
  37.312   * - pick a new domain (scheduler dependent).
  37.313   */
  37.314 -asmlinkage void __enter_scheduler(void)
  37.315 +void __enter_scheduler(void)
  37.316  {
  37.317 -    struct task_struct *prev = current, *next = NULL;
  37.318 +    struct domain *prev = current, *next = NULL;
  37.319      int                 cpu = prev->processor;
  37.320      s_time_t            now;
  37.321      task_slice_t        next_slice;
  37.322 @@ -431,23 +368,20 @@ asmlinkage void __enter_scheduler(void)
  37.323  
  37.324      perfc_incrc(sched_run);
  37.325  
  37.326 -    clear_bit(_HYP_EVENT_NEED_RESCHED, &prev->hyp_events);
  37.327 -
  37.328      spin_lock_irq(&schedule_lock[cpu]);
  37.329  
  37.330      now = NOW();
  37.331  
  37.332      rem_ac_timer(&schedule_data[cpu].s_timer);
  37.333      
  37.334 -    ASSERT(!in_interrupt());
  37.335 +    ASSERT(!in_irq());
  37.336      ASSERT(__task_on_runqueue(prev));
  37.337 -    ASSERT(prev->state != TASK_UNINTERRUPTIBLE);
  37.338  
  37.339 -    if ( prev->state == TASK_INTERRUPTIBLE )
  37.340 +    if ( test_bit(DF_BLOCKED, &prev->flags) )
  37.341      {
  37.342 -        /* this check is needed to avoid a race condition */
  37.343 -        if ( signal_pending(prev) )
  37.344 -            prev->state = TASK_RUNNING;
  37.345 +        /* This check is needed to avoid a race condition. */
  37.346 +        if ( event_pending(prev) )
  37.347 +            clear_bit(DF_BLOCKED, &prev->flags);
  37.348          else
  37.349              SCHED_OP(do_block, prev);
  37.350      }
  37.351 @@ -504,9 +438,6 @@ asmlinkage void __enter_scheduler(void)
  37.352  
  37.353      switch_to(prev, next);
  37.354      
  37.355 -    if ( unlikely(prev->state == TASK_DYING) ) 
  37.356 -        put_task_struct(prev);
  37.357 -
  37.358      /* Mark a timer event for the newly-scheduled domain. */
  37.359      if ( !is_idle_task(next) )
  37.360          send_guest_virq(next, VIRQ_TIMER);
  37.361 @@ -519,7 +450,7 @@ asmlinkage void __enter_scheduler(void)
  37.362  /* No locking needed -- pointer comparison is safe :-) */
  37.363  int idle_cpu(int cpu)
  37.364  {
  37.365 -    struct task_struct *p = schedule_data[cpu].curr;
  37.366 +    struct domain *p = schedule_data[cpu].curr;
  37.367      return p == idle_task[cpu];
  37.368  }
  37.369  
  37.370 @@ -536,14 +467,14 @@ int idle_cpu(int cpu)
  37.371  static void s_timer_fn(unsigned long unused)
  37.372  {
  37.373      TRACE_0D(TRC_SCHED_S_TIMER_FN);
  37.374 -    set_bit(_HYP_EVENT_NEED_RESCHED, &current->hyp_events);
  37.375 +    raise_softirq(SCHEDULE_SOFTIRQ);
  37.376      perfc_incrc(sched_irq);
  37.377  }
  37.378  
  37.379  /* Periodic tick timer: send timer event to current domain*/
  37.380  static void t_timer_fn(unsigned long unused)
  37.381  {
  37.382 -    struct task_struct *p = current;
  37.383 +    struct domain *p = current;
  37.384  
  37.385      TRACE_0D(TRC_SCHED_T_TIMER_FN);
  37.386  
  37.387 @@ -557,7 +488,7 @@ static void t_timer_fn(unsigned long unu
  37.388  /* Domain timer function, sends a virtual timer interrupt to domain */
  37.389  static void dom_timer_fn(unsigned long data)
  37.390  {
  37.391 -    struct task_struct *p = (struct task_struct *)data;
  37.392 +    struct domain *p = (struct domain *)data;
  37.393      TRACE_0D(TRC_SCHED_DOM_TIMER_FN);
  37.394      send_guest_virq(p, VIRQ_TIMER);
  37.395  }
  37.396 @@ -566,7 +497,7 @@ static void dom_timer_fn(unsigned long d
  37.397  /* Fallback timer to ensure guests get time updated 'often enough'. */
  37.398  static void fallback_timer_fn(unsigned long unused)
  37.399  {
  37.400 -    struct task_struct *p = current;
  37.401 +    struct domain *p = current;
  37.402  
  37.403      TRACE_0D(TRC_SCHED_FALLBACK_TIMER_FN);
  37.404  
  37.405 @@ -582,6 +513,8 @@ void __init scheduler_init(void)
  37.406  {
  37.407      int i;
  37.408  
  37.409 +    open_softirq(SCHEDULE_SOFTIRQ, __enter_scheduler);
  37.410 +
  37.411      for ( i = 0; i < NR_CPUS; i++ )
  37.412      {
  37.413          INIT_LIST_HEAD(&schedule_data[i].runqueue);
  37.414 @@ -645,12 +578,12 @@ static void dump_rqueue(struct list_head
  37.415  {
  37.416      struct list_head *list;
  37.417      int loop = 0;
  37.418 -    struct task_struct  *p;
  37.419 +    struct domain  *p;
  37.420  
  37.421      printk ("QUEUE %s %lx   n: %lx, p: %lx\n", name,  (unsigned long)queue,
  37.422              (unsigned long) queue->next, (unsigned long) queue->prev);
  37.423      list_for_each (list, queue) {
  37.424 -        p = list_entry(list, struct task_struct, run_list);
  37.425 +        p = list_entry(list, struct domain, run_list);
  37.426          printk("%3d: %u has=%c ", loop++, p->domain, p->has_cpu ? 'T':'F');
  37.427          SCHED_OP(dump_runq_el, p);
  37.428          printk("c=0x%X%08X\n", (u32)(p->cpu_time>>32), (u32)p->cpu_time);
  37.429 @@ -680,36 +613,6 @@ void dump_runq(u_char key, void *dev_id,
  37.430      return; 
  37.431  }
  37.432  
  37.433 -/* print human-readable "state", given the numeric code for that state */
  37.434 -void sched_prn_state(int state)
  37.435 -{
  37.436 -    int ret = 0;
  37.437 -    
  37.438 -    switch(state)
  37.439 -    {
  37.440 -    case TASK_RUNNING:
  37.441 -        printk("Running");
  37.442 -        break;
  37.443 -    case TASK_INTERRUPTIBLE:
  37.444 -        printk("Int sleep");
  37.445 -        break;
  37.446 -    case TASK_UNINTERRUPTIBLE:
  37.447 -        printk("UInt sleep");
  37.448 -        break;
  37.449 -    case TASK_STOPPED:
  37.450 -        printk("Stopped");
  37.451 -        break;
  37.452 -    case TASK_DYING:
  37.453 -        printk("Dying");
  37.454 -        break;
  37.455 -    default:
  37.456 -        ret = SCHED_OP(prn_state, state);
  37.457 -    }
  37.458 -
  37.459 -    if ( ret != 0 )
  37.460 -        printk("Unknown");
  37.461 -}
  37.462 -
  37.463  #if defined(WAKEUP_HISTO) || defined(BLOCKTIME_HISTO)
  37.464  void print_sched_histo(u_char key, void *dev_id, struct pt_regs *regs)
  37.465  {
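Note: two structural points in schedule.c are worth calling out. First, rescheduling is now softirq-driven: s_timer_fn() and domain_wakeup() raise SCHEDULE_SOFTIRQ instead of setting _HYP_EVENT_NEED_RESCHED, and scheduler_init() registers __enter_scheduler() as the handler, so the context switch always happens at an outermost activation. Second, __domain_pause() is genuinely synchronous: it kicks the target's CPU and then spins until has_cpu clears. A minimal sketch of the softirq flow, using only calls that appear verbatim in the hunks above (function names here are illustrative):

    /* Registered once at boot, per scheduler_init(): */
    void example_init(void)
    {
        open_softirq(SCHEDULE_SOFTIRQ, __enter_scheduler);
    }

    /* Any path wanting a reschedule on the local CPU, per s_timer_fn(): */
    void example_kick(void)
    {
        raise_softirq(SCHEDULE_SOFTIRQ);   /* handled on the softirq exit path */
    }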
    38.1 --- a/xen/common/shadow.c	Thu Jun 17 16:33:33 2004 +0000
    38.2 +++ b/xen/common/shadow.c	Fri Jun 18 14:46:29 2004 +0000
    38.3 @@ -233,7 +233,7 @@ void shadow_mode_init(void)
    38.4  {
    38.5  }
    38.6  
    38.7 -int shadow_mode_enable( struct task_struct *p, unsigned int mode )
    38.8 +int shadow_mode_enable( struct domain *p, unsigned int mode )
    38.9  {
   38.10      struct mm_struct *m = &p->mm;
   38.11      struct shadow_status **fptr;
   38.12 @@ -298,7 +298,7 @@ nomem:
   38.13      return -ENOMEM;
   38.14  }
   38.15  
   38.16 -void shadow_mode_disable( struct task_struct *p )
   38.17 +void shadow_mode_disable( struct domain *p )
   38.18  {
   38.19      struct mm_struct *m = &p->mm;
   38.20      struct shadow_status *next;
   38.21 @@ -331,7 +331,7 @@ void shadow_mode_disable( struct task_st
   38.22      kfree( &m->shadow_ht[0] );
   38.23  }
   38.24  
   38.25 -static int shadow_mode_table_op( struct task_struct *p, 
   38.26 +static int shadow_mode_table_op( struct domain *p, 
   38.27  								 dom0_shadow_control_t *sc )
   38.28  {
   38.29      unsigned int op = sc->op;
   38.30 @@ -415,12 +415,9 @@ static int shadow_mode_table_op( struct 
   38.31  					0, bytes);
   38.32  		}
   38.33  
   38.34 -		if (zero)
   38.35 -		{
   38.36 -			/* might as well stop the domain as an optimization. */
   38.37 -			if ( p->state != TASK_STOPPED )
   38.38 -				send_guest_virq(p, VIRQ_STOP);
   38.39 -		}
   38.40 +        /* Might as well pause the domain as an optimization. */
   38.41 +		if ( zero )
   38.42 +            domain_controller_pause(p);
   38.43  
   38.44  		break;
   38.45      }
   38.46 @@ -471,7 +468,7 @@ out:
   38.47      return rc;
   38.48  }
   38.49  
   38.50 -int shadow_mode_control( struct task_struct *p, dom0_shadow_control_t *sc )
   38.51 +int shadow_mode_control( struct domain *p, dom0_shadow_control_t *sc )
   38.52  {
   38.53      unsigned int cmd = sc->op;
   38.54      int rc = 0;
    39.1 --- a/xen/common/slab.c	Thu Jun 17 16:33:33 2004 +0000
    39.2 +++ b/xen/common/slab.c	Fri Jun 18 14:46:29 2004 +0000
    39.3 @@ -1112,11 +1112,6 @@ static int kmem_cache_grow (kmem_cache_t
    39.4  	if (flags & SLAB_NO_GROW)
    39.5  		return 0;
    39.6  
    39.7 -#if 0
    39.8 -	if (in_interrupt() && (flags & SLAB_LEVEL_MASK) != SLAB_ATOMIC)
    39.9 -		BUG();
   39.10 -#endif
   39.11 -
   39.12  	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
   39.13  	local_flags = (flags & SLAB_LEVEL_MASK);
   39.14  	if (local_flags == SLAB_ATOMIC)
    40.1 --- a/xen/common/softirq.c	Thu Jun 17 16:33:33 2004 +0000
    40.2 +++ b/xen/common/softirq.c	Fri Jun 18 14:46:29 2004 +0000
    40.3 @@ -1,13 +1,12 @@
    40.4  /******************************************************************************
    40.5   * common/softirq.c
    40.6   * 
    40.7 - * Modified from the Linux original. Softirqs in Xen are only executed in
    40.8 - * an outermost activation (e.g., never within an interrupt activation).
    40.9 - * This simplifies some things and generally seems a good thing.
   40.10 + * Softirqs in Xen are only executed in an outermost activation (e.g., never 
   40.11 + * within an interrupt activation). This simplifies some things and generally 
   40.12 + * seems a good thing.
   40.13   * 
   40.14   * Copyright (c) 2003, K A Fraser
   40.15 - * 
   40.16 - * Copyright (C) 1992 Linus Torvalds
   40.17 + * Copyright (c) 1992, Linus Torvalds
   40.18   */
   40.19  
   40.20  #include <xen/config.h>
   40.21 @@ -18,35 +17,24 @@
   40.22  
   40.23  irq_cpustat_t irq_stat[NR_CPUS];
   40.24  
   40.25 -static struct softirq_action softirq_vec[32] __cacheline_aligned;
   40.26 +static softirq_handler softirq_handlers[NR_SOFTIRQS] __cacheline_aligned;
   40.27  
   40.28  asmlinkage void do_softirq()
   40.29  {
   40.30      unsigned int pending, cpu = smp_processor_id();
   40.31 -    struct softirq_action *h;
   40.32 -
   40.33 -    if ( unlikely(in_interrupt()) )
   40.34 -        BUG();
   40.35 -
   40.36 -    /*
   40.37 -     * XEN: This isn't real mutual-exclusion: it just ensures that in_softirq()
   40.38 -     * and in_interrupt() are both TRUE, allowing checks for erroneous reentry.
   40.39 -     */
   40.40 -    cpu_bh_disable(cpu);
   40.41 +    softirq_handler *h;
   40.42  
   40.43      while ( (pending = xchg(&softirq_pending(cpu), 0)) != 0 )
   40.44      {
   40.45 -        h = softirq_vec;
   40.46 +        h = softirq_handlers;
   40.47          while ( pending )
   40.48          {
   40.49              if ( pending & 1 )
   40.50 -                h->action(h);
   40.51 +                (*h)();
   40.52              h++;
   40.53              pending >>= 1;
   40.54          }
   40.55      }
   40.56 -
   40.57 -    cpu_bh_enable(cpu);
   40.58  }
   40.59  
   40.60  inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
   40.61 @@ -63,140 +51,7 @@ void raise_softirq(unsigned int nr)
   40.62      __cpu_raise_softirq(smp_processor_id(), nr);
   40.63  }
   40.64  
   40.65 -void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
   40.66 -{
   40.67 -    softirq_vec[nr].data = data;
   40.68 -    softirq_vec[nr].action = action;
   40.69 -}
   40.70 -
   40.71 -
   40.72 -/* Tasklets */
   40.73 -
   40.74 -struct tasklet_head tasklet_vec[NR_CPUS] __cacheline_aligned;
   40.75 -struct tasklet_head tasklet_hi_vec[NR_CPUS] __cacheline_aligned;
   40.76 -
   40.77 -void __tasklet_schedule(struct tasklet_struct *t)
   40.78 -{
   40.79 -    int cpu = smp_processor_id();
   40.80 -    unsigned long flags;
   40.81 -
   40.82 -    local_irq_save(flags);
   40.83 -    t->next = tasklet_vec[cpu].list;
   40.84 -    tasklet_vec[cpu].list = t;
   40.85 -    cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
   40.86 -    local_irq_restore(flags);
   40.87 -}
   40.88 -
   40.89 -void __tasklet_hi_schedule(struct tasklet_struct *t)
   40.90 -{
   40.91 -    int cpu = smp_processor_id();
   40.92 -    unsigned long flags;
   40.93 -
   40.94 -    local_irq_save(flags);
   40.95 -    t->next = tasklet_hi_vec[cpu].list;
   40.96 -    tasklet_hi_vec[cpu].list = t;
   40.97 -    cpu_raise_softirq(cpu, HI_SOFTIRQ);
   40.98 -    local_irq_restore(flags);
   40.99 -}
  40.100 -
  40.101 -static void tasklet_action(struct softirq_action *a)
  40.102 +void open_softirq(int nr, softirq_handler handler)
  40.103  {
  40.104 -    int cpu = smp_processor_id();
  40.105 -    struct tasklet_struct *list;
  40.106 -
  40.107 -    local_irq_disable();
  40.108 -    list = tasklet_vec[cpu].list;
  40.109 -    tasklet_vec[cpu].list = NULL;
  40.110 -    local_irq_enable();
  40.111 -
  40.112 -    while ( list != NULL )
  40.113 -    {
  40.114 -        struct tasklet_struct *t = list;
  40.115 -
  40.116 -        list = list->next;
  40.117 -
  40.118 -        if ( likely(tasklet_trylock(t)) )
  40.119 -        {
  40.120 -            if ( likely(!atomic_read(&t->count)) )
  40.121 -            {
  40.122 -                if ( unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, 
  40.123 -                                                  &t->state)) )
  40.124 -                    BUG();
  40.125 -                t->func(t->data);
  40.126 -            }
  40.127 -            tasklet_unlock(t);
  40.128 -            continue;
  40.129 -        }
  40.130 -
  40.131 -        local_irq_disable();
  40.132 -        t->next = tasklet_vec[cpu].list;
  40.133 -        tasklet_vec[cpu].list = t;
  40.134 -        __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
  40.135 -        local_irq_enable();
  40.136 -    }
  40.137 +    softirq_handlers[nr] = handler;
  40.138  }
  40.139 -
  40.140 -static void tasklet_hi_action(struct softirq_action *a)
  40.141 -{
  40.142 -    int cpu = smp_processor_id();
  40.143 -    struct tasklet_struct *list;
  40.144 -
  40.145 -    local_irq_disable();
  40.146 -    list = tasklet_hi_vec[cpu].list;
  40.147 -    tasklet_hi_vec[cpu].list = NULL;
  40.148 -    local_irq_enable();
  40.149 -
  40.150 -    while ( list != NULL )
  40.151 -    {
  40.152 -        struct tasklet_struct *t = list;
  40.153 -
  40.154 -        list = list->next;
  40.155 -
  40.156 -        if ( likely(tasklet_trylock(t)) )
  40.157 -        {
  40.158 -            if ( likely(!atomic_read(&t->count)) )
  40.159 -            {
  40.160 -                if ( unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, 
  40.161 -                                                  &t->state)) )
  40.162 -                    BUG();
  40.163 -                t->func(t->data);
  40.164 -            }
  40.165 -            tasklet_unlock(t);
  40.166 -            continue;
  40.167 -        }
  40.168 -
  40.169 -        local_irq_disable();
  40.170 -        t->next = tasklet_hi_vec[cpu].list;
  40.171 -        tasklet_hi_vec[cpu].list = t;
  40.172 -        __cpu_raise_softirq(cpu, HI_SOFTIRQ);
  40.173 -        local_irq_enable();
  40.174 -    }
  40.175 -}
  40.176 -
  40.177 -
  40.178 -void tasklet_init(struct tasklet_struct *t,
  40.179 -		  void (*func)(unsigned long), unsigned long data)
  40.180 -{
  40.181 -    t->next = NULL;
  40.182 -    t->state = 0;
  40.183 -    atomic_set(&t->count, 0);
  40.184 -    t->func = func;
  40.185 -    t->data = data;
  40.186 -}
  40.187 -
  40.188 -void tasklet_kill(struct tasklet_struct *t)
  40.189 -{
  40.190 -    if ( in_interrupt() )
  40.191 -        BUG();
  40.192 -    while ( test_and_set_bit(TASKLET_STATE_SCHED, &t->state) )
  40.193 -        while ( test_bit(TASKLET_STATE_SCHED, &t->state) )
  40.194 -            do_softirq();
  40.195 -    tasklet_unlock_wait(t);
  40.196 -    clear_bit(TASKLET_STATE_SCHED, &t->state);
  40.197 -}
  40.198 -
  40.199 -void __init softirq_init()
  40.200 -{
  40.201 -    open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
  40.202 -    open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
  40.203 -}
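Note: with tasklets removed, deferred work in Xen reduces to one function pointer per softirq number. A usage sketch against the slimmed-down interface; the softirq_handler typedef is inferred from do_softirq() above, and MY_SOFTIRQ is an assumed example index (it would have to be below NR_SOFTIRQS):

    /* Assumed, in <xen/softirq.h>: typedef void (*softirq_handler)(void); */

    static void my_deferred_work(void)
    {
        /* Runs from do_softirq() at the next outermost activation,
         * never nested inside an interrupt handler. */
    }

    void example_register(void)
    {
        open_softirq(MY_SOFTIRQ, my_deferred_work);
        raise_softirq(MY_SOFTIRQ);
    }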
    41.1 --- a/xen/common/trace.c	Thu Jun 17 16:33:33 2004 +0000
    41.2 +++ b/xen/common/trace.c	Fri Jun 18 14:46:29 2004 +0000
    41.3 @@ -48,7 +48,7 @@ void init_trace_bufs(void)
    41.4      unsigned long nr_pages;
    41.5      char         *rawbuf;
    41.6      struct t_buf *buf;
    41.7 -    struct task_struct *dom0;
    41.8 +    struct domain *dom0;
    41.9      
   41.10      if ( opt_tbuf_size == 0 )
   41.11      {
   41.12 @@ -72,7 +72,7 @@ void init_trace_bufs(void)
   41.13      for( i = 0; i < nr_pages; i++)
   41.14          SHARE_PFN_WITH_DOMAIN(virt_to_page(rawbuf+(i*PAGE_SIZE)), dom0);
   41.15      
   41.16 -    put_task_struct(dom0);
   41.17 +    put_domain(dom0);
   41.18  
   41.19      for ( i = 0; i < smp_num_cpus; i++ )
   41.20      {
    42.1 --- a/xen/drivers/char/console.c	Thu Jun 17 16:33:33 2004 +0000
    42.2 +++ b/xen/drivers/char/console.c	Fri Jun 18 14:46:29 2004 +0000
    42.3 @@ -243,7 +243,7 @@ static void switch_serial_input(void)
    42.4  static void __serial_rx(unsigned char c, struct pt_regs *regs)
    42.5  {
    42.6      key_handler *handler;
    42.7 -    struct task_struct *p;
    42.8 +    struct domain *p;
    42.9  
   42.10      if ( xen_rx )
   42.11      {
   42.12 @@ -257,7 +257,7 @@ static void __serial_rx(unsigned char c,
   42.13          {
   42.14              p = find_domain_by_id(0); /* only DOM0 reads the serial buffer */
   42.15              send_guest_virq(p, VIRQ_CONSOLE);
   42.16 -            put_task_struct(p);
   42.17 +            put_domain(p);
   42.18          }
   42.19      }
   42.20  }
   42.21 @@ -445,7 +445,7 @@ long do_console_write(char *str, unsigne
   42.22  
   42.23      return 0;
   42.24  #else
   42.25 -    if ( !test_and_set_bit(PF_CONSOLEWRITEBUG, &current->flags) )
   42.26 +    if ( !test_and_set_bit(DF_CONSOLEWRITEBUG, &current->flags) )
   42.27      {
   42.28          printk("DOM%u is attempting to use the deprecated "
   42.29                 "HYPERVISOR_console_write() interface.\n", current->domain);
    43.1 --- a/xen/drivers/char/keyboard.c	Thu Jun 17 16:33:33 2004 +0000
    43.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    43.3 @@ -1,254 +0,0 @@
    43.4 -/******************************************************************************
    43.5 - * keyboard.c
    43.6 - * 
    43.7 - * Driver for IBM PC AT- and PS/2-compatible keyboards.
    43.8 - * 
    43.9 - * This file contains portions of code from Linux.
   43.10 - */
   43.11 -
   43.12 -#include <asm/io.h>
   43.13 -#include <asm/irq.h>
   43.14 -#include <xen/sched.h>
   43.15 -#include <xen/keyhandler.h>  
   43.16 -#include <hypervisor-ifs/kbd.h>
   43.17 -#include <xen/event.h>
   43.18 -#include <xen/console.h>
   43.19 -#include <xen/interrupt.h>
   43.20 -
   43.21 -/* Hash-defines torn from <xen/pc_keyb.h> and <asm/keyboard.h> */
   43.22 -
   43.23 -#define KBD_STATUS_REG	     0x64 /* Status register (R) */
   43.24 -#define KBD_CNTL_REG	     0x64 /* Controller command register (W) */
   43.25 -#define KBD_DATA_REG	     0x60 /* Keyboard data register (R/W) */
   43.26 -
   43.27 -/* register status bits */
   43.28 -#define KBD_STAT_OBF 	     0x01 /* Keyboard output buffer full */
   43.29 -#define KBD_STAT_IBF 	     0x02 /* Keyboard input buffer full */
   43.30 -#define KBD_STAT_SELFTEST    0x04 /* Self test successful */
   43.31 -#define KBD_STAT_CMD	     0x08 /* Last write was a command write (0=data) */
   43.32 -
   43.33 -#define KBD_STAT_UNLOCKED    0x10 /* Zero if keyboard locked */
   43.34 -#define KBD_STAT_MOUSE_OBF   0x20 /* Mouse output buffer full */
   43.35 -#define KBD_STAT_GTO 	     0x40 /* General receive/xmit timeout */
   43.36 -#define KBD_STAT_PERR 	     0x80 /* Parity error */
   43.37 -
   43.38 -#define kbd_read_input() inb(KBD_DATA_REG)
   43.39 -#define kbd_read_status() inb(KBD_STATUS_REG)
   43.40 -
   43.41 -#define KEYBOARD_IRQ  1
   43.42 -#define AUX_IRQ      12
   43.43 -
   43.44 -#define kbd_write_output(val) outb(val, KBD_DATA_REG)
   43.45 -#define kbd_write_command(val) outb(val, KBD_CNTL_REG)
   43.46 -
   43.47 -#ifdef CONFIG_XEN_ATTENTION_KEY
   43.48 -
   43.49 -static int xen_attention_key_down = 0;
   43.50 -#define XEN_ATTENTION_KEY 0x46 /* Scroll Lock */
   43.51 -#define KBD_SCANCODE_KEYUP_MASK 0x80
   43.52 -
   43.53 -/* Simple scancode-to-key mappings for internal Xen use. */
   43.54 -
   43.55 -static unsigned char keymap_normal[] =
   43.56 -{
   43.57 -    0 , 0 ,'1','2', '3','4','5','6',    '7','8','9','0', '-','=','\b','\t',
   43.58 -    'q','w','e','r', 't','y','u','i',    'o','p','[',']','\r', 0 ,'a','s',
   43.59 -    'd','f','g','h', 'j','k','l',';',   '\'','`', 0 ,'#', 'z','x','c','v',
   43.60 -    'b','n','m',',', '.','/', 0 , 0 ,     0 , 0 , 0 , 0 ,  0 , 0 , 0 , 0 ,
   43.61 -
   43.62 -    0 , 0 , 0 , 0 ,  0 , 0 , 0 , 0 ,     0 , 0 , 0 , 0 ,  0 , 0 , 0 , 0 ,
   43.63 -    0 , 0 , 0 , 0 ,  0 , 0 ,'\\', 0 ,     0 , 0 , 0 , 0 ,  0 , 0 , 0 , 0 ,
   43.64 -    0 , 0 , 0 , 0 ,  0 , 0 , 0 , 0 ,     0 , 0 , 0 , 0 ,  0 , 0 , 0 , 0 ,
   43.65 -    0 , 0 , 0 , 0 ,  0 , 0 , 0 , 0 ,     0 , 0 , 0 , 0 ,  0 , 0 , 0 , 0 
   43.66 -};
   43.67 -
   43.68 -static unsigned char keymap_shift[] =
   43.69 -{
   43.70 -    0 , 0 ,'!','"', '#','$','%','^',    '&','*','(',')', '_','+','\b','\t',
   43.71 -    'Q','W','E','R', 'T','Y','U','I',    'O','P','{','}','\r', 0 ,'A','S',
   43.72 -    'D','F','G','H', 'J','K','L',':',    '@', 0 , 0 ,'~', 'Z','X','C','V',
   43.73 -    'B','N','M','<', '>','?', 0 , 0 ,     0 , 0 , 0 , 0 ,  0 , 0 , 0 , 0 ,
   43.74 -
   43.75 -    0 , 0 , 0 , 0 ,  0 , 0 , 0 , 0 ,     0 , 0 , 0 , 0 ,  0 , 0 , 0 , 0 ,
   43.76 -    0 , 0 , 0 , 0 ,  0 , 0 ,'|', 0 ,     0 , 0 , 0 , 0 ,  0 , 0 , 0 , 0 ,
   43.77 -    0 , 0 , 0 , 0 ,  0 , 0 , 0 , 0 ,     0 , 0 , 0 , 0 ,  0 , 0 , 0 , 0 ,
   43.78 -    0 , 0 , 0 , 0 ,  0 , 0 , 0 , 0 ,     0 , 0 , 0 , 0 ,  0 , 0 , 0 , 0 
   43.79 -};
   43.80 -
   43.81 -
   43.82 -static int keyboard_shift = 0;
   43.83 -
   43.84 -static unsigned char convert_scancode (unsigned char scancode)
   43.85 -{
   43.86 -    unsigned char value = 0;
   43.87 -
   43.88 -    switch ( scancode ) 
   43.89 -    {
   43.90 -
   43.91 -    case 0xaa: /* shift (left) UP */
   43.92 -    case 0xb6: /* shift (right) UP */
   43.93 -	keyboard_shift = 0;
   43.94 -	break;
   43.95 -
   43.96 -    case 0x2a: /* shift (left) DOWN */
   43.97 -    case 0x36: /* shift (right) DOWN */
   43.98 -	keyboard_shift = 1;
   43.99 -	break;
  43.100 -
  43.101 -    default:
  43.102 -        /* Only process key-up events */
  43.103 -        if(!(scancode & KBD_SCANCODE_KEYUP_MASK)) 
  43.104 -            break; 
  43.105 -	scancode = scancode & ~KBD_SCANCODE_KEYUP_MASK; 
  43.106 -	if (keyboard_shift)
  43.107 -	    value = keymap_shift[scancode];
  43.108 -	else
  43.109 -	    value = keymap_normal[scancode];
  43.110 -        break;
  43.111 -    }
  43.112 -
  43.113 -    return value;
  43.114 -}
  43.115 -
  43.116 -#endif /* CONFIG_XEN_ATTENTION_KEY */
  43.117 -
  43.118 -
  43.119 -/* We store kbd events awaiting receive by a guest OS in a ring buffer. */
  43.120 -#define KBD_RING_SIZE 64 
  43.121 -static int kbd_ring[KBD_RING_SIZE]; 
  43.122 -static int kbd_ring_prod = 0;
  43.123 -static int kbd_ring_cons = 0;
  43.124 -
  43.125 -#define KBD_RING_INC(_i) (((_i)+1) & (KBD_RING_SIZE-1))
  43.126 -#define KBD_RING_FULL    (KBD_RING_INC(kbd_ring_prod) == kbd_ring_cons)
  43.127 -#define KBD_RING_EMPTY   (kbd_ring_prod == kbd_ring_cons)
  43.128 -
  43.129 -static void kbd_ring_push(unsigned char status, unsigned char scancode)
  43.130 -{
  43.131 -    if ( KBD_RING_FULL )
  43.132 -        return;
  43.133 -    
  43.134 -    kbd_ring[kbd_ring_prod] = KBD_CODE(scancode, status);
  43.135 -    kbd_ring_prod = KBD_RING_INC(kbd_ring_prod);
  43.136 -}
  43.137 -
  43.138 -static int kbd_ring_pop(void)
  43.139 -{
  43.140 -    int ret;
  43.141 -
  43.142 -    if ( KBD_RING_EMPTY )
  43.143 -    {
  43.144 -        /* Read directly from controller - no events waiting in ring. */
  43.145 -        unsigned char status = kbd_read_status();
  43.146 -        unsigned char scancode = kbd_read_input(); 
  43.147 -        ret = KBD_CODE(scancode, status);
  43.148 -    }
  43.149 -    else
  43.150 -    {
  43.151 -        ret = kbd_ring[kbd_ring_cons];
  43.152 -        kbd_ring_cons = KBD_RING_INC(kbd_ring_cons);
  43.153 -    }
  43.154 -
  43.155 -    return ret;
  43.156 -}
  43.157 -
  43.158 -
  43.159 -/*
  43.160 - * NB. Lock is essential as there are two distinct interrupts (keyboard + aux).
  43.161 - * Also interrupts may disturb guest OS actions.
  43.162 - */
  43.163 -static spinlock_t kbd_lock;
  43.164 -
  43.165 -long do_kbd_op(unsigned char op, unsigned char val)
  43.166 -{
  43.167 -    unsigned long flags;
  43.168 -    long ret = -EINVAL;
  43.169 -
  43.170 -    if ( !CONSOLE_ISOWNER(current) ) 
  43.171 -        return -EPERM;  
  43.172 -
  43.173 -    spin_lock_irqsave(&kbd_lock, flags);
  43.174 -
  43.175 -    switch ( op )
  43.176 -    {
  43.177 -    case KBD_OP_WRITEOUTPUT:
  43.178 -        kbd_write_output(val);
  43.179 -        ret = 0L;
  43.180 -        break;
  43.181 -    case KBD_OP_WRITECOMMAND:
  43.182 -        kbd_write_command(val);
  43.183 -        ret = 0L;
  43.184 -        break;
  43.185 -    case KBD_OP_READ:
  43.186 -        ret = kbd_ring_pop();
  43.187 -        break;
  43.188 -    }
  43.189 -
  43.190 -    spin_unlock_irqrestore(&kbd_lock, flags);
  43.191 -
  43.192 -    return ret;
  43.193 -}
  43.194 -
  43.195 -
  43.196 -static void keyboard_interrupt(int irq, void *dev_id, struct pt_regs *regs)
  43.197 -{
  43.198 -    unsigned char status=0, scancode;
  43.199 -    unsigned int work = 1000;
  43.200 -    unsigned long flags;
  43.201 -    struct task_struct *p = CONSOLE_OWNER;
  43.202 -
  43.203 -    spin_lock_irqsave(&kbd_lock, flags);
  43.204 -
  43.205 -    while ( (--work > 0) && ((status = kbd_read_status()) & KBD_STAT_OBF) )
  43.206 -    {
  43.207 -        scancode = kbd_read_input();
  43.208 -      
  43.209 -#ifdef CONFIG_XEN_ATTENTION_KEY
  43.210 -        if ( !(status & (KBD_STAT_GTO | KBD_STAT_PERR | KBD_STAT_MOUSE_OBF)) )
  43.211 -        {
  43.212 -            if ( (scancode & ~KBD_SCANCODE_KEYUP_MASK) == XEN_ATTENTION_KEY )
  43.213 -            {
  43.214 -                xen_attention_key_down = !(scancode & KBD_SCANCODE_KEYUP_MASK);
  43.215 -		continue;
  43.216 -            } 
  43.217 -            else if ( xen_attention_key_down )
  43.218 -            {
  43.219 -                key_handler *handler; 
  43.220 -                unsigned char key;
  43.221 -                spin_unlock_irqrestore(&kbd_lock, flags);
  43.222 -                key = convert_scancode(scancode); 
  43.223 -                if ( key && (handler = get_key_handler(key)) )
  43.224 -                    (*handler)(key, dev_id, regs); 
  43.225 -                spin_lock_irqsave(&kbd_lock, flags);
  43.226 -                continue;
  43.227 -            }
  43.228 -        }
  43.229 -#endif
  43.230 -      
  43.231 -        if ( p != NULL )
  43.232 -        {
  43.233 -            kbd_ring_push(status, scancode);
  43.234 -            send_guest_virq(p, VIRQ_PS2);
  43.235 -        }
  43.236 -    }
  43.237 -    
  43.238 -    if ( !work )
  43.239 -        printk(KERN_ERR "xen_keyb: controller jammed (0x%02X).\n", status);
  43.240 -
  43.241 -    spin_unlock_irqrestore(&kbd_lock, flags);
  43.242 -
  43.243 -    if ( p != NULL )
  43.244 -        put_task_struct(p);
  43.245 -}
  43.246 -    
  43.247 -    
  43.248 -static struct irqaction keyb = { keyboard_interrupt, "Keyboard",   NULL };
  43.249 -static struct irqaction aux  = { keyboard_interrupt, "PS/2 Mouse", NULL };
  43.250 -
  43.251 -void initialize_keyboard()
  43.252 -{
  43.253 -    spin_lock_init(&kbd_lock);
  43.254 -    (void)setup_irq(KEYBOARD_IRQ, &keyb);
  43.255 -    (void)setup_irq(AUX_IRQ,      &aux);
  43.256 -}
  43.257 -
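Note: one idiom from the deleted driver worth keeping on hand: the keyboard event queue was a power-of-two ring indexed with a mask rather than a modulo. A generic sketch of the same pattern (names here are illustrative, not from the tree):

    #define RING_SIZE 64   /* must be a power of two for the mask to work */
    static int ring[RING_SIZE];
    static int prod, cons;

    #define RING_INC(i) (((i) + 1) & (RING_SIZE - 1))

    static void ring_push(int v)
    {
        if ( RING_INC(prod) == cons )
            return;                    /* full: drop, as kbd_ring_push() did */
        ring[prod] = v;
        prod = RING_INC(prod);
    }

    static int ring_pop(int empty_val)
    {
        int v;
        if ( prod == cons )
            return empty_val;          /* empty */
        v = ring[cons];
        cons = RING_INC(cons);
        return v;
    }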
    44.1 --- a/xen/include/asm-x86/hardirq.h	Thu Jun 17 16:33:33 2004 +0000
    44.2 +++ b/xen/include/asm-x86/hardirq.h	Fri Jun 18 14:46:29 2004 +0000
    44.3 @@ -8,20 +8,12 @@
    44.4  typedef struct {
    44.5  	unsigned int __softirq_pending;
    44.6  	unsigned int __local_irq_count;
    44.7 -	unsigned int __local_bh_count;
    44.8  	unsigned int __nmi_count;
    44.9  	unsigned long idle_timestamp;
   44.10  } ____cacheline_aligned irq_cpustat_t;
   44.11  
   44.12  #include <xen/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
   44.13  
   44.14 -/*
   44.15 - * Are we in an interrupt context? Either doing bottom half
   44.16 - * or hardware interrupt processing?
   44.17 - */
   44.18 -#define in_interrupt() ({ int __cpu = smp_processor_id(); \
   44.19 -	(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
   44.20 -
   44.21  #define in_irq() (local_irq_count(smp_processor_id()) != 0)
   44.22  
   44.23  #define irq_enter(cpu, irq)	(local_irq_count(cpu)++)
    45.1 --- a/xen/include/asm-x86/i387.h	Thu Jun 17 16:33:33 2004 +0000
    45.2 +++ b/xen/include/asm-x86/i387.h	Fri Jun 18 14:46:29 2004 +0000
    45.3 @@ -15,16 +15,16 @@
    45.4  #include <asm/processor.h>
    45.5  
    45.6  extern void init_fpu(void);
    45.7 -extern void save_init_fpu( struct task_struct *tsk );
    45.8 -extern void restore_fpu( struct task_struct *tsk );
    45.9 +extern void save_init_fpu( struct domain *tsk );
   45.10 +extern void restore_fpu( struct domain *tsk );
   45.11  
   45.12  #define unlazy_fpu( tsk ) do { \
   45.13 -	if ( test_bit(PF_USEDFPU, &tsk->flags) ) \
   45.14 +	if ( test_bit(DF_USEDFPU, &tsk->flags) ) \
   45.15  		save_init_fpu( tsk ); \
   45.16  } while (0)
   45.17  
   45.18  #define clear_fpu( tsk ) do { \
   45.19 -	if ( test_and_clear_bit(PF_USEDFPU, &tsk->flags) ) { \
   45.20 +	if ( test_and_clear_bit(DF_USEDFPU, &tsk->flags) ) { \
   45.21  		asm volatile("fwait"); \
   45.22  		stts(); \
   45.23  	} \
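Note: the lazy-FPU macros now key off DF_USEDFPU in domain->flags instead of the old PF_USEDFPU task flag; behaviour is otherwise unchanged. A sketch of the intended call site (function name assumed for illustration):

    /* On deschedule, flush FPU state only if this domain actually
     * touched the FPU since it last gained the CPU. */
    static void example_deschedule(struct domain *prev)
    {
        unlazy_fpu(prev);   /* no-op unless DF_USEDFPU is set */
    }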
    46.1 --- a/xen/include/asm-x86/ldt.h	Thu Jun 17 16:33:33 2004 +0000
    46.2 +++ b/xen/include/asm-x86/ldt.h	Fri Jun 18 14:46:29 2004 +0000
    46.3 @@ -3,7 +3,7 @@
    46.4  
    46.5  #ifndef __ASSEMBLY__
    46.6  
    46.7 -static inline void load_LDT(struct task_struct *p)
    46.8 +static inline void load_LDT(struct domain *p)
    46.9  {
   46.10      unsigned int cpu;
   46.11      struct desc_struct *desc;
    47.1 --- a/xen/include/asm-x86/pda.h	Thu Jun 17 16:33:33 2004 +0000
    47.2 +++ b/xen/include/asm-x86/pda.h	Fri Jun 18 14:46:29 2004 +0000
    47.3 @@ -9,7 +9,7 @@ struct x8664_pda {
    47.4  	unsigned long kernelstack;  /* TOS for current process */ 
    47.5  	unsigned long oldrsp; 	    /* user rsp for system call */
    47.6  	unsigned long irqrsp;	    /* Old rsp for interrupts. */ 
    47.7 -	struct task_struct *pcurrent;	/* Current process */
    47.8 +	struct domain *pcurrent;	/* Current domain */
    47.9          int irqcount;		    /* Irq nesting counter. Starts with -1 */  	
   47.10  	int cpunumber;		    /* Logical CPU number */
   47.11  	char *irqstackptr;	/* top of irqstack */
    48.1 --- a/xen/include/asm-x86/processor.h	Thu Jun 17 16:33:33 2004 +0000
    48.2 +++ b/xen/include/asm-x86/processor.h	Fri Jun 18 14:46:29 2004 +0000
    48.3 @@ -17,7 +17,7 @@
    48.4  #include <xen/spinlock.h>
    48.5  #include <hypervisor-ifs/hypervisor-if.h>
    48.6  
    48.7 -struct task_struct;
    48.8 +struct domain;
    48.9  
   48.10  /*
   48.11   * Default implementation of macro that returns current
   48.12 @@ -233,8 +233,7 @@ static inline void clear_in_cr4 (unsigne
   48.13  /*
   48.14   * Size of io_bitmap in longwords:
   48.15   * For Xen we support the full 8kbyte IO bitmap but use the io_bitmap_sel field
   48.16 - * of the task_struct to avoid a full 8kbyte copy when switching to / from
   48.17 - * domains with bits cleared.
   48.18 + * to avoid a full 8kbyte copy when switching to domains with bits cleared.
   48.19   */
   48.20  #define IO_BITMAP_SIZE	2048
   48.21  #define IO_BITMAP_BYTES (IO_BITMAP_SIZE * 4)
   48.22 @@ -330,7 +329,7 @@ extern struct desc_struct *idt_tables[];
   48.23              &((_p)->fast_trap_desc), 8))
   48.24  #endif
   48.25  
   48.26 -long set_fast_trap(struct task_struct *p, int idx);
   48.27 +long set_fast_trap(struct domain *p, int idx);
   48.28  
   48.29  #define INIT_THREAD  {						\
   48.30  	0, 0,		      		       			\
   48.31 @@ -405,11 +404,11 @@ static inline void write_ptbase(struct m
   48.32  #define GET_GDT_ENTRIES(_p)     ((*(u16 *)((_p)->mm.gdt + 0)))
   48.33  #define GET_GDT_ADDRESS(_p)     ((*(unsigned long *)((_p)->mm.gdt + 2)))
   48.34  
   48.35 -long set_gdt(struct task_struct *p, 
   48.36 +long set_gdt(struct domain *p, 
   48.37               unsigned long *frames, 
   48.38               unsigned int entries);
   48.39  
   48.40 -long set_debugreg(struct task_struct *p, int reg, unsigned long value);
   48.41 +long set_debugreg(struct domain *p, int reg, unsigned long value);
   48.42  
   48.43  struct microcode {
   48.44      unsigned int hdrver;
    49.1 --- a/xen/include/asm-x86/softirq.h	Thu Jun 17 16:33:33 2004 +0000
    49.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    49.3 @@ -1,17 +0,0 @@
    49.4 -#ifndef __ASM_SOFTIRQ_H
    49.5 -#define __ASM_SOFTIRQ_H
    49.6 -
    49.7 -#include <asm/atomic.h>
    49.8 -#include <asm/hardirq.h>
    49.9 -
   49.10 -#define cpu_bh_enable(cpu) \
   49.11 -		do { barrier(); local_bh_count(cpu)--; } while (0)
   49.12 -#define cpu_bh_disable(cpu) \
   49.13 -		do { local_bh_count(cpu)++; barrier(); } while (0)
   49.14 -
   49.15 -#define local_bh_disable()  cpu_bh_disable(smp_processor_id())
   49.16 -#define local_bh_enable()   cpu_bh_enable(smp_processor_id())
   49.17 -
   49.18 -#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
   49.19 -
   49.20 -#endif	/* __ASM_SOFTIRQ_H */
    50.1 --- a/xen/include/asm-x86/x86_32/current.h	Thu Jun 17 16:33:33 2004 +0000
    50.2 +++ b/xen/include/asm-x86/x86_32/current.h	Fri Jun 18 14:46:29 2004 +0000
    50.3 @@ -1,14 +1,14 @@
    50.4  #ifndef _X86_CURRENT_H
    50.5  #define _X86_CURRENT_H
    50.6  
    50.7 -struct task_struct;
    50.8 +struct domain;
    50.9  
   50.10  #define STACK_RESERVED \
   50.11 -    (sizeof(execution_context_t) + sizeof(struct task_struct *))
   50.12 +    (sizeof(execution_context_t) + sizeof(struct domain *))
   50.13  
   50.14 -static inline struct task_struct * get_current(void)
   50.15 +static inline struct domain * get_current(void)
   50.16  {
   50.17 -    struct task_struct *current;
   50.18 +    struct domain *current;
   50.19      __asm__ ( "orl %%esp,%0; andl $~3,%0; movl (%0),%0" 
   50.20                : "=r" (current) : "0" (STACK_SIZE-4) );
   50.21      return current;
   50.22 @@ -16,7 +16,7 @@ static inline struct task_struct * get_c
   50.23   
   50.24  #define current get_current()
   50.25  
   50.26 -static inline void set_current(struct task_struct *p)
   50.27 +static inline void set_current(struct domain *p)
   50.28  {
   50.29      __asm__ ( "orl %%esp,%0; andl $~3,%0; movl %1,(%0)" 
   50.30                : : "r" (STACK_SIZE-4), "r" (p) );    
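Note: get_current() works because set_current() stores the domain pointer in the top word of the per-CPU stack, and STACK_SIZE is a power of two: OR-ing ESP with STACK_SIZE-4 rounds up to that word, and the and-with-~3 masks any stray low bits. An equivalent C rendering, under those assumptions:

    static inline struct domain *get_current_equiv(void)
    {
        unsigned long esp;
        __asm__ ( "movl %%esp,%0" : "=r" (esp) );
        /* (esp | (STACK_SIZE-4)) & ~3 == address of the stack's top word */
        return *(struct domain **)((esp | (STACK_SIZE - 4)) & ~3UL);
    }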
    51.1 --- a/xen/include/asm-x86/x86_64/current.h	Thu Jun 17 16:33:33 2004 +0000
    51.2 +++ b/xen/include/asm-x86/x86_64/current.h	Fri Jun 18 14:46:29 2004 +0000
    51.3 @@ -2,23 +2,23 @@
    51.4  #define _X86_64_CURRENT_H
    51.5  
    51.6  #if !defined(__ASSEMBLY__)
    51.7 -struct task_struct;
    51.8 +struct domain;
    51.9  
   51.10  #include <asm/pda.h>
   51.11  
   51.12  #define STACK_RESERVED \
   51.13      (sizeof(execution_context_t))
   51.14  
   51.15 -static inline struct task_struct * get_current(void)
   51.16 +static inline struct domain * get_current(void)
   51.17  {
   51.18 -    struct task_struct *current;
   51.19 +    struct domain *current;
   51.20      current = read_pda(pcurrent);
   51.21      return current;
   51.22  }
   51.23   
   51.24  #define current get_current()
   51.25  
   51.26 -static inline void set_current(struct task_struct *p)
   51.27 +static inline void set_current(struct domain *p)
   51.28  {
   51.29      write_pda(pcurrent,p);
   51.30  }
    52.1 --- a/xen/include/asm-x86/x86_64/ldt.h	Thu Jun 17 16:33:33 2004 +0000
    52.2 +++ b/xen/include/asm-x86/x86_64/ldt.h	Fri Jun 18 14:46:29 2004 +0000
    52.3 @@ -3,7 +3,7 @@
    52.4  
    52.5  #ifndef __ASSEMBLY__
    52.6  
    52.7 -static inline void load_LDT(struct task_struct *p)
    52.8 +static inline void load_LDT(struct domain *p)
    52.9  {
   52.10      unsigned long ents;
   52.11  
    53.1 --- a/xen/include/hypervisor-ifs/dom0_ops.h	Thu Jun 17 16:33:33 2004 +0000
    53.2 +++ b/xen/include/hypervisor-ifs/dom0_ops.h	Fri Jun 18 14:46:29 2004 +0000
    53.3 @@ -76,10 +76,7 @@ typedef struct {
    53.4  typedef struct {
    53.5      /* IN parameters. */
    53.6      domid_t domain;                   /*  0 */
    53.7 -    /* hack to indicate that you want to wait for other domain -- replace
    53.8 -       with proper sychronous stop soon! */
    53.9 -    u32     sync;                     /*  4 */
   53.10 -} PACKED dom0_stopdomain_t; /* 8 bytes */
   53.11 +} PACKED dom0_stopdomain_t; /* 4 bytes */
   53.12  
   53.13  #define DOM0_GETDOMAININFO    12
   53.14  typedef struct {
   53.15 @@ -87,7 +84,7 @@ typedef struct {
   53.16      domid_t  domain;                  /*  0 */ /* NB. IN/OUT variable. */
   53.17      /* OUT variables. */
   53.18  #define DOMSTATE_CRASHED     0 /* Crashed domain; frozen for postmortem.     */
   53.19 -#define DOMSTATE_STOPPED     1 /* Domain voluntarily halted it execution.    */
   53.20 +#define DOMSTATE_SUSPENDED   1 /* Domain voluntarily halted its execution.   */
   53.21  #define DOMSTATE_PAUSED      2 /* Currently paused (forced non-schedulable). */
   53.22  #define DOMSTATE_BLOCKED     3 /* Currently blocked pending a wake-up event. */
   53.23  #define DOMSTATE_RUNNABLE    4 /* Currently runnable.                        */
   53.24 @@ -109,8 +106,7 @@ typedef struct {
   53.25      memory_t shared_info_frame;       /* 48: MFN of shared_info struct */
   53.26      MEMORY_PADDING;
   53.27      u64      cpu_time;                /* 56 */
   53.28 -    u32      hyp_events;              /* 64 */
   53.29 -} PACKED dom0_getdomaininfo_t; /* 68 bytes */
   53.30 +} PACKED dom0_getdomaininfo_t; /* 64 bytes */
   53.31  
   53.32  #define DOM0_BUILDDOMAIN      13
   53.33  typedef struct {
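
With the sync hack gone, dom0_stopdomain_t carries nothing but the domid,
and DOM0_GETDOMAININFO now reports a voluntary suspend distinctly from a
controller pause. A dom0 tool could decode the revised states as below
(sketch; domstate_name is illustrative and only the states visible in this
hunk are handled):

    static const char *domstate_name(u32 state)
    {
        switch ( state )
        {
        case DOMSTATE_CRASHED:   return "crashed";
        case DOMSTATE_SUSPENDED: return "suspended";
        case DOMSTATE_PAUSED:    return "paused";
        case DOMSTATE_BLOCKED:   return "blocked";
        case DOMSTATE_RUNNABLE:  return "runnable";
        default:                 return "unknown";
        }
    }
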
    54.1 --- a/xen/include/hypervisor-ifs/hypervisor-if.h	Thu Jun 17 16:33:33 2004 +0000
    54.2 +++ b/xen/include/hypervisor-ifs/hypervisor-if.h	Fri Jun 18 14:46:29 2004 +0000
    54.3 @@ -38,7 +38,6 @@
    54.4  #define __HYPERVISOR_set_fast_trap        15
    54.5  #define __HYPERVISOR_dom_mem_op           16
    54.6  #define __HYPERVISOR_multicall            17
    54.7 -#define __HYPERVISOR_kbd_op               18
    54.8  #define __HYPERVISOR_update_va_mapping    19
    54.9  #define __HYPERVISOR_set_timer_op         20
   54.10  #define __HYPERVISOR_event_channel_op     21
   54.11 @@ -62,20 +61,12 @@
   54.12   * 
   54.13   * Virtual interrupts that a guest OS may receive from the hypervisor.
   54.14   */
   54.15 -#define VIRQ_BLKDEV     0  /* (OBS) A block device response has been queued. */
   54.16 -#define VIRQ_TIMER      1  /* A timeout has been updated. */
   54.17 -#define VIRQ_DIE        2  /* (OBS) OS is about to be killed. Clean up! */
   54.18 -#define VIRQ_DEBUG      3  /* Request guest to dump debug info (gross!) */
   54.19 -#define VIRQ_NET        4  /* (OBS) There are packets for transmission. */
   54.20 -#define VIRQ_PS2        5  /* (OBS) PS/2 keyboard or mouse event(s) */
   54.21 -#define VIRQ_STOP       6  /* (OBS) Prepare for stopping and pickling */
   54.22 -#define VIRQ_EVTCHN     7  /* Event pending on an event channel */
   54.23 -#define VIRQ_VBD_UPD    8  /* (OBS) Event to signal VBDs should be reprobed */
   54.24 -#define VIRQ_CONSOLE    9  /* (DOM0) bytes received on master console. */
   54.25 -#define VIRQ_PHYSIRQ   10  /* Pending physical IRQs. */
   54.26 -#define VIRQ_MISDIRECT 11  /* Catch-all virtual interrupt. */
   54.27 -#define VIRQ_DOM_EXC   12  /* (DOM0) Exceptional event for some domain. */
   54.28 -#define NR_VIRQS       13
   54.29 +#define VIRQ_MISDIRECT  0  /* Catch-all interrupt for unbound VIRQs.      */
   54.30 +#define VIRQ_TIMER      1  /* Timebase update, and/or requested timeout.  */
   54.31 +#define VIRQ_DEBUG      2  /* Request guest to dump debug info.           */
   54.32 +#define VIRQ_CONSOLE    3  /* (DOM0) bytes received on emergency console. */
   54.33 +#define VIRQ_DOM_EXC    4  /* (DOM0) Exceptional event for some domain.   */
   54.34 +#define NR_VIRQS        5
   54.35  
   54.36  /*
   54.37   * MMU-UPDATE REQUESTS
   54.38 @@ -171,11 +162,11 @@
   54.39  /*
   54.40   * Commands to HYPERVISOR_sched_op().
   54.41   */
   54.42 -#define SCHEDOP_yield           0   /* Give up the CPU voluntarily.      */
   54.43 -#define SCHEDOP_block           1   /* Block until an event is received. */
   54.44 -#define SCHEDOP_stop            4   /* Stop executing this domain.       */
   54.45 +#define SCHEDOP_yield           0   /* Give up the CPU voluntarily.       */
   54.46 +#define SCHEDOP_block           1   /* Block until an event is received.  */
   54.47 +#define SCHEDOP_suspend         2   /* Stop executing this domain.        */
   54.48  #define SCHEDOP_cmdmask       255   /* 8-bit command. */
   54.49 -#define SCHEDOP_reasonshift     8   /* 8-bit stop code. (SCHEDOP_stop only) */
   54.50 +#define SCHEDOP_reasonshift     8   /* 8-bit suspend code. (SCHEDOP_suspend) */
   54.51  
   54.52  /*
   54.53   * Commands to HYPERVISOR_console_io().
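
SCHEDOP_suspend keeps the old command/reason encoding: the low 8 bits select
the command and the suspend code sits above SCHEDOP_reasonshift. A guest
would issue it roughly as follows (sketch; assumes the guest tree's usual
HYPERVISOR_sched_op() hypercall wrapper):

    static inline int guest_suspend(u8 reason)
    {
        return HYPERVISOR_sched_op(SCHEDOP_suspend |
                                   ((u32)reason << SCHEDOP_reasonshift));
    }
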
    55.1 --- a/xen/include/hypervisor-ifs/kbd.h	Thu Jun 17 16:33:33 2004 +0000
    55.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    55.3 @@ -1,20 +0,0 @@
    55.4 -/******************************************************************************
    55.5 - * kbd.h
    55.6 - *
    55.7 - * PS/2 interface definitions
    55.8 - * Copyright (c) 2003 James Scott, Intel Research Cambridge
    55.9 - */
   55.10 -
   55.11 -#ifndef __HYPERVISOR_KBD_H__
   55.12 -#define __HYPERVISOR_KBD_H__
   55.13 -
   55.14 -			 
   55.15 -#define KBD_OP_WRITEOUTPUT   0
   55.16 -#define KBD_OP_WRITECOMMAND  1
   55.17 -#define KBD_OP_READ          2
   55.18 -
   55.19 -#define KBD_CODE_SCANCODE(_r) ((unsigned char)((_r) & 0xff))
   55.20 -#define KBD_CODE_STATUS(_r) ((unsigned char)(((_r) >> 8) & 0xff))
   55.21 -#define KBD_CODE(_c, _s) ((int)(((_c) & 0xff)  | (((_s) & 0xff) << 8)))
   55.22 -
   55.23 -#endif
    56.1 --- a/xen/include/xen/config.h	Thu Jun 17 16:33:33 2004 +0000
    56.2 +++ b/xen/include/xen/config.h	Fri Jun 18 14:46:29 2004 +0000
    56.3 @@ -40,8 +40,6 @@
    56.4  
    56.5  #ifndef __ASSEMBLY__
    56.6  #include <xen/compiler.h>
    56.7 -extern unsigned int opt_ser_baud;
    56.8 -#define SERIAL_ENABLED (opt_ser_baud != 0)
    56.9  #endif
   56.10  
   56.11  #endif /* __XEN_CONFIG_H__ */
    57.1 --- a/xen/include/xen/console.h	Thu Jun 17 16:33:33 2004 +0000
    57.2 +++ b/xen/include/xen/console.h	Fri Jun 18 14:46:29 2004 +0000
    57.3 @@ -2,8 +2,6 @@
    57.4   * xen/console.h
    57.5   * 
    57.6   * Xen header file concerning console access.
    57.7 - * 
    57.8 - * Copyright (c) 2003 James Scott, Intel Research Cambridge
    57.9   */
   57.10  
   57.11  #ifndef __CONSOLE_H__
   57.12 @@ -13,13 +11,6 @@
   57.13  
   57.14  extern spinlock_t console_lock;
   57.15  
   57.16 -/*
   57.17 - * Ownership of console --- currently hardwired to dom0. This is used to see 
   57.18 - * who gets the PS/2 keyboard/mouse events
   57.19 - */
   57.20 -#define CONSOLE_ISOWNER(p) (p->domain == 0) 
   57.21 -#define CONSOLE_OWNER      (find_domain_by_id(0))
   57.22 -
   57.23  void set_printk_prefix(const char *prefix);
   57.24  
   57.25  #define CONSOLE_RING_CLEAR 1
    58.1 --- a/xen/include/xen/event.h	Thu Jun 17 16:33:33 2004 +0000
    58.2 +++ b/xen/include/xen/event.h	Fri Jun 18 14:46:29 2004 +0000
    58.3 @@ -18,27 +18,10 @@
    58.4   */
    58.5  
    58.6  /* Schedule an asynchronous callback for the specified domain. */
    58.7 -static inline void guest_schedule_to_run(struct task_struct *p)
    58.8 +static inline void guest_async_callback(struct domain *p)
    58.9  {
   58.10 -#ifdef CONFIG_SMP
   58.11 -    unsigned long flags, cpu_mask;
   58.12 -
   58.13 -    spin_lock_irqsave(&schedule_lock[p->processor], flags);
   58.14 -    if ( p->state == TASK_INTERRUPTIBLE )
   58.15 -        __wake_up(p);
   58.16 -    cpu_mask = __reschedule(p);
   58.17 -    if ( p->has_cpu )
   58.18 -        cpu_mask |= 1 << p->processor;
   58.19 -    spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
   58.20 -
   58.21 -    cpu_mask &= ~(1 << smp_processor_id());
   58.22 -    if ( cpu_mask != 0 )
   58.23 -        smp_send_event_check_mask(cpu_mask);
   58.24 -#else
   58.25 -    if ( p->state == TASK_INTERRUPTIBLE )
   58.26 -        wake_up(p);
   58.27 -    reschedule(p);
   58.28 -#endif
   58.29 +    if ( !domain_unblock(p) && p->has_cpu && (p != current) )
   58.30 +        smp_send_event_check_mask(1 << p->processor);
   58.31  }
   58.32  
   58.33  /*
   58.34 @@ -48,7 +31,7 @@ static inline void guest_schedule_to_run
   58.35   * may require explicit memory barriers.
   58.36   */
   58.37  
   58.38 -static inline void evtchn_set_pending(struct task_struct *p, int port)
   58.39 +static inline void evtchn_set_pending(struct domain *p, int port)
   58.40  {
   58.41      shared_info_t *s = p->shared_info;
   58.42      if ( !test_and_set_bit(port,    &s->evtchn_pending[0]) &&
   58.43 @@ -57,11 +40,11 @@ static inline void evtchn_set_pending(st
   58.44      {
   58.45          /* The VCPU pending flag must be set /after/ update to evtchn-pend. */
   58.46          s->vcpu_data[0].evtchn_upcall_pending = 1;
   58.47 -        guest_schedule_to_run(p);
   58.48 +        guest_async_callback(p);
   58.49      }
   58.50  }
   58.51  
   58.52 -static inline void evtchn_set_exception(struct task_struct *p, int port)
   58.53 +static inline void evtchn_set_exception(struct domain *p, int port)
   58.54  {
   58.55      if ( !test_and_set_bit(port, &p->shared_info->evtchn_exception[0]) )
   58.56          evtchn_set_pending(p, port);
   58.57 @@ -72,7 +55,7 @@ static inline void evtchn_set_exception(
   58.58   *  @p:        Domain to which virtual IRQ should be sent
   58.59   *  @virq:     Virtual IRQ number (VIRQ_*)
   58.60   */
   58.61 -static inline void send_guest_virq(struct task_struct *p, int virq)
   58.62 +static inline void send_guest_virq(struct domain *p, int virq)
   58.63  {
   58.64      evtchn_set_pending(p, p->virq_to_evtchn[virq]);
   58.65  }
   58.66 @@ -82,23 +65,13 @@ static inline void send_guest_virq(struc
   58.67   *  @p:        Domain to which physical IRQ should be sent
   58.68   *  @pirq:     Physical IRQ number
   58.69   */
   58.70 -static inline void send_guest_pirq(struct task_struct *p, int pirq)
   58.71 +static inline void send_guest_pirq(struct domain *p, int pirq)
   58.72  {
   58.73      evtchn_set_pending(p, p->pirq_to_evtchn[pirq]);
   58.74  }
   58.75  
   58.76 -
   58.77 -/*
   58.78 - * HYPERVISOR-HANDLED EVENTS
   58.79 - */
   58.80 -
   58.81 -static inline void send_hyp_event(struct task_struct *p, int event)
   58.82 -{
   58.83 -    if ( !test_and_set_bit(event, &p->hyp_events) )
   58.84 -        guest_schedule_to_run(p);
   58.85 -}
   58.86 -
   58.87 -/* Called on return from (architecture-dependent) entry.S. */
   58.88 -void do_hyp_events(void);
   58.89 +#define event_pending(_d)                                     \
   58.90 +    ((_d)->shared_info->vcpu_data[0].evtchn_upcall_pending && \
   58.91 +     !(_d)->shared_info->vcpu_data[0].evtchn_upcall_mask)
   58.92  
   58.93  #endif /* __XEN_EVENT_H__ */
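
Event delivery is now a single path: setting the channel's pending bit
raises the VCPU upcall flag, and guest_async_callback() unblocks the target
and kicks its CPU only when it is actually running elsewhere. The
event_pending() macro replaces the old hyp_events/signal_pending test on
exit to the guest. In use (sketch; the function name is illustrative):

    static void deliver_timer_virq_sketch(struct domain *d)
    {
        /* Sets the evtchn pending bit, then the VCPU upcall flag, then
         * wakes/kicks the target via guest_async_callback(). */
        send_guest_virq(d, VIRQ_TIMER);
    }
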
    59.1 --- a/xen/include/xen/interrupt.h	Thu Jun 17 16:33:33 2004 +0000
    59.2 +++ b/xen/include/xen/interrupt.h	Fri Jun 18 14:46:29 2004 +0000
    59.3 @@ -10,160 +10,29 @@
    59.4  #include <asm/atomic.h>
    59.5  #include <asm/ptrace.h>
    59.6  
    59.7 -struct irqaction {
    59.8 +struct irqaction
    59.9 +{
   59.10      void (*handler)(int, void *, struct pt_regs *);
   59.11      const char *name;
   59.12      void *dev_id;
   59.13  };
   59.14  
   59.15  #include <asm/hardirq.h>
   59.16 -#include <asm/softirq.h>
   59.17  
   59.18  enum
   59.19  {
   59.20 -    HI_SOFTIRQ=0,
   59.21 -    AC_TIMER_SOFTIRQ,
   59.22 -    TASKLET_SOFTIRQ,
   59.23 -    NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ
   59.24 +    AC_TIMER_SOFTIRQ=0,
   59.25 +    NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ,
   59.26 +    SCHEDULE_SOFTIRQ, /* NB. This must come last or do_softirq() will break! */
   59.27 +    NR_SOFTIRQS
   59.28  };
   59.29  
   59.30 -/* softirq mask and active fields moved to irq_cpustat_t in
   59.31 - * asm/hardirq.h to get better cache usage.  KAO
   59.32 - */
   59.33 -
   59.34 -struct softirq_action
   59.35 -{
   59.36 -    void (*action)(struct softirq_action *);
   59.37 -    void *data;
   59.38 -};
   59.39 +typedef void (*softirq_handler)(void);
   59.40  
   59.41  asmlinkage void do_softirq(void);
   59.42 -extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
   59.43 -extern void softirq_init(void);
   59.44 +extern void open_softirq(int nr, softirq_handler handler);
   59.45  #define __cpu_raise_softirq(cpu, nr) set_bit(nr, &softirq_pending(cpu))
   59.46  extern void FASTCALL(cpu_raise_softirq(unsigned int cpu, unsigned int nr));
   59.47  extern void FASTCALL(raise_softirq(unsigned int nr));
   59.48  
   59.49 -/* Tasklets --- multithreaded analogue of BHs.
   59.50 -
   59.51 -   Main feature differing them of generic softirqs: tasklet
   59.52 -   is running only on one CPU simultaneously.
   59.53 -
   59.54 -   Main feature differing them of BHs: different tasklets
   59.55 -   may be run simultaneously on different CPUs.
   59.56 -
   59.57 -   Properties:
   59.58 -   * If tasklet_schedule() is called, then tasklet is guaranteed
   59.59 -     to be executed on some cpu at least once after this.
   59.60 -   * If the tasklet is already scheduled, but its excecution is still not
   59.61 -     started, it will be executed only once.
   59.62 -   * If this tasklet is already running on another CPU (or schedule is called
   59.63 -     from tasklet itself), it is rescheduled for later.
   59.64 -   * Tasklet is strictly serialized wrt itself, but not
   59.65 -     wrt another tasklets. If client needs some intertask synchronization,
   59.66 -     he makes it with spinlocks.
   59.67 - */
   59.68 -
   59.69 -struct tasklet_struct
   59.70 -{
   59.71 -    struct tasklet_struct *next;
   59.72 -    unsigned long state;
   59.73 -    atomic_t count;
   59.74 -    void (*func)(unsigned long);
   59.75 -    unsigned long data;
   59.76 -};
   59.77 -
   59.78 -#define DECLARE_TASKLET(name, func, data) \
   59.79 -struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
   59.80 -
   59.81 -#define DECLARE_TASKLET_DISABLED(name, func, data) \
   59.82 -struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
   59.83 -
   59.84 -
   59.85 -enum
   59.86 -{
   59.87 -    TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
   59.88 -    TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
   59.89 -};
   59.90 -
   59.91 -struct tasklet_head
   59.92 -{
   59.93 -    struct tasklet_struct *list;
   59.94 -} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
   59.95 -
   59.96 -extern struct tasklet_head tasklet_vec[NR_CPUS];
   59.97 -extern struct tasklet_head tasklet_hi_vec[NR_CPUS];
   59.98 -
   59.99 -#ifdef CONFIG_SMP
  59.100 -static inline int tasklet_trylock(struct tasklet_struct *t)
  59.101 -{
  59.102 -    return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
  59.103 -}
  59.104 -
  59.105 -static inline void tasklet_unlock(struct tasklet_struct *t)
  59.106 -{
  59.107 -    smp_mb__before_clear_bit(); 
  59.108 -    clear_bit(TASKLET_STATE_RUN, &(t)->state);
  59.109 -}
  59.110 -
  59.111 -static inline void tasklet_unlock_wait(struct tasklet_struct *t)
  59.112 -{
  59.113 -    while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
  59.114 -}
  59.115 -#else
  59.116 -#define tasklet_trylock(t) 1
  59.117 -#define tasklet_unlock_wait(t) do { } while (0)
  59.118 -#define tasklet_unlock(t) do { } while (0)
  59.119  #endif
  59.120 -
  59.121 -extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));
  59.122 -
  59.123 -static inline void tasklet_schedule(struct tasklet_struct *t)
  59.124 -{
  59.125 -    if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
  59.126 -        __tasklet_schedule(t);
  59.127 -}
  59.128 -
  59.129 -extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));
  59.130 -
  59.131 -static inline void tasklet_hi_schedule(struct tasklet_struct *t)
  59.132 -{
  59.133 -    if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
  59.134 -        __tasklet_hi_schedule(t);
  59.135 -}
  59.136 -
  59.137 -
  59.138 -static inline void tasklet_disable_nosync(struct tasklet_struct *t)
  59.139 -{
  59.140 -    atomic_inc(&t->count);
  59.141 -    smp_mb__after_atomic_inc();
  59.142 -}
  59.143 -
  59.144 -static inline void tasklet_disable(struct tasklet_struct *t)
  59.145 -{
  59.146 -    tasklet_disable_nosync(t);
  59.147 -    tasklet_unlock_wait(t);
  59.148 -    smp_mb();
  59.149 -}
  59.150 -
  59.151 -static inline void tasklet_enable(struct tasklet_struct *t)
  59.152 -{
  59.153 -    smp_mb__before_atomic_dec();
  59.154 -    if (atomic_dec_and_test(&t->count) &&
  59.155 -        test_bit(TASKLET_STATE_SCHED, &t->state))
  59.156 -        __tasklet_schedule(t);
  59.157 -}
  59.158 -
  59.159 -static inline void tasklet_hi_enable(struct tasklet_struct *t)
  59.160 -{
  59.161 -    smp_mb__before_atomic_dec();
  59.162 -    if (atomic_dec_and_test(&t->count) &&
  59.163 -        test_bit(TASKLET_STATE_SCHED, &t->state))
  59.164 -        __tasklet_hi_schedule(t);
  59.165 -}
  59.166 -
  59.167 -extern void tasklet_kill(struct tasklet_struct *t);
  59.168 -extern void tasklet_init(struct tasklet_struct *t,
  59.169 -			 void (*func)(unsigned long), unsigned long data);
  59.170 -
  59.171 -#endif
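
Tasklets and bottom halves are gone; Xen's softirqs are now a fixed, small
set of parameterless handlers. Registering and raising one looks like this
(sketch; the handler name is illustrative):

    static void ac_timer_softirq_sketch(void)
    {
        /* run whatever ac_timers have expired on this CPU */
    }

    static void softirq_setup_sketch(void)
    {
        open_softirq(AC_TIMER_SOFTIRQ, ac_timer_softirq_sketch);
        raise_softirq(AC_TIMER_SOFTIRQ);  /* do_softirq() will run it */
    }
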
    60.1 --- a/xen/include/xen/irq.h	Thu Jun 17 16:33:33 2004 +0000
    60.2 +++ b/xen/include/xen/irq.h	Fri Jun 18 14:46:29 2004 +0000
    60.3 @@ -56,10 +56,10 @@ extern void free_irq(unsigned int);
    60.4  extern hw_irq_controller no_irq_type;
    60.5  extern void no_action(int cpl, void *dev_id, struct pt_regs *regs);
    60.6  
    60.7 -struct task_struct;
    60.8 -extern int pirq_guest_unmask(struct task_struct *p);
    60.9 -extern int pirq_guest_bind(struct task_struct *p, int irq, int will_share);
   60.10 -extern int pirq_guest_unbind(struct task_struct *p, int irq);
   60.11 +struct domain;
   60.12 +extern int pirq_guest_unmask(struct domain *p);
   60.13 +extern int pirq_guest_bind(struct domain *p, int irq, int will_share);
   60.14 +extern int pirq_guest_unbind(struct domain *p, int irq);
   60.15  extern int pirq_guest_bindable(int irq, int will_share);
   60.16  
   60.17  #endif /* __XEN_IRQ_H__ */
    61.1 --- a/xen/include/xen/irq_cpustat.h	Thu Jun 17 16:33:33 2004 +0000
    61.2 +++ b/xen/include/xen/irq_cpustat.h	Fri Jun 18 14:46:29 2004 +0000
    61.3 @@ -28,7 +28,6 @@ extern irq_cpustat_t irq_stat[];			/* de
    61.4    /* arch independent irq_stat fields */
    61.5  #define softirq_pending(cpu)	__IRQ_STAT((cpu), __softirq_pending)
    61.6  #define local_irq_count(cpu)	__IRQ_STAT((cpu), __local_irq_count)
    61.7 -#define local_bh_count(cpu)	__IRQ_STAT((cpu), __local_bh_count)
    61.8  #define nmi_count(cpu)		__IRQ_STAT((cpu), __nmi_count)
    61.9  
   61.10  #endif	/* __irq_cpustat_h */
    62.1 --- a/xen/include/xen/mm.h	Thu Jun 17 16:33:33 2004 +0000
    62.2 +++ b/xen/include/xen/mm.h	Fri Jun 18 14:46:29 2004 +0000
    62.3 @@ -55,7 +55,7 @@ struct pfn_info
    62.4      /* The following possible uses are context-dependent. */
    62.5      union {
    62.6          /* Page is in use and not a zombie: we keep a pointer to its owner. */
    62.7 -        struct task_struct *domain;
    62.8 +        struct domain *domain;
    62.9          /* Page is not currently allocated: mask of possibly-tainted TLBs. */
   62.10          unsigned long cpu_mask;
   62.11          /* Page is a zombie: this word currently has no use. */
   62.12 @@ -127,7 +127,7 @@ extern unsigned long max_page;
   62.13  void init_frametable(unsigned long nr_pages);
   62.14  void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
   62.15  
   62.16 -struct pfn_info *alloc_domain_page(struct task_struct *p);
   62.17 +struct pfn_info *alloc_domain_page(struct domain *p);
   62.18  void free_domain_page(struct pfn_info *page);
   62.19  
   62.20  int alloc_page_type(struct pfn_info *page, unsigned int type);
   62.21 @@ -149,10 +149,10 @@ static inline void put_page(struct pfn_i
   62.22  
   62.23  
   62.24  static inline int get_page(struct pfn_info *page,
   62.25 -                           struct task_struct *domain)
   62.26 +                           struct domain *domain)
   62.27  {
   62.28      u32 x, nx, y = page->count_and_flags;
   62.29 -    struct task_struct *p, *np = page->u.domain;
   62.30 +    struct domain *p, *np = page->u.domain;
   62.31  
   62.32      do {
   62.33          x  = y;
   62.34 @@ -283,7 +283,7 @@ static inline void put_page_and_type(str
   62.35  
   62.36  
   62.37  static inline int get_page_and_type(struct pfn_info *page,
   62.38 -                                    struct task_struct *domain,
   62.39 +                                    struct domain *domain,
   62.40                                      u32 type)
   62.41  {
   62.42      int rc = get_page(page, domain);
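
The hunk above shows only the head of the lock-free get_page(); the loop
retries a compare-and-swap on count_and_flags while re-checking the page's
owner. A filled-in sketch (PGC_count_mask and cmpxchg() are assumed from the
surrounding headers; this is a reconstruction, not the committed body):

    static inline int get_page_sketch(struct pfn_info *page,
                                      struct domain *domain)
    {
        u32 x, nx, y = page->count_and_flags;
        struct domain *p, *np = page->u.domain;

        do {
            x  = y;
            nx = x + 1;
            p  = np;
            if ( ((x & PGC_count_mask) == 0) ||  /* page unallocated?  */
                 ((nx & PGC_count_mask) == 0) || /* refcount overflow? */
                 (p != domain) )                 /* wrong owner?       */
                return 0;
            y  = cmpxchg(&page->count_and_flags, x, nx);
            np = page->u.domain;                 /* re-read the owner  */
        }
        while ( (y != x) || (np != p) );         /* raced: retry       */

        return 1;
    }
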
    63.1 --- a/xen/include/xen/sched-if.h	Thu Jun 17 16:33:33 2004 +0000
    63.2 +++ b/xen/include/xen/sched-if.h	Fri Jun 18 14:46:29 2004 +0000
    63.3 @@ -12,8 +12,8 @@
    63.4  typedef struct schedule_data_st
    63.5  {
    63.6      struct list_head    runqueue;       /* runqueue */
    63.7 -    struct task_struct *curr;           /* current task */
    63.8 -    struct task_struct *idle;           /* idle task for this cpu */
    63.9 +    struct domain *curr;           /* current task */
   63.10 +    struct domain *idle;           /* idle task for this cpu */
   63.11      void *              sched_priv;
   63.12      struct ac_timer     s_timer;        /* scheduling timer  */
   63.13  #ifdef BUCKETS
   63.14 @@ -24,7 +24,7 @@ typedef struct schedule_data_st
   63.15  
   63.16  typedef struct task_slice_st
   63.17  {
   63.18 -    struct task_struct *task;
   63.19 +    struct domain *task;
   63.20      s_time_t            time;
   63.21  } task_slice_t;
   63.22  
   63.23 @@ -35,22 +35,21 @@ struct scheduler
   63.24      unsigned int sched_id;  /* ID for this scheduler             */
   63.25  
   63.26      int          (*init_scheduler) ();
   63.27 -    int          (*alloc_task)     (struct task_struct *);
   63.28 -    void         (*add_task)       (struct task_struct *);
   63.29 -    void         (*free_task)      (struct task_struct *);
   63.30 -    void         (*rem_task)       (struct task_struct *);
   63.31 -    void         (*wake_up)        (struct task_struct *);
   63.32 -    void         (*do_block)       (struct task_struct *);
   63.33 +    int          (*alloc_task)     (struct domain *);
   63.34 +    void         (*add_task)       (struct domain *);
   63.35 +    void         (*free_task)      (struct domain *);
   63.36 +    void         (*rem_task)       (struct domain *);
   63.37 +    void         (*wake_up)        (struct domain *);
   63.38 +    void         (*do_block)       (struct domain *);
   63.39      task_slice_t (*do_schedule)    (s_time_t);
   63.40      int          (*control)        (struct sched_ctl_cmd *);
   63.41 -    int          (*adjdom)         (struct task_struct *,
   63.42 +    int          (*adjdom)         (struct domain *,
   63.43                                      struct sched_adjdom_cmd *);
   63.44 -    s32          (*reschedule)     (struct task_struct *);
   63.45      void         (*dump_settings)  (void);
   63.46      void         (*dump_cpu_state) (int);
   63.47 -    void         (*dump_runq_el)   (struct task_struct *);
   63.48 +    void         (*dump_runq_el)   (struct domain *);
   63.49      int          (*prn_state)      (int);
   63.50 -    void         (*pause)          (struct task_struct *);
   63.51 +    void         (*pause)          (struct domain *);
   63.52  };
   63.53  
   63.54  /* per CPU scheduler information */
   63.55 @@ -60,29 +59,29 @@ extern schedule_data_t schedule_data[];
   63.56   * Wrappers for run-queue management. Must be called with the schedule_lock
   63.57   * held.
   63.58   */
   63.59 -static inline void __add_to_runqueue_head(struct task_struct * p)
   63.60 +static inline void __add_to_runqueue_head(struct domain * p)
   63.61  {    
   63.62      list_add(&p->run_list, &schedule_data[p->processor].runqueue);
   63.63  }
   63.64  
   63.65 -static inline void __add_to_runqueue_tail(struct task_struct * p)
   63.66 +static inline void __add_to_runqueue_tail(struct domain * p)
   63.67  {
   63.68      list_add_tail(&p->run_list, &schedule_data[p->processor].runqueue);
   63.69  }
   63.70  
   63.71 -static inline void __del_from_runqueue(struct task_struct * p)
   63.72 +static inline void __del_from_runqueue(struct domain * p)
   63.73  {
   63.74      list_del(&p->run_list);
   63.75      p->run_list.next = NULL;
   63.76  }
   63.77  
   63.78 -static inline int __task_on_runqueue(struct task_struct *p)
   63.79 +static inline int __task_on_runqueue(struct domain *p)
   63.80  {
   63.81      return p->run_list.next != NULL;
   63.82  }
   63.83  
   63.84  #define next_domain(p) \
   63.85 -        list_entry((p)->run_list.next, struct task_struct, run_list)
   63.86 +        list_entry((p)->run_list.next, struct domain, run_list)
   63.87  
   63.88  
   63.89  static inline int __runqueue_empty(int cpu)
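
A scheduler plug-in drives these wrappers from its do_schedule() hook. A
minimal round-robin flavour might look like this (sketch; assumes the
runqueue is never empty because the idle domain stays runnable, and that
MILLISECS() is available from the time code):

    static task_slice_t rr_do_schedule_sketch(s_time_t now)
    {
        int cpu = smp_processor_id();
        struct domain *prev = schedule_data[cpu].curr;
        task_slice_t ret;

        if ( __task_on_runqueue(prev) )
        {
            __del_from_runqueue(prev);
            __add_to_runqueue_tail(prev);   /* rotate prev to the back */
        }

        ret.task = list_entry(schedule_data[cpu].runqueue.next,
                              struct domain, run_list);
        ret.time = MILLISECS(10);
        return ret;
    }
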
    64.1 --- a/xen/include/xen/sched.h	Thu Jun 17 16:33:33 2004 +0000
    64.2 +++ b/xen/include/xen/sched.h	Fri Jun 18 14:46:29 2004 +0000
    64.3 @@ -26,22 +26,7 @@ extern rwlock_t tasklist_lock;
    64.4  
    64.5  #include <xen/spinlock.h>
    64.6  
    64.7 -#define _HYP_EVENT_NEED_RESCHED 0
    64.8 -#define _HYP_EVENT_DIE          1
    64.9 -
   64.10 -#define PF_DONEFPUINIT  0 /* Has the FPU been initialised for this task? */
   64.11 -#define PF_USEDFPU      1 /* Has this task used the FPU since last save? */
   64.12 -#define PF_GUEST_STTS   2 /* Has the guest OS requested 'stts'?          */
   64.13 -#define PF_CONSTRUCTED  3 /* Has the guest OS been fully built yet?      */
   64.14 -#define PF_IDLETASK     4 /* Is this one of the per-CPU idle domains?    */
   64.15 -#define PF_PRIVILEGED   5 /* Is this domain privileged?                  */
   64.16 -#define PF_CONSOLEWRITEBUG 6 /* Has this domain used the obsolete console? */
   64.17 -#define PF_PHYSDEV      7 /* May this domain do IO to physical devices? */
   64.18 -
   64.19 -#define IS_PRIV(_p) (test_bit(PF_PRIVILEGED, &(_p)->flags))
   64.20 -#define IS_CAPABLE_PHYSDEV(_p) (test_bit(PF_PHYSDEV, &(_p)->flags))
   64.21 -
   64.22 -struct task_struct;
   64.23 +struct domain;
   64.24  
   64.25  typedef struct event_channel_st
   64.26  {
   64.27 @@ -54,25 +39,24 @@ typedef struct event_channel_st
   64.28      union {
   64.29          struct {
   64.30              u16 port;
   64.31 -            struct task_struct *dom;
   64.32 +            struct domain *dom;
   64.33          } __attribute__ ((packed)) remote; /* state == ECS_CONNECTED */
   64.34          u16 pirq; /* state == ECS_PIRQ */
   64.35          u16 virq; /* state == ECS_VIRQ */
   64.36      } u;
   64.37  } event_channel_t;
   64.38  
   64.39 -int  init_event_channels(struct task_struct *p);
   64.40 -void destroy_event_channels(struct task_struct *p);
   64.41 +int  init_event_channels(struct domain *p);
   64.42 +void destroy_event_channels(struct domain *p);
   64.43  
   64.44 -struct task_struct 
   64.45 +struct domain 
   64.46  {
   64.47      /*
   64.48       * DO NOT CHANGE THE ORDER OF THE FOLLOWING.
   64.49       * Their offsets are hardcoded in entry.S
   64.50       */
   64.51  
   64.52 -    unsigned short processor;    /* 00: current processor */
   64.53 -    unsigned short hyp_events;   /* 02: pending intra-Xen events */
   64.54 +    u32 processor;               /* 00: current processor */
   64.55  
   64.56      /* An unsafe pointer into a shared data area. */
   64.57      shared_info_t *shared_info;  /* 04: shared data area */
   64.58 @@ -111,8 +95,7 @@ struct task_struct
   64.59      /* Scheduling. */
   64.60      struct list_head run_list;
   64.61      int              has_cpu;
   64.62 -    int              state;         /* current run state */
   64.63 -    int              stop_code;     /* stop code from OS (if TASK_STOPPED). */
   64.64 +    int              stop_code;     /* stop code from OS (if DF_SUSPENDED). */
   64.65      int              cpupinned;     /* true if pinned to current CPU */
   64.66      s_time_t         lastschd;      /* time this domain was last scheduled */
   64.67      s_time_t         lastdeschd;    /* time this domain was last descheduled */
   64.68 @@ -132,7 +115,7 @@ struct task_struct
   64.69      s_time_t create_time;
   64.70  
   64.71      struct thread_struct thread;
   64.72 -    struct task_struct *next_list, *next_hash;
   64.73 +    struct domain *next_list, *next_hash;
   64.74  
   64.75      /* Event channel information. */
   64.76      event_channel_t *event_channel;
   64.77 @@ -164,80 +147,62 @@ struct task_struct
   64.78      unsigned long flags;
   64.79  
   64.80      atomic_t refcnt;
   64.81 +    atomic_t pausecnt;
   64.82  };
   64.83  
   64.84 -/*
   64.85 - * domain states 
   64.86 - * TASK_RUNNING:         Domain is runable and should be on a run queue
   64.87 - * TASK_INTERRUPTIBLE:   Domain is blocked by may be woken up by an event
   64.88 - *                       or expiring timer
   64.89 - * TASK_UNINTERRUPTIBLE: Domain is blocked but may not be woken up by an
   64.90 - *                       arbitrary event or timer.
   64.91 - * TASK_STOPPED:         Domain is stopped.
   64.92 - * TASK_DYING:           Domain is about to cross over to the land of the dead.
   64.93 - * TASK_PAUSED:          Task currently removed from scheduling.
   64.94 - */
   64.95 -
   64.96 -#define TASK_RUNNING             0
   64.97 -#define TASK_INTERRUPTIBLE       1
   64.98 -#define TASK_UNINTERRUPTIBLE     2
   64.99 -#define TASK_STOPPED             4
  64.100 -#define TASK_DYING               8
  64.101 -#define TASK_PAUSED             16
  64.102 -#define TASK_CRASHED            32
  64.103 -
  64.104  #include <asm/uaccess.h> /* for KERNEL_DS */
  64.105  
  64.106  #define IDLE0_TASK(_t)           \
  64.107  {                                \
  64.108      processor:   0,              \
  64.109      domain:      IDLE_DOMAIN_ID, \
  64.110 -    state:       TASK_RUNNING,   \
  64.111      has_cpu:     0,              \
  64.112      mm:          IDLE0_MM,       \
  64.113      addr_limit:  KERNEL_DS,      \
  64.114      thread:      INIT_THREAD,    \
  64.115 -    flags:       1<<PF_IDLETASK, \
  64.116 +    flags:       1<<DF_IDLETASK, \
  64.117      refcnt:      ATOMIC_INIT(1)  \
  64.118  }
  64.119  
  64.120 -extern struct task_struct idle0_task;
  64.121 +extern struct domain idle0_task;
  64.122  
  64.123 -extern struct task_struct *idle_task[NR_CPUS];
  64.124 +extern struct domain *idle_task[NR_CPUS];
  64.125  #define IDLE_DOMAIN_ID   (0x7FFFFFFFU)
  64.126 -#define is_idle_task(_p) (test_bit(PF_IDLETASK, &(_p)->flags))
  64.127 +#define is_idle_task(_p) (test_bit(DF_IDLETASK, &(_p)->flags))
  64.128  
  64.129  #include <xen/slab.h>
  64.130  
  64.131 -void free_task_struct(struct task_struct *p);
  64.132 -struct task_struct *alloc_task_struct();
  64.133 +void free_domain_struct(struct domain *p);
  64.134 +struct domain *alloc_domain_struct();
  64.135  
  64.136 -#define put_task_struct(_p) \
  64.137 -  if ( atomic_dec_and_test(&(_p)->refcnt) ) release_task(_p)
  64.138 -#define get_task_struct(_p)  \
  64.139 -  atomic_inc(&(_p)->refcnt)
  64.140 -
  64.141 -extern struct task_struct *do_createdomain(
  64.142 +#define DOMAIN_DESTRUCTED (1<<31) /* assumes atomic_t is >= 32 bits */
  64.143 +#define put_domain(_d) \
  64.144 +  if ( atomic_dec_and_test(&(_d)->refcnt) ) domain_destruct(_d)
  64.145 +static inline int get_domain(struct domain *d)
  64.146 +{
  64.147 +    atomic_inc(&d->refcnt);
  64.148 +    return !(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED);
  64.149 +}
  64.150 +  
  64.151 +extern struct domain *do_createdomain(
  64.152      domid_t dom_id, unsigned int cpu);
  64.153 -extern int construct_dom0(struct task_struct *p, 
  64.154 +extern int construct_dom0(struct domain *p, 
  64.155                            unsigned long alloc_start,
  64.156                            unsigned long alloc_end,
  64.157                            char *image_start, unsigned long image_len, 
  64.158                            char *initrd_start, unsigned long initrd_len,
  64.159                            char *cmdline);
  64.160 -extern int final_setup_guestos(struct task_struct *p, dom0_builddomain_t *);
  64.161 +extern int final_setup_guestos(struct domain *p, dom0_builddomain_t *);
  64.162  
  64.163 -struct task_struct *find_domain_by_id(domid_t dom);
  64.164 -struct task_struct *find_last_domain(void);
  64.165 -extern void release_task(struct task_struct *);
  64.166 -extern void __kill_domain(struct task_struct *p);
  64.167 -extern void kill_domain(void);
  64.168 -extern long kill_other_domain(domid_t dom, int force);
  64.169 -extern void stop_domain(u8 reason);
  64.170 -extern long stop_other_domain(domid_t dom);
  64.171 +struct domain *find_domain_by_id(domid_t dom);
  64.172 +struct domain *find_last_domain(void);
  64.173 +extern void domain_destruct(struct domain *d);
  64.174 +extern void domain_kill(struct domain *d);
  64.175 +extern void domain_crash(void);
  64.176 +extern void domain_suspend(u8 reason);
  64.177  
  64.178  /* arch/process.c */
  64.179 -void new_thread(struct task_struct *p,
  64.180 +void new_thread(struct domain *p,
  64.181                  unsigned long start_pc,
  64.182                  unsigned long start_stack,
  64.183                  unsigned long start_info);
  64.184 @@ -253,39 +218,20 @@ extern spinlock_t schedule_lock[NR_CPUS]
  64.185  #define set_current_state(_s) do { current->state = (_s); } while (0)
  64.186  void scheduler_init(void);
  64.187  void schedulers_start(void);
  64.188 -void sched_add_domain(struct task_struct *p);
  64.189 -int  sched_rem_domain(struct task_struct *p);
  64.190 +void sched_add_domain(struct domain *p);
  64.191 +void sched_rem_domain(struct domain *p);
  64.192  long sched_ctl(struct sched_ctl_cmd *);
  64.193  long sched_adjdom(struct sched_adjdom_cmd *);
  64.194  int  sched_id();
  64.195 -void sched_pause_sync(struct task_struct *);
  64.196  void init_idle_task(void);
  64.197 -void __wake_up(struct task_struct *p);
  64.198 -void wake_up(struct task_struct *p);
  64.199 -void reschedule(struct task_struct *p);
  64.200 -unsigned long __reschedule(struct task_struct *p);
  64.201 -
  64.202 -/* NB. Limited entry in Xen. Not for arbitrary use! */
  64.203 -asmlinkage void __enter_scheduler(void);
  64.204 -#define schedule() __schedule_not_callable_in_xen()
  64.205 -
  64.206 -extern void switch_to(struct task_struct *prev, 
  64.207 -                      struct task_struct *next);
  64.208 +int domain_wakeup(struct domain *p);
  64.209 +void __domain_pause(struct domain *p);
  64.210  
  64.211 +void __enter_scheduler(void);
  64.212  
  64.213 -/* A compatibility hack for Linux drivers. */
  64.214 -#define MAX_SCHEDULE_TIMEOUT 0UL
  64.215 -static inline long schedule_timeout(long timeout)
  64.216 -{
  64.217 -    set_current_state(TASK_RUNNING);
  64.218 -    mdelay(timeout*(1000/HZ));
  64.219 -    return 0;
  64.220 -}
  64.221 +extern void switch_to(struct domain *prev, 
  64.222 +                      struct domain *next);
  64.223  
  64.224 -#define signal_pending(_p)                                      \
  64.225 -    ( (_p)->hyp_events ||                                       \
  64.226 -      ((_p)->shared_info->vcpu_data[0].evtchn_upcall_pending && \
  64.227 -       !(_p)->shared_info->vcpu_data[0].evtchn_upcall_mask) )
  64.228  
  64.229  void domain_init(void);
  64.230  
  64.231 @@ -295,17 +241,79 @@ void startup_cpu_idle_loop(void);
  64.232  void continue_cpu_idle_loop(void);
  64.233  
  64.234  void continue_nonidle_task(void);
  64.235 -void sched_prn_state(int state);
  64.236  
  64.237  /* The task_hash and task_list are protected by the tasklist_lock. */
  64.238  #define TASK_HASH_SIZE 256
  64.239  #define TASK_HASH(_id) ((int)(_id)&(TASK_HASH_SIZE-1))
  64.240 -extern struct task_struct *task_hash[TASK_HASH_SIZE];
  64.241 -extern struct task_struct *task_list;
  64.242 +extern struct domain *task_hash[TASK_HASH_SIZE];
  64.243 +extern struct domain *task_list;
  64.244  
  64.245  #define for_each_domain(_p) \
  64.246   for ( (_p) = task_list; (_p) != NULL; (_p) = (_p)->next_list )
  64.247  
  64.248 -extern void update_process_times(int user);
  64.249 +#define DF_DONEFPUINIT  0 /* Has the FPU been initialised for this task?    */
  64.250 +#define DF_USEDFPU      1 /* Has this task used the FPU since last save?    */
  64.251 +#define DF_GUEST_STTS   2 /* Has the guest OS requested 'stts'?             */
  64.252 +#define DF_CONSTRUCTED  3 /* Has the guest OS been fully built yet?         */
  64.253 +#define DF_IDLETASK     4 /* Is this one of the per-CPU idle domains?       */
  64.254 +#define DF_PRIVILEGED   5 /* Is this domain privileged?                     */
  64.255 +#define DF_CONSOLEWRITEBUG 6 /* Has this domain used the obsolete console?  */
  64.256 +#define DF_PHYSDEV      7 /* May this domain do IO to physical devices?     */
  64.257 +
  64.258 +#define DF_BLOCKED      8 /* Domain is blocked waiting for an event.        */
  64.259 +#define DF_CONTROLPAUSE 9 /* Domain is paused by control software.          */
  64.260 +#define DF_SUSPENDED   10 /* Guest suspended its execution for some reason. */
  64.261 +#define DF_CRASHED     11 /* Domain crashed inside Xen, cannot continue.    */
  64.262 +#define DF_DYING       12 /* Death rattle.                                  */
  64.263 +
  64.264 +static inline int domain_runnable(struct domain *p)
  64.265 +{
  64.266 +    return ( (atomic_read(&p->pausecnt) == 0) &&
  64.267 +             !(p->flags & ((1<<DF_BLOCKED)|(1<<DF_CONTROLPAUSE)|
  64.268 +                           (1<<DF_SUSPENDED)|(1<<DF_CRASHED)|(1<<DF_DYING))) );
  64.269 +}
  64.270 +
  64.271 +/* Returns TRUE if the domain was actually unblocked and woken. */
  64.272 +static inline int domain_unblock(struct domain *d)
  64.273 +{
  64.274 +    if ( test_and_clear_bit(DF_BLOCKED, &d->flags) )
  64.275 +        return domain_wakeup(d);
  64.276 +    return 0;
  64.277 +}
  64.278 +
  64.279 +static inline void domain_unsuspend(struct domain *d)
  64.280 +{
  64.281 +    if ( test_and_clear_bit(DF_SUSPENDED, &d->flags) )
  64.282 +        (void)domain_wakeup(d);
  64.283 +}
  64.284 +
  64.285 +static inline void domain_controller_pause(struct domain *d)
  64.286 +{
  64.287 +    if ( !test_and_set_bit(DF_CONTROLPAUSE, &d->flags) )
  64.288 +        __domain_pause(d);
  64.289 +}
  64.290 +
  64.291 +static inline void domain_controller_unpause(struct domain *d)
  64.292 +{
  64.293 +    if ( test_and_clear_bit(DF_CONTROLPAUSE, &d->flags) )
  64.294 +        (void)domain_wakeup(d);
  64.295 +}
  64.296 +
  64.297 +static inline void domain_pause(struct domain *d)
  64.298 +{
  64.299 +    if ( d == current ) BUG();
  64.300 +    atomic_inc(&d->pausecnt);
  64.301 +    __domain_pause(d);
  64.302 +}
  64.303 +
  64.304 +static inline void domain_unpause(struct domain *d)
  64.305 +{
  64.306 +    if ( atomic_dec_and_test(&d->pausecnt) )
  64.307 +        (void)domain_wakeup(d);
  64.308 +}
  64.309 +
  64.310 +
  64.311 +#define IS_PRIV(_p) (test_bit(DF_PRIVILEGED, &(_p)->flags))
  64.312 +#define IS_CAPABLE_PHYSDEV(_p) (test_bit(DF_PHYSDEV, &(_p)->flags))
  64.313  
  64.314  #endif /*_LINUX_SCHED_H */
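
The per-state TASK_* enumeration is replaced by independent DF_* flags plus
a pause refcount, which is what makes the new pause synchronous and
stackable. The intended pairing in a control path (sketch; error handling
elided, and find_domain_by_id() is assumed to return with a reference held,
matching the put_domain() below):

    static void inspect_domain_sketch(domid_t dom)
    {
        struct domain *d = find_domain_by_id(dom);

        if ( d == NULL )
            return;

        domain_pause(d);    /* synchronous: returns once d is off the CPU */
        /* ... d's state can now be examined or modified safely ... */
        domain_unpause(d);  /* wakes d when pausecnt falls back to zero */

        put_domain(d);
    }
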
    65.1 --- a/xen/include/xen/shadow.h	Thu Jun 17 16:33:33 2004 +0000
    65.2 +++ b/xen/include/xen/shadow.h	Fri Jun 18 14:46:29 2004 +0000
    65.3 @@ -24,15 +24,15 @@
    65.4  #define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START+(SH_LINEAR_PT_VIRT_START>>(L2_PAGETABLE_SHIFT-L1_PAGETABLE_SHIFT))))
    65.5  
    65.6  extern void shadow_mode_init(void);
    65.7 -extern int shadow_mode_control( struct task_struct *p, dom0_shadow_control_t *sc );
    65.8 +extern int shadow_mode_control( struct domain *p, dom0_shadow_control_t *sc );
    65.9  extern int shadow_fault( unsigned long va, long error_code );
   65.10  extern void shadow_l1_normal_pt_update( unsigned long pa, unsigned long gpte, 
   65.11                                          unsigned long *prev_spfn_ptr,
   65.12                                          l1_pgentry_t **prev_spl1e_ptr  );
   65.13  extern void shadow_l2_normal_pt_update( unsigned long pa, unsigned long gpte );
   65.14  extern void unshadow_table( unsigned long gpfn, unsigned int type );
   65.15 -extern int shadow_mode_enable( struct task_struct *p, unsigned int mode );
   65.16 -extern void shadow_mode_disable( struct task_struct *p );
   65.17 +extern int shadow_mode_enable( struct domain *p, unsigned int mode );
   65.18 +extern void shadow_mode_disable( struct domain *p );
   65.19  extern unsigned long shadow_l2_table( 
   65.20      struct mm_struct *m, unsigned long gpfn );
   65.21  
    66.1 --- a/xen/include/xen/spinlock.h	Thu Jun 17 16:33:33 2004 +0000
    66.2 +++ b/xen/include/xen/spinlock.h	Fri Jun 18 14:46:29 2004 +0000
    66.3 @@ -10,31 +10,21 @@
    66.4   */
    66.5  #define spin_lock_irqsave(lock, flags)		do { local_irq_save(flags);       spin_lock(lock); } while (0)
    66.6  #define spin_lock_irq(lock)			do { local_irq_disable();         spin_lock(lock); } while (0)
    66.7 -#define spin_lock_bh(lock)			do { local_bh_disable();          spin_lock(lock); } while (0)
    66.8  
    66.9  #define read_lock_irqsave(lock, flags)		do { local_irq_save(flags);       read_lock(lock); } while (0)
   66.10  #define read_lock_irq(lock)			do { local_irq_disable();         read_lock(lock); } while (0)
   66.11 -#define read_lock_bh(lock)			do { local_bh_disable();          read_lock(lock); } while (0)
   66.12  
   66.13  #define write_lock_irqsave(lock, flags)		do { local_irq_save(flags);      write_lock(lock); } while (0)
   66.14  #define write_lock_irq(lock)			do { local_irq_disable();        write_lock(lock); } while (0)
   66.15 -#define write_lock_bh(lock)			do { local_bh_disable();         write_lock(lock); } while (0)
   66.16  
   66.17  #define spin_unlock_irqrestore(lock, flags)	do { spin_unlock(lock);  local_irq_restore(flags); } while (0)
   66.18  #define spin_unlock_irq(lock)			do { spin_unlock(lock);  local_irq_enable();       } while (0)
   66.19 -#define spin_unlock_bh(lock)			do { spin_unlock(lock);  local_bh_enable();        } while (0)
   66.20  
   66.21  #define read_unlock_irqrestore(lock, flags)	do { read_unlock(lock);  local_irq_restore(flags); } while (0)
   66.22  #define read_unlock_irq(lock)			do { read_unlock(lock);  local_irq_enable();       } while (0)
   66.23 -#define read_unlock_bh(lock)			do { read_unlock(lock);  local_bh_enable();        } while (0)
   66.24  
   66.25  #define write_unlock_irqrestore(lock, flags)	do { write_unlock(lock); local_irq_restore(flags); } while (0)
   66.26  #define write_unlock_irq(lock)			do { write_unlock(lock); local_irq_enable();       } while (0)
   66.27 -#define write_unlock_bh(lock)			do { write_unlock(lock); local_bh_enable();        } while (0)
   66.28 -#define spin_trylock_bh(lock)			({ int __r; local_bh_disable();\
   66.29 -						__r = spin_trylock(lock);      \
   66.30 -						if (!__r) local_bh_enable();   \
   66.31 -						__r; })
   66.32  
   66.33  #ifdef CONFIG_SMP
   66.34  #include <asm/spinlock.h>
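
With bottom halves gone, the _bh lock variants have no callers left; code
that previously used them takes the irq-safe forms instead, e.g. (sketch,
using the console_lock declared in xen/console.h above):

    static void console_emit_sketch(const char *s)
    {
        unsigned long flags;

        spin_lock_irqsave(&console_lock, flags);
        /* ... copy s into the console ring ... */
        spin_unlock_irqrestore(&console_lock, flags);
    }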