ia64/xen-unstable

view xen-2.4.16/include/xeno/sched.h @ 86:4a10fe9b20ec

bitkeeper revision 1.15 (3e24a984iRiWWcgfKCxu2p5q3YbxXw)

Many files:
First half of support for per-domain GDTs and LDTs
author kaf24@labyrinth.cl.cam.ac.uk
date Wed Jan 15 00:21:24 2003 +0000 (2003-01-15)
parents c3e6a52cd801
children 336647fd8f40
line source
1 #ifndef _LINUX_SCHED_H
2 #define _LINUX_SCHED_H
4 #include <xeno/config.h>
5 #include <xeno/types.h>
6 #include <xeno/spinlock.h>
7 #include <asm/page.h>
8 #include <asm/ptrace.h>
9 #include <xeno/smp.h>
10 #include <asm/processor.h>
11 #include <asm/current.h>
12 #include <hypervisor-ifs/hypervisor-if.h>
13 #include <xeno/dom0_ops.h>
15 extern unsigned long volatile jiffies;
16 extern rwlock_t tasklist_lock;
18 #include <xeno/spinlock.h>
20 struct mm_struct {
21 unsigned long cpu_vm_mask;
22 /*
23 * Every domain has a L1 pagetable of its own. Per-domain mappings
24 * are put in this table (eg. the current GDT is mapped here).
25 */
26 l2_pgentry_t *perdomain_pt;
27 pagetable_t pagetable;
28 /* Current LDT selector. */
29 unsigned int ldt_sel;
30 /* Next entry is passed to LGDT on domain switch. */
31 char gdt[6];
32 };
/*
 * Convenient accessors for the 6-byte mm.gdt pseudo-descriptor that is
 * handed to LGDT on domain switch: bytes 0-1 hold a 16-bit 'entries'
 * field, bytes 2-5 the 32-bit base address.
 */
#define SET_GDT_ENTRIES(_p, _e) ((*(u16 *)((_p)->mm.gdt + 0)) = (_e))
#define SET_GDT_ADDRESS(_p, _a) ((*(u32 *)((_p)->mm.gdt + 2)) = (_a))
#define GET_GDT_ENTRIES(_p)     ((*(u16 *)((_p)->mm.gdt + 0)))
#define GET_GDT_ADDRESS(_p)     ((*(u32 *)((_p)->mm.gdt + 2)))
extern struct mm_struct init_mm;

/* Static initialiser for the idle task's mm (used by IDLE0_TASK). */
#define IDLE0_MM                                        \
{                                                       \
    cpu_vm_mask:  0,                                    \
    perdomain_pt: 0,                                    \
    pagetable:    mk_pagetable(__pa(idle0_pg_table))    \
}
/* Hypervisor event numbers, tested via task_struct.hyp_events
 * (presumably used as bit indices -- see signal_pending()). */
#define _HYP_EVENT_NEED_RESCHED 0
#define _HYP_EVENT_NET_RX       1
#define _HYP_EVENT_DIE          2

/* Bits for task_struct.flags. */
#define PF_DONEFPUINIT 0x1 /* Has the FPU been initialised for this task? */
#define PF_USEDFPU     0x2 /* Has this task used the FPU since last save? */
#define PF_GUEST_STTS  0x4 /* Has the guest OS requested 'stts'? */
56 #include <xeno/vif.h>
57 #include <xeno/block.h>
59 struct task_struct {
60 int processor;
61 int state, hyp_events;
62 unsigned int domain;
64 /* An unsafe pointer into a shared data area. */
65 shared_info_t *shared_info;
67 /* Pointer to this guest's virtual interfaces. */
68 /* network */
69 net_ring_t *net_ring_base;
70 net_vif_t *net_vif_list[MAX_GUEST_VIFS];
71 int num_net_vifs;
72 /* block io */
73 blk_ring_t *blk_ring_base;
75 int has_cpu, policy, counter;
77 struct list_head run_list;
79 struct mm_struct mm;
81 mm_segment_t addr_limit; /* thread address space:
82 0-0xBFFFFFFF for user-thead
83 0-0xFFFFFFFF for kernel-thread
84 */
86 /*
87 * active_mm stays for now. It's entangled in the tricky TLB flushing
88 * stuff which I haven't addressed yet. It stays until I'm man enough
89 * to venture in.
90 */
91 struct mm_struct *active_mm;
92 struct thread_struct thread;
93 struct task_struct *prev_task, *next_task;
95 /* index into frame_table threading pages belonging to this
96 * domain together
97 */
98 unsigned long pg_head;
99 unsigned int tot_pages;
101 unsigned long flags;
102 };
/* Values for task_struct.state. */
#define TASK_RUNNING         0
#define TASK_INTERRUPTIBLE   1
#define TASK_UNINTERRUPTIBLE 2
#define TASK_STOPPED         4
#define TASK_DYING           8

/* Presumably or'd into task_struct.policy, as in Linux -- confirm. */
#define SCHED_YIELD 0x10
112 #include <asm/uaccess.h> /* for KERNEL_DS */
/*
 * Static initialiser for the CPU-0 idle task. `_t' names the variable
 * being initialised, so the prev/next pointers link the task to itself.
 */
#define IDLE0_TASK(_t)           \
{                                \
    processor:  0,               \
    domain:     IDLE_DOMAIN_ID,  \
    state:      TASK_RUNNING,    \
    has_cpu:    0,               \
    mm:         IDLE0_MM,        \
    addr_limit: KERNEL_DS,       \
    active_mm:  &idle0_task.mm,  \
    thread:     INIT_THREAD,     \
    prev_task:  &(_t),           \
    next_task:  &(_t)            \
}

/* Reserved domain id for idle tasks: all-ones, never a real domain. */
#define IDLE_DOMAIN_ID   (~0)
#define is_idle_task(_p) ((_p)->domain == IDLE_DOMAIN_ID)
/*
 * Byte size of the combined task_struct + kernel stack area (see
 * union task_union). Parenthesised so the expansion is safe inside
 * larger expressions -- the original `2048*sizeof(long)' could bind
 * unexpectedly under operator precedence.
 */
#ifndef IDLE0_TASK_SIZE
#define IDLE0_TASK_SIZE (2048*sizeof(long))
#endif
135 union task_union {
136 struct task_struct task;
137 unsigned long stack[IDLE0_TASK_SIZE/sizeof(long)];
138 };
140 extern union task_union idle0_task_union;
141 extern struct task_struct first_task_struct;
143 extern struct task_struct *do_newdomain(void);
144 extern int setup_guestos(struct task_struct *p, dom0_newdomain_t *params);
146 struct task_struct *find_domain_by_id(unsigned int dom);
147 extern void release_task(struct task_struct *);
148 extern void kill_domain(void);
149 extern void kill_domain_with_errmsg(const char *err);
150 extern long kill_other_domain(unsigned int dom);
152 /* arch/process.c */
153 void new_thread(struct task_struct *p,
154 unsigned long start_pc,
155 unsigned long start_stack,
156 unsigned long start_info);
157 extern void flush_thread(void);
158 extern void exit_thread(void);
160 /* Linux puts these here for some reason! */
161 extern int request_irq(unsigned int,
162 void (*handler)(int, void *, struct pt_regs *),
163 unsigned long, const char *, void *);
164 extern void free_irq(unsigned int, void *);
166 extern unsigned long wait_init_idle;
167 #define init_idle() clear_bit(smp_processor_id(), &wait_init_idle);
169 #define set_current_state(_s) do { current->state = (_s); } while (0)
170 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
171 long schedule_timeout(long timeout);
172 asmlinkage void schedule(void);
174 void reschedule(struct task_struct *p);
176 typedef struct schedule_data_st
177 {
178 spinlock_t lock;
179 struct list_head runqueue;
180 struct task_struct *prev, *curr;
181 } __cacheline_aligned schedule_data_t;
182 extern schedule_data_t schedule_data[NR_CPUS];
184 static inline void __add_to_runqueue(struct task_struct * p)
185 {
186 list_add(&p->run_list, &schedule_data[p->processor].runqueue);
187 }
190 static inline void __move_last_runqueue(struct task_struct * p)
191 {
192 list_del(&p->run_list);
193 list_add_tail(&p->run_list, &schedule_data[p->processor].runqueue);
194 }
197 static inline void __move_first_runqueue(struct task_struct * p)
198 {
199 list_del(&p->run_list);
200 list_add(&p->run_list, &schedule_data[p->processor].runqueue);
201 }
203 static inline void __del_from_runqueue(struct task_struct * p)
204 {
205 list_del(&p->run_list);
206 p->run_list.next = NULL;
207 }
209 static inline int __task_on_runqueue(struct task_struct *p)
210 {
211 return (p->run_list.next != NULL);
212 }
int wake_up(struct task_struct *p);

/*
 * A domain has work pending if it has unhandled hypervisor events or
 * any bit set in its shared-info event field.
 */
#define signal_pending(_p) ((_p)->hyp_events || \
                            (_p)->shared_info->events)

void domain_init(void);

void cpu_idle(void);

/* Unlink `p' from the circular doubly-linked list of all tasks. */
#define REMOVE_LINKS(p) do {                        \
        (p)->next_task->prev_task = (p)->prev_task; \
        (p)->prev_task->next_task = (p)->next_task; \
    } while (0)

/*
 * Insert `p' at the tail of the all-tasks list: the list is anchored
 * at idle0_task, so `p' becomes idle0_task's predecessor.
 */
#define SET_LINKS(p) do {                           \
        (p)->next_task = &idle0_task;               \
        (p)->prev_task = idle0_task.prev_task;      \
        idle0_task.prev_task->next_task = (p);      \
        idle0_task.prev_task = (p);                 \
    } while (0)

extern void update_process_times(int user);
237 #endif