ia64/xen-unstable

view xen/include/xeno/sched.h @ 722:7a9d47fea66c

bitkeeper revision 1.428 (3f677454_j81KDQLm_L7AscjYn2nYg)

Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into labyrinth.cl.cam.ac.uk:/auto/anfs/scratch/labyrinth/iap10/xeno-clone/xeno.bk
author iap10@labyrinth.cl.cam.ac.uk
date Tue Sep 16 20:36:36 2003 +0000 (2003-09-16)
parents aceefe65b9e5 ec38a236c5db
children 7e78d76f7ba6
line source
1 #ifndef _LINUX_SCHED_H
2 #define _LINUX_SCHED_H
4 #include <xeno/config.h>
5 #include <xeno/types.h>
6 #include <xeno/spinlock.h>
7 #include <asm/page.h>
8 #include <asm/ptrace.h>
9 #include <xeno/smp.h>
10 #include <asm/processor.h>
11 #include <asm/current.h>
12 #include <hypervisor-ifs/hypervisor-if.h>
13 #include <hypervisor-ifs/dom0_ops.h>
15 #include <xeno/list.h>
16 #include <xeno/time.h>
17 #include <xeno/ac_timer.h>
/* Maximum length of a domain's human-readable name (task_struct.name). */
#define MAX_DOMAIN_NAME 16

extern unsigned long volatile jiffies;
extern rwlock_t tasklist_lock;

#include <xeno/spinlock.h> /* NOTE(review): duplicate of the include above;
                              harmless (header guard) but could be dropped. */
/* Per-domain memory-management state. */
struct mm_struct {
    /* Presumably a bitmask of CPUs on which this address space is (or was)
     * active — TODO confirm against the TLB-flush code. */
    unsigned long cpu_vm_mask;
    /*
     * Every domain has a L1 pagetable of its own. Per-domain mappings
     * are put in this table (eg. the current GDT is mapped here).
     */
    l1_pgentry_t *perdomain_pt;
    /* Root of this domain's pagetable. */
    pagetable_t pagetable;
    /* Current LDT details. */
    unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt;
    /* Next entry is passed to LGDT on domain switch: 6-byte pseudo-descriptor,
     * 16-bit limit at offset 0, 32-bit base at offset 2 (see the *_GDT_*
     * accessor macros below). */
    char gdt[6];
};
/* Convenient accessors for mm.gdt: the u16 at offset 0 is the entry count /
 * limit field, the u32 at offset 2 is the linear base address, matching the
 * LGDT pseudo-descriptor layout. */
#define SET_GDT_ENTRIES(_p, _e) ((*(u16 *)((_p)->mm.gdt + 0)) = (_e))
#define SET_GDT_ADDRESS(_p, _a) ((*(u32 *)((_p)->mm.gdt + 2)) = (_a))
#define GET_GDT_ENTRIES(_p) ((*(u16 *)((_p)->mm.gdt + 0)))
#define GET_GDT_ADDRESS(_p) ((*(u32 *)((_p)->mm.gdt + 2)))
extern struct mm_struct init_mm;

/* Static initialiser for the idle domain's mm: no per-domain mappings, and
 * the boot-time idle page table as the address space. Remaining fields of a
 * static object are zero-initialised. (GCC old-style designated init.) */
#define IDLE0_MM                                                \
{                                                               \
    cpu_vm_mask: 0,                                             \
    perdomain_pt: 0,                                            \
    pagetable:   mk_pagetable(__pa(idle0_pg_table))             \
}
/* Presumably bit indices into task_struct.hyp_events (intra-Xen events) —
 * TODO confirm against entry.S / the event-delivery code. */
#define _HYP_EVENT_NEED_RESCHED 0
#define _HYP_EVENT_DIE          1
#define _HYP_EVENT_STOP         2

/* Bits for task_struct.flags. */
#define PF_DONEFPUINIT 0x1  /* Has the FPU been initialised for this task? */
#define PF_USEDFPU     0x2  /* Has this task used the FPU since last save? */
#define PF_GUEST_STTS  0x4  /* Has the guest OS requested 'stts'? */
#define PF_CONSTRUCTED 0x8  /* Has the guest OS been fully built yet? */
#include <xeno/vif.h>
#include <xeno/block.h>
#include <xeno/segment.h>

/* SMH: replace below when have explicit 'priv' flag or bitmask */
/* A domain is privileged iff it is domain 0 (the initial control domain). */
#define IS_PRIV(_p) ((_p)->domain == 0)
/* Per-domain control block ("task" == domain in this hypervisor). */
struct task_struct
{
    /*
     * DO NOT CHANGE THE ORDER OF THE FOLLOWING.
     * Their offsets are hardcoded in entry.S
     */

    int processor;               /* 00: current processor */
    int state;                   /* 04: current run state (TASK_*) */
    int hyp_events;              /* 08: pending intra-Xen events */
    unsigned int domain;         /* 12: domain id */

    /* An unsafe pointer into a shared data area. */
    shared_info_t *shared_info;  /* 16: shared data area */

    /*
     * Return vectors pushed to us by guest OS.
     * The stack frame for events is exactly that of an x86 hardware interrupt.
     * The stack frame for a failsafe callback is augmented with saved values
     * for segment registers %ds, %es, %fs and %gs:
     *  %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
     */
    unsigned long event_selector;    /* 20: entry CS  */
    unsigned long event_address;     /* 24: entry EIP */
    unsigned long failsafe_selector; /* 28: entry CS  */
    unsigned long failsafe_address;  /* 32: entry EIP */

    /*
     * From here on things can be added and shuffled without special attention
     */

    struct list_head pg_head;  /* pages owned by this domain — presumably
                                  protected by page_lock; TODO confirm */
    unsigned int tot_pages;    /* number of pages currently possessed */
    unsigned int max_pages;    /* max number of pages that can be possessed */

    /* scheduling */
    struct list_head run_list; /* the run list */
    int has_cpu;               /* presumably: currently executing on a CPU */
    int policy;                /* scheduling policy (may carry SCHED_YIELD —
                                  TODO confirm against schedule.c) */
    int counter;               /* presumably remaining timeslice (cf. Linux) */

    struct ac_timer blt;       /* blocked timeout */

    s_time_t lastschd;         /* time this domain was last scheduled */
    s_time_t cpu_time;         /* total CPU time received till now */
    s_time_t wokenup;          /* time domain got woken up */

    /* BVT scheduler parameters (see sched_bvtctl/sched_adjdom below). */
    unsigned long mcu_advance; /* inverse of weight */
    u32 avt;                   /* actual virtual time */
    u32 evt;                   /* effective virtual time */
    int warpback;              /* warp? */
    long warp;                 /* virtual time warp */
    long warpl;                /* warp limit */
    long warpu;                /* unwarp time requirement */
    s_time_t warped;           /* time it ran warped last time */
    s_time_t uwarped;          /* time it ran unwarped last time */

    /* Network I/O */
    net_vif_t *net_vif_list[MAX_DOMAIN_VIFS];

    /* Block I/O */
    blk_ring_t *blk_ring_base;
    unsigned int blk_req_cons;  /* request consumer */
    unsigned int blk_resp_prod; /* (private version of) response producer */
    struct list_head blkdev_list;
    spinlock_t blk_ring_lock;
    struct list_head physdisk_aces; /* physdisk_ace structures describing
                                       what bits of disk the process can do
                                       raw access to. */
    spinlock_t physdev_lock;
    segment_t *segment_list[XEN_MAX_SEGMENTS]; /* xvd */

    /* VM */
    struct mm_struct mm;
    /* We need this lock to check page types and frob reference counts. */
    spinlock_t page_lock;

    mm_segment_t addr_limit;    /* thread address space:
                                   0-0xBFFFFFFF for user-thread
                                   0-0xFFFFFFFF for kernel-thread */

    char name[MAX_DOMAIN_NAME]; /* human-readable domain name */

    /*
     * active_mm stays for now. It's entangled in the tricky TLB flushing
     * stuff which I haven't addressed yet. It stays until I'm man enough
     * to venture in.
     */
    struct mm_struct *active_mm;
    struct thread_struct thread;
    /* prev/next: circular task list anchored at idle0_task (see SET_LINKS);
     * next_hash: chain in task_hash[]. */
    struct task_struct *prev_task, *next_task, *next_hash;

    unsigned long flags;        /* PF_* bits defined above */

    atomic_t refcnt;            /* reference count — presumably task is freed
                                   when it drops to zero; TODO confirm */
};
/*
 * Domain states:
 * TASK_RUNNING:         Domain is runnable and should be on a run queue.
 * TASK_INTERRUPTIBLE:   Domain is blocked but may be woken up by an event
 *                       or expiring timer.
 * TASK_UNINTERRUPTIBLE: Domain is blocked but may not be woken up by an
 *                       arbitrary event or timer.
 * TASK_WAIT:            Domain's CPU allocation expired.
 * TASK_SUSPENDED:       Domain is in suspended state (eg. start of day).
 * TASK_DYING:           Domain is about to cross over to the land of the dead.
 *
 * If you update these then please update the mapping to text names in
 * xi_list.
 */
#define TASK_RUNNING             0
#define TASK_INTERRUPTIBLE       1
#define TASK_UNINTERRUPTIBLE     2
#define TASK_WAIT                4
#define TASK_SUSPENDED           8
#define TASK_DYING              16

/* Scheduling-policy modifier bit — presumably mirrors Linux's SCHED_YIELD;
 * TODO confirm against schedule.c. */
#define SCHED_YIELD 0x10
#include <asm/uaccess.h> /* for KERNEL_DS */

/*
 * Static initialiser for CPU 0's idle task; remaining fields of a static
 * object are zero-initialised. evt/avt start at the maximum virtual time,
 * presumably so the BVT scheduler picks the idle task only when nothing
 * else is runnable. prev_task/next_task point at the task itself, making
 * it the anchor of the circular task list (see SET_LINKS below).
 */
#define IDLE0_TASK(_t)           \
{                                \
    processor:   0,              \
    domain:      IDLE_DOMAIN_ID, \
    state:       TASK_RUNNING,   \
    has_cpu:     0,              \
    evt:         0xffffffff,     \
    avt:         0xffffffff,     \
    mm:          IDLE0_MM,       \
    addr_limit:  KERNEL_DS,      \
    active_mm:   &idle0_task.mm, \
    thread:      INIT_THREAD,    \
    prev_task:   &(_t),          \
    next_task:   &(_t)           \
}
extern struct task_struct *idle_task[NR_CPUS];
/* Reserved domain id for the per-CPU idle tasks (all-ones). */
#define IDLE_DOMAIN_ID (~0)
#define is_idle_task(_p) ((_p)->domain == IDLE_DOMAIN_ID)
215 #ifndef IDLE0_TASK_SIZE
216 #define IDLE0_TASK_SIZE 2048*sizeof(long)
217 #endif
219 union task_union {
220 struct task_struct task;
221 unsigned long stack[IDLE0_TASK_SIZE/sizeof(long)];
222 };
224 extern union task_union idle0_task_union;
225 extern struct task_struct first_task_struct;
/* Domain lifecycle: creation, guest-OS construction, teardown. */
extern struct task_struct *do_newdomain(unsigned int dom_id, unsigned int cpu);
extern int setup_guestos(
    struct task_struct *p, dom0_newdomain_t *params, unsigned int num_vifs,
    char *data_start, unsigned long data_len,
    char *cmdline, unsigned long initrd_len);
extern int final_setup_guestos(struct task_struct *p, dom_meminfo_t *);

/* Lookup via task_hash[] (see below); protected by tasklist_lock. */
struct task_struct *find_domain_by_id(unsigned int dom);
extern void release_task(struct task_struct *);
extern void __kill_domain(struct task_struct *p);
extern void kill_domain(void);
extern void kill_domain_with_errmsg(const char *err);
extern long kill_other_domain(unsigned int dom, int force);
extern void stop_domain(void);
extern long stop_other_domain(unsigned int dom);

/* arch/process.c */
void new_thread(struct task_struct *p,
                unsigned long start_pc,
                unsigned long start_stack,
                unsigned long start_info);
extern void flush_thread(void);
extern void exit_thread(void);

/* Linux puts these here for some reason! */
extern int request_irq(unsigned int,
                       void (*handler)(int, void *, struct pt_regs *),
                       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);
/* Bitmask of CPUs whose idle task is not yet initialised. */
extern unsigned long wait_init_idle;
/*
 * Mark this CPU's idle task as initialised by clearing its bit in
 * wait_init_idle. No trailing semicolon in the expansion: the caller
 * supplies it, so `if (c) init_idle(); else ...` parses correctly
 * (the original trailing ';' made the if-body two statements).
 */
#define init_idle() clear_bit(smp_processor_id(), &wait_init_idle)
/*
 * Scheduler functions (in schedule.c)
 */
#define set_current_state(_s) do { current->state = (_s); } while (0)
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
void scheduler_init(void);
void schedulers_start(void);
void sched_add_domain(struct task_struct *p);
void sched_rem_domain(struct task_struct *p);
/* BVT global control: set the context-switch allowance. */
long sched_bvtctl(unsigned long ctx_allow);
/* Adjust a domain's BVT parameters (cf. the warp fields in task_struct). */
long sched_adjdom(int dom, unsigned long mcu_adv, unsigned long warp,
                  unsigned long warpl, unsigned long warpu);
void init_idle_task(void);
int wake_up(struct task_struct *p);
long schedule_timeout(long timeout);
long do_yield(void);
void reschedule(struct task_struct *p);
asmlinkage void schedule(void);

/* True iff domain _p has pending intra-Xen or guest-visible events. */
#define signal_pending(_p) ((_p)->hyp_events || \
                            (_p)->shared_info->events)

void domain_init(void);

int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */
void cpu_idle(void);   /* Idle loop. */
/* This hash table is protected by the tasklist_lock. */
#define TASK_HASH_SIZE 256
#define TASK_HASH(_id) ((_id)&(TASK_HASH_SIZE-1))
/* NOTE(review): this is a tentative *definition* in a header, not an
 * 'extern' declaration — every .c that includes this file emits its own
 * copy and the build relies on the linker merging common symbols (breaks
 * under -fno-common). Should be 'extern' here with a single definition in
 * one .c file; not changed here because that companion definition does
 * not exist yet. */
struct task_struct *task_hash[TASK_HASH_SIZE];
/* Unlink (p) from the doubly-linked circular task list. Caller is
 * presumably expected to hold tasklist_lock — TODO confirm. */
#define REMOVE_LINKS(p) do { \
    (p)->next_task->prev_task = (p)->prev_task; \
    (p)->prev_task->next_task = (p)->next_task; \
    } while (0)

/* Insert (p) at the tail of the circular task list, whose anchor is
 * idle0_task (cf. IDLE0_TASK initialising prev/next to itself). */
#define SET_LINKS(p) do { \
    (p)->next_task = &idle0_task; \
    (p)->prev_task = idle0_task.prev_task; \
    idle0_task.prev_task->next_task = (p); \
    idle0_task.prev_task = (p); \
    } while (0)

extern void update_process_times(int user);
309 #include <asm/desc.h>
/*
 * Install the current domain's LDT on this CPU.
 *
 * With no LDT entries, loads the null selector, which invalidates the LDT
 * (subsequent LDT-relative segment loads fault). Otherwise, rewrites this
 * CPU's LDT descriptor slot in the guest's GDT to describe the LDT's fixed
 * virtual mapping, then loads the corresponding selector.
 */
static inline void load_LDT(void)
{
    unsigned int cpu;
    struct desc_struct *desc;
    unsigned long ents;

    if ( (ents = current->mm.ldt_ents) == 0 )
    {
        __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );
    }
    else
    {
        /* Per-CPU LDT descriptor slot within the guest's GDT. */
        cpu = smp_processor_id();
        desc = (struct desc_struct *)GET_GDT_ADDRESS(current) + __LDT(cpu);
        /* Low dword: base[15:0] in the top half; limit = ents*8 - 1 bytes
         * (each x86 descriptor entry is 8 bytes). */
        desc->a = ((LDT_VIRT_START&0xffff)<<16) | (ents*8-1);
        /* High dword: base[31:24] | P=1, system type 2 (LDT) => 0x8200 |
         * base[23:16]. */
        desc->b = (LDT_VIRT_START&(0xff<<24)) | 0x8200 |
            ((LDT_VIRT_START&0xff0000)>>16);
        /* Selector: index __LDT(cpu), TI=0 (GDT), RPL=0. */
        __asm__ __volatile__ ( "lldt %%ax" : : "a" (__LDT(cpu)<<3) );
    }
}
331 #endif