direct-io.hg — view xen/include/xeno/sched.h @ 394:d1688684d94d

bitkeeper revision 1.184.1.5 (3ead21b5NyCUjG_aeWuPdnlHr3hUNA)

Merge boulderdash.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into boulderdash.cl.cam.ac.uk:/local/scratch/smh22/xeno.bk

author   smh22@boulderdash.cl.cam.ac.uk
date     Mon Apr 28 12:42:29 2003 +0000 (2003-04-28)
parents  87ad003d8f24 7c2130bbc0f7
children b42c882f9076 47d2a4460ed1 9a33c17a47a6
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <xeno/config.h>
#include <xeno/types.h>
#include <xeno/spinlock.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <xeno/smp.h>
#include <asm/processor.h>
#include <asm/current.h>
#include <hypervisor-ifs/hypervisor-if.h>
#include <xeno/dom0_ops.h>

#include <xeno/list.h>
#include <xeno/time.h>
#include <xeno/ac_timer.h>

extern unsigned long volatile jiffies;
extern rwlock_t tasklist_lock;
struct mm_struct {
    unsigned long cpu_vm_mask;
    /*
     * Every domain has an L1 pagetable of its own. Per-domain mappings
     * are put in this table (e.g. the current GDT is mapped here).
     */
    l1_pgentry_t *perdomain_pt;
    pagetable_t   pagetable;
    /* Current LDT details. */
    unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt;
    /* Next entry is passed to LGDT on domain switch. */
    char gdt[6];
};
/* Convenient accessors for mm.gdt. */
#define SET_GDT_ENTRIES(_p, _e) ((*(u16 *)((_p)->mm.gdt + 0)) = (_e))
#define SET_GDT_ADDRESS(_p, _a) ((*(u32 *)((_p)->mm.gdt + 2)) = (_a))
#define GET_GDT_ENTRIES(_p)     ((*(u16 *)((_p)->mm.gdt + 0)))
#define GET_GDT_ADDRESS(_p)     ((*(u32 *)((_p)->mm.gdt + 2)))
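
/*
 * Illustrative layout sketch (an assumption, not part of the original
 * header): the 6-byte mm.gdt blob has the shape of the pseudo-descriptor
 * operand that LGDT takes, a 16-bit word at offset 0 followed by a 32-bit
 * linear address at offset 2, hence the casts in the accessors above.
 */
struct gdt_layout_sketch {
    u16 entries; /* offset 0: written by SET_GDT_ENTRIES() */
    u32 address; /* offset 2: written by SET_GDT_ADDRESS() */
} __attribute__ ((packed));
/* Hypothetical use: SET_GDT_ENTRIES(p, nr_ents); SET_GDT_ADDRESS(p, base); */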
extern struct mm_struct init_mm;
#define IDLE0_MM                                         \
{                                                        \
    cpu_vm_mask:  0,                                     \
    perdomain_pt: 0,                                     \
    pagetable:    mk_pagetable(__pa(idle0_pg_table))     \
}
#define _HYP_EVENT_NEED_RESCHED 0
#define _HYP_EVENT_DIE          1

#define PF_DONEFPUINIT 0x1 /* Has the FPU been initialised for this task? */
#define PF_USEDFPU     0x2 /* Has this task used the FPU since last save? */
#define PF_GUEST_STTS  0x4 /* Has the guest OS requested 'stts'? */
#include <xeno/vif.h>
#include <xeno/block.h>
#include <xeno/segment.h>
struct task_struct {

    /*
     * DO NOT CHANGE THE ORDER OF THE FOLLOWING.
     * Their offsets are hardcoded in entry.S.
     */

    int processor;               /* 00: current processor */
    int state;                   /* 04: current run state */
    int hyp_events;              /* 08: pending intra-Xen events */
    unsigned int domain;         /* 12: domain id */

    /* SMH: replace below when we have an explicit 'priv' flag or bitmask. */
#define IS_PRIV(_p) ((_p)->domain == 0)

    /* An unsafe pointer into a shared data area. */
    shared_info_t *shared_info;  /* 16: shared data area */

    /*
     * Return vectors pushed to us by guest OS.
     * The stack frame for events is exactly that of an x86 hardware interrupt.
     * The stack frame for a failsafe callback is augmented with saved values
     * for segment registers %ds, %es, %fs and %gs:
     *   %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
     * (Both frame shapes are sketched below this structure definition.)
     */
    unsigned long event_selector;    /* 20: entry CS  */
    unsigned long event_address;     /* 24: entry EIP */
    unsigned long failsafe_selector; /* 28: entry CS  */
    unsigned long failsafe_address;  /* 32: entry EIP */

    /*
     * From here on things can be added and shuffled without special attention.
     */

    struct list_head pg_head;
    unsigned int tot_pages; /* number of pages currently possessed */
    unsigned int max_pages; /* max number of pages that can be possessed */

    /* Scheduling. */
    struct list_head run_list; /* the run list */
    int has_cpu;
    int policy;
    int counter;

    struct ac_timer blt;    /* blocked timeout */

    s_time_t lastschd;      /* time this domain was last scheduled */
    s_time_t cpu_time;      /* total CPU time received till now */
    s_time_t wokenup;       /* time domain got woken up */

    unsigned long mcu_advance; /* inverse of weight */
    s32 avt;                /* actual virtual time */
    s32 evt;                /* effective virtual time */
    int warpback;           /* warp? */
    long warp;              /* virtual time warp */
    long warpl;             /* warp limit */
    long warpu;             /* unwarp time requirement */
    s_time_t warped;        /* time it ran warped last time */
    s_time_t uwarped;       /* time it ran unwarped last time */

    /* Network I/O. */
    net_vif_t *net_vif_list[MAX_DOMAIN_VIFS];

    /* Block I/O. */
    blk_ring_t *blk_ring_base;
    unsigned int blk_req_cons;  /* request consumer */
    unsigned int blk_resp_prod; /* (private version of) response producer */
    struct list_head blkdev_list;
    spinlock_t blk_ring_lock;
    segment_t *segment_list[XEN_MAX_SEGMENTS]; /* vhd */
    int segment_count;

    /* VM. */
    struct mm_struct mm;
    /* We need this lock to check page types and frob reference counts. */
    spinlock_t page_lock;

    mm_segment_t addr_limit; /* thread address space:
                                0-0xBFFFFFFF for user-thread,
                                0-0xFFFFFFFF for kernel-thread */

    /*
     * active_mm stays for now. It's entangled in the tricky TLB flushing
     * stuff which I haven't addressed yet. It stays until I'm man enough
     * to venture in.
     */
    struct mm_struct *active_mm;
    struct thread_struct thread;
    struct task_struct *prev_task, *next_task, *next_hash;

    unsigned long flags;

    atomic_t refcnt;
};
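
/*
 * Illustrative sketch (an assumption, not part of the original header) of
 * the guest stack frames described in the comment inside task_struct,
 * lowest address first, for a 32-bit x86 guest:
 */
struct event_frame_sketch {      /* exactly a hardware interrupt frame */
    unsigned long eip, cs, eflags;
    unsigned long oldesp, oldss; /* present only on an inter-ring transfer */
};
struct failsafe_frame_sketch {   /* augmented with the saved selectors */
    unsigned long ds, es, fs, gs;
    unsigned long eip, cs, eflags;
    unsigned long oldesp, oldss; /* present only on an inter-ring transfer */
};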
/*
 * Domain states:
 *  TASK_RUNNING:         Domain is runnable and should be on a run queue.
 *  TASK_INTERRUPTIBLE:   Domain is blocked but may be woken up by an event
 *                        or expiring timer.
 *  TASK_UNINTERRUPTIBLE: Domain is blocked and may not be woken up by an
 *                        arbitrary event or timer.
 *  TASK_WAIT:            Domain's CPU allocation has expired.
 *  TASK_SUSPENDED:       Domain is in a suspended state (e.g. at start of day).
 *  TASK_DYING:           Domain is about to cross over to the land of the dead.
 */

#define TASK_RUNNING         0
#define TASK_INTERRUPTIBLE   1
#define TASK_UNINTERRUPTIBLE 2
#define TASK_WAIT            4
#define TASK_SUSPENDED       8
#define TASK_DYING          16
#define SCHED_YIELD 0x10

#include <asm/uaccess.h> /* for KERNEL_DS */
#define IDLE0_TASK(_t)           \
{                                \
    processor:  0,               \
    domain:     IDLE_DOMAIN_ID,  \
    state:      TASK_RUNNING,    \
    has_cpu:    0,               \
    evt:        0xffffffff,      \
    avt:        0xffffffff,      \
    mm:         IDLE0_MM,        \
    addr_limit: KERNEL_DS,       \
    active_mm:  &idle0_task.mm,  \
    thread:     INIT_THREAD,     \
    prev_task:  &(_t),           \
    next_task:  &(_t)            \
}
extern struct task_struct *idle_task[NR_CPUS];
#define IDLE_DOMAIN_ID   (~0)
#define is_idle_task(_p) ((_p)->domain == IDLE_DOMAIN_ID)

#ifndef IDLE0_TASK_SIZE
#define IDLE0_TASK_SIZE (2048*sizeof(long))
#endif
union task_union {
    struct task_struct task;
    unsigned long stack[IDLE0_TASK_SIZE/sizeof(long)];
};

extern union task_union idle0_task_union;
extern struct task_struct first_task_struct;
extern struct task_struct *do_newdomain(unsigned int dom_id, unsigned int cpu);
extern int setup_guestos(
    struct task_struct *p, dom0_newdomain_t *params,
    char *data_start, unsigned long data_len,
    char *cmdline, unsigned long initrd_len);
extern int final_setup_guestos(struct task_struct *p, dom_meminfo_t *);

struct task_struct *find_domain_by_id(unsigned int dom);
extern void release_task(struct task_struct *);
extern void __kill_domain(struct task_struct *p);
extern void kill_domain(void);
extern void kill_domain_with_errmsg(const char *err);
extern long kill_other_domain(unsigned int dom, int force);
/* arch/process.c */
void new_thread(struct task_struct *p,
                unsigned long start_pc,
                unsigned long start_stack,
                unsigned long start_info);
extern void flush_thread(void);
extern void exit_thread(void);

/* Linux puts these here for some reason! */
extern int request_irq(unsigned int,
                       void (*handler)(int, void *, struct pt_regs *),
                       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);

extern unsigned long wait_init_idle;
#define init_idle() clear_bit(smp_processor_id(), &wait_init_idle)
/*
 * Scheduler functions (in schedule.c)
 */
#define set_current_state(_s) do { current->state = (_s); } while (0)
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
void scheduler_init(void);
void schedulers_start(void);
void sched_add_domain(struct task_struct *p);
void sched_rem_domain(struct task_struct *p);
long sched_bvtctl(unsigned long ctx_allow);
long sched_adjdom(int dom, unsigned long mcu_adv, unsigned long warp,
                  unsigned long warpl, unsigned long warpu);
int wake_up(struct task_struct *p);
long schedule_timeout(long timeout);
long do_yield(void);
void reschedule(struct task_struct *p);
asmlinkage void schedule(void);
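
/*
 * Hypothetical usage sketch (not from this header), showing the classic
 * Linux-style bounded sleep these primitives support; schedule_timeout()
 * returns 0 on timeout, or the unexpired jiffies if woken early:
 *
 *     set_current_state(TASK_INTERRUPTIBLE);
 *     remaining = schedule_timeout(timeout);
 */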
#define signal_pending(_p) ((_p)->hyp_events || \
                            (_p)->shared_info->events)
void domain_init(void);

int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */
void cpu_idle(void);   /* Idle loop. */
/* This hash table is protected by the tasklist_lock. */
#define TASK_HASH_SIZE 256
#define TASK_HASH(_id) ((_id)&(TASK_HASH_SIZE-1))
struct task_struct *task_hash[TASK_HASH_SIZE];
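
/*
 * Hypothetical sketch (not from this header) of how a lookup such as
 * find_domain_by_id() might walk the hash chain; assumes tasklist_lock is
 * held and that collisions are chained through ->next_hash:
 */
static inline struct task_struct *__task_hash_lookup_sketch(unsigned int dom)
{
    struct task_struct *p = task_hash[TASK_HASH(dom)];
    while ( (p != NULL) && (p->domain != dom) )
        p = p->next_hash;
    return p;
}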
#define REMOVE_LINKS(p) do {                        \
        (p)->next_task->prev_task = (p)->prev_task; \
        (p)->prev_task->next_task = (p)->next_task; \
    } while (0)

#define SET_LINKS(p) do {                           \
        (p)->next_task = &idle0_task;               \
        (p)->prev_task = idle0_task.prev_task;      \
        idle0_task.prev_task->next_task = (p);      \
        idle0_task.prev_task = (p);                 \
    } while (0)
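
/*
 * Illustrative traversal (an assumption, mirroring Linux's for_each_task):
 * SET_LINKS() threads every domain onto a circular list anchored at
 * idle0_task, so all domains can be visited like this (tasklist_lock held):
 *
 *     for ( p = idle0_task.next_task; p != &idle0_task; p = p->next_task )
 *         inspect(p);
 */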
extern void update_process_times(int user);
#include <asm/desc.h>
static inline void load_LDT(void)
{
    unsigned int cpu;
    struct desc_struct *desc;
    unsigned long ents;

    if ( (ents = current->mm.ldt_ents) == 0 )
    {
        /* The domain has no LDT: load the null selector to disable it. */
        __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );
    }
    else
    {
        /*
         * Build this CPU's LDT descriptor in the current GDT, pointing at
         * the virtual mapping of the guest LDT at LDT_VIRT_START:
         * limit = ents*8-1; 0x8200 = present, DPL 0, system type LDT.
         */
        cpu = smp_processor_id();
        desc = (struct desc_struct *)GET_GDT_ADDRESS(current) + __LDT(cpu);
        desc->a = ((LDT_VIRT_START&0xffff)<<16) | (ents*8-1);
        desc->b = (LDT_VIRT_START&(0xff<<24)) | 0x8200 |
            ((LDT_VIRT_START&0xff0000)>>16);
        __asm__ __volatile__ ( "lldt %%ax" : : "a" (__LDT(cpu)<<3) );
    }
}
#endif /* _LINUX_SCHED_H */