ia64/xen-unstable

xen/include/xen/sched.h @ 5288:0c246cd111e1

bitkeeper revision 1.1652 (429f70f9moC9qZBswXttF9sYcHtxTA)

sched.h:
g/c unneeded include.
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
author cl349@firebug.cl.cam.ac.uk
date Thu Jun 02 20:50:01 2005 +0000 (2005-06-02)
parents 529d5150d163
children 8651a99cdc09
#ifndef __SCHED_H__
#define __SCHED_H__

#include <xen/config.h>
#include <xen/types.h>
#include <xen/spinlock.h>
#include <xen/smp.h>
#include <public/xen.h>
#include <public/dom0_ops.h>
#include <xen/grant_table.h>
#include <asm/domain.h>

extern unsigned long volatile jiffies;
extern rwlock_t domlist_lock;

/* A global pointer to the initial domain (DOM0). */
extern struct domain *dom0;

typedef struct event_channel_st
{
#define ECS_FREE         0 /* Channel is available for use.                  */
#define ECS_RESERVED     1 /* Channel is reserved.                           */
#define ECS_UNBOUND      2 /* Channel is waiting to bind to a remote domain. */
#define ECS_INTERDOMAIN  3 /* Channel is bound to another domain.            */
#define ECS_PIRQ         4 /* Channel is bound to a physical IRQ line.       */
#define ECS_VIRQ         5 /* Channel is bound to a virtual IRQ line.        */
#define ECS_IPI          6 /* Channel is bound to a virtual IPI line.        */
    u16 state;
    union {
        struct {
            domid_t remote_domid;
        } __attribute__ ((packed)) unbound; /* state == ECS_UNBOUND */
        struct {
            u16                 remote_port;
            struct exec_domain *remote_dom;
        } __attribute__ ((packed)) interdomain; /* state == ECS_INTERDOMAIN */
        u16 pirq;     /* state == ECS_PIRQ */
        u16 virq;     /* state == ECS_VIRQ */
        u32 ipi_edom; /* state == ECS_IPI  */
    } u;
} event_channel_t;
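/*
 * Illustrative sketch (not part of the original header): the 'state' field
 * selects which member of the union is meaningful.  Assumes the caller holds
 * d->event_channel_lock and that 'port' is below d->max_event_channel;
 * evtchn_describe() is a hypothetical helper.
 *
 *   static void evtchn_describe(struct domain *d, int port)
 *   {
 *       event_channel_t *chn = &d->event_channel[port];
 *       switch ( chn->state )
 *       {
 *       case ECS_UNBOUND:
 *           printk("port %d unbound, remote domid %d\n",
 *                  port, chn->u.unbound.remote_domid);
 *           break;
 *       case ECS_INTERDOMAIN:
 *           printk("port %d -> dom %d port %d\n", port,
 *                  chn->u.interdomain.remote_dom->domain->domain_id,
 *                  chn->u.interdomain.remote_port);
 *           break;
 *       case ECS_PIRQ:
 *           printk("port %d bound to pirq %d\n", port, chn->u.pirq);
 *           break;
 *       default:
 *           break;
 *       }
 *   }
 */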
int  init_event_channels(struct domain *d);
void destroy_event_channels(struct domain *d);
int  init_exec_domain_event_channels(struct exec_domain *ed);

#define CPUMAP_RUNANYWHERE 0xFFFFFFFF

struct exec_domain
{
    int              vcpu_id;

    int              processor;

    vcpu_info_t     *vcpu_info;

    struct domain   *domain;
    struct exec_domain *next_in_list;

    struct ac_timer  timer;         /* one-shot timer for timeout values     */
    unsigned long    sleep_tick;    /* tick at which this vcpu started sleep */

    s_time_t         lastschd;      /* time this domain was last scheduled   */
    s_time_t         lastdeschd;    /* time this domain was last descheduled */
    s_time_t         cpu_time;      /* total CPU time received till now      */
    s_time_t         wokenup;       /* time domain got woken up              */
    void            *sched_priv;    /* scheduler-specific data               */

    unsigned long    vcpu_flags;

    u16              virq_to_evtchn[NR_VIRQS];

    atomic_t         pausecnt;

    cpumap_t         cpumap;        /* which cpus this domain can run on     */

    struct arch_exec_domain arch;
};

/* Per-domain lock can be recursively acquired in fault handlers. */
#define LOCK_BIGLOCK(_d) spin_lock_recursive(&(_d)->big_lock)
#define UNLOCK_BIGLOCK(_d) spin_unlock_recursive(&(_d)->big_lock)
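/*
 * Illustrative sketch (not part of the original header): the per-domain
 * "big lock" may be re-acquired by a fault handler on the same CPU, which is
 * why it is a recursive spinlock.  example_guest_update() and the update it
 * protects are hypothetical.
 *
 *   static void example_guest_update(struct domain *d, unsigned long assist)
 *   {
 *       LOCK_BIGLOCK(d);
 *       d->vm_assist = assist;
 *       UNLOCK_BIGLOCK(d);
 *   }
 */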
struct domain
{
    domid_t          domain_id;

    shared_info_t   *shared_info;     /* shared data area                    */
    spinlock_t       time_lock;

    spinlock_t       big_lock;

    spinlock_t       page_alloc_lock; /* protects all the following fields   */
    struct list_head page_list;       /* linked list, of size tot_pages      */
    struct list_head xenpage_list;    /* linked list, of size xenheap_pages  */
    unsigned int     tot_pages;       /* number of pages currently possessed */
    unsigned int     max_pages;       /* maximum value for tot_pages         */
    unsigned int     next_io_page;    /* next io pfn to give to domain       */
    unsigned int     xenheap_pages;   /* # pages allocated from Xen heap     */

    /* Scheduling. */
    int              shutdown_code;   /* code value from OS (if DOMF_shutdown) */
    void            *sched_priv;      /* scheduler-specific data             */

    struct domain   *next_in_list;
    struct domain   *next_in_hashbucket;

    /* Event channel information. */
    event_channel_t *event_channel;
    unsigned int     max_event_channel;
    spinlock_t       event_channel_lock;

    grant_table_t   *grant_table;

    /*
     * Interrupt to event-channel mappings. Updates should be protected by the
     * domain's event-channel spinlock. Read accesses can also synchronise on
     * the lock, but races don't usually matter.
     */
#define NR_PIRQS 128 /* Put this somewhere sane! */
    u16              pirq_to_evtchn[NR_PIRQS];
    u32              pirq_mask[NR_PIRQS/32];

    unsigned long    domain_flags;
    unsigned long    vm_assist;

    atomic_t         refcnt;

    struct exec_domain *exec_domain[MAX_VIRT_CPUS];

    /* Bitmask of CPUs on which this domain is running. */
    unsigned long    cpuset;

    struct arch_domain arch;
};
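/*
 * Illustrative sketch (not part of the original header): the page counters
 * must be read under page_alloc_lock, as the comment on that field notes.
 * example_report_pages() is a hypothetical helper.
 *
 *   static void example_report_pages(struct domain *d)
 *   {
 *       unsigned int tot, max;
 *       spin_lock(&d->page_alloc_lock);
 *       tot = d->tot_pages;
 *       max = d->max_pages;
 *       spin_unlock(&d->page_alloc_lock);
 *       printk("dom%d: %u/%u pages\n", d->domain_id, tot, max);
 *   }
 */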
struct domain_setup_info
{
    /* Initialised by caller. */
    unsigned long image_addr;
    unsigned long image_len;
    /* Initialised by loader: Public. */
    unsigned long v_start;
    unsigned long v_end;
    unsigned long v_kernstart;
    unsigned long v_kernend;
    unsigned long v_kernentry;
    /* Initialised by loader: Private. */
    unsigned int  load_symtab;
    unsigned long symtab_addr;
    unsigned long symtab_len;
    /* Indicates whether this is a Xen-specific image. */
    unsigned int  xen_elf_image;
};
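/*
 * Illustrative sketch (not part of the original header): the split between
 * caller-initialised and loader-initialised fields.  example_load_image()
 * and fill_dsi_from_image() are hypothetical; a real loader would populate
 * v_start..v_kernentry and the symbol-table fields.
 *
 *   static int example_load_image(struct domain_setup_info *dsi,
 *                                 unsigned long addr, unsigned long len)
 *   {
 *       dsi->image_addr = addr;
 *       dsi->image_len  = len;
 *       return fill_dsi_from_image(dsi);
 *   }
 */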
extern struct domain idle0_domain;
extern struct exec_domain idle0_exec_domain;

extern struct exec_domain *idle_task[NR_CPUS];
#define IDLE_DOMAIN_ID   (0x7FFFU)
#define is_idle_task(_d) (test_bit(_DOMF_idle_domain, &(_d)->domain_flags))

struct exec_domain *alloc_exec_domain_struct(struct domain *d,
                                             unsigned long vcpu);

void free_domain_struct(struct domain *d);
struct domain *alloc_domain_struct();

#define DOMAIN_DESTRUCTED (1<<31) /* assumes atomic_t is >= 32 bits */
#define put_domain(_d) \
  if ( atomic_dec_and_test(&(_d)->refcnt) ) domain_destruct(_d)

/*
 * Use this when you don't have an existing reference to @d. It returns
 * FALSE if @d is being destructed.
 */
static always_inline int get_domain(struct domain *d)
{
    atomic_t old, new, seen = d->refcnt;
    do
    {
        old = seen;
        if ( unlikely(_atomic_read(old) & DOMAIN_DESTRUCTED) )
            return 0;
        _atomic_set(new, _atomic_read(old) + 1);
        seen = atomic_compareandswap(old, new, &d->refcnt);
    }
    while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
    return 1;
}

/*
 * Use this when you already have, or are borrowing, a reference to @d.
 * In this case we know that @d cannot be destructed under our feet.
 */
static inline void get_knownalive_domain(struct domain *d)
{
    atomic_inc(&d->refcnt);
    ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED));
}
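/*
 * Illustrative sketch (not part of the original header): a caller that does
 * not already hold a reference must take one with get_domain() and balance
 * it with put_domain(); a failed get_domain() means the domain is being
 * destructed.  example_with_domain() is hypothetical.
 *
 *   static int example_with_domain(struct domain *d)
 *   {
 *       if ( !get_domain(d) )
 *           return -ESRCH;
 *       printk("dom%d flags %lx\n", d->domain_id, d->domain_flags);
 *       put_domain(d);
 *       return 0;
 *   }
 */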
extern struct domain *do_createdomain(
    domid_t dom_id, unsigned int cpu);
extern int construct_dom0(
    struct domain *d,
    unsigned long image_start, unsigned long image_len,
    unsigned long initrd_start, unsigned long initrd_len,
    char *cmdline);
extern int set_info_guest(struct domain *d, dom0_setdomaininfo_t *);

struct domain *find_domain_by_id(domid_t dom);
extern void domain_destruct(struct domain *d);
extern void domain_kill(struct domain *d);
extern void domain_shutdown(u8 reason);

/*
 * Mark current domain as crashed. This function returns: the domain is not
 * synchronously descheduled from any processor.
 */
extern void domain_crash(void);

/*
 * Mark current domain as crashed and synchronously deschedule from the local
 * processor. This function never returns.
 */
extern void domain_crash_synchronous(void) __attribute__((noreturn));
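/*
 * Illustrative sketch (not part of the original header): domain_crash() suits
 * paths that can still unwind and return an error, whereas
 * domain_crash_synchronous() suits paths that cannot safely return to the
 * guest.  example_handle_bad_state() is hypothetical.
 *
 *   static long example_handle_bad_state(int fatal)
 *   {
 *       if ( !fatal )
 *       {
 *           domain_crash();
 *           return -EINVAL;
 *       }
 *       domain_crash_synchronous();
 *   }
 */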
void new_thread(struct exec_domain *d,
                unsigned long start_pc,
                unsigned long start_stack,
                unsigned long start_info);

#define set_current_state(_s) do { current->state = (_s); } while (0)
void scheduler_init(void);
void schedulers_start(void);
void sched_add_domain(struct exec_domain *);
void sched_rem_domain(struct exec_domain *);
long sched_ctl(struct sched_ctl_cmd *);
long sched_adjdom(struct sched_adjdom_cmd *);
int  sched_id();
void domain_wake(struct exec_domain *d);
void domain_sleep_nosync(struct exec_domain *d);
void domain_sleep_sync(struct exec_domain *d);

/*
 * Force loading of currently-executing domain state on the specified set
 * of CPUs. This is used to counteract lazy state switching where required.
 */
extern void sync_lazy_execstate_cpuset(unsigned long cpuset);
extern void sync_lazy_execstate_all(void);
extern int __sync_lazy_execstate(void);

/* Called by the scheduler to switch to another exec_domain. */
extern void context_switch(
    struct exec_domain *prev,
    struct exec_domain *next);

/* Called by the scheduler to continue running the current exec_domain. */
extern void continue_running(
    struct exec_domain *same);

int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */

void startup_cpu_idle_loop(void);

unsigned long __hypercall_create_continuation(
    unsigned int op, unsigned int nr_args, ...);
#define hypercall0_create_continuation(_op)                               \
    __hypercall_create_continuation((_op), 0)
#define hypercall1_create_continuation(_op, _a1)                          \
    __hypercall_create_continuation((_op), 1,                             \
        (unsigned long)(_a1))
#define hypercall2_create_continuation(_op, _a1, _a2)                     \
    __hypercall_create_continuation((_op), 2,                             \
        (unsigned long)(_a1), (unsigned long)(_a2))
#define hypercall3_create_continuation(_op, _a1, _a2, _a3)                \
    __hypercall_create_continuation((_op), 3,                             \
        (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3))
#define hypercall4_create_continuation(_op, _a1, _a2, _a3, _a4)           \
    __hypercall_create_continuation((_op), 4,                             \
        (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
        (unsigned long)(_a4))
#define hypercall5_create_continuation(_op, _a1, _a2, _a3, _a4, _a5)      \
    __hypercall_create_continuation((_op), 5,                             \
        (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
        (unsigned long)(_a4), (unsigned long)(_a5))
#define hypercall6_create_continuation(_op, _a1, _a2, _a3, _a4, _a5, _a6) \
    __hypercall_create_continuation((_op), 6,                             \
        (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
        (unsigned long)(_a4), (unsigned long)(_a5), (unsigned long)(_a6))

#define hypercall_preempt_check() (unlikely(            \
        softirq_pending(smp_processor_id()) |           \
        (!!current->vcpu_info->evtchn_upcall_pending &  \
          !current->vcpu_info->evtchn_upcall_mask)      \
    ))
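/*
 * Illustrative sketch (not part of the original header): a long-running
 * hypercall periodically polls hypercall_preempt_check() and, if pending
 * work is detected, packages its remaining arguments into a continuation so
 * the guest re-enters the same hypercall later.  __HYPERVISOR_example_op and
 * process_one_item() are made up for the example.
 *
 *   static long example_long_op(unsigned long start, unsigned long count)
 *   {
 *       unsigned long i;
 *       for ( i = start; i < count; i++ )
 *       {
 *           if ( hypercall_preempt_check() )
 *               return hypercall2_create_continuation(
 *                   __HYPERVISOR_example_op, i, count);
 *           process_one_item(i);
 *       }
 *       return 0;
 *   }
 */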
/* The domain_hash and domain_list declared below are protected by the domlist_lock. */
#define DOMAIN_HASH_SIZE 256
#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
extern struct domain *domain_hash[DOMAIN_HASH_SIZE];
extern struct domain *domain_list;

#define for_each_domain(_d) \
 for ( (_d) = domain_list; (_d) != NULL; (_d) = (_d)->next_in_list )

#define for_each_exec_domain(_d,_ed) \
 for ( (_ed) = (_d)->exec_domain[0];   \
       (_ed) != NULL;                  \
       (_ed) = (_ed)->next_in_list )
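/*
 * Illustrative sketch (not part of the original header): walking the domain
 * list and each domain's VCPUs while holding domlist_lock for reading, as the
 * comment above requires.  example_count_vcpus() is hypothetical.
 *
 *   static int example_count_vcpus(void)
 *   {
 *       struct domain *d;
 *       struct exec_domain *ed;
 *       int n = 0;
 *       read_lock(&domlist_lock);
 *       for_each_domain ( d )
 *           for_each_exec_domain ( d, ed )
 *               n++;
 *       read_unlock(&domlist_lock);
 *       return n;
 *   }
 */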
/*
 * Per-VCPU flags (vcpu_flags).
 */
 /* Has the FPU been initialised? */
#define _VCPUF_fpu_initialised 0
#define VCPUF_fpu_initialised  (1UL<<_VCPUF_fpu_initialised)
 /* Has the FPU been used since it was last saved? */
#define _VCPUF_fpu_dirtied     1
#define VCPUF_fpu_dirtied      (1UL<<_VCPUF_fpu_dirtied)
 /* Has the guest OS requested 'stts'? */
#define _VCPUF_guest_stts      2
#define VCPUF_guest_stts       (1UL<<_VCPUF_guest_stts)
 /* Domain is blocked waiting for an event. */
#define _VCPUF_blocked         3
#define VCPUF_blocked          (1UL<<_VCPUF_blocked)
 /* Domain is paused by controller software. */
#define _VCPUF_ctrl_pause      4
#define VCPUF_ctrl_pause       (1UL<<_VCPUF_ctrl_pause)
 /* Currently running on a CPU? */
#define _VCPUF_running         5
#define VCPUF_running          (1UL<<_VCPUF_running)
 /* Disables auto-migration between CPUs. */
#define _VCPUF_cpu_pinned      6
#define VCPUF_cpu_pinned       (1UL<<_VCPUF_cpu_pinned)
 /* Domain migrated between CPUs. */
#define _VCPUF_cpu_migrated    7
#define VCPUF_cpu_migrated     (1UL<<_VCPUF_cpu_migrated)
 /* Initialization completed. */
#define _VCPUF_initialised     8
#define VCPUF_initialised      (1UL<<_VCPUF_initialised)
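/*
 * Illustrative sketch (not part of the original header): the _VCPUF_* values
 * are bit numbers for the atomic bit operations, while the VCPUF_* values are
 * the corresponding masks for plain reads of vcpu_flags.
 * example_vcpu_is_active() is hypothetical.
 *
 *   static int example_vcpu_is_active(struct exec_domain *ed)
 *   {
 *       if ( test_bit(_VCPUF_running, &ed->vcpu_flags) )
 *           return 1;
 *       return !(ed->vcpu_flags & (VCPUF_blocked|VCPUF_ctrl_pause));
 *   }
 */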
/*
 * Per-domain flags (domain_flags).
 */
 /* Has the guest OS been fully built yet? */
#define _DOMF_constructed      0
#define DOMF_constructed       (1UL<<_DOMF_constructed)
 /* Is this one of the per-CPU idle domains? */
#define _DOMF_idle_domain      1
#define DOMF_idle_domain       (1UL<<_DOMF_idle_domain)
 /* Is this domain privileged? */
#define _DOMF_privileged       2
#define DOMF_privileged        (1UL<<_DOMF_privileged)
 /* May this domain do IO to physical devices? */
#define _DOMF_physdev_access   3
#define DOMF_physdev_access    (1UL<<_DOMF_physdev_access)
 /* Guest shut itself down for some reason. */
#define _DOMF_shutdown         4
#define DOMF_shutdown          (1UL<<_DOMF_shutdown)
 /* Guest is in process of shutting itself down (becomes DOMF_shutdown). */
#define _DOMF_shuttingdown     5
#define DOMF_shuttingdown      (1UL<<_DOMF_shuttingdown)
 /* Death rattle. */
#define _DOMF_dying            6
#define DOMF_dying             (1UL<<_DOMF_dying)

static inline int domain_runnable(struct exec_domain *ed)
{
    return ( (atomic_read(&ed->pausecnt) == 0) &&
             !(ed->vcpu_flags & (VCPUF_blocked|VCPUF_ctrl_pause)) &&
             !(ed->domain->domain_flags & (DOMF_shutdown|DOMF_shuttingdown)) );
}

void exec_domain_pause(struct exec_domain *ed);
void domain_pause(struct domain *d);
void exec_domain_unpause(struct exec_domain *ed);
void domain_unpause(struct domain *d);
void domain_pause_by_systemcontroller(struct domain *d);
void domain_unpause_by_systemcontroller(struct domain *d);
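/*
 * Illustrative sketch (not part of the original header): pausing a domain
 * around an operation that must not race with its VCPUs, then unpausing it.
 * domain_runnable() above checks the per-VCPU pause count, which the pause
 * calls are expected to raise.  example_inspect() is hypothetical.
 *
 *   static void example_inspect(struct domain *d)
 *   {
 *       domain_pause(d);
 *       printk("dom%d flags %lx\n", d->domain_id, d->domain_flags);
 *       domain_unpause(d);
 *   }
 */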
static inline void exec_domain_unblock(struct exec_domain *ed)
{
    if ( test_and_clear_bit(_VCPUF_blocked, &ed->vcpu_flags) )
        domain_wake(ed);
}

#define IS_PRIV(_d)                                         \
    (test_bit(_DOMF_privileged, &(_d)->domain_flags))
#define IS_CAPABLE_PHYSDEV(_d)                              \
    (test_bit(_DOMF_physdev_access, &(_d)->domain_flags))

#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
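/*
 * Illustrative sketch (not part of the original header): guarding a
 * privileged operation.  Assumes 'current' points at the calling
 * exec_domain, as in hypercall_preempt_check() above; example_priv_op()
 * is hypothetical.
 *
 *   static long example_priv_op(void)
 *   {
 *       if ( !IS_PRIV(current->domain) )
 *           return -EPERM;
 *       return 0;
 *   }
 */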
#endif /* __SCHED_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */