ia64/xen-unstable

xen/include/xen/sched.h @ 4639:31a334290f9c

bitkeeper revision 1.1369 (426927a5No_sHhV62q0TEbxYu-jcHA)

Hypercall preemption check also looks for pending event upcalls to
guest kernel.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Apr 22 16:34:45 2005 +0000 (2005-04-22)
parents ae3b1e86f62d
children e686528abbfc 123bd8c4b408 65b28c74cec2
#ifndef __SCHED_H__
#define __SCHED_H__

#include <xen/config.h>
#include <xen/types.h>
#include <xen/spinlock.h>
#include <xen/cache.h>
#include <asm/regs.h>
#include <xen/smp.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <public/xen.h>
#include <public/dom0_ops.h>
#include <xen/list.h>
#include <xen/time.h>
#include <xen/ac_timer.h>
#include <xen/delay.h>
#include <asm/atomic.h>
#include <asm/current.h>
#include <xen/spinlock.h>
#include <xen/grant_table.h>
#include <asm/hardirq.h>
#include <asm/domain.h>
#include <asm/bitops.h>

extern unsigned long volatile jiffies;
extern rwlock_t domlist_lock;

/* A global pointer to the initial domain (DOM0). */
extern struct domain *dom0;

typedef struct event_channel_st
{
#define ECS_FREE         0 /* Channel is available for use.                  */
#define ECS_RESERVED     1 /* Channel is reserved.                           */
#define ECS_UNBOUND      2 /* Channel is waiting to bind to a remote domain. */
#define ECS_INTERDOMAIN  3 /* Channel is bound to another domain.            */
#define ECS_PIRQ         4 /* Channel is bound to a physical IRQ line.       */
#define ECS_VIRQ         5 /* Channel is bound to a virtual IRQ line.        */
#define ECS_IPI          6 /* Channel is bound to a virtual IPI line.        */
    u16 state;
    union {
        struct {
            domid_t remote_domid;
        } __attribute__ ((packed)) unbound;     /* state == ECS_UNBOUND     */
        struct {
            u16                 remote_port;
            struct exec_domain *remote_dom;
        } __attribute__ ((packed)) interdomain; /* state == ECS_INTERDOMAIN */
        u16 pirq;     /* state == ECS_PIRQ */
        u16 virq;     /* state == ECS_VIRQ */
        u32 ipi_edom; /* state == ECS_IPI  */
    } u;
} event_channel_t;
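
/*
 * Illustrative sketch (not part of the original header; the helper name is
 * invented): the union above is only meaningful for the channel's current
 * 'state', so callers check the state before touching the matching member.
 */
static inline int example_interdomain_port(event_channel_t *chn, u16 *port)
{
    if ( chn->state != ECS_INTERDOMAIN )
        return 0;
    *port = chn->u.interdomain.remote_port; /* only valid in this state */
    return 1;
}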
int  init_event_channels(struct domain *d);
void destroy_event_channels(struct domain *d);
int  init_exec_domain_event_channels(struct exec_domain *ed);

struct exec_domain
{
    u32 processor;

    vcpu_info_t *vcpu_info;

    struct domain      *domain;
    struct exec_domain *ed_next_list;
    int                 eid;

    struct ac_timer timer;       /* one-shot timer for timeout values */
    unsigned long   sleep_tick;  /* tick at which this vcpu started sleep */

    s_time_t lastschd;           /* time this domain was last scheduled */
    s_time_t lastdeschd;         /* time this domain was last descheduled */
    s_time_t cpu_time;           /* total CPU time received till now */
    s_time_t wokenup;            /* time domain got woken up */
    void    *ed_sched_priv;      /* scheduler-specific data */

    unsigned long ed_flags;

    u16 virq_to_evtchn[NR_VIRQS];

    atomic_t pausecnt;

    struct arch_exec_domain arch;
};

/* Per-domain lock can be recursively acquired in fault handlers. */
#define LOCK_BIGLOCK(_d)   spin_lock_recursive(&(_d)->big_lock)
#define UNLOCK_BIGLOCK(_d) spin_unlock_recursive(&(_d)->big_lock)
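
/*
 * Illustrative sketch (not part of the original header; the helper name is
 * invented): the recursive per-domain 'big lock' lets a fault handler that
 * fires while the lock is held (e.g. while copying guest memory) take the
 * same lock again without deadlocking.
 */
static inline void example_with_biglock(struct domain *d)
{
    LOCK_BIGLOCK(d);
    /* ... work on d; a nested fault handler may LOCK_BIGLOCK(d) again ... */
    UNLOCK_BIGLOCK(d);
}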
struct domain
{
    domid_t          id;

    shared_info_t   *shared_info;     /* shared data area */
    spinlock_t       time_lock;

    spinlock_t       big_lock;

    spinlock_t       page_alloc_lock; /* protects all the following fields   */
    struct list_head page_list;       /* linked list, of size tot_pages      */
    struct list_head xenpage_list;    /* linked list, of size xenheap_pages  */
    unsigned int     tot_pages;       /* number of pages currently possessed */
    unsigned int     max_pages;       /* maximum value for tot_pages         */
    unsigned int     next_io_page;    /* next io pfn to give to domain       */
    unsigned int     xenheap_pages;   /* # pages allocated from Xen heap     */

    /* Scheduling. */
    int              shutdown_code;   /* code value from OS (if DF_SHUTDOWN). */
    void            *sched_priv;      /* scheduler-specific data */

    struct domain *next_list, *next_hash;

    /* Event channel information. */
    event_channel_t *event_channel;
    unsigned int     max_event_channel;
    spinlock_t       event_channel_lock;

    grant_table_t *grant_table;

    /*
     * Interrupt to event-channel mappings. Updates should be protected by
     * the domain's event-channel spinlock. Read accesses can also
     * synchronise on the lock, but races don't usually matter.
     */
#define NR_PIRQS 128 /* Put this somewhere sane! */
    u16 pirq_to_evtchn[NR_PIRQS];
    u32 pirq_mask[NR_PIRQS/32];

    /* Physical I/O */
    spinlock_t       pcidev_lock;
    struct list_head pcidev_list;

    unsigned long d_flags;
    unsigned long vm_assist;

    atomic_t refcnt;

    struct exec_domain *exec_domain[MAX_VIRT_CPUS];

    /* Bitmask of CPUs on which this domain is running. */
    unsigned long cpuset;

    struct arch_domain arch;
};

struct domain_setup_info
{
    /* Initialised by caller. */
    unsigned long image_addr;
    unsigned long image_len;
    /* Initialised by loader: Public. */
    unsigned long v_start;
    unsigned long v_end;
    unsigned long v_kernstart;
    unsigned long v_kernend;
    unsigned long v_kernentry;
    /* Initialised by loader: Private. */
    unsigned int  load_symtab;
    unsigned long symtab_addr;
    unsigned long symtab_len;
};
#include <asm/uaccess.h> /* for KERNEL_DS */

extern struct domain idle0_domain;
extern struct exec_domain idle0_exec_domain;

extern struct exec_domain *idle_task[NR_CPUS];
#define IDLE_DOMAIN_ID   (0x7FFFU)
#define is_idle_task(_p) (test_bit(DF_IDLETASK, &(_p)->d_flags))

struct exec_domain *alloc_exec_domain_struct(struct domain *d,
                                             unsigned long vcpu);

void free_domain_struct(struct domain *d);
struct domain *alloc_domain_struct();

#define DOMAIN_DESTRUCTED (1<<31) /* assumes atomic_t is >= 32 bits */
#define put_domain(_d) \
  if ( atomic_dec_and_test(&(_d)->refcnt) ) domain_destruct(_d)

/*
 * Use this when you don't have an existing reference to @d. It returns
 * FALSE if @d is being destructed.
 */
static always_inline int get_domain(struct domain *d)
{
    atomic_t old, new, seen = d->refcnt;
    do
    {
        old = seen;
        if ( unlikely(_atomic_read(old) & DOMAIN_DESTRUCTED) )
            return 0;
        _atomic_set(new, _atomic_read(old) + 1);
        seen = atomic_compareandswap(old, new, &d->refcnt);
    }
    while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
    return 1;
}

/*
 * Use this when you already have, or are borrowing, a reference to @d.
 * In this case we know that @d cannot be destructed under our feet.
 */
static inline void get_knownalive_domain(struct domain *d)
{
    atomic_inc(&d->refcnt);
    ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED));
}
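
/*
 * Illustrative sketch (not part of the original header; the helper name is
 * invented): the usual reference-counting pattern. A successful get_domain()
 * pins @d against destruction and must be balanced by put_domain(), which
 * calls domain_destruct() when the last reference is dropped.
 */
static inline int example_use_domain(struct domain *d)
{
    if ( !get_domain(d) )
        return 0; /* domain is already being destructed */
    /* ... safe to dereference d here ... */
    put_domain(d);
    return 1;
}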
extern struct domain *do_createdomain(
    domid_t dom_id, unsigned int cpu);
extern int construct_dom0(
    struct domain *d,
    unsigned long image_start, unsigned long image_len,
    unsigned long initrd_start, unsigned long initrd_len,
    char *cmdline);
extern int set_info_guest(struct domain *d, dom0_setdomaininfo_t *);

struct domain *find_domain_by_id(domid_t dom);
extern void domain_destruct(struct domain *d);
extern void domain_kill(struct domain *d);
extern void domain_shutdown(u8 reason);

/*
 * Mark current domain as crashed. This function returns: the domain is not
 * synchronously descheduled from any processor.
 */
extern void domain_crash(void);

/*
 * Mark current domain as crashed and synchronously deschedule from the local
 * processor. This function never returns.
 */
extern void domain_crash_synchronous(void) __attribute__((noreturn));

void new_thread(struct exec_domain *d,
                unsigned long start_pc,
                unsigned long start_stack,
                unsigned long start_info);

extern unsigned long wait_init_idle;
#define init_idle() clear_bit(smp_processor_id(), &wait_init_idle);

#define set_current_state(_s) do { current->state = (_s); } while (0)
void scheduler_init(void);
void schedulers_start(void);
void sched_add_domain(struct exec_domain *);
void sched_rem_domain(struct exec_domain *);
long sched_ctl(struct sched_ctl_cmd *);
long sched_adjdom(struct sched_adjdom_cmd *);
int  sched_id();
void init_idle_task(void);
void domain_wake(struct exec_domain *d);
void domain_sleep(struct exec_domain *d);

/*
 * Force loading of currently-executing domain state on the specified set
 * of CPUs. This is used to counteract lazy state switching where required.
 */
extern void sync_lazy_execstate_cpuset(unsigned long cpuset);
extern void sync_lazy_execstate_all(void);
extern int __sync_lazy_execstate(void);

extern void context_switch(
    struct exec_domain *prev,
    struct exec_domain *next);

void domain_init(void);

int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */

void startup_cpu_idle_loop(void);

unsigned long __hypercall_create_continuation(
    unsigned int op, unsigned int nr_args, ...);
#define hypercall0_create_continuation(_op)                               \
    __hypercall_create_continuation((_op), 0)
#define hypercall1_create_continuation(_op, _a1)                          \
    __hypercall_create_continuation((_op), 1,                             \
        (unsigned long)(_a1))
#define hypercall2_create_continuation(_op, _a1, _a2)                     \
    __hypercall_create_continuation((_op), 2,                             \
        (unsigned long)(_a1), (unsigned long)(_a2))
#define hypercall3_create_continuation(_op, _a1, _a2, _a3)                \
    __hypercall_create_continuation((_op), 3,                             \
        (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3))
#define hypercall4_create_continuation(_op, _a1, _a2, _a3, _a4)           \
    __hypercall_create_continuation((_op), 4,                             \
        (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
        (unsigned long)(_a4))
#define hypercall5_create_continuation(_op, _a1, _a2, _a3, _a4, _a5)      \
    __hypercall_create_continuation((_op), 5,                             \
        (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
        (unsigned long)(_a4), (unsigned long)(_a5))
#define hypercall6_create_continuation(_op, _a1, _a2, _a3, _a4, _a5, _a6) \
    __hypercall_create_continuation((_op), 6,                             \
        (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
        (unsigned long)(_a4), (unsigned long)(_a5), (unsigned long)(_a6))

#define hypercall_preempt_check() (unlikely(            \
        softirq_pending(smp_processor_id()) |           \
        (!!current->vcpu_info->evtchn_upcall_pending &  \
          !current->vcpu_info->evtchn_upcall_mask)      \
    ))
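
/*
 * Illustrative sketch (not part of the original header; the helper name and
 * argument layout are invented): a long-running hypercall handler polls
 * hypercall_preempt_check() and, if a softirq or an unmasked event upcall is
 * pending for the current VCPU, turns itself into a continuation so the
 * guest is re-entered promptly and the call restarts later where it left off.
 */
static inline long example_preemptible_loop(unsigned int op,
                                            unsigned long start,
                                            unsigned long count)
{
    unsigned long i;

    for ( i = 0; i < count; i++ )
    {
        if ( hypercall_preempt_check() )
            return hypercall2_create_continuation(op, start + i, count - i);
        /* ... do one unit of work for item (start + i) ... */
    }

    return 0;
}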
/* The domain_hash and domain_list are protected by the domlist_lock. */
#define DOMAIN_HASH_SIZE 256
#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
extern struct domain *domain_hash[DOMAIN_HASH_SIZE];
extern struct domain *domain_list;

#define for_each_domain(_d) \
    for ( (_d) = domain_list; (_d) != NULL; (_d) = (_d)->next_list )

#define for_each_exec_domain(_d,_ed) \
    for ( (_ed) = (_d)->exec_domain[0]; \
          (_ed) != NULL;                \
          (_ed) = (_ed)->ed_next_list )
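
/*
 * Illustrative sketch (not part of the original header; the helper name is
 * invented): domain_list must be traversed under domlist_lock, taken here
 * for reading since the walk does not modify the list.
 */
static inline unsigned int example_count_domains(void)
{
    struct domain *d;
    unsigned int   nr = 0;

    read_lock(&domlist_lock);
    for_each_domain ( d )
        nr++;
    read_unlock(&domlist_lock);

    return nr;
}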
#define EDF_DONEFPUINIT  0 /* Has the FPU been initialised for this task? */
#define EDF_USEDFPU      1 /* Has this task used the FPU since last save? */
#define EDF_GUEST_STTS   2 /* Has the guest OS requested 'stts'?          */
#define DF_CONSTRUCTED   3 /* Has the guest OS been fully built yet?      */
#define DF_IDLETASK      4 /* Is this one of the per-CPU idle domains?    */
#define DF_PRIVILEGED    5 /* Is this domain privileged?                  */
#define DF_PHYSDEV       6 /* May this domain do IO to physical devices?  */
#define EDF_BLOCKED      7 /* Domain is blocked waiting for an event.     */
#define EDF_CTRLPAUSE    8 /* Domain is paused by controller software.    */
#define DF_SHUTDOWN      9 /* Guest shut itself down for some reason.     */
#define DF_CRASHED      10 /* Domain crashed inside Xen, cannot continue. */
#define DF_DYING        11 /* Death rattle.                               */
#define EDF_RUNNING     12 /* Currently running on a CPU.                 */
#define EDF_CPUPINNED   13 /* Disables auto-migration.                    */
#define EDF_MIGRATED    14 /* Domain migrated between CPUs.               */
#define EDF_DONEINIT    15 /* Initialization completed.                   */

static inline int domain_runnable(struct exec_domain *d)
{
    return ( (atomic_read(&d->pausecnt) == 0) &&
             !(d->ed_flags & ((1<<EDF_BLOCKED)|(1<<EDF_CTRLPAUSE))) &&
             !(d->domain->d_flags & ((1<<DF_SHUTDOWN)|(1<<DF_CRASHED))) );
}
static inline void exec_domain_pause(struct exec_domain *ed)
{
    ASSERT(ed != current);
    atomic_inc(&ed->pausecnt);
    domain_sleep(ed);
    sync_lazy_execstate_cpuset(ed->domain->cpuset & (1UL << ed->processor));
}

static inline void domain_pause(struct domain *d)
{
    struct exec_domain *ed;

    for_each_exec_domain( d, ed )
    {
        ASSERT(ed != current);
        atomic_inc(&ed->pausecnt);
        domain_sleep(ed);
    }

    sync_lazy_execstate_cpuset(d->cpuset);
}

static inline void exec_domain_unpause(struct exec_domain *ed)
{
    ASSERT(ed != current);
    if ( atomic_dec_and_test(&ed->pausecnt) )
        domain_wake(ed);
}

static inline void domain_unpause(struct domain *d)
{
    struct exec_domain *ed;

    for_each_exec_domain( d, ed )
        exec_domain_unpause(ed);
}
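
/*
 * Illustrative sketch (not part of the original header; the helper name is
 * invented): domain_pause()/domain_unpause() bracket work that must not race
 * with the domain's VCPUs. After domain_pause() returns, every VCPU of @d is
 * descheduled and its lazily-switched state has been flushed, so the domain
 * can be inspected or modified safely. The caller must not itself be a VCPU
 * of @d (note the ASSERT(ed != current) above).
 */
static inline void example_inspect_domain(struct domain *d)
{
    domain_pause(d);
    /* ... the domain is quiescent here; inspect/modify its state ... */
    domain_unpause(d);
}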
static inline void exec_domain_unblock(struct exec_domain *ed)
{
    if ( test_and_clear_bit(EDF_BLOCKED, &ed->ed_flags) )
        domain_wake(ed);
}

static inline void domain_pause_by_systemcontroller(struct domain *d)
{
    struct exec_domain *ed;

    for_each_exec_domain ( d, ed )
    {
        ASSERT(ed != current);
        if ( !test_and_set_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
            domain_sleep(ed);
    }

    sync_lazy_execstate_cpuset(d->cpuset);
}

static inline void domain_unpause_by_systemcontroller(struct domain *d)
{
    struct exec_domain *ed;

    for_each_exec_domain ( d, ed )
    {
        if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
            domain_wake(ed);
    }
}

#define IS_PRIV(_d)            (test_bit(DF_PRIVILEGED, &(_d)->d_flags))
#define IS_CAPABLE_PHYSDEV(_d) (test_bit(DF_PHYSDEV, &(_d)->d_flags))

#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))

#include <xen/slab.h>
#include <xen/domain.h>

#endif /* __SCHED_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */