direct-io.hg

xen/include/xen/sched.h @ 8500:dd5649730b32

Fix a couple of bogus dom0_op names:
  setdomaininfo -> setvcpucontext
  pincpudomain  -> setvcpuaffinity

Signed-off-by: Keir Fraser <keir@xensource.com>

author   kaf24@firebug.cl.cam.ac.uk
date     Fri Jan 06 12:53:19 2006 +0100 (2006-01-06)
parents  84cf56328ce0
children 3eeabf448f91
#ifndef __SCHED_H__
#define __SCHED_H__

#include <xen/config.h>
#include <xen/types.h>
#include <xen/spinlock.h>
#include <xen/smp.h>
#include <public/xen.h>
#include <public/dom0_ops.h>
#include <xen/time.h>
#include <xen/ac_timer.h>
#include <xen/grant_table.h>
#include <xen/rangeset.h>
#include <asm/domain.h>

extern unsigned long volatile jiffies;
extern rwlock_t domlist_lock;

/* A global pointer to the initial domain (DOM0). */
extern struct domain *dom0;

#define MAX_EVTCHNS        NR_EVENT_CHANNELS
#define EVTCHNS_PER_BUCKET 128
#define NR_EVTCHN_BUCKETS  (MAX_EVTCHNS / EVTCHNS_PER_BUCKET)

struct evtchn
{
#define ECS_FREE         0 /* Channel is available for use.                  */
#define ECS_RESERVED     1 /* Channel is reserved.                           */
#define ECS_UNBOUND      2 /* Channel is waiting to bind to a remote domain. */
#define ECS_INTERDOMAIN  3 /* Channel is bound to another domain.            */
#define ECS_PIRQ         4 /* Channel is bound to a physical IRQ line.       */
#define ECS_VIRQ         5 /* Channel is bound to a virtual IRQ line.        */
#define ECS_IPI          6 /* Channel is bound to a virtual IPI line.        */
    u16 state;             /* ECS_* */
    u16 notify_vcpu_id;    /* VCPU for local delivery notification */
    union {
        struct {
            domid_t remote_domid;
        } unbound;     /* state == ECS_UNBOUND */
        struct {
            u16            remote_port;
            struct domain *remote_dom;
        } interdomain; /* state == ECS_INTERDOMAIN */
        u16 pirq;      /* state == ECS_PIRQ */
        u16 virq;      /* state == ECS_VIRQ */
    } u;
};

int  evtchn_init(struct domain *d);
void evtchn_destroy(struct domain *d);
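
/*
 * Bucket-lookup sketch (illustrative; the macro name is hypothetical and the
 * real lookup helper lives outside this header): event channels are stored in
 * dynamically allocated buckets of EVTCHNS_PER_BUCKET 'struct evtchn' entries
 * hung off the owning domain, so port _p lives in bucket _p/EVTCHNS_PER_BUCKET
 * at slot _p%EVTCHNS_PER_BUCKET. The mask form below is equivalent because
 * EVTCHNS_PER_BUCKET is a power of two.
 */
#define example_evtchn_from_port(_d, _p) \
    (&(_d)->evtchn[(_p) / EVTCHNS_PER_BUCKET][(_p) & (EVTCHNS_PER_BUCKET - 1)])
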
struct vcpu
{
    int              vcpu_id;

    int              processor;

    vcpu_info_t     *vcpu_info;

    struct domain   *domain;

    struct vcpu     *next_in_list;

    struct ac_timer  timer;       /* one-shot timer for timeout values */
    unsigned long    sleep_tick;  /* tick at which this vcpu started sleep */

    s_time_t         lastschd;    /* time this domain was last scheduled */
    s_time_t         lastdeschd;  /* time this domain was last descheduled */
    s_time_t         cpu_time;    /* total CPU time received till now */
    s_time_t         wokenup;     /* time domain got woken up */
    void            *sched_priv;  /* scheduler-specific data */

    unsigned long    vcpu_flags;

    u16              virq_to_evtchn[NR_VIRQS];

    atomic_t         pausecnt;

    cpumask_t        cpu_affinity;

    struct arch_vcpu arch;
};

/* Per-domain lock can be recursively acquired in fault handlers. */
#define LOCK_BIGLOCK(_d) spin_lock_recursive(&(_d)->big_lock)
#define UNLOCK_BIGLOCK(_d) spin_unlock_recursive(&(_d)->big_lock)
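
/*
 * Usage sketch (illustrative; the macro name and body are hypothetical):
 * because the big lock is a recursive spinlock, code holding it may fault,
 * and the fault handler may take the same lock again on the same CPU without
 * deadlocking.
 */
#define EXAMPLE_UNDER_BIGLOCK(_d, _body) do {                              \
    LOCK_BIGLOCK(_d);                                                      \
    { _body; } /* may fault; a nested handler can re-take the big lock */  \
    UNLOCK_BIGLOCK(_d);                                                    \
} while ( 0 )
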
struct domain
{
    domid_t          domain_id;

    shared_info_t   *shared_info;     /* shared data area */

    spinlock_t       big_lock;

    spinlock_t       page_alloc_lock; /* protects all the following fields  */
    struct list_head page_list;       /* linked list, of size tot_pages     */
    struct list_head xenpage_list;    /* linked list, of size xenheap_pages */
    unsigned int     tot_pages;       /* number of pages currently possessed */
    unsigned int     max_pages;       /* maximum value for tot_pages        */
    unsigned int     xenheap_pages;   /* # pages allocated from Xen heap    */

    /* Scheduling. */
    int              shutdown_code;   /* code value from OS (if DOMF_shutdown) */
    void            *sched_priv;      /* scheduler-specific data */

    struct domain   *next_in_list;
    struct domain   *next_in_hashbucket;

    struct list_head rangesets;
    spinlock_t       rangesets_lock;

    /* Event channel information. */
    struct evtchn   *evtchn[NR_EVTCHN_BUCKETS];
    spinlock_t       evtchn_lock;

    grant_table_t   *grant_table;

    /*
     * Interrupt to event-channel mappings. Updates should be protected by the
     * domain's event-channel spinlock. Read accesses can also synchronise on
     * the lock, but races don't usually matter.
     */
#define NR_PIRQS 256 /* Put this somewhere sane! */
    u16              pirq_to_evtchn[NR_PIRQS];
    u32              pirq_mask[NR_PIRQS/32];

    /* I/O capabilities (access to IRQs and memory-mapped I/O). */
    struct rangeset *iomem_caps;
    struct rangeset *irq_caps;

    unsigned long    domain_flags;
    unsigned long    vm_assist;

    atomic_t         refcnt;

    struct vcpu     *vcpu[MAX_VIRT_CPUS];

    /* Bitmask of CPUs which are holding onto this domain's state. */
    cpumask_t        cpumask;

    struct arch_domain arch;

    void            *ssid; /* sHype security subject identifier */

    /* Control-plane tools handle for this domain. */
    xen_domain_handle_t handle;
};

struct domain_setup_info
{
    /* Initialised by caller. */
    unsigned long image_addr;
    unsigned long image_len;
    /* Initialised by loader: Public. */
    unsigned long v_start;
    unsigned long v_end;
    unsigned long v_kernstart;
    unsigned long v_kernend;
    unsigned long v_kernentry;
    /* Initialised by loader: Private. */
    unsigned int  load_symtab;
    unsigned long symtab_addr;
    unsigned long symtab_len;
    /* Indicates whether this is a Xen-specific image. */
    char *xen_section_string;
};

extern struct domain idle0_domain;
extern struct vcpu   idle0_vcpu;

extern struct vcpu *idle_domain[NR_CPUS];
#define IDLE_DOMAIN_ID   (0x7FFFU)
#define is_idle_domain(_d) (test_bit(_DOMF_idle_domain, &(_d)->domain_flags))

struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id);

struct domain *alloc_domain(void);
void free_domain(struct domain *d);

#define DOMAIN_DESTRUCTED (1<<31) /* assumes atomic_t is >= 32 bits */
#define put_domain(_d) \
  if ( atomic_dec_and_test(&(_d)->refcnt) ) domain_destruct(_d)

/*
 * Use this when you don't have an existing reference to @d. It returns
 * FALSE if @d is being destructed.
 */
static always_inline int get_domain(struct domain *d)
{
    atomic_t old, new, seen = d->refcnt;
    do
    {
        old = seen;
        if ( unlikely(_atomic_read(old) & DOMAIN_DESTRUCTED) )
            return 0;
        _atomic_set(new, _atomic_read(old) + 1);
        seen = atomic_compareandswap(old, new, &d->refcnt);
    }
    while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
    return 1;
}
/*
 * Use this when you already have, or are borrowing, a reference to @d.
 * In this case we know that @d cannot be destructed under our feet.
 */
static inline void get_knownalive_domain(struct domain *d)
{
    atomic_inc(&d->refcnt);
    ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED));
}

extern struct domain *do_createdomain(
    domid_t dom_id, unsigned int cpu);
extern int construct_dom0(
    struct domain *d,
    unsigned long image_start, unsigned long image_len,
    unsigned long initrd_start, unsigned long initrd_len,
    char *cmdline);
extern int set_info_guest(struct domain *d, dom0_setvcpucontext_t *);

struct domain *find_domain_by_id(domid_t dom);
extern void domain_destruct(struct domain *d);
extern void domain_kill(struct domain *d);
extern void domain_shutdown(struct domain *d, u8 reason);
extern void domain_pause_for_debugger(void);
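
/*
 * Reference-counting sketch (illustrative; the helper name is hypothetical,
 * and it assumes find_domain_by_id() returns with a reference already taken
 * via get_domain(), as its put_domain()-calling users suggest): hold the
 * reference for as long as the domain is used, then drop it.
 */
static inline int example_with_domain(domid_t dom)
{
    struct domain *d = find_domain_by_id(dom);
    if ( d == NULL )
        return 0;   /* no such domain, or it is already being destructed */
    /* ... 'd' is pinned here and cannot be destructed under our feet ... */
    put_domain(d);  /* drop the reference taken by the lookup */
    return 1;
}
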
/*
 * Mark specified domain as crashed. This function always returns, even if the
 * caller is the specified domain. The domain is not synchronously descheduled
 * from any processor.
 */
extern void domain_crash(struct domain *d);

/*
 * Mark current domain as crashed and synchronously deschedule from the local
 * processor. This function never returns.
 */
extern void domain_crash_synchronous(void) __attribute__((noreturn));
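
/*
 * Usage sketch (illustrative; the helper and its flag are hypothetical):
 * crash some other domain with domain_crash() and carry on, but use
 * domain_crash_synchronous() when the crashing domain is the one the local
 * CPU is currently executing, since that call never returns.
 */
static inline void example_crash(struct domain *d, int d_is_current)
{
    if ( d_is_current )
        domain_crash_synchronous(); /* never returns; deschedules this CPU */
    domain_crash(d);                /* marks @d crashed and returns */
}
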
void new_thread(struct vcpu *d,
                unsigned long start_pc,
                unsigned long start_stack,
                unsigned long start_info);

#define set_current_state(_s) do { current->state = (_s); } while (0)
void scheduler_init(void);
void schedulers_start(void);
void sched_add_domain(struct vcpu *);
void sched_rem_domain(struct vcpu *);
long sched_ctl(struct sched_ctl_cmd *);
long sched_adjdom(struct sched_adjdom_cmd *);
int  sched_id();
void vcpu_wake(struct vcpu *d);
void vcpu_sleep_nosync(struct vcpu *d);
void vcpu_sleep_sync(struct vcpu *d);

/*
 * Force synchronisation of given VCPU's state. If it is currently descheduled,
 * this call will ensure that all its state is committed to memory and that
 * no CPU is using critical state (e.g., page tables) belonging to the VCPU.
 */
extern void sync_vcpu_execstate(struct vcpu *v);

/*
 * Called by the scheduler to switch to another VCPU. On entry, although
 * VCPUF_running is no longer asserted for @prev, its context is still running
 * on the local CPU and is not committed to memory. The local scheduler lock
 * is therefore still held, and interrupts are disabled, because the local CPU
 * is in an inconsistent state.
 *
 * The callee must ensure that the local CPU is no longer running in @prev's
 * context, and that the context is saved to memory, before returning.
 * Alternatively, if implementing lazy context switching, it suffices to ensure
 * that invoking sync_vcpu_execstate() will switch and commit @prev's state.
 */
extern void context_switch(
    struct vcpu *prev,
    struct vcpu *next);

/*
 * On some architectures (notably x86) it is not possible to entirely load
 * @next's context with interrupts disabled. These may implement a function to
 * finalise loading the new context after interrupts are re-enabled. This
 * function is not given @prev and is not permitted to access it.
 */
extern void context_switch_finalise(
    struct vcpu *next);
/* Called by the scheduler to continue running the current VCPU. */
extern void continue_running(
    struct vcpu *same);

/* Is CPU 'cpu' idle right now? */
int idle_cpu(int cpu);

void startup_cpu_idle_loop(void);

unsigned long __hypercall_create_continuation(
    unsigned int op, unsigned int nr_args, ...);
#define hypercall0_create_continuation(_op)                               \
    __hypercall_create_continuation((_op), 0)
#define hypercall1_create_continuation(_op, _a1)                          \
    __hypercall_create_continuation((_op), 1,                             \
        (unsigned long)(_a1))
#define hypercall2_create_continuation(_op, _a1, _a2)                     \
    __hypercall_create_continuation((_op), 2,                             \
        (unsigned long)(_a1), (unsigned long)(_a2))
#define hypercall3_create_continuation(_op, _a1, _a2, _a3)                \
    __hypercall_create_continuation((_op), 3,                             \
        (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3))
#define hypercall4_create_continuation(_op, _a1, _a2, _a3, _a4)           \
    __hypercall_create_continuation((_op), 4,                             \
        (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
        (unsigned long)(_a4))
#define hypercall5_create_continuation(_op, _a1, _a2, _a3, _a4, _a5)      \
    __hypercall_create_continuation((_op), 5,                             \
        (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
        (unsigned long)(_a4), (unsigned long)(_a5))
#define hypercall6_create_continuation(_op, _a1, _a2, _a3, _a4, _a5, _a6) \
    __hypercall_create_continuation((_op), 6,                             \
        (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
        (unsigned long)(_a4), (unsigned long)(_a5), (unsigned long)(_a6))

#define hypercall_preempt_check() (unlikely(    \
        softirq_pending(smp_processor_id()) |   \
        event_pending(current)                  \
    ))
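
/*
 * Usage sketch (illustrative; the function and parameter names are
 * hypothetical, and the usual softirq/event declarations behind
 * hypercall_preempt_check() are assumed to be in scope): a long-running
 * hypercall periodically checks for pending work and, instead of hogging the
 * CPU, asks the guest to re-issue itself later with its remaining arguments.
 */
static inline long example_long_hypercall(
    unsigned int op, unsigned long arg, unsigned long nr_left)
{
    while ( nr_left != 0 )
    {
        if ( hypercall_preempt_check() )
            return hypercall2_create_continuation(op, arg, nr_left);
        /* ... perform one unit of work on 'arg' ... */
        nr_left--;
    }
    return 0;
}
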
/* The domain_hash and domain_list are protected by the domlist_lock. */
#define DOMAIN_HASH_SIZE 256
#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
extern struct domain *domain_hash[DOMAIN_HASH_SIZE];
extern struct domain *domain_list;

#define for_each_domain(_d) \
 for ( (_d) = domain_list;  \
       (_d) != NULL;        \
       (_d) = (_d)->next_in_list )

#define for_each_vcpu(_d,_v) \
 for ( (_v) = (_d)->vcpu[0]; \
       (_v) != NULL;         \
       (_v) = (_v)->next_in_list )
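
/*
 * Usage sketch (illustrative; the helper name is hypothetical): walk every
 * VCPU in the system while holding domlist_lock for reading, since the
 * domain list may otherwise change underneath us.
 */
static inline unsigned int example_count_vcpus(void)
{
    struct domain *d;
    struct vcpu   *v;
    unsigned int   count = 0;

    read_lock(&domlist_lock);
    for_each_domain ( d )
        for_each_vcpu ( d, v )
            count++;
    read_unlock(&domlist_lock);

    return count;
}
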
/*
 * Per-VCPU flags (vcpu_flags).
 */
/* Has the FPU been initialised? */
#define _VCPUF_fpu_initialised 0
#define VCPUF_fpu_initialised  (1UL<<_VCPUF_fpu_initialised)
/* Has the FPU been used since it was last saved? */
#define _VCPUF_fpu_dirtied     1
#define VCPUF_fpu_dirtied      (1UL<<_VCPUF_fpu_dirtied)
/* Domain is blocked waiting for an event. */
#define _VCPUF_blocked         2
#define VCPUF_blocked          (1UL<<_VCPUF_blocked)
/* Currently running on a CPU? */
#define _VCPUF_running         3
#define VCPUF_running          (1UL<<_VCPUF_running)
/* Domain migrated between CPUs. */
#define _VCPUF_cpu_migrated    4
#define VCPUF_cpu_migrated     (1UL<<_VCPUF_cpu_migrated)
/* Initialization completed. */
#define _VCPUF_initialised     5
#define VCPUF_initialised      (1UL<<_VCPUF_initialised)
/* VCPU is not runnable. */
#define _VCPUF_down            6
#define VCPUF_down             (1UL<<_VCPUF_down)

/*
 * Per-domain flags (domain_flags).
 */
/* Is this one of the per-CPU idle domains? */
#define _DOMF_idle_domain      0
#define DOMF_idle_domain       (1UL<<_DOMF_idle_domain)
/* Is this domain privileged? */
#define _DOMF_privileged       1
#define DOMF_privileged        (1UL<<_DOMF_privileged)
/* Guest shut itself down for some reason. */
#define _DOMF_shutdown         2
#define DOMF_shutdown          (1UL<<_DOMF_shutdown)
/* Guest is in the process of shutting itself down (becomes DOMF_shutdown). */
#define _DOMF_shuttingdown     3
#define DOMF_shuttingdown      (1UL<<_DOMF_shuttingdown)
/* Death rattle. */
#define _DOMF_dying            4
#define DOMF_dying             (1UL<<_DOMF_dying)
/* Domain is paused by controller software. */
#define _DOMF_ctrl_pause       5
#define DOMF_ctrl_pause        (1UL<<_DOMF_ctrl_pause)
/* Domain is being debugged by controller software. */
#define _DOMF_debugging        6
#define DOMF_debugging         (1UL<<_DOMF_debugging)

static inline int domain_runnable(struct vcpu *v)
{
    return ( (atomic_read(&v->pausecnt) == 0) &&
             !(v->vcpu_flags & (VCPUF_blocked|VCPUF_down)) &&
             !(v->domain->domain_flags &
               (DOMF_shutdown|DOMF_shuttingdown|DOMF_ctrl_pause)) );
}

void vcpu_pause(struct vcpu *v);
void domain_pause(struct domain *d);
void vcpu_unpause(struct vcpu *v);
void domain_unpause(struct domain *d);
void domain_pause_by_systemcontroller(struct domain *d);
void domain_unpause_by_systemcontroller(struct domain *d);
void cpu_init(void);

static inline void vcpu_unblock(struct vcpu *v)
{
    if ( test_and_clear_bit(_VCPUF_blocked, &v->vcpu_flags) )
        vcpu_wake(v);
}

#define IS_PRIV(_d) \
    (test_bit(_DOMF_privileged, &(_d)->domain_flags))

#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
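
/*
 * Usage sketch (illustrative; the helper name is hypothetical, and the
 * VMASST_TYPE_* constant is assumed to come from public/xen.h): IS_PRIV()
 * gates control-plane (dom0_op-style) requests, while VM_ASSIST() tests a
 * guest's opt-in execution assists.
 */
static inline int example_guest_checks(struct domain *d)
{
    if ( !IS_PRIV(d) )
        return 0;  /* not a privileged (control) domain */
    return VM_ASSIST(d, VMASST_TYPE_writable_pagetables);
}
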
#endif /* __SCHED_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */