direct-io.hg: view xen/include/xen/sched.h @ 8686:c0a0f4db5ab1

Create a block of reserved PFNs in shadow translate mode guests, and
move the shared info and grant table pfns into that block. This
allows us to remove the get_gnttablist dom0 op, and simplifies the
domain creation code slightly. Having the reserved block managed by
Xen may also make it slightly easier to handle the case where the
grant table needs to be extended at run time.

Suggested-by: kaf24
Signed-off-by: Steven Smith, sos22@cam.ac.uk
author sos22@douglas.cl.cam.ac.uk
date Thu Jan 26 19:40:13 2006 +0100 (2006-01-26)
parents c9362a31ba5d
children 1db05e589fa0
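
The header below gains a `start_pfn_hole' field in struct domain, recording where the reserved PFN block described above begins. As a minimal sketch of the idea only — the field name comes from this file, but the helper names and the assumption that the shared-info and grant-table frames sit at fixed offsets inside the block are illustrative guesses, not something established by this changeset:

    /* Hypothetical helpers; the offsets are illustrative assumptions only. */
    static inline unsigned long example_shared_info_pfn(struct domain *d)
    {
        return d->start_pfn_hole;             /* assumed: first reserved frame */
    }
    static inline unsigned long example_grant_frame_pfn(struct domain *d,
                                                        unsigned int idx)
    {
        return d->start_pfn_hole + 1 + idx;   /* assumed: following frames */
    }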
line source
#ifndef __SCHED_H__
#define __SCHED_H__

#include <xen/config.h>
#include <xen/types.h>
#include <xen/spinlock.h>
#include <xen/smp.h>
#include <public/xen.h>
#include <public/dom0_ops.h>
#include <xen/time.h>
#include <xen/timer.h>
#include <xen/grant_table.h>
#include <xen/rangeset.h>
#include <asm/domain.h>

extern unsigned long volatile jiffies;
extern rwlock_t domlist_lock;

/* A global pointer to the initial domain (DOM0). */
extern struct domain *dom0;

#define MAX_EVTCHNS        NR_EVENT_CHANNELS
#define EVTCHNS_PER_BUCKET 128
#define NR_EVTCHN_BUCKETS  (MAX_EVTCHNS / EVTCHNS_PER_BUCKET)

struct evtchn
{
#define ECS_FREE         0 /* Channel is available for use.                  */
#define ECS_RESERVED     1 /* Channel is reserved.                           */
#define ECS_UNBOUND      2 /* Channel is waiting to bind to a remote domain. */
#define ECS_INTERDOMAIN  3 /* Channel is bound to another domain.            */
#define ECS_PIRQ         4 /* Channel is bound to a physical IRQ line.       */
#define ECS_VIRQ         5 /* Channel is bound to a virtual IRQ line.        */
#define ECS_IPI          6 /* Channel is bound to a virtual IPI line.        */
    u16 state;             /* ECS_* */
    u16 notify_vcpu_id;    /* VCPU for local delivery notification */
    union {
        struct {
            domid_t remote_domid;
        } unbound;     /* state == ECS_UNBOUND */
        struct {
            u16            remote_port;
            struct domain *remote_dom;
        } interdomain; /* state == ECS_INTERDOMAIN */
        u16 pirq;      /* state == ECS_PIRQ */
        u16 virq;      /* state == ECS_VIRQ */
    } u;
};

int  evtchn_init(struct domain *d);
void evtchn_destroy(struct domain *d);
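
/*
 * Illustrative sketch (not part of the original header): the union above is
 * discriminated by 'state', so a reader checks the ECS_* value before
 * touching any union member. example_describe_evtchn() is a hypothetical
 * debugging helper.
 */
static inline void example_describe_evtchn(struct evtchn *chn)
{
    switch ( chn->state )
    {
    case ECS_UNBOUND:
        printk("unbound; will accept dom %u\n", chn->u.unbound.remote_domid);
        break;
    case ECS_INTERDOMAIN:
        printk("bound to dom %u, remote port %u\n",
               chn->u.interdomain.remote_dom->domain_id,
               chn->u.interdomain.remote_port);
        break;
    case ECS_PIRQ:
        printk("bound to physical IRQ %u\n", chn->u.pirq);
        break;
    case ECS_VIRQ:
        printk("bound to virtual IRQ %u\n", chn->u.virq);
        break;
    default:
        printk("free, reserved or IPI channel\n");
        break;
    }
}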
struct vcpu
{
    int vcpu_id;

    int processor;

    vcpu_info_t *vcpu_info;

    struct domain *domain;

    struct vcpu *next_in_list;

    struct timer  timer;       /* one-shot timer for timeout values */
    unsigned long sleep_tick;  /* tick at which this vcpu started sleep */

    s_time_t lastschd;         /* time this domain was last scheduled */
    s_time_t lastdeschd;       /* time this domain was last descheduled */
    s_time_t cpu_time;         /* total CPU time received till now */
    s_time_t wokenup;          /* time domain got woken up */
    void    *sched_priv;       /* scheduler-specific data */

    unsigned long vcpu_flags;

    u16 virq_to_evtchn[NR_VIRQS];

    atomic_t pausecnt;

    /* Bitmask of CPUs on which this VCPU may run. */
    cpumask_t cpu_affinity;

    unsigned long nmi_addr;    /* NMI callback address. */

    /* Bitmask of CPUs which are holding onto this VCPU's state. */
    cpumask_t vcpu_dirty_cpumask;

    struct arch_vcpu arch;
};

/* Per-domain lock can be recursively acquired in fault handlers. */
#define LOCK_BIGLOCK(_d)   spin_lock_recursive(&(_d)->big_lock)
#define UNLOCK_BIGLOCK(_d) spin_unlock_recursive(&(_d)->big_lock)
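
/*
 * Illustrative sketch: the per-domain big lock is taken around multi-step
 * updates to a domain; because it is recursive, a fault handler that runs
 * while it is held may safely take it again. example_domain_update() is a
 * hypothetical caller.
 */
static inline void example_domain_update(struct domain *d)
{
    LOCK_BIGLOCK(d);
    /* ... multi-step update of d's state; may fault and re-enter ... */
    UNLOCK_BIGLOCK(d);
}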
struct domain
{
    domid_t domain_id;

    shared_info_t *shared_info;       /* shared data area */

    spinlock_t big_lock;

    spinlock_t       page_alloc_lock; /* protects all the following fields  */
    struct list_head page_list;       /* linked list, of size tot_pages     */
    struct list_head xenpage_list;    /* linked list, of size xenheap_pages */
    unsigned int     tot_pages;       /* number of pages currently possessed */
    unsigned int     max_pages;       /* maximum value for tot_pages        */
    unsigned int     xenheap_pages;   /* # pages allocated from Xen heap    */

    /* Scheduling. */
    int   shutdown_code;   /* code value from OS (if DOMF_shutdown) */
    void *sched_priv;      /* scheduler-specific data */

    struct domain *next_in_list;
    struct domain *next_in_hashbucket;

    struct list_head rangesets;
    spinlock_t       rangesets_lock;

    /* Event channel information. */
    struct evtchn *evtchn[NR_EVTCHN_BUCKETS];
    spinlock_t     evtchn_lock;

    grant_table_t *grant_table;

    /*
     * Interrupt to event-channel mappings. Updates should be protected by the
     * domain's event-channel spinlock. Read accesses can also synchronise on
     * the lock, but races don't usually matter.
     */
#define NR_PIRQS 256 /* Put this somewhere sane! */
    u16 pirq_to_evtchn[NR_PIRQS];
    u32 pirq_mask[NR_PIRQS/32];

    /* I/O capabilities (access to IRQs and memory-mapped I/O). */
    struct rangeset *iomem_caps;
    struct rangeset *irq_caps;

    unsigned long domain_flags;
    unsigned long vm_assist;

    atomic_t refcnt;

    struct vcpu *vcpu[MAX_VIRT_CPUS];

    /* Bitmask of CPUs which are holding onto this domain's state. */
    cpumask_t domain_dirty_cpumask;

    struct arch_domain arch;

    void *ssid; /* sHype security subject identifier */

    /* Control-plane tools handle for this domain. */
    xen_domain_handle_t handle;

    /* Start of the PFN hole (the block of reserved PFNs). */
    unsigned long start_pfn_hole;
};
struct domain_setup_info
{
    /* Initialised by caller. */
    unsigned long image_addr;
    unsigned long image_len;
    /* Initialised by loader: Public. */
    unsigned long v_start;
    unsigned long v_end;
    unsigned long v_kernstart;
    unsigned long v_kernend;
    unsigned long v_kernentry;
    /* Initialised by loader: Private. */
    unsigned int  load_symtab;
    unsigned long symtab_addr;
    unsigned long symtab_len;
    /* Indicates whether this is a Xen-specific image. */
    char *xen_section_string;
};

extern struct vcpu *idle_vcpu[NR_CPUS];
#define IDLE_DOMAIN_ID    (0x7FFFU)
#define is_idle_domain(d) ((d)->domain_id == IDLE_DOMAIN_ID)
#define is_idle_vcpu(v)   (is_idle_domain((v)->domain))

struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id);

struct domain *alloc_domain(void);
void free_domain(struct domain *d);
#define DOMAIN_DESTROYED (1<<31) /* assumes atomic_t is >= 32 bits */
#define put_domain(_d) \
  if ( atomic_dec_and_test(&(_d)->refcnt) ) domain_destroy(_d)

/*
 * Use this when you don't have an existing reference to @d. It returns
 * FALSE if @d is being destroyed.
 */
static always_inline int get_domain(struct domain *d)
{
    atomic_t old, new, seen = d->refcnt;
    do
    {
        old = seen;
        if ( unlikely(_atomic_read(old) & DOMAIN_DESTROYED) )
            return 0;
        _atomic_set(new, _atomic_read(old) + 1);
        seen = atomic_compareandswap(old, new, &d->refcnt);
    }
    while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
    return 1;
}

/*
 * Use this when you already have, or are borrowing, a reference to @d.
 * In this case we know that @d cannot be destroyed under our feet.
 */
static inline void get_knownalive_domain(struct domain *d)
{
    atomic_inc(&d->refcnt);
    ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTROYED));
}
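
/*
 * Illustrative sketch: taking a temporary reference on a domain that the
 * caller does not already hold. get_domain() fails once DOMAIN_DESTROYED is
 * set; put_domain() drops the reference and, on the last one, invokes
 * domain_destroy(). example_with_domain_ref() is a hypothetical caller.
 */
static inline void example_with_domain_ref(struct domain *d)
{
    if ( !get_domain(d) )
        return;        /* domain is on its way out: do not touch it */
    /* ... d is guaranteed to stay allocated here ... */
    put_domain(d);     /* last reference ends up in domain_destroy() */
}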
extern struct domain *domain_create(
    domid_t dom_id, unsigned int cpu);
extern int construct_dom0(
    struct domain *d,
    unsigned long image_start, unsigned long image_len,
    unsigned long initrd_start, unsigned long initrd_len,
    char *cmdline);
extern int set_info_guest(struct domain *d, dom0_setvcpucontext_t *);

struct domain *find_domain_by_id(domid_t dom);
extern void domain_destroy(struct domain *d);
extern void domain_kill(struct domain *d);
extern void domain_shutdown(struct domain *d, u8 reason);
extern void domain_pause_for_debugger(void);

/*
 * Mark specified domain as crashed. This function always returns, even if the
 * caller is the specified domain. The domain is not synchronously descheduled
 * from any processor.
 */
extern void __domain_crash(struct domain *d);
#define domain_crash(d) do {                                              \
    printk("domain_crash called from %s:%d\n", __FILE__, __LINE__);       \
    __domain_crash(d);                                                    \
} while (0)

/*
 * Mark current domain as crashed and synchronously deschedule from the local
 * processor. This function never returns.
 */
extern void __domain_crash_synchronous(void) __attribute__((noreturn));
#define domain_crash_synchronous() do {                                   \
    printk("domain_crash_sync called from %s:%d\n", __FILE__, __LINE__);  \
    __domain_crash_synchronous();                                         \
} while (0)
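
/*
 * Illustrative sketch of the two crash paths described above: domain_crash()
 * marks a (possibly foreign) domain as crashed and returns to the caller,
 * whereas domain_crash_synchronous() is reserved for unrecoverable errors in
 * the currently executing guest and never returns. The helper and its flag
 * are hypothetical.
 */
static inline void example_report_guest_error(struct domain *d,
                                              int in_current_context)
{
    if ( in_current_context )
        domain_crash_synchronous();  /* never returns */
    domain_crash(d);                 /* returns; d is descheduled later */
}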
void new_thread(struct vcpu *d,
                unsigned long start_pc,
                unsigned long start_stack,
                unsigned long start_info);

#define set_current_state(_s) do { current->state = (_s); } while (0)
void scheduler_init(void);
void schedulers_start(void);
void sched_add_domain(struct vcpu *);
void sched_rem_domain(struct vcpu *);
long sched_ctl(struct sched_ctl_cmd *);
long sched_adjdom(struct sched_adjdom_cmd *);
int  sched_id(void);
void vcpu_wake(struct vcpu *d);
void vcpu_sleep_nosync(struct vcpu *d);
void vcpu_sleep_sync(struct vcpu *d);

/*
 * Force synchronisation of the given VCPU's state. If it is currently
 * descheduled, this call will ensure that all its state is committed to
 * memory and that no CPU is using critical state (e.g., page tables)
 * belonging to the VCPU.
 */
extern void sync_vcpu_execstate(struct vcpu *v);

/*
 * Called by the scheduler to switch to another VCPU. This function must
 * call context_saved(@prev) when the local CPU is no longer running in
 * @prev's context, and that context is saved to memory. Alternatively, if
 * implementing lazy context switching, it suffices to ensure that invoking
 * sync_vcpu_execstate() will switch and commit @prev's state.
 */
extern void context_switch(
    struct vcpu *prev,
    struct vcpu *next);

/*
 * As described above, context_switch() must call this function when the
 * local CPU is no longer running in @prev's context, and @prev's context is
 * saved to memory. Alternatively, if implementing lazy context switching,
 * ensure that invoking sync_vcpu_execstate() will switch and commit @prev.
 */
#define context_saved(prev) (clear_bit(_VCPUF_running, &(prev)->vcpu_flags))
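
/*
 * Illustrative sketch of the contract above, as it might look in a
 * hypothetical, non-lazy architecture implementation: only once @prev's
 * register state has been committed to memory does context_saved(@prev)
 * clear _VCPUF_running, allowing the scheduler to run @prev elsewhere.
 */
static inline void example_context_switch_outline(struct vcpu *prev,
                                                  struct vcpu *next)
{
    /* 1. Save @prev's register and FPU state to memory (arch-specific).    */
    /* 2. Publish that @prev's context is no longer live on this CPU.       */
    context_saved(prev);
    /* 3. Load @next's state and resume execution in @next (arch-specific). */
    (void)next;
}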
/* Called by the scheduler to continue running the current VCPU. */
extern void continue_running(
    struct vcpu *same);

void startup_cpu_idle_loop(void);

unsigned long __hypercall_create_continuation(
    unsigned int op, unsigned int nr_args, ...);
#define hypercall0_create_continuation(_op)                               \
    __hypercall_create_continuation((_op), 0)
#define hypercall1_create_continuation(_op, _a1)                          \
    __hypercall_create_continuation((_op), 1,                             \
        (unsigned long)(_a1))
#define hypercall2_create_continuation(_op, _a1, _a2)                     \
    __hypercall_create_continuation((_op), 2,                             \
        (unsigned long)(_a1), (unsigned long)(_a2))
#define hypercall3_create_continuation(_op, _a1, _a2, _a3)                \
    __hypercall_create_continuation((_op), 3,                             \
        (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3))
#define hypercall4_create_continuation(_op, _a1, _a2, _a3, _a4)           \
    __hypercall_create_continuation((_op), 4,                             \
        (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
        (unsigned long)(_a4))
#define hypercall5_create_continuation(_op, _a1, _a2, _a3, _a4, _a5)      \
    __hypercall_create_continuation((_op), 5,                             \
        (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
        (unsigned long)(_a4), (unsigned long)(_a5))
#define hypercall6_create_continuation(_op, _a1, _a2, _a3, _a4, _a5, _a6) \
    __hypercall_create_continuation((_op), 6,                             \
        (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
        (unsigned long)(_a4), (unsigned long)(_a5), (unsigned long)(_a6))

#define hypercall_preempt_check() (unlikely(    \
        softirq_pending(smp_processor_id()) |   \
        event_pending(current)                  \
    ))
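
/*
 * Illustrative sketch: a long-running operation polls
 * hypercall_preempt_check() and, when preemption is due, packages its
 * remaining work as a continuation so the hypercall is transparently
 * restarted later. example_long_hypercall() and its arguments are
 * hypothetical.
 */
static inline long example_long_hypercall(unsigned int op,
                                          unsigned long start,
                                          unsigned long end)
{
    unsigned long i;

    for ( i = start; i < end; i++ )
    {
        if ( hypercall_preempt_check() )
            /* Re-issue the same hypercall, resuming at item i. */
            return (long)hypercall3_create_continuation(op, i, end, 0);
        /* ... process item i ... */
    }

    return 0;
}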
/* The domain_hash table and domain_list are both protected by domlist_lock. */
#define DOMAIN_HASH_SIZE 256
#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
extern struct domain *domain_hash[DOMAIN_HASH_SIZE];
extern struct domain *domain_list;

#define for_each_domain(_d)          \
 for ( (_d) = domain_list;           \
       (_d) != NULL;                 \
       (_d) = (_d)->next_in_list )

#define for_each_vcpu(_d,_v)         \
 for ( (_v) = (_d)->vcpu[0];         \
       (_v) != NULL;                 \
       (_v) = (_v)->next_in_list )
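
/*
 * Illustrative sketch: walking every domain and VCPU in the system. As noted
 * above, domain_list is protected by domlist_lock, so a read-side traversal
 * takes it for reading. example_count_vcpus() is a hypothetical helper.
 */
static inline unsigned int example_count_vcpus(void)
{
    struct domain *d;
    struct vcpu   *v;
    unsigned int   n = 0;

    read_lock(&domlist_lock);
    for_each_domain ( d )
        for_each_vcpu ( d, v )
            n++;
    read_unlock(&domlist_lock);

    return n;
}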
/*
 * Per-VCPU flags (vcpu_flags).
 */
 /* Has the FPU been initialised? */
#define _VCPUF_fpu_initialised 0
#define VCPUF_fpu_initialised  (1UL<<_VCPUF_fpu_initialised)
 /* Has the FPU been used since it was last saved? */
#define _VCPUF_fpu_dirtied     1
#define VCPUF_fpu_dirtied      (1UL<<_VCPUF_fpu_dirtied)
 /* Domain is blocked waiting for an event. */
#define _VCPUF_blocked         2
#define VCPUF_blocked          (1UL<<_VCPUF_blocked)
 /* Currently running on a CPU? */
#define _VCPUF_running         3
#define VCPUF_running          (1UL<<_VCPUF_running)
 /* Initialization completed. */
#define _VCPUF_initialised     4
#define VCPUF_initialised      (1UL<<_VCPUF_initialised)
 /* VCPU is not runnable. */
#define _VCPUF_down            5
#define VCPUF_down             (1UL<<_VCPUF_down)
 /* NMI callback pending for this VCPU? */
#define _VCPUF_nmi_pending     8
#define VCPUF_nmi_pending      (1UL<<_VCPUF_nmi_pending)
 /* Avoid NMI reentry by allowing NMIs to be masked for short periods. */
#define _VCPUF_nmi_masked      9
#define VCPUF_nmi_masked       (1UL<<_VCPUF_nmi_masked)
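
/*
 * Illustrative sketch: the _VCPUF_* constants are bit numbers for the atomic
 * bit operations, while the VCPUF_* masks are for testing a snapshot of
 * vcpu_flags, as vcpu_runnable() below does. example_mark_blocked() is a
 * hypothetical helper.
 */
static inline void example_mark_blocked(struct vcpu *v)
{
    set_bit(_VCPUF_blocked, &v->vcpu_flags); /* atomic ops take the bit number */
    if ( v->vcpu_flags & VCPUF_blocked )     /* snapshot tests take the mask */
        vcpu_sleep_nosync(v);
}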
/*
 * Per-domain flags (domain_flags).
 */
 /* Is this domain privileged? */
#define _DOMF_privileged       0
#define DOMF_privileged        (1UL<<_DOMF_privileged)
 /* Guest shut itself down for some reason. */
#define _DOMF_shutdown         1
#define DOMF_shutdown          (1UL<<_DOMF_shutdown)
 /* Death rattle: the domain is dying and being torn down. */
#define _DOMF_dying            2
#define DOMF_dying             (1UL<<_DOMF_dying)
 /* Domain is paused by controller software. */
#define _DOMF_ctrl_pause       3
#define DOMF_ctrl_pause        (1UL<<_DOMF_ctrl_pause)
 /* Domain is being debugged by controller software. */
#define _DOMF_debugging        4
#define DOMF_debugging         (1UL<<_DOMF_debugging)
static inline int vcpu_runnable(struct vcpu *v)
{
    return ( (atomic_read(&v->pausecnt) == 0) &&
             !(v->vcpu_flags & (VCPUF_blocked|VCPUF_down)) &&
             !(v->domain->domain_flags & (DOMF_shutdown|DOMF_ctrl_pause)) );
}

void vcpu_pause(struct vcpu *v);
void domain_pause(struct domain *d);
void vcpu_unpause(struct vcpu *v);
void domain_unpause(struct domain *d);
void domain_pause_by_systemcontroller(struct domain *d);
void domain_unpause_by_systemcontroller(struct domain *d);
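
/*
 * Illustrative sketch: domain_pause() keeps all of the domain's VCPUs off the
 * CPUs (vcpu_runnable() above then fails via the per-VCPU pause count) until
 * the matching domain_unpause(). example_inspect_paused() is hypothetical.
 */
static inline void example_inspect_paused(struct domain *d)
{
    domain_pause(d);
    /* ... no VCPU of d runs here; inspect or modify its state safely ... */
    domain_unpause(d);
}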
void cpu_init(void);

int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);

static inline void vcpu_unblock(struct vcpu *v)
{
    if ( test_and_clear_bit(_VCPUF_blocked, &v->vcpu_flags) )
        vcpu_wake(v);
}

#define IS_PRIV(_d) \
    (test_bit(_DOMF_privileged, &(_d)->domain_flags))

#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
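
/*
 * Illustrative sketch: control-plane operations are gated on IS_PRIV(),
 * typically right at the top of a hypercall handler. example_privileged_op()
 * is hypothetical; -EPERM assumes <xen/errno.h> is available.
 */
static inline long example_privileged_op(void)
{
    if ( !IS_PRIV(current->domain) )
        return -EPERM;
    /* ... only a privileged domain (e.g. DOM0) reaches this point ... */
    return 0;
}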
#endif /* __SCHED_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */