ia64/xen-unstable

view xen/include/xen/sched.h @ 3105:f7a9de9a462f

bitkeeper revision 1.1159.189.6 (41a4df56fjKgjR75gUVniMEBSnS-9Q)

Unlock biglock on hypercall preemption.
author cl349@arcadians.cl.cam.ac.uk
date Wed Nov 24 19:21:58 2004 +0000 (2004-11-24)
parents 2fae9947de6f
children 75f82adfcc90
#ifndef __SCHED_H__
#define __SCHED_H__

#define STACK_SIZE (2*PAGE_SIZE)

#include <xen/config.h>
#include <xen/types.h>
#include <xen/spinlock.h>
#include <xen/cache.h>
#include <asm/regs.h>
#include <xen/smp.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <public/xen.h>
#include <public/dom0_ops.h>
#include <xen/list.h>
#include <xen/time.h>
#include <xen/ac_timer.h>
#include <xen/delay.h>
#include <asm/atomic.h>
#include <asm/current.h>
#include <xen/spinlock.h>
#include <xen/grant_table.h>
#include <xen/irq_cpustat.h>

extern unsigned long volatile jiffies;
extern rwlock_t domlist_lock;

struct domain;

/* A global pointer to the initial domain (DOM0). */
extern struct domain *dom0;

typedef struct event_channel_st
{
#define ECS_FREE         0 /* Channel is available for use.                  */
#define ECS_UNBOUND      1 /* Channel is waiting to bind to a remote domain. */
#define ECS_INTERDOMAIN  2 /* Channel is bound to another domain.            */
#define ECS_PIRQ         3 /* Channel is bound to a physical IRQ line.       */
#define ECS_VIRQ         4 /* Channel is bound to a virtual IRQ line.        */
#define ECS_IPI          5 /* Channel is bound to a virtual IPI line.        */
    u16 state;
    union {
        struct {
            domid_t remote_domid;
        } __attribute__ ((packed)) unbound; /* state == ECS_UNBOUND */
        struct {
            u16                 remote_port;
            struct exec_domain *remote_dom;
        } __attribute__ ((packed)) interdomain; /* state == ECS_INTERDOMAIN */
        u16 pirq;     /* state == ECS_PIRQ */
        u16 virq;     /* state == ECS_VIRQ */
        u32 ipi_edom; /* state == ECS_IPI  */
    } u;
} event_channel_t;
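
/*
 * Illustrative sketch, not part of the original header: 'state' selects
 * which member of the union above is valid.  A hypothetical helper that
 * checks whether a channel is bound to anything might look like this.
 */
#if 0
static inline int evtchn_is_bound(event_channel_t *chn)
{
    /* ECS_FREE carries no payload; ECS_UNBOUND is still waiting to bind. */
    return (chn->state != ECS_FREE) && (chn->state != ECS_UNBOUND);
}
#endif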

int  init_event_channels(struct domain *d);
void destroy_event_channels(struct domain *d);
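
/*
 * A guest is represented by a container object (struct domain, below) plus
 * one execution context (struct exec_domain) per virtual CPU, up to
 * MAX_VIRT_CPUS.  Per-VCPU state (the scheduling timer, accounting times
 * and the EDF_* flag bits) lives here; domain-wide state lives in
 * struct domain.
 */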

struct exec_domain
{
    u32 processor;

    vcpu_info_t *vcpu_info;

    struct domain      *domain;
    struct exec_domain *ed_next_list;
    int eid;

    struct mm_struct mm;

    struct thread_struct thread;

    struct ac_timer timer;       /* one-shot timer for timeout values */

    s_time_t  lastschd;          /* time this domain was last scheduled */
    s_time_t  lastdeschd;        /* time this domain was last descheduled */
    s_time_t  cpu_time;          /* total CPU time received till now */
    s_time_t  wokenup;           /* time domain got woken up */
    void     *ed_sched_priv;     /* scheduler-specific data */

    unsigned long ed_flags;

    u16 virq_to_evtchn[NR_VIRQS];

    atomic_t pausecnt;

};

#if 01
#define LOCK_BIGLOCK(_d) spin_lock(&(_d)->big_lock)
#define UNLOCK_BIGLOCK(_d) spin_unlock(&(_d)->big_lock)
#else
#define LOCK_BIGLOCK(_d) (void)(_d)
#define UNLOCK_BIGLOCK(_d)
#endif
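
/*
 * The per-domain 'big lock' (big_lock in struct domain, below) is taken
 * around a domain's hypercall processing; the #else branch above compiles
 * the locking away while keeping call sites valid.  A hypercall that
 * preempts itself must drop this lock before returning (see
 * locked_hypercall_may_preempt further down).
 */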

struct domain {
    domid_t          id;
    s_time_t         create_time;

    shared_info_t   *shared_info;     /* shared data area */
    spinlock_t       time_lock;

    spinlock_t       big_lock;

    l1_pgentry_t    *mm_perdomain_pt;

    spinlock_t       page_alloc_lock; /* protects all the following fields  */
    struct list_head page_list;       /* linked list, of size tot_pages     */
    struct list_head xenpage_list;    /* linked list, of size xenheap_pages */
    unsigned int     tot_pages;       /* number of pages currently possessed */
    unsigned int     max_pages;       /* maximum value for tot_pages        */
    unsigned int     xenheap_pages;   /* # pages allocated from Xen heap    */

    /* Scheduling. */
    int              shutdown_code;   /* code value from OS (if DF_SHUTDOWN). */
    void            *sched_priv;      /* scheduler-specific data */

    struct domain   *next_list, *next_hash;

    /* Event channel information. */
    event_channel_t *event_channel;
    unsigned int     max_event_channel;
    spinlock_t       event_channel_lock;

    grant_table_t   *grant_table;

    /*
     * Interrupt to event-channel mappings. Updates should be protected by the
     * domain's event-channel spinlock. Read accesses can also synchronise on
     * the lock, but races don't usually matter.
     */
#define NR_PIRQS 128 /* Put this somewhere sane! */
    u16 pirq_to_evtchn[NR_PIRQS];
    u32 pirq_mask[NR_PIRQS/32];
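    /* One bit per physical IRQ: bit (pirq % 32) of word (pirq / 32). */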

    /* Physical I/O */
    spinlock_t       pcidev_lock;
    struct list_head pcidev_list;

    unsigned long    d_flags;
    unsigned long    vm_assist;

    atomic_t         refcnt;

    struct exec_domain *exec_domain[MAX_VIRT_CPUS];
};

struct domain_setup_info
{
    unsigned long v_start;
    unsigned long v_kernstart;
    unsigned long v_kernend;
    unsigned long v_kernentry;

    unsigned int  use_writable_pagetables;
};

#include <asm/uaccess.h> /* for KERNEL_DS */

extern struct domain      idle0_domain;
extern struct exec_domain idle0_exec_domain;

extern struct exec_domain *idle_task[NR_CPUS];
#define IDLE_DOMAIN_ID   (0x7FFFU)
#define is_idle_task(_p) (test_bit(DF_IDLETASK, &(_p)->d_flags))

struct exec_domain *alloc_exec_domain_struct(struct domain *d,
                                             unsigned long vcpu);
void free_domain_struct(struct domain *d);
struct domain *alloc_domain_struct();

#define DOMAIN_DESTRUCTED (1<<31) /* assumes atomic_t is >= 32 bits */
#define put_domain(_d) \
  if ( atomic_dec_and_test(&(_d)->refcnt) ) domain_destruct(_d)

/*
 * Use this when you don't have an existing reference to @d. It returns
 * FALSE if @d is being destructed.
 */
static always_inline int get_domain(struct domain *d)
{
    atomic_t old, new, seen = d->refcnt;
    do
    {
        old = seen;
        if ( unlikely(_atomic_read(old) & DOMAIN_DESTRUCTED) )
            return 0;
        _atomic_set(new, _atomic_read(old) + 1);
        seen = atomic_compareandswap(old, new, &d->refcnt);
    }
    while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
    return 1;
}
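
/*
 * Illustrative sketch, not from the original source: typical pairing of the
 * reference-counting primitives above.  get_domain() fails once the domain
 * has started destruction, so its return value must be checked; every
 * successful get_domain() is balanced by a put_domain().
 */
#if 0
static void example_use(struct domain *d)
{
    if ( !get_domain(d) )    /* fails if 'd' is already being destructed */
        return;
    /* ... 'd' cannot be destructed while this reference is held ... */
    put_domain(d);           /* the final put_domain() calls domain_destruct() */
}
#endif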

/*
 * Use this when you already have, or are borrowing, a reference to @d.
 * In this case we know that @d cannot be destructed under our feet.
 */
static inline void get_knownalive_domain(struct domain *d)
{
    atomic_inc(&d->refcnt);
    ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED));
}

extern struct domain *do_createdomain(
    domid_t dom_id, unsigned int cpu);
extern int construct_dom0(struct domain *d,
                          unsigned long alloc_start,
                          unsigned long alloc_end,
                          char *image_start, unsigned long image_len,
                          char *initrd_start, unsigned long initrd_len,
                          char *cmdline);
extern int final_setup_guestos(struct domain *d, dom0_builddomain_t *);

struct domain *find_domain_by_id(domid_t dom);
struct domain *find_last_domain(void);
extern void domain_destruct(struct domain *d);
extern void domain_kill(struct domain *d);
extern void domain_crash(void);
extern void domain_shutdown(u8 reason);

void new_thread(struct exec_domain *d,
                unsigned long start_pc,
                unsigned long start_stack,
                unsigned long start_info);

extern unsigned long wait_init_idle;
#define init_idle() clear_bit(smp_processor_id(), &wait_init_idle);

#define set_current_state(_s) do { current->state = (_s); } while (0)
void scheduler_init(void);
void schedulers_start(void);
void sched_add_domain(struct exec_domain *d);
void sched_rem_domain(struct domain *d);
long sched_ctl(struct sched_ctl_cmd *);
long sched_adjdom(struct sched_adjdom_cmd *);
int sched_id();
void init_idle_task(void);
void domain_wake(struct exec_domain *d);
void domain_sleep(struct exec_domain *d);

void __enter_scheduler(void);

extern void switch_to(struct exec_domain *prev,
                      struct exec_domain *next);

void domain_init(void);

int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */

void startup_cpu_idle_loop(void);
void continue_cpu_idle_loop(void);

void continue_nonidle_task(void);

void hypercall_create_continuation(unsigned int op, unsigned int nr_args, ...);
#define hypercall_may_preempt(_op, _nr_args, _args...)                \
    do {                                                              \
        if ( unlikely(softirq_pending(smp_processor_id())) ) {       \
            hypercall_create_continuation(_op , _nr_args , ##_args);  \
            return _op;                                               \
    } } while ( 0 )
#define locked_hypercall_may_preempt(_d, _op, _nr_args, _args...)    \
    do {                                                              \
        if ( unlikely(softirq_pending(smp_processor_id())) ) {       \
            hypercall_create_continuation(_op , _nr_args , ##_args);  \
            UNLOCK_BIGLOCK(_d);                                       \
            return _op;                                               \
    } } while ( 0 )
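
/*
 * Illustrative sketch, not from the original source: how a long-running
 * hypercall handler is expected to use the preemption macros above.  If
 * softirq work is pending, a continuation is created and the handler
 * returns early; the locked variant also drops the domain's big lock first
 * (the point of this changeset, "Unlock biglock on hypercall preemption").
 * The hypercall number and argument below are hypothetical.
 */
#if 0
long do_example_op(unsigned long count)
{
    struct domain *d = current->domain;

    LOCK_BIGLOCK(d);

    for ( ; count != 0; count-- )
    {
        /* If we must yield, drop the big lock and re-enter with the
         * remaining count via a continuation. */
        locked_hypercall_may_preempt(d, __HYPERVISOR_example_op, 1, count);

        /* ... one bounded unit of work ... */
    }

    UNLOCK_BIGLOCK(d);
    return 0;
}
#endif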

/* Both domain_hash and domain_list are protected by the domlist_lock. */
#define DOMAIN_HASH_SIZE 256
#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
extern struct domain *domain_hash[DOMAIN_HASH_SIZE];
extern struct domain *domain_list;

#define for_each_domain(_p) \
 for ( (_p) = domain_list; (_p) != NULL; (_p) = (_p)->next_list )

#define for_each_exec_domain(_d,_ed) \
 for ( (_ed) = _d->exec_domain[0]; (_ed) != NULL; (_ed) = (_ed)->ed_next_list )

#define EDF_DONEFPUINIT  0 /* Has the FPU been initialised for this task?    */
#define EDF_USEDFPU      1 /* Has this task used the FPU since last save?    */
#define EDF_GUEST_STTS   2 /* Has the guest OS requested 'stts'?             */
#define DF_CONSTRUCTED   3 /* Has the guest OS been fully built yet?         */
#define DF_IDLETASK      4 /* Is this one of the per-CPU idle domains?       */
#define DF_PRIVILEGED    5 /* Is this domain privileged?                     */
#define DF_PHYSDEV       6 /* May this domain do IO to physical devices?     */
#define EDF_BLOCKED      7 /* Domain is blocked waiting for an event.        */
#define EDF_CTRLPAUSE    8 /* Domain is paused by controller software.       */
#define DF_SHUTDOWN      9 /* Guest shut itself down for some reason.        */
#define DF_CRASHED      10 /* Domain crashed inside Xen, cannot continue.    */
#define DF_DYING        11 /* Death rattle.                                  */
#define EDF_RUNNING     12 /* Currently running on a CPU.                    */
#define EDF_CPUPINNED   13 /* Disables auto-migration.                       */
#define EDF_MIGRATED    14 /* Domain migrated between CPUs.                  */
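
/*
 * Naming convention for the bits above: EDF_* bits index into
 * exec_domain->ed_flags (per virtual CPU), while DF_* bits index into
 * domain->d_flags (domain-wide).  The two sets share a single numbering
 * space.
 */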

static inline int domain_runnable(struct exec_domain *d)
{
    return ( (atomic_read(&d->pausecnt) == 0) &&
             !(d->ed_flags & ((1<<EDF_BLOCKED)|(1<<EDF_CTRLPAUSE))) &&
             !(d->domain->d_flags & ((1<<DF_SHUTDOWN)|(1<<DF_CRASHED))) );
}

static inline void exec_domain_pause(struct exec_domain *ed)
{
    ASSERT(ed != current);
    atomic_inc(&ed->pausecnt);
    domain_sleep(ed);
}

static inline void domain_pause(struct domain *d)
{
    struct exec_domain *ed;

    for_each_exec_domain(d, ed)
        exec_domain_pause(ed);
}

static inline void exec_domain_unpause(struct exec_domain *ed)
{
    ASSERT(ed != current);
    if ( atomic_dec_and_test(&ed->pausecnt) )
        domain_wake(ed);
}

static inline void domain_unpause(struct domain *d)
{
    struct exec_domain *ed;

    for_each_exec_domain(d, ed)
        exec_domain_unpause(ed);
}

static inline void exec_domain_unblock(struct exec_domain *ed)
{
    if ( test_and_clear_bit(EDF_BLOCKED, &ed->ed_flags) )
        domain_wake(ed);
}

static inline void domain_unblock(struct domain *d)
{
    struct exec_domain *ed;

    for_each_exec_domain(d, ed)
        exec_domain_unblock(ed);
}

static inline void domain_pause_by_systemcontroller(struct domain *d)
{
    struct exec_domain *ed;

    for_each_exec_domain(d, ed) {
        ASSERT(ed != current);
        if ( !test_and_set_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
            domain_sleep(ed);
    }
}

static inline void domain_unpause_by_systemcontroller(struct domain *d)
{
    struct exec_domain *ed;

    for_each_exec_domain(d, ed) {
        if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
            domain_wake(ed);
    }
}
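
/*
 * Illustrative sketch, not from the original source: bracketing access to
 * another domain's state with the pause/unpause helpers above.  Note the
 * ASSERT in exec_domain_pause(): a VCPU must not try to pause itself this
 * way.
 */
#if 0
static void example_inspect(struct domain *d)
{
    domain_pause(d);       /* deschedule every VCPU of 'd' */
    /* ... examine or modify the paused domain's state ... */
    domain_unpause(d);     /* wake any VCPU that is runnable again */
}
#endif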

#define IS_PRIV(_d) (test_bit(DF_PRIVILEGED, &(_d)->d_flags))
#define IS_CAPABLE_PHYSDEV(_d) (test_bit(DF_PHYSDEV, &(_d)->d_flags))

#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))

#include <xen/slab.h>
#include <asm/domain.h>

#endif /* __SCHED_H__ */