direct-io.hg

view xen/include/xen/sched.h @ 2693:2584528df9e1

bitkeeper revision 1.1159.123.2 (4177d169N58TtQXn_XJO4xNBKbMQUw)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into freefall.cl.cam.ac.uk:/local/scratch/kaf24/xeno
author kaf24@freefall.cl.cam.ac.uk
date Thu Oct 21 15:10:33 2004 +0000 (2004-10-21)
parents 92fff25bf21e d8e27145f1eb
children 8aa9d487a8dd

#ifndef __SCHED_H__
#define __SCHED_H__

#define STACK_SIZE (2*PAGE_SIZE)

#include <xen/config.h>
#include <xen/types.h>
#include <xen/spinlock.h>
#include <asm/ptrace.h>
#include <xen/smp.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <hypervisor-ifs/hypervisor-if.h>
#include <hypervisor-ifs/dom0_ops.h>
#include <xen/list.h>
#include <xen/time.h>
#include <xen/ac_timer.h>
#include <xen/delay.h>
#include <asm/atomic.h>
#include <asm/current.h>
#include <xen/grant_table.h>

extern unsigned long volatile jiffies;
extern rwlock_t tasklist_lock;

struct domain;

typedef struct event_channel_st
{
#define ECS_FREE         0 /* Channel is available for use.                */
#define ECS_UNBOUND      1 /* Channel is not bound to a particular source. */
#define ECS_INTERDOMAIN  2 /* Channel is bound to another domain.          */
#define ECS_PIRQ         3 /* Channel is bound to a physical IRQ line.     */
#define ECS_VIRQ         4 /* Channel is bound to a virtual IRQ line.      */
    u16 state;
    union {
        struct {
            u16 port;
            struct domain *dom;
        } __attribute__ ((packed)) remote; /* state == ECS_INTERDOMAIN */
        u16 pirq; /* state == ECS_PIRQ */
        u16 virq; /* state == ECS_VIRQ */
    } u;
} event_channel_t;
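
/*
 * Illustrative sketch (not part of the original header): decoding an
 * event_channel_t. Which union member is valid depends entirely on
 * 'state', as noted in the per-member comments above; the helper name
 * is hypothetical.
 */
static inline const char *evtchn_state_name(event_channel_t *chn)
{
    switch ( chn->state )
    {
    case ECS_FREE:        return "free";
    case ECS_UNBOUND:     return "unbound";
    case ECS_INTERDOMAIN: return "interdomain"; /* u.remote is valid */
    case ECS_PIRQ:        return "pirq";        /* u.pirq is valid   */
    case ECS_VIRQ:        return "virq";        /* u.virq is valid   */
    default:              return "unknown";
    }
}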

int  init_event_channels(struct domain *d);
void destroy_event_channels(struct domain *d);

struct domain
{
    /*
     * DO NOT CHANGE THE ORDER OF THE FOLLOWING.
     * Their offsets are hardcoded in entry.S
     */

    u32 processor;              /* 00: current processor */

    /* An unsafe pointer into a shared data area. */
    shared_info_t *shared_info; /* 04: shared data area */

    /*
     * Return vectors pushed to us by guest OS.
     * The stack frame for events is exactly that of an x86 hardware interrupt.
     * The stack frame for a failsafe callback is augmented with saved values
     * for segment registers %ds, %es, %fs and %gs:
     *     %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
     */
    unsigned long event_selector;    /* 08: entry CS  */
    unsigned long event_address;     /* 12: entry EIP */

    /* Saved DS,ES,FS,GS immediately before return to guest OS. */
    unsigned long failsafe_selectors[4]; /* 16-32 */

    /*
     * END OF FIRST CACHELINE. Stuff above is touched a lot!
     */

    unsigned long failsafe_selector; /* 32: entry CS  */
    unsigned long failsafe_address;  /* 36: entry EIP */

    /*
     * From here on things can be added and shuffled without special attention
     */

    domid_t  domain;
    s_time_t create_time;

    spinlock_t       page_alloc_lock; /* protects all the following fields   */
    struct list_head page_list;       /* linked list, of size tot_pages      */
    struct list_head xenpage_list;    /* linked list, of size xenheap_pages  */
    unsigned int     tot_pages;       /* number of pages currently possessed */
    unsigned int     max_pages;       /* maximum value for tot_pages         */
    unsigned int     xenheap_pages;   /* # pages allocated from Xen heap     */

    /* Scheduling. */
    int              shutdown_code; /* code value from OS (if DF_SHUTDOWN)   */
    s_time_t         lastschd;      /* time this domain was last scheduled   */
    s_time_t         lastdeschd;    /* time this domain was last descheduled */
    s_time_t         cpu_time;      /* total CPU time received till now      */
    s_time_t         wokenup;       /* time domain got woken up              */
    struct ac_timer  timer;         /* one-shot timer for timeout values     */
    void            *sched_priv;    /* scheduler-specific data               */

    struct mm_struct mm;

    struct thread_struct thread;
    struct domain *next_list, *next_hash;

    /* Event channel information. */
    event_channel_t *event_channel;
    unsigned int     max_event_channel;
    spinlock_t       event_channel_lock;

    grant_table_t *grant_table;

    /*
     * Interrupt to event-channel mappings. Updates should be protected by
     * the domain's event-channel spinlock. Read accesses can also
     * synchronise on the lock, but races don't usually matter. (A locking
     * sketch follows this struct.)
     */
#define NR_PIRQS 128 /* Put this somewhere sane! */
    u16 pirq_to_evtchn[NR_PIRQS];
    u16 virq_to_evtchn[NR_VIRQS];
    u32 pirq_mask[NR_PIRQS/32];

    /* Physical I/O */
    spinlock_t       pcidev_lock;
    struct list_head pcidev_list;

    /* The following IO bitmap stuff is x86-dependent. */
    u64 io_bitmap_sel; /* Selector to tell us which parts of the IO bitmap
                        * are "interesting" (i.e. have clear bits). */

    /* Handy macro - number of bytes of the IO bitmap, per selector bit. */
#define IOBMP_SELBIT_LWORDS (IO_BITMAP_SIZE / 64)
    unsigned long *io_bitmap; /* Pointer to task's IO bitmap or NULL */

    unsigned long flags;
    unsigned long vm_assist;

    atomic_t refcnt;
    atomic_t pausecnt;
};
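
/*
 * Illustrative sketches (not part of the original header); both helper
 * names are hypothetical.
 */

/* Per the comment in the struct above, updates to the IRQ-to-event-channel
 * maps should hold the domain's event-channel spinlock. */
static inline void bind_virq_to_port(struct domain *d, int virq, u16 port)
{
    spin_lock(&d->event_channel_lock);
    d->virq_to_evtchn[virq] = port;
    spin_unlock(&d->event_channel_lock);
}

/* Assuming (per the IOBMP_SELBIT_LWORDS name) that bit i of io_bitmap_sel
 * covers longwords [i*IOBMP_SELBIT_LWORDS, (i+1)*IOBMP_SELBIT_LWORDS) of
 * io_bitmap, a scan can skip any region whose selector bit is set, since
 * only clear bits mark "interesting" regions. */
static inline int iobmp_region_interesting(struct domain *d, unsigned int i)
{
    return !(d->io_bitmap_sel & (1ULL << i));
}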

struct domain_setup_info
{
    unsigned long v_start;
    unsigned long v_kernstart;
    unsigned long v_kernend;
    unsigned long v_kernentry;

    unsigned int use_writable_pagetables;
};

#include <asm/uaccess.h> /* for KERNEL_DS */

extern struct domain idle0_task;

extern struct domain *idle_task[NR_CPUS];
#define IDLE_DOMAIN_ID   (0x7FFFU)
#define is_idle_task(_p) (test_bit(DF_IDLETASK, &(_p)->flags))

void free_domain_struct(struct domain *d);
struct domain *alloc_domain_struct(void);

#define DOMAIN_DESTRUCTED (1<<31) /* assumes atomic_t is >= 32 bits */
#define put_domain(_d) do {                                  \
    if ( atomic_dec_and_test(&(_d)->refcnt) )                \
        domain_destruct(_d);                                 \
} while ( 0 )

/*
 * Use this when you don't have an existing reference to @d. It returns
 * FALSE if @d is being destructed. (A usage sketch follows the
 * declarations below.)
 */
static always_inline int get_domain(struct domain *d)
{
    atomic_t old, new, seen = d->refcnt;
    do
    {
        old = seen;
        if ( unlikely(_atomic_read(old) & DOMAIN_DESTRUCTED) )
            return 0;
        _atomic_set(new, _atomic_read(old) + 1);
        seen = atomic_compareandswap(old, new, &d->refcnt);
    }
    while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
    return 1;
}

/*
 * Use this when you already have, or are borrowing, a reference to @d.
 * In this case we know that @d cannot be destructed under our feet.
 */
static inline void get_knownalive_domain(struct domain *d)
{
    atomic_inc(&d->refcnt);
    ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED));
}

extern struct domain *do_createdomain(
    domid_t dom_id, unsigned int cpu);
extern int construct_dom0(struct domain *d,
                          unsigned long alloc_start,
                          unsigned long alloc_end,
                          char *image_start, unsigned long image_len,
                          char *initrd_start, unsigned long initrd_len,
                          char *cmdline);
extern int final_setup_guestos(struct domain *d, dom0_builddomain_t *);

struct domain *find_domain_by_id(domid_t dom);
struct domain *find_last_domain(void);
extern void domain_destruct(struct domain *d);
extern void domain_kill(struct domain *d);
extern void domain_crash(void);
extern void domain_shutdown(u8 reason);

void new_thread(struct domain *d,
                unsigned long start_pc,
                unsigned long start_stack,
                unsigned long start_info);

extern unsigned long wait_init_idle;
#define init_idle() clear_bit(smp_processor_id(), &wait_init_idle)
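
/*
 * Illustrative sketch (not part of the original header): the intended
 * get/put refcounting pattern around a lookup. It assumes that
 * find_domain_by_id() returns with a reference already taken (or NULL);
 * the function name below is hypothetical.
 */
static inline void example_poke_domain(domid_t id)
{
    struct domain *d = find_domain_by_id(id);
    if ( d == NULL )
        return;            /* no such domain, or it is being destructed */
    /* ... safely use 'd': the held reference pins it ... */
    put_domain(d);         /* drop the reference; may destruct 'd'      */
}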

#define set_current_state(_s) do { current->state = (_s); } while (0)
void scheduler_init(void);
void schedulers_start(void);
void sched_add_domain(struct domain *d);
void sched_rem_domain(struct domain *d);
long sched_ctl(struct sched_ctl_cmd *);
long sched_adjdom(struct sched_adjdom_cmd *);
int  sched_id(void);
void init_idle_task(void);
void domain_wake(struct domain *d);
void domain_sleep(struct domain *d);

void __enter_scheduler(void);

extern void switch_to(struct domain *prev,
                      struct domain *next);

void domain_init(void);

int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */

void startup_cpu_idle_loop(void);
void continue_cpu_idle_loop(void);

void continue_nonidle_task(void);

/* The task_hash and task_list are protected by the tasklist_lock. */
#define TASK_HASH_SIZE 256
#define TASK_HASH(_id) ((int)(_id)&(TASK_HASH_SIZE-1))
extern struct domain *task_hash[TASK_HASH_SIZE];
extern struct domain *task_list;

#define for_each_domain(_p) \
    for ( (_p) = task_list; (_p) != NULL; (_p) = (_p)->next_list )
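
/*
 * Illustrative sketch (not part of the original header): walking the
 * domain list under the reader side of tasklist_lock, as the comment
 * above requires; the function name is hypothetical.
 */
static inline int count_domains(void)
{
    struct domain *d;
    int n = 0;
    read_lock(&tasklist_lock);
    for_each_domain ( d )
        n++;
    read_unlock(&tasklist_lock);
    return n;
}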

#define DF_DONEFPUINIT  0 /* Has the FPU been initialised for this task?  */
#define DF_USEDFPU      1 /* Has this task used the FPU since last save?  */
#define DF_GUEST_STTS   2 /* Has the guest OS requested 'stts'?           */
#define DF_CONSTRUCTED  3 /* Has the guest OS been fully built yet?       */
#define DF_IDLETASK     4 /* Is this one of the per-CPU idle domains?     */
#define DF_PRIVILEGED   5 /* Is this domain privileged?                   */
#define DF_PHYSDEV      6 /* May this domain do IO to physical devices?   */
#define DF_BLOCKED      7 /* Domain is blocked waiting for an event.      */
#define DF_CTRLPAUSE    8 /* Domain is paused by controller software.     */
#define DF_SHUTDOWN     9 /* Guest shut itself down for some reason.      */
#define DF_CRASHED     10 /* Domain crashed inside Xen, cannot continue.  */
#define DF_DYING       11 /* Death rattle.                                */
#define DF_RUNNING     12 /* Currently running on a CPU.                  */
#define DF_CPUPINNED   13 /* Disables auto-migration.                     */
#define DF_MIGRATED    14 /* Domain migrated between CPUs.                */

static inline int domain_runnable(struct domain *d)
{
    return ( (atomic_read(&d->pausecnt) == 0) &&
             !(d->flags & ((1<<DF_BLOCKED)|(1<<DF_CTRLPAUSE)|
                           (1<<DF_SHUTDOWN)|(1<<DF_CRASHED))) );
}

static inline void domain_pause(struct domain *d)
{
    ASSERT(d != current);
    atomic_inc(&d->pausecnt);
    domain_sleep(d);
}

static inline void domain_unpause(struct domain *d)
{
    ASSERT(d != current);
    if ( atomic_dec_and_test(&d->pausecnt) )
        domain_wake(d);
}

static inline void domain_unblock(struct domain *d)
{
    if ( test_and_clear_bit(DF_BLOCKED, &d->flags) )
        domain_wake(d);
}

static inline void domain_pause_by_systemcontroller(struct domain *d)
{
    ASSERT(d != current);
    if ( !test_and_set_bit(DF_CTRLPAUSE, &d->flags) )
        domain_sleep(d);
}

static inline void domain_unpause_by_systemcontroller(struct domain *d)
{
    if ( test_and_clear_bit(DF_CTRLPAUSE, &d->flags) )
        domain_wake(d);
}
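
/*
 * Illustrative sketch (not part of the original header): pausing a foreign
 * domain around an inspection, then resuming it. A domain must never pause
 * itself (see the ASSERTs above); the function name is hypothetical.
 */
static inline void example_inspect_paused(struct domain *d)
{
    domain_pause(d);    /* raises pausecnt, so domain_runnable(d) is now 0 */
    /* ... inspect 'd' while it cannot be scheduled ... */
    domain_unpause(d);  /* wakes 'd' once pausecnt drops back to zero      */
}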

#define IS_PRIV(_d) (test_bit(DF_PRIVILEGED, &(_d)->flags))
#define IS_CAPABLE_PHYSDEV(_d) (test_bit(DF_PHYSDEV, &(_d)->flags))

#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))

#include <xen/slab.h>
#include <asm/domain.h>

#endif /* __SCHED_H__ */