ia64/xen-unstable

annotate xen/include/xen/sched.h @ 19738:8dd5c3cae086

x86 hvm: move dirty_vram into struct hvm_domain

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:04:03 2009 +0100 (2009-06-05)
parents 6705898f768d
children 2f9e1348aa98
rev   line source
kaf24@3677 1
kaf24@1506 2 #ifndef __SCHED_H__
kaf24@1506 3 #define __SCHED_H__
kaf24@1210 4
kaf24@1210 5 #include <xen/config.h>
kaf24@1210 6 #include <xen/types.h>
kaf24@1210 7 #include <xen/spinlock.h>
kaf24@1210 8 #include <xen/smp.h>
ack@13292 9 #include <xen/shared.h>
kaf24@2789 10 #include <public/xen.h>
kfraser@11296 11 #include <public/domctl.h>
kaf24@9022 12 #include <public/vcpu.h>
kfraser@15819 13 #include <public/xsm/acm.h>
cl349@5294 14 #include <xen/time.h>
kaf24@8586 15 #include <xen/timer.h>
kaf24@2344 16 #include <xen/grant_table.h>
kaf24@8468 17 #include <xen/rangeset.h>
kaf24@3674 18 #include <asm/domain.h>
ack@9625 19 #include <xen/xenoprof.h>
kfraser@14058 20 #include <xen/rcupdate.h>
kaf24@9897 21 #include <xen/irq.h>
keir@19147 22 #include <xen/mm.h>
kaf24@1210 23
ack@13298 24 #ifdef CONFIG_COMPAT
ack@13298 25 #include <compat/vcpu.h>
ack@13298 26 DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t);
ack@13298 27 #endif
ack@13298 28
kaf24@2806 29 /* A global pointer to the initial domain (DOM0). */
kaf24@2806 30 extern struct domain *dom0;
kaf24@2806 31
ack@13292 32 #ifndef CONFIG_COMPAT
keir@19268 33 #define BITS_PER_EVTCHN_WORD(d) BITS_PER_LONG
ack@13292 34 #else
keir@19268 35 #define BITS_PER_EVTCHN_WORD(d) (has_32bit_shinfo(d) ? 32 : BITS_PER_LONG)
ack@13292 36 #endif
keir@19370 37 #define MAX_EVTCHNS(d) (BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d))
kaf24@5308 38 #define EVTCHNS_PER_BUCKET 128
ack@13292 39 #define NR_EVTCHN_BUCKETS (NR_EVENT_CHANNELS / EVTCHNS_PER_BUCKET)
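A worked illustration of these limits (not part of the header): BITS_PER_EVTCHN_WORD(d) is 64 for a native 64-bit guest and 32 for a 32-bit compat guest, so the sizing comes out as follows.

/*
 * Worked example (illustrative only):
 *   64-bit guest:  MAX_EVTCHNS(d) = 64 * 64 = 4096 event channels
 *   32-bit compat: MAX_EVTCHNS(d) = 32 * 32 = 1024 event channels
 * Assuming NR_EVENT_CHANNELS is 4096 on a 64-bit build, the struct evtchn
 * pointers are split into 4096 / 128 = 32 buckets.
 */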
kaf24@5308 40
kaf24@5308 41 struct evtchn
kaf24@1210 42 {
kaf24@1218 43 #define ECS_FREE 0 /* Channel is available for use. */
cl349@3297 44 #define ECS_RESERVED 1 /* Channel is reserved. */
cl349@3297 45 #define ECS_UNBOUND 2 /* Channel is waiting to bind to a remote domain. */
cl349@3297 46 #define ECS_INTERDOMAIN 3 /* Channel is bound to another domain. */
cl349@3297 47 #define ECS_PIRQ 4 /* Channel is bound to a physical IRQ line. */
cl349@3297 48 #define ECS_VIRQ 5 /* Channel is bound to a virtual IRQ line. */
cl349@3297 49 #define ECS_IPI 6 /* Channel is bound to a virtual IPI line. */
kfraser@10977 50 u8 state; /* ECS_* */
kfraser@10977 51 u8 consumer_is_xen; /* Consumed by Xen or by guest? */
kaf24@5308 52 u16 notify_vcpu_id; /* VCPU for local delivery notification */
kaf24@1218 53 union {
kaf24@1218 54 struct {
kaf24@2710 55 domid_t remote_domid;
kaf24@5308 56 } unbound; /* state == ECS_UNBOUND */
kaf24@2710 57 struct {
kaf24@5308 58 u16 remote_port;
kaf24@5308 59 struct domain *remote_dom;
kaf24@5308 60 } interdomain; /* state == ECS_INTERDOMAIN */
kaf24@5308 61 u16 pirq; /* state == ECS_PIRQ */
kaf24@5308 62 u16 virq; /* state == ECS_VIRQ */
kaf24@1218 63 } u;
kfraser@15819 64 #ifdef FLASK_ENABLE
kfraser@15819 65 void *ssid;
kfraser@15819 66 #endif
kaf24@5308 67 };
kaf24@1210 68
kaf24@5308 69 int evtchn_init(struct domain *d);
kaf24@5308 70 void evtchn_destroy(struct domain *d);
kaf24@1218 71
kaf24@5289 72 struct vcpu
kaf24@1210 73 {
kaf24@4877 74 int vcpu_id;
kaf24@1210 75
kaf24@4798 76 int processor;
kaf24@1210 77
kaf24@4798 78 vcpu_info_t *vcpu_info;
kaf24@4798 79
kaf24@4798 80 struct domain *domain;
kaf24@7364 81
kaf24@7364 82 struct vcpu *next_in_list;
cl349@2919 83
kfraser@14340 84 uint64_t periodic_period;
kfraser@14340 85 uint64_t periodic_last_event;
kfraser@14340 86 struct timer periodic_timer;
kfraser@14340 87 struct timer singleshot_timer;
cl349@2938 88
kaf24@9262 89 struct timer poll_timer; /* timeout for SCHEDOP_poll */
kaf24@9262 90
kaf24@4798 91 void *sched_priv; /* scheduler-specific data */
cl349@2919 92
kaf24@9022 93 struct vcpu_runstate_info runstate;
ack@13298 94 #ifndef CONFIG_COMPAT
ack@13298 95 # define runstate_guest(v) ((v)->runstate_guest)
kfraser@12477 96 XEN_GUEST_HANDLE(vcpu_runstate_info_t) runstate_guest; /* guest address */
ack@13298 97 #else
ack@13298 98 # define runstate_guest(v) ((v)->runstate_guest.native)
ack@13298 99 union {
ack@13298 100 XEN_GUEST_HANDLE(vcpu_runstate_info_t) native;
ack@13298 101 XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t) compat;
ack@13298 102 } runstate_guest; /* guest address */
ack@13298 103 #endif
kaf24@9022 104
keir@19313 105 /* Last time the VCPU was scheduled out. */
keir@19313 106 uint64_t last_run_time;
keir@19313 107
kfraser@14657 108 /* Has the FPU been initialised? */
kfraser@14657 109 bool_t fpu_initialised;
kfraser@14657 110 /* Has the FPU been used since it was last saved? */
kfraser@14657 111 bool_t fpu_dirtied;
kfraser@14657 112 /* Initialization completed for this VCPU? */
kfraser@14657 113 bool_t is_initialised;
kfraser@14657 114 /* Currently running on a CPU? */
kfraser@14657 115 bool_t is_running;
keir@17971 116 /* MCE callback pending for this VCPU? */
keir@17971 117 bool_t mce_pending;
kfraser@14661 118 /* NMI callback pending for this VCPU? */
kfraser@14661 119 bool_t nmi_pending;
keir@17971 120
keir@17971 121 /* Higher-priority traps may interrupt lower-priority traps;
keir@17971 122 * lower-priority traps wait until higher-priority traps have finished.
keir@17971 123 * Note: this concept is known as the "system priority level" (spl)
keir@17971 124 * in the UNIX world. */
keir@17971 125 uint16_t old_trap_priority;
keir@17971 126 uint16_t trap_priority;
keir@17971 127 #define VCPU_TRAP_NONE 0
keir@17971 128 #define VCPU_TRAP_NMI 1
keir@17971 129 #define VCPU_TRAP_MCE 2
keir@17971 130
kfraser@14705 131 /* Require shutdown to be deferred for some asynchronous operation? */
kfraser@14705 132 bool_t defer_shutdown;
kfraser@14705 133 /* VCPU is paused following shutdown request (d->is_shutting_down)? */
kfraser@14705 134 bool_t paused_for_shutdown;
keir@16896 135 /* VCPU affinity is temporarily locked from controller changes? */
keir@16896 136 bool_t affinity_locked;
kfraser@14657 137
keir@18441 138 /*
keir@18441 139 * > 0: a single port is being polled;
keir@18441 140 * = 0: nothing is being polled (vcpu should be clear in d->poll_mask);
keir@18441 141 * < 0: multiple ports may be being polled.
keir@18441 142 */
keir@18441 143 int poll_evtchn;
keir@18441 144
kfraser@14663 145 unsigned long pause_flags;
kfraser@14661 146 atomic_t pause_count;
kfraser@10655 147
keir@18208 148 /* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
kaf24@4798 149 u16 virq_to_evtchn[NR_VIRQS];
keir@18208 150 spinlock_t virq_lock;
cl349@2929 151
kaf24@8512 152 /* Bitmask of CPUs on which this VCPU may run. */
kaf24@8507 153 cpumask_t cpu_affinity;
keir@17965 154 /* Used to change affinity temporarily. */
keir@17965 155 cpumask_t cpu_affinity_tmp;
cl349@4845 156
kaf24@8512 157 /* Bitmask of CPUs which are holding onto this VCPU's state. */
kaf24@8512 158 cpumask_t vcpu_dirty_cpumask;
kaf24@8512 159
kaf24@5289 160 struct arch_vcpu arch;
cl349@2919 161 };
cl349@2919 162
kaf24@4193 163 /* Per-domain lock can be recursively acquired in fault handlers. */
keir@17449 164 #define domain_lock(d) spin_lock_recursive(&(d)->domain_lock)
keir@17449 165 #define domain_unlock(d) spin_unlock_recursive(&(d)->domain_lock)
keir@17449 166 #define domain_is_locked(d) spin_is_locked(&(d)->domain_lock)
cl349@2998 167
kaf24@3677 168 struct domain
kaf24@3677 169 {
kaf24@4877 170 domid_t domain_id;
kaf24@1210 171
cl349@2944 172 shared_info_t *shared_info; /* shared data area */
cl349@2921 173
keir@17449 174 spinlock_t domain_lock;
kaf24@1210 175
kaf24@1640 176 spinlock_t page_alloc_lock; /* protects all the following fields */
keir@19134 177 struct page_list_head page_list; /* linked list, of size tot_pages */
keir@19134 178 struct page_list_head xenpage_list; /* linked list (size xenheap_pages) */
kaf24@1640 179 unsigned int tot_pages; /* number of pages currently possessed */
kaf24@1640 180 unsigned int max_pages; /* maximum value for tot_pages */
kaf24@1640 181 unsigned int xenheap_pages; /* # pages allocated from Xen heap */
kaf24@1210 182
kaf24@1210 183 /* Scheduling. */
kaf24@1506 184 void *sched_priv; /* scheduler-specific data */
kaf24@1210 185
kaf24@4798 186 struct domain *next_in_list;
kaf24@4798 187 struct domain *next_in_hashbucket;
kaf24@1210 188
kaf24@8456 189 struct list_head rangesets;
kaf24@8456 190 spinlock_t rangesets_lock;
kaf24@8456 191
kaf24@1210 192 /* Event channel information. */
kaf24@5308 193 struct evtchn *evtchn[NR_EVTCHN_BUCKETS];
keir@18602 194 spinlock_t event_lock;
kaf24@1210 195
kaf24@9723 196 struct grant_table *grant_table;
kaf24@2322 197
kaf24@1218 198 /*
kaf24@1218 199 * Interrupt to event-channel mappings. Updates should be protected by the
kaf24@1218 200 * domain's event-channel spinlock. Read accesses can also synchronise on
kaf24@1218 201 * the lock, but races don't usually matter.
kaf24@1218 202 */
keir@19650 203 unsigned int nr_pirqs;
keir@19650 204 u16 *pirq_to_evtchn;
keir@19650 205 unsigned long *pirq_mask;
kaf24@1218 206
kaf24@8468 207 /* I/O capabilities (access to IRQs and memory-mapped I/O). */
kaf24@8468 208 struct rangeset *iomem_caps;
kaf24@8468 209 struct rangeset *irq_caps;
kaf24@8468 210
keir@14635 211 /* Is this an HVM guest? */
keir@14635 212 bool_t is_hvm;
keir@17726 213 /* Does this guest need iommu mappings? */
keir@17726 214 bool_t need_iommu;
keir@14635 215 /* Is this guest fully privileged (aka dom0)? */
keir@14635 216 bool_t is_privileged;
keir@16856 217 /* Which guest this guest has privileges on. */
keir@16856 218 struct domain *target;
keir@14635 219 /* Is this guest being debugged by dom0? */
keir@14635 220 bool_t debugger_attached;
kfraser@14642 221 /* Is this guest dying (i.e., a zombie)? */
kfraser@15825 222 enum { DOMDYING_alive, DOMDYING_dying, DOMDYING_dead } is_dying;
kfraser@14642 223 /* Domain is paused by controller software? */
kfraser@14642 224 bool_t is_paused_by_controller;
keir@17187 225 /* Domain's VCPUs are pinned 1:1 to physical CPUs? */
keir@17187 226 bool_t is_pinned;
kfraser@12210 227
keir@18441 228 /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
keir@18441 229 DECLARE_BITMAP(poll_mask, MAX_VIRT_CPUS);
keir@18441 230
kfraser@14642 231 /* Guest has shut down (inc. reason code)? */
kfraser@14705 232 spinlock_t shutdown_lock;
kfraser@14705 233 bool_t is_shutting_down; /* in process of shutting down? */
kfraser@14705 234 bool_t is_shut_down; /* fully shut down? */
kfraser@14642 235 int shutdown_code;
kfraser@14642 236
keir@17965 237 /* If this is not 0, send suspend notification here instead of
keir@17965 238 * raising DOM_EXC */
keir@17965 239 int suspend_evtchn;
keir@17965 240
kfraser@14642 241 atomic_t pause_count;
kfraser@10655 242
kaf24@4798 243 unsigned long vm_assist;
kaf24@1210 244
kaf24@4798 245 atomic_t refcnt;
cl349@2919 246
kaf24@5289 247 struct vcpu *vcpu[MAX_VIRT_CPUS];
kaf24@3677 248
kaf24@5301 249 /* Bitmask of CPUs which are holding onto this domain's state. */
kaf24@8512 250 cpumask_t domain_dirty_cpumask;
kaf24@4373 251
kaf24@3677 252 struct arch_domain arch;
smh22@5514 253
smh22@5514 254 void *ssid; /* sHype security subject identifier */
kaf24@7388 255
kaf24@7388 256 /* Control-plane tools handle for this domain. */
kaf24@7388 257 xen_domain_handle_t handle;
ack@9625 258
kaf24@9629 259 /* OProfile support. */
kaf24@9629 260 struct xenoprof *xenoprof;
kaf24@10506 261 int32_t time_offset_seconds;
kfraser@14058 262
kfraser@14058 263 struct rcu_head rcu;
keir@16064 264
keir@16064 265 /*
keir@16064 266 * Hypercall deadlock avoidance lock. Used if a hypercall might
keir@16064 267 * cause a deadlock. Acquirers don't spin waiting; they preempt.
keir@16064 268 */
keir@16064 269 spinlock_t hypercall_deadlock_mutex;
keir@17571 270
keir@19646 271 /* transcendent memory, auto-allocated on first tmem op by each domain */
keir@19646 272 void *tmem;
kaf24@1210 273 };
kaf24@1210 274
cl349@2448 275 struct domain_setup_info
cl349@2448 276 {
kaf24@4452 277 /* Initialised by caller. */
kaf24@4452 278 unsigned long image_addr;
kaf24@4452 279 unsigned long image_len;
kaf24@4452 280 /* Initialised by loader: Public. */
cl349@2448 281 unsigned long v_start;
cl349@4447 282 unsigned long v_end;
cl349@2448 283 unsigned long v_kernstart;
cl349@2448 284 unsigned long v_kernend;
cl349@2448 285 unsigned long v_kernentry;
ian@11284 286 #define PAEKERN_no 0
ian@11284 287 #define PAEKERN_yes 1
ian@11284 288 #define PAEKERN_extended_cr3 2
kfraser@13016 289 #define PAEKERN_bimodal 3
ian@11284 290 unsigned int pae_kernel;
kaf24@4452 291 /* Initialised by loader: Private. */
cl349@10048 292 unsigned long elf_paddr_offset;
kaf24@4452 293 unsigned int load_symtab;
cl349@4447 294 unsigned long symtab_addr;
cl349@4447 295 unsigned long symtab_len;
cl349@2448 296 };
cl349@2448 297
kaf24@8533 298 extern struct vcpu *idle_vcpu[NR_CPUS];
kaf24@2110 299 #define IDLE_DOMAIN_ID (0x7FFFU)
kaf24@8533 300 #define is_idle_domain(d) ((d)->domain_id == IDLE_DOMAIN_ID)
kaf24@8533 301 #define is_idle_vcpu(v) (is_idle_domain((v)->domain))
kaf24@1210 302
kaf24@8611 303 #define DOMAIN_DESTROYED (1<<31) /* assumes atomic_t is >= 32 bits */
kaf24@1505 304 #define put_domain(_d) \
kaf24@8611 305 if ( atomic_dec_and_test(&(_d)->refcnt) ) domain_destroy(_d)
kaf24@2298 306
kaf24@2298 307 /*
kaf24@2298 308 * Use this when you don't have an existing reference to @d. It returns
kaf24@8611 309 * FALSE if @d is being destroyed.
kaf24@2298 310 */
kaf24@2344 311 static always_inline int get_domain(struct domain *d)
kaf24@1505 312 {
kaf24@2344 313 atomic_t old, new, seen = d->refcnt;
kaf24@2344 314 do
kaf24@2344 315 {
kaf24@2344 316 old = seen;
kaf24@8611 317 if ( unlikely(_atomic_read(old) & DOMAIN_DESTROYED) )
kaf24@2344 318 return 0;
kaf24@2344 319 _atomic_set(new, _atomic_read(old) + 1);
kaf24@2344 320 seen = atomic_compareandswap(old, new, &d->refcnt);
kaf24@2344 321 }
kaf24@2344 322 while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
kaf24@2344 323 return 1;
kaf24@1505 324 }
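A minimal usage sketch of this reference-counting pair (illustrative only; example_use_domain() is a made-up helper, not part of Xen):

static void example_use_domain(struct domain *d)
{
    if ( !get_domain(d) )
        return;                 /* @d is being destroyed; leave it alone */

    /* ... @d cannot be freed while we hold the reference ... */

    put_domain(d);              /* drops the ref; may call domain_destroy() */
}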
kaf24@2298 325
kaf24@2298 326 /*
kaf24@2298 327 * Use this when you already have, or are borrowing, a reference to @d.
kaf24@8611 328 * In this case we know that @d cannot be destroyed under our feet.
kaf24@2298 329 */
kaf24@2298 330 static inline void get_knownalive_domain(struct domain *d)
kaf24@2298 331 {
kaf24@2298 332 atomic_inc(&d->refcnt);
kaf24@8611 333 ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTROYED));
kaf24@2298 334 }
kaf24@4696 335
kfraser@14196 336 /* Obtain a reference to the currently-running domain. */
kfraser@14196 337 static inline struct domain *get_current_domain(void)
kfraser@14196 338 {
kfraser@14196 339 struct domain *d = current->domain;
kfraser@14196 340 get_knownalive_domain(d);
kfraser@14196 341 return d;
kfraser@14196 342 }
kfraser@14196 343
kfraser@14911 344 struct domain *domain_create(
kfraser@14911 345 domid_t domid, unsigned int domcr_flags, ssidref_t ssidref);
kfraser@12210 346 /* DOMCRF_hvm: Create an HVM domain, as opposed to a PV domain. */
keir@19266 347 #define _DOMCRF_hvm 0
keir@19266 348 #define DOMCRF_hvm (1U<<_DOMCRF_hvm)
keir@17898 349 /* DOMCRF_hap: Create a domain with hardware-assisted paging. */
keir@19266 350 #define _DOMCRF_hap 1
keir@19266 351 #define DOMCRF_hap (1U<<_DOMCRF_hap)
keir@19266 352 /* DOMCRF_s3_integrity: Create a domain whose memory integrity is
keir@19266 353 protected by tboot across S3. */
keir@19266 354 #define _DOMCRF_s3_integrity 2
keir@19266 355 #define DOMCRF_s3_integrity (1U<<_DOMCRF_s3_integrity)
keir@17898 356 /* DOMCRF_dummy: Create a dummy domain (not scheduled; not on domain list) */
keir@19266 357 #define _DOMCRF_dummy 3
keir@19266 358 #define DOMCRF_dummy (1U<<_DOMCRF_dummy)
kfraser@12210 359
kfraser@14059 360 /*
kfraser@14191 361 * rcu_lock_domain_by_id() is more efficient than get_domain_by_id().
kfraser@14059 362 * This is the preferred function if the returned domain reference
kfraser@14059 363 * is short lived, but it cannot be used if the domain reference needs
kfraser@14059 364 * to be kept beyond the current scope (e.g., across a softirq).
kfraser@14191 365 * The returned domain reference must be discarded using rcu_unlock_domain().
kfraser@14059 366 */
kfraser@14191 367 struct domain *rcu_lock_domain_by_id(domid_t dom);
kfraser@14059 368
keir@18574 369 /*
keir@18574 370 * As above function, but accounts for current domain context:
keir@18574 371 * - Translates target DOMID_SELF into caller's domain id; and
keir@18574 372 * - Checks that caller has permission to act on the target domain.
keir@18574 373 */
keir@18574 374 int rcu_lock_target_domain_by_id(domid_t dom, struct domain **d);
keir@18574 375
kfraser@14191 376 /* Finish a RCU critical region started by rcu_lock_domain_by_id(). */
kfraser@14191 377 static inline void rcu_unlock_domain(struct domain *d)
kfraser@14059 378 {
kfraser@14059 379 rcu_read_unlock(&domlist_read_lock);
kfraser@14059 380 }
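A short sketch of the short-lived lookup pattern described above (example_query_domain() is a hypothetical helper; -ESRCH assumes <xen/errno.h>):

static int example_query_domain(domid_t dom)
{
    struct domain *d = rcu_lock_domain_by_id(dom);

    if ( d == NULL )
        return -ESRCH;          /* no such domain */

    /* ... read-only, short-lived use of @d; do not stash the pointer ... */

    rcu_unlock_domain(d);
    return 0;
}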
kfraser@14059 381
kfraser@14197 382 static inline struct domain *rcu_lock_domain(struct domain *d)
kfraser@14197 383 {
kfraser@14197 384 rcu_read_lock(d);
kfraser@14197 385 return d;
kfraser@14197 386 }
kfraser@14197 387
kfraser@14196 388 static inline struct domain *rcu_lock_current_domain(void)
kfraser@14196 389 {
kfraser@14197 390 return rcu_lock_domain(current->domain);
kfraser@14196 391 }
kfraser@14196 392
kaf24@13663 393 struct domain *get_domain_by_id(domid_t dom);
kfraser@12210 394 void domain_destroy(struct domain *d);
kfraser@15825 395 int domain_kill(struct domain *d);
kfraser@12210 396 void domain_shutdown(struct domain *d, u8 reason);
kfraser@14705 397 void domain_resume(struct domain *d);
kfraser@12210 398 void domain_pause_for_debugger(void);
cl349@4339 399
kfraser@14705 400 int vcpu_start_shutdown_deferral(struct vcpu *v);
kfraser@14705 401 void vcpu_end_shutdown_deferral(struct vcpu *v);
kfraser@14705 402
cl349@4339 403 /*
kaf24@7809 404 * Mark specified domain as crashed. This function always returns, even if the
kaf24@7809 405 * caller is the specified domain. The domain is not synchronously descheduled
kaf24@7809 406 * from any processor.
cl349@4339 407 */
kfraser@12210 408 void __domain_crash(struct domain *d);
sos22@8660 409 #define domain_crash(d) do { \
kaf24@8663 410 printk("domain_crash called from %s:%d\n", __FILE__, __LINE__); \
sos22@8660 411 __domain_crash(d); \
sos22@8660 412 } while (0)
cl349@4339 413
cl349@4339 414 /*
cl349@4339 415 * Mark current domain as crashed and synchronously deschedule from the local
cl349@4339 416 * processor. This function never returns.
cl349@4339 417 */
kfraser@12210 418 void __domain_crash_synchronous(void) __attribute__((noreturn));
sos22@8660 419 #define domain_crash_synchronous() do { \
kaf24@8663 420 printk("domain_crash_sync called from %s:%d\n", __FILE__, __LINE__); \
sos22@8660 421 __domain_crash_synchronous(); \
sos22@8660 422 } while (0)
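A hedged sketch of how callers typically choose between the two variants (example_validate() and its arguments are invented for illustration):

static void example_validate(struct domain *d, int is_bad)
{
    if ( !is_bad )
        return;

    if ( d == current->domain )
        domain_crash_synchronous();   /* crashing ourselves: never returns */
    else
        domain_crash(d);              /* crashing another domain: returns */
}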
kaf24@1210 423
kaf24@1210 424 #define set_current_state(_s) do { current->state = (_s); } while (0)
kaf24@1210 425 void scheduler_init(void);
ack@11622 426 int sched_init_vcpu(struct vcpu *v, unsigned int processor);
kfraser@12260 427 void sched_destroy_vcpu(struct vcpu *v);
kfraser@12260 428 int sched_init_domain(struct domain *d);
kfraser@12260 429 void sched_destroy_domain(struct domain *d);
kfraser@11296 430 long sched_adjust(struct domain *, struct xen_domctl_scheduler_op *);
kaf24@9224 431 int sched_id(void);
keir@19462 432 void sched_tick_suspend(void);
keir@19462 433 void sched_tick_resume(void);
kaf24@6445 434 void vcpu_wake(struct vcpu *d);
kaf24@6445 435 void vcpu_sleep_nosync(struct vcpu *d);
kaf24@6445 436 void vcpu_sleep_sync(struct vcpu *d);
kaf24@1210 437
kaf24@4373 438 /*
kaf24@6471 439 * Force synchronisation of the given VCPU's state. If it is currently descheduled,
kaf24@6471 440 * this call will ensure that all its state is committed to memory and that
kaf24@6471 441 * no CPU is using critical state (e.g., page tables) belonging to the VCPU.
kaf24@4373 442 */
kfraser@12210 443 void sync_vcpu_execstate(struct vcpu *v);
kaf24@4373 444
kaf24@6214 445 /*
kaf24@8517 446 * Called by the scheduler to switch to another VCPU. This function must
kaf24@8517 447 * call context_saved(@prev) when the local CPU is no longer running in
kaf24@8517 448 * @prev's context, and that context is saved to memory. Alternatively, if
kaf24@8517 449 * implementing lazy context switching, it suffices to ensure that invoking
kaf24@8517 450 * sync_vcpu_execstate() will switch and commit @prev's state.
kaf24@6214 451 */
kfraser@12210 452 void context_switch(
kaf24@5289 453 struct vcpu *prev,
kaf24@5289 454 struct vcpu *next);
kaf24@1210 455
kaf24@6214 456 /*
kaf24@8517 457 * As described above, context_switch() must call this function when the
kaf24@8517 458 * local CPU is no longer running in @prev's context, and @prev's context is
kaf24@8517 459 * saved to memory. Alternatively, if implementing lazy context switching,
kaf24@8517 460 * ensure that invoking sync_vcpu_execstate() will switch and commit @prev.
kaf24@6214 461 */
kfraser@12210 462 void context_saved(struct vcpu *prev);
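A deliberately simplified sketch of the contract above; the real implementations live in per-architecture code (e.g. xen/arch/x86/domain.c), and arch_save_vcpu()/arch_load_vcpu() are hypothetical helpers standing in for the architecture-specific work:

void example_context_switch(struct vcpu *prev, struct vcpu *next)
{
    local_irq_disable();
    arch_save_vcpu(prev);       /* commit @prev's register state to memory */
    arch_load_vcpu(next);       /* install @next's state on this CPU */
    context_saved(prev);        /* @prev may now be run on another CPU */
    /* ... resume execution in @next's context; do not return to @prev ... */
}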
kaf24@6214 463
kaf24@6214 464 /* Called by the scheduler to continue running the current VCPU. */
kfraser@12210 465 void continue_running(
kaf24@5289 466 struct vcpu *same);
kaf24@4696 467
kaf24@1210 468 void startup_cpu_idle_loop(void);
kaf24@1210 469
kaf24@9068 470 /*
kaf24@9068 471 * Creates a continuation to resume the current hypercall. The caller should
kaf24@9068 472 * return immediately, propagating the value returned from this invocation.
kaf24@9068 473 * The format string specifies the types and number of hypercall arguments.
kaf24@9068 474 * It contains one character per argument as follows:
kaf24@9068 475 * 'i' [unsigned] {char, int}
kaf24@9068 476 * 'l' [unsigned] long
kaf24@9873 477 * 'h' guest handle (XEN_GUEST_HANDLE(foo))
kaf24@9068 478 */
kaf24@9068 479 unsigned long hypercall_create_continuation(
kaf24@9068 480 unsigned int op, const char *format, ...);
kaf24@3702 481
kaf24@6110 482 #define hypercall_preempt_check() (unlikely( \
kaf24@6110 483 softirq_pending(smp_processor_id()) | \
kaf24@10354 484 local_events_need_delivery() \
kaf24@4639 485 ))
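A hedged sketch of how the two facilities above are typically combined in a long-running hypercall; example_long_hypercall() and __HYPERVISOR_example_op are invented names, and the "ll" format follows the 'l' == [unsigned] long convention described above:

static long example_long_hypercall(unsigned long start, unsigned long nr)
{
    unsigned long i;

    for ( i = 0; i < nr; i++ )
    {
        if ( hypercall_preempt_check() )
            return hypercall_create_continuation(
                __HYPERVISOR_example_op, "ll", start + i, nr - i);

        /* ... perform one unit of work for item (start + i) ... */
    }

    return 0;
}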
kaf24@3091 486
kfraser@14058 487 /* Protect updates/reads (resp.) of domain_list and domain_hash. */
kfraser@14058 488 extern spinlock_t domlist_update_lock;
kfraser@14058 489 extern rcu_read_lock_t domlist_read_lock;
kfraser@14058 490
kaf24@2806 491 extern struct domain *domain_list;
kaf24@1210 492
kfraser@14058 493 /* Caller must hold the domlist_read_lock or domlist_update_lock. */
kaf24@7364 494 #define for_each_domain(_d) \
kfraser@14058 495 for ( (_d) = rcu_dereference(domain_list); \
kaf24@7364 496 (_d) != NULL; \
kfraser@14058 497 (_d) = rcu_dereference((_d)->next_in_list))
kaf24@1210 498
kaf24@7364 499 #define for_each_vcpu(_d,_v) \
kaf24@7364 500 for ( (_v) = (_d)->vcpu[0]; \
kaf24@7364 501 (_v) != NULL; \
kaf24@7364 502 (_v) = (_v)->next_in_list )
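A brief illustrative walk using these iterators, taking the RCU read lock as the comment above requires (example_count_running_vcpus() is a made-up helper):

static unsigned int example_count_running_vcpus(void)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int nr = 0;

    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
        for_each_vcpu ( d, v )
            if ( v->is_running )
                nr++;
    rcu_read_unlock(&domlist_read_lock);

    return nr;
}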
kaf24@1505 503
kaf24@4877 504 /*
kfraser@14663 505 * Per-VCPU pause flags.
kaf24@4877 506 */
kaf24@4877 507 /* Domain is blocked waiting for an event. */
kfraser@14663 508 #define _VPF_blocked 0
kfraser@14663 509 #define VPF_blocked (1UL<<_VPF_blocked)
kaf24@10357 510 /* VCPU is offline. */
kfraser@14663 511 #define _VPF_down 1
kfraser@14663 512 #define VPF_down (1UL<<_VPF_down)
kfraser@11567 513 /* VCPU is blocked awaiting an event to be consumed by Xen. */
kfraser@14663 514 #define _VPF_blocked_in_xen 2
kfraser@14663 515 #define VPF_blocked_in_xen (1UL<<_VPF_blocked_in_xen)
kfraser@11567 516 /* VCPU affinity has changed: migrating to a new CPU. */
kfraser@14663 517 #define _VPF_migrating 3
kfraser@14663 518 #define VPF_migrating (1UL<<_VPF_migrating)
cl349@2919 519
kaf24@8517 520 static inline int vcpu_runnable(struct vcpu *v)
kaf24@1505 521 {
kfraser@14663 522 return !(v->pause_flags |
kfraser@14663 523 atomic_read(&v->pause_count) |
kfraser@14663 524 atomic_read(&v->domain->pause_count));
cl349@2919 525 }
cl349@2919 526
keir@18441 527 void vcpu_unblock(struct vcpu *v);
kaf24@5289 528 void vcpu_pause(struct vcpu *v);
ack@13011 529 void vcpu_pause_nosync(struct vcpu *v);
cl349@5246 530 void domain_pause(struct domain *d);
kaf24@5289 531 void vcpu_unpause(struct vcpu *v);
cl349@5246 532 void domain_unpause(struct domain *d);
cl349@5246 533 void domain_pause_by_systemcontroller(struct domain *d);
cl349@5246 534 void domain_unpause_by_systemcontroller(struct domain *d);
kaf24@6014 535 void cpu_init(void);
cl349@2919 536
kfraser@14340 537 void vcpu_force_reschedule(struct vcpu *v);
keir@18477 538 void cpu_disable_scheduler(void);
kaf24@8511 539 int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);
keir@16896 540 int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity);
keir@18512 541 int vcpu_locked_change_affinity(struct vcpu *v, cpumask_t *affinity);
keir@16896 542 void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity);
kaf24@8511 543
kaf24@9022 544 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
keir@18908 545 uint64_t get_cpu_idle_time(unsigned int cpu);
kaf24@9022 546
kfraser@12210 547 #define IS_PRIV(_d) ((_d)->is_privileged)
keir@16856 548 #define IS_PRIV_FOR(_d, _t) (IS_PRIV(_d) || ((_d)->target && (_d)->target == (_t)))
kaf24@1210 549
kaf24@2073 550 #define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
kaf24@2073 551
kfraser@12238 552 #define is_hvm_domain(d) ((d)->is_hvm)
kfraser@12238 553 #define is_hvm_vcpu(v) (is_hvm_domain(v->domain))
keir@17726 554 #define need_iommu(d) ((d)->need_iommu && !(d)->is_hvm)
kfraser@12238 555
keir@19504 556 void set_vcpu_migration_delay(unsigned int delay);
keir@19504 557 unsigned int get_vcpu_migration_delay(void);
keir@19504 558
keir@19417 559 extern int sched_smt_power_savings;
keir@19417 560
keir@15948 561 extern enum cpufreq_controller {
keir@18181 562 FREQCTL_none, FREQCTL_dom0_kernel, FREQCTL_xen
keir@15948 563 } cpufreq_controller;
keir@15948 564
kaf24@1506 565 #endif /* __SCHED_H__ */
kaf24@3914 566
kaf24@3914 567 /*
kaf24@3914 568 * Local variables:
kaf24@3914 569 * mode: C
kaf24@3914 570 * c-set-style: "BSD"
kaf24@3914 571 * c-basic-offset: 4
kaf24@3914 572 * tab-width: 4
kaf24@3914 573 * indent-tabs-mode: nil
kaf24@3988 574 * End:
kaf24@3914 575 */