ia64/linux-2.6.18-xen.hg

annotate kernel/rtmutex_common.h @ 798:b02a90bf5bbc

ACPI: Backport missing part for T-State MSR support

Part of the kernel commit below was missed while backporting T-State
support.

commit f79f06ab9f86d7203006d2ec8992ac80df36a34e
Author: Zhao Yakui <yakui.zhao@intel.com>
Date: Thu Nov 15 17:06:36 2007 +0800

ACPI: Enable MSR (FixedHW) support for T-States

Add throttling control via MSR when T-states uses
the FixHW Control Status registers.

Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
Signed-off-by: Li Shaohua <shaohua.li@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>

Signed-off-by: Wei Gang <gang.wei@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Mar 02 10:53:59 2009 +0000 (2009-03-02)
parents 831230e53067
children
/*
 * RT Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This file contains the private data structure and API definitions.
 */

#ifndef __KERNEL_RTMUTEX_COMMON_H
#define __KERNEL_RTMUTEX_COMMON_H

#include <linux/rtmutex.h>

/*
 * The rtmutex in-kernel tester is independent of rtmutex debugging. We
 * call schedule_rt_mutex_test() instead of schedule() for the tasks which
 * belong to the tester. That way we can delay the wakeup path of those
 * threads to provoke lock stealing and testing of complex boosting scenarios.
 */
#ifdef CONFIG_RT_MUTEX_TESTER

extern void schedule_rt_mutex_test(struct rt_mutex *lock);

#define schedule_rt_mutex(_lock)                                \
        do {                                                    \
                if (!(current->flags & PF_MUTEX_TESTER))        \
                        schedule();                             \
                else                                            \
                        schedule_rt_mutex_test(_lock);          \
        } while (0)

#else
# define schedule_rt_mutex(_lock)               schedule()
#endif
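
/*
 * Illustrative sketch (not part of the original header): roughly how the
 * slow-path wait loop in kernel/rtmutex.c is expected to use the
 * schedule_rt_mutex() wrapper above. example_try_take() is a hypothetical
 * stand-in for the real try-lock step, and the loop omits the signal,
 * timeout and deadlock handling done by the actual slow path.
 */
static int example_try_take(struct rt_mutex *lock);

static void example_wait_for_lock(struct rt_mutex *lock)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        while (!example_try_take(lock)) {
                /*
                 * Plain schedule() normally; tester threads (PF_MUTEX_TESTER)
                 * are routed through schedule_rt_mutex_test() instead.
                 */
                schedule_rt_mutex(lock);
                set_current_state(TASK_UNINTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
}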

/*
 * This is the control structure for tasks blocked on a rt_mutex,
 * which is allocated on the kernel stack of the blocked task.
 *
 * @list_entry:      pi node to enqueue into the mutex waiters list
 * @pi_list_entry:   pi node to enqueue into the mutex owner waiters list
 * @task:            task reference to the blocked task
 * @lock:            the rt_mutex this waiter is blocked on
 */
struct rt_mutex_waiter {
        struct plist_node       list_entry;
        struct plist_node       pi_list_entry;
        struct task_struct      *task;
        struct rt_mutex         *lock;
#ifdef CONFIG_DEBUG_RT_MUTEXES
        unsigned long           ip;
        pid_t                   deadlock_task_pid;
        struct rt_mutex         *deadlock_lock;
#endif
};
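
/*
 * Illustrative sketch (assumption, simplified): the waiter lives on the
 * blocked task's own stack, as the comment above describes. The real setup
 * is done by task_blocks_on_rt_mutex() in kernel/rtmutex.c, which also
 * enqueues the waiter and walks the PI chain under the proper locks;
 * example_block_on() below is a hypothetical, locking-free outline.
 */
static void example_block_on(struct rt_mutex *lock)
{
        struct rt_mutex_waiter waiter;          /* on this task's stack */

        plist_node_init(&waiter.list_entry, current->prio);
        plist_node_init(&waiter.pi_list_entry, current->prio);
        waiter.task = current;
        waiter.lock = lock;
        /* ...enqueue into lock->wait_list, boost the owner, then sleep... */
}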

/*
 * Various helpers to access the waiters-plist:
 */
static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
{
        return !plist_head_empty(&lock->wait_list);
}

static inline struct rt_mutex_waiter *
rt_mutex_top_waiter(struct rt_mutex *lock)
{
        struct rt_mutex_waiter *w;

        w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter,
                              list_entry);
        BUG_ON(w->lock != lock);

        return w;
}

static inline int task_has_pi_waiters(struct task_struct *p)
{
        return !plist_head_empty(&p->pi_waiters);
}

static inline struct rt_mutex_waiter *
task_top_pi_waiter(struct task_struct *p)
{
        return plist_first_entry(&p->pi_waiters, struct rt_mutex_waiter,
                                 pi_list_entry);
}
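
/*
 * Illustrative sketch (assumption, locking ignored): how the helpers above
 * combine for priority boosting. A task runs at its normal priority unless
 * one of its pi_waiters has a numerically lower (i.e. higher) priority;
 * the real computation is rt_mutex_getprio() in kernel/rtmutex.c.
 */
static int example_boosted_prio(struct task_struct *task)
{
        int prio = task->normal_prio;

        if (task_has_pi_waiters(task) &&
            task_top_pi_waiter(task)->pi_list_entry.prio < prio)
                prio = task_top_pi_waiter(task)->pi_list_entry.prio;

        return prio;
}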

/*
 * lock->owner state tracking:
 */
#define RT_MUTEX_OWNER_PENDING  1UL
#define RT_MUTEX_HAS_WAITERS    2UL
#define RT_MUTEX_OWNER_MASKALL  3UL

static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
{
        return (struct task_struct *)
                ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
}

static inline struct task_struct *rt_mutex_real_owner(struct rt_mutex *lock)
{
        return (struct task_struct *)
                ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock)
{
        return (unsigned long)lock->owner & RT_MUTEX_OWNER_PENDING;
}
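
/*
 * Illustrative sketch (assumption): the owner task pointer and the state
 * bits above share the single lock->owner word, which works because
 * task_struct pointers are at least 4-byte aligned, leaving the two low
 * bits free. The real encoding is done by rt_mutex_set_owner() in
 * kernel/rtmutex.c; example_set_owner() is a simplified stand-in.
 */
static void example_set_owner(struct rt_mutex *lock, struct task_struct *owner,
                              unsigned long mask)
{
        unsigned long val = (unsigned long)owner | mask;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        lock->owner = (struct task_struct *)val;
}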

/*
 * PI-futex support (proxy locking functions, etc.):
 */
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                       struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                                  struct task_struct *proxy_owner);
#endif
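
/*
 * Illustrative sketch (assumption, heavily simplified): how the PI-futex
 * code is expected to use the proxy-locking API declared above. The real
 * callers live in kernel/futex.c and operate on a futex pi_state; the
 * example_* wrappers here are hypothetical.
 */
static void example_pi_attach_owner(struct rt_mutex *pi_mutex,
                                    struct task_struct *owner)
{
        /*
         * Mark @owner as holding @pi_mutex even though it never called
         * rt_mutex_lock() itself -- the user-space futex value says it does.
         */
        rt_mutex_init_proxy_locked(pi_mutex, owner);
}

static void example_pi_detach_owner(struct rt_mutex *pi_mutex,
                                    struct task_struct *owner)
{
        /* Drop the proxy ownership when the pi_state is torn down. */
        rt_mutex_proxy_unlock(pi_mutex, owner);
}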