ia64/xen-unstable

view xen/include/xen/rcupdate.h @ 19800:78962f85c562

IOMMU: Add two generic functions to vendor neutral interface

Add two generic functions to the vendor neutral iommu interface. The
reason is that since changeset 19732 there is only one global flag,
"iommu_enabled", that controls iommu enablement for both vtd and amd
systems, so we need different code paths for vtd and amd iommu systems
when this flag is turned on. Also, the early check of "iommu_enabled"
in iommu_setup() is removed to prevent iommu functionality from being
disabled on amd systems.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 19 08:41:50 2009 +0100 (2009-06-19)
parents 97826d77bd4d

/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 */

#ifndef __XEN_RCUPDATE_H
#define __XEN_RCUPDATE_H

#include <xen/cache.h>
#include <xen/spinlock.h>
#include <xen/percpu.h>
#include <xen/cpumask.h>

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update requests in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
    struct rcu_head *next;
    void (*func)(struct rcu_head *head);
};

#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
    (ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)
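
/*
 * Usage sketch: an RCU-protected object typically embeds a struct rcu_head
 * so that it can later be handed to call_rcu() for deferred reclamation.
 * The structure and field names below ("foo", "data", "rcu") are purely
 * illustrative.
 *
 *   struct foo {
 *       int             data;
 *       struct rcu_head rcu;
 *   };
 *
 *   static void foo_init(struct foo *f, int data)
 *   {
 *       f->data = data;
 *       INIT_RCU_HEAD(&f->rcu);
 *   }
 */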

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
    long cur;          /* Current batch number.              */
    long completed;    /* Number of the last completed batch */
    int  next_pending; /* Is the next batch already waiting? */

    spinlock_t lock __cacheline_aligned;
    cpumask_t  cpumask; /* CPUs that need to switch in order */
                        /* for current batch to proceed.     */
} __cacheline_aligned;

/* Is batch a before batch b ? */
static inline int rcu_batch_before(long a, long b)
{
    return (a - b) < 0;
}

/* Is batch a after batch b ? */
static inline int rcu_batch_after(long a, long b)
{
    return (a - b) > 0;
}
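
/*
 * Worked example: batch numbers are compared via their signed difference
 * rather than with a plain "<" so that, assuming the usual two's-complement
 * wrap of the counter, the ordering stays correct across a wrap (the same
 * idiom as the kernel's time_before()/time_after()). For small values,
 * rcu_batch_before(2, 5) == 1 and rcu_batch_after(2, 5) == 0; across a
 * wrap, (LONG_MAX - LONG_MIN) wraps to -1, so rcu_batch_before(LONG_MAX,
 * LONG_MIN) still reports LONG_MAX as the earlier batch.
 */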

/*
 * Per-CPU data for Read-Copy Update.
 * nxtlist - new callbacks are added here
 * curlist - current batch for which quiescent cycle started if any
 */
struct rcu_data {
    /* 1) quiescent state handling : */
    long            quiescbatch;     /* Batch # for grace period */
    int             qs_pending;      /* core waits for quiesc state */

    /* 2) batch handling */
    long            batch;           /* Batch # for current RCU batch */
    struct rcu_head *nxtlist;
    struct rcu_head **nxttail;
    long            qlen;            /* # of queued callbacks */
    struct rcu_head *curlist;
    struct rcu_head **curtail;
    struct rcu_head *donelist;
    struct rcu_head **donetail;
    long            blimit;          /* Upper limit on a processed batch */
    int             cpu;
    struct rcu_head barrier;
#ifdef CONFIG_SMP
    long            last_rs_qlen;    /* qlen during the last resched */
#endif
};

DECLARE_PER_CPU(struct rcu_data, rcu_data);
extern struct rcu_ctrlblk rcu_ctrlblk;

int rcu_pending(int cpu);
int rcu_needs_cpu(int cpu);

/*
 * Dummy lock type for passing to rcu_read_{lock,unlock}. Currently exists
 * only to document the reason for rcu_read_lock() critical sections.
 */
struct _rcu_read_lock {};
typedef struct _rcu_read_lock rcu_read_lock_t;
#define DEFINE_RCU_READ_LOCK(x) rcu_read_lock_t x

/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When call_rcu() is invoked on one CPU while other CPUs are within RCU
 * read-side critical sections, invocation of the corresponding RCU callback
 * is deferred until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections. One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked. This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested. Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
#define rcu_read_lock(x) do { } while (0)

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
#define rcu_read_unlock(x) do { } while (0)

/*
 * So where is rcu_write_lock()? It does not exist, as there is no
 * way for writers to lock out RCU readers. This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other. The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well. RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference(p) (p)
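
/*
 * Read-side sketch, continuing the hypothetical "struct foo" example above:
 * a reader brackets its accesses with rcu_read_lock()/rcu_read_unlock() and
 * fetches the shared pointer through rcu_dereference(), which also
 * documents the pointer as RCU-protected. "foo_ptr", "foo_rcu_lock" and
 * foo_read() are illustrative names.
 *
 *   static struct foo *foo_ptr;
 *   static DEFINE_RCU_READ_LOCK(foo_rcu_lock);
 *
 *   static int foo_read(void)
 *   {
 *       struct foo *f;
 *       int val = -1;
 *
 *       rcu_read_lock(&foo_rcu_lock);
 *       f = rcu_dereference(foo_ptr);
 *       if ( f != NULL )
 *           val = f->data;
 *       rcu_read_unlock(&foo_rcu_lock);
 *
 *       return val;
 *   }
 */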

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment. More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */
#define rcu_assign_pointer(p, v) ({ smp_wmb(); (p) = (v); })
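
/*
 * Update-side sketch, continuing the hypothetical "foo" example (and
 * assuming xmalloc() and DEFINE_SPINLOCK() from the usual Xen headers):
 * writers serialise against each other with an ordinary spinlock and
 * publish the fully initialised object with rcu_assign_pointer(), so a
 * concurrent reader sees either the old pointer or the complete new
 * object. Retiring a previously published object is shown with call_rcu()
 * below.
 *
 *   static DEFINE_SPINLOCK(foo_lock);
 *
 *   static int foo_publish(int data)
 *   {
 *       struct foo *f = xmalloc(struct foo);
 *
 *       if ( f == NULL )
 *           return -ENOMEM;
 *       foo_init(f, data);
 *
 *       spin_lock(&foo_lock);
 *       rcu_assign_pointer(foo_ptr, f);
 *       spin_unlock(&foo_lock);
 *
 *       return 0;
 *   }
 */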

void rcu_init(void);
void __devinit rcu_online_cpu(int cpu);
void rcu_check_callbacks(int cpu);

/* Exported interfaces */
void fastcall call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *head));
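
/*
 * Deferred-reclamation sketch, completing the hypothetical "foo" example
 * (container_of() and xfree() are assumed to come from the usual Xen
 * headers): to replace a published object, swap the pointer under the
 * writer lock and hand the old object to call_rcu(); the callback runs
 * only after a grace period, i.e. once every reader that might still hold
 * a reference has left its read-side critical section.
 *
 *   static void foo_free_rcu(struct rcu_head *head)
 *   {
 *       struct foo *f = container_of(head, struct foo, rcu);
 *
 *       xfree(f);
 *   }
 *
 *   static void foo_replace(struct foo *new)
 *   {
 *       struct foo *old;
 *
 *       spin_lock(&foo_lock);
 *       old = foo_ptr;
 *       rcu_assign_pointer(foo_ptr, new);
 *       spin_unlock(&foo_lock);
 *
 *       if ( old != NULL )
 *           call_rcu(&old->rcu, foo_free_rcu);
 *   }
 */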

#endif /* __XEN_RCUPDATE_H */