ia64/linux-2.6.18-xen.hg

net/dccp/ccid.c @ 871:9cbcc9008446

xen/x86: don't initialize cpu_data[]'s apicid field in generic code

As far as I can tell, this is not only redundant with the initialization
done in drivers/xen/core/smpboot.c, but actually results - at least for
secondary CPUs - in the Xen-specific value that was written being later
overwritten with whatever the generic code determines (with no guarantee
that the two values are identical).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu May 14 10:09:15 2009 +0100 (2009-05-14)
parents 831230e53067

/*
 *  net/dccp/ccid.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  CCID infrastructure
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include "ccid.h"

static struct ccid_operations *ccids[CCID_MAX];
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
static atomic_t ccids_lockct = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(ccids_lock);

/*
 * The strategy is: modifications to the ccids vector are short, do not
 * sleep and are very rare, but read access should be free of any exclusive
 * locks.
 */
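/*
 * Writer side: take ccids_lock, then spin (yielding the CPU between
 * attempts) until no readers hold a reference; a reader arriving in the
 * meantime bumps ccids_lockct, which sends the writer around the loop
 * again.
 */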
static void ccids_write_lock(void)
{
        spin_lock(&ccids_lock);
        while (atomic_read(&ccids_lockct) != 0) {
                spin_unlock(&ccids_lock);
                yield();
                spin_lock(&ccids_lock);
        }
}

static inline void ccids_write_unlock(void)
{
        spin_unlock(&ccids_lock);
}
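
/*
 * Reader side: announce the reader by bumping ccids_lockct, then wait for
 * any writer currently holding ccids_lock to drop it before touching the
 * ccids vector.
 */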
static inline void ccids_read_lock(void)
{
        atomic_inc(&ccids_lockct);
        spin_unlock_wait(&ccids_lock);
}

static inline void ccids_read_unlock(void)
{
        atomic_dec(&ccids_lockct);
}

#else
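/*
 * Without SMP or preemption these paths cannot run concurrently, so the
 * lock operations compile away entirely.
 */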
#define ccids_write_lock() do { } while(0)
#define ccids_write_unlock() do { } while(0)
#define ccids_read_lock() do { } while(0)
#define ccids_read_unlock() do { } while(0)
#endif
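
/*
 * Build a slab cache whose objects are a struct ccid header immediately
 * followed by obj_size bytes of CCID-private state.  The formatted name
 * is kstrdup()ed because kmem_cache_create() stores only the pointer.
 */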
static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
{
        kmem_cache_t *slab;
        char slab_name_fmt[32], *slab_name;
        va_list args;

        va_start(args, fmt);
        vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
        va_end(args);

        slab_name = kstrdup(slab_name_fmt, GFP_KERNEL);
        if (slab_name == NULL)
                return NULL;
        slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0,
                                 SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (slab == NULL)
                kfree(slab_name);
        return slab;
}
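
/*
 * Fetch the kstrdup()ed name before destroying the cache so it can be
 * freed afterwards; kmem_cache_destroy() does not free it.
 */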
static void ccid_kmem_cache_destroy(kmem_cache_t *slab)
{
        if (slab != NULL) {
                const char *name = kmem_cache_name(slab);

                kmem_cache_destroy(slab);
                kfree(name);
        }
}
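
/*
 * Create the rx and tx slab caches for a CCID, then publish its
 * operations vector in the ccids[] slot for its id.  Returns -ENOBUFS if
 * either cache cannot be created and -EEXIST if the slot is taken.
 */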
int ccid_register(struct ccid_operations *ccid_ops)
{
        int err = -ENOBUFS;

        ccid_ops->ccid_hc_rx_slab =
                        ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
                                               "%s_hc_rx_sock",
                                               ccid_ops->ccid_name);
        if (ccid_ops->ccid_hc_rx_slab == NULL)
                goto out;

        ccid_ops->ccid_hc_tx_slab =
                        ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
                                               "%s_hc_tx_sock",
                                               ccid_ops->ccid_name);
        if (ccid_ops->ccid_hc_tx_slab == NULL)
                goto out_free_rx_slab;

        ccids_write_lock();
        err = -EEXIST;
        if (ccids[ccid_ops->ccid_id] == NULL) {
                ccids[ccid_ops->ccid_id] = ccid_ops;
                err = 0;
        }
        ccids_write_unlock();
        if (err != 0)
                goto out_free_tx_slab;

        pr_info("CCID: Registered CCID %d (%s)\n",
                ccid_ops->ccid_id, ccid_ops->ccid_name);
out:
        return err;
out_free_tx_slab:
        ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
        ccid_ops->ccid_hc_tx_slab = NULL;
        goto out;
out_free_rx_slab:
        ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
        ccid_ops->ccid_hc_rx_slab = NULL;
        goto out;
}

EXPORT_SYMBOL_GPL(ccid_register);
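
/*
 * Clear the CCID's ccids[] slot under the write lock, then release both
 * slab caches.
 */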
int ccid_unregister(struct ccid_operations *ccid_ops)
{
        ccids_write_lock();
        ccids[ccid_ops->ccid_id] = NULL;
        ccids_write_unlock();

        ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
        ccid_ops->ccid_hc_tx_slab = NULL;
        ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
        ccid_ops->ccid_hc_rx_slab = NULL;

        pr_info("CCID: Unregistered CCID %d (%s)\n",
                ccid_ops->ccid_id, ccid_ops->ccid_name);
        return 0;
}

EXPORT_SYMBOL_GPL(ccid_unregister);
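
/*
 * Instantiate one half (rx or tx) of a CCID for a socket.  Under
 * CONFIG_KMOD a missing CCID is demand-loaded via the
 * "net-dccp-ccid-<id>" module alias, but only from process context
 * (GFP_ATOMIC callers skip the attempt).  On success a reference to the
 * CCID module is held and the private area just past struct ccid is
 * zeroed and passed to the init hook.
 */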
struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, gfp_t gfp)
{
        struct ccid_operations *ccid_ops;
        struct ccid *ccid = NULL;

        ccids_read_lock();
#ifdef CONFIG_KMOD
        if (ccids[id] == NULL) {
                /* We only try to load if in process context */
                ccids_read_unlock();
                if (gfp & GFP_ATOMIC)
                        goto out;
                request_module("net-dccp-ccid-%d", id);
                ccids_read_lock();
        }
#endif
        ccid_ops = ccids[id];
        if (ccid_ops == NULL)
                goto out_unlock;

        if (!try_module_get(ccid_ops->ccid_owner))
                goto out_unlock;

        ccids_read_unlock();

        ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab :
                                     ccid_ops->ccid_hc_tx_slab, gfp);
        if (ccid == NULL)
                goto out_module_put;
        ccid->ccid_ops = ccid_ops;
        if (rx) {
                memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size);
                if (ccid->ccid_ops->ccid_hc_rx_init != NULL &&
                    ccid->ccid_ops->ccid_hc_rx_init(ccid, sk) != 0)
                        goto out_free_ccid;
        } else {
                memset(ccid + 1, 0, ccid_ops->ccid_hc_tx_obj_size);
                if (ccid->ccid_ops->ccid_hc_tx_init != NULL &&
                    ccid->ccid_ops->ccid_hc_tx_init(ccid, sk) != 0)
                        goto out_free_ccid;
        }
out:
        return ccid;
out_unlock:
        ccids_read_unlock();
        goto out;
out_free_ccid:
        kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab :
                             ccid_ops->ccid_hc_tx_slab, ccid);
        ccid = NULL;
out_module_put:
        module_put(ccid_ops->ccid_owner);
        goto out;
}

EXPORT_SYMBOL_GPL(ccid_new);

struct ccid *ccid_hc_rx_new(unsigned char id, struct sock *sk, gfp_t gfp)
{
        return ccid_new(id, sk, 1, gfp);
}

EXPORT_SYMBOL_GPL(ccid_hc_rx_new);

struct ccid *ccid_hc_tx_new(unsigned char id, struct sock *sk, gfp_t gfp)
{
        return ccid_new(id, sk, 0, gfp);
}

EXPORT_SYMBOL_GPL(ccid_hc_tx_new);
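
/*
 * Tear down one half of a CCID: run the exit hook, return the object to
 * its slab, and drop the module reference taken in ccid_new(), provided
 * the CCID is still registered.
 */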
static void ccid_delete(struct ccid *ccid, struct sock *sk, int rx)
{
        struct ccid_operations *ccid_ops;

        if (ccid == NULL)
                return;

        ccid_ops = ccid->ccid_ops;
        if (rx) {
                if (ccid_ops->ccid_hc_rx_exit != NULL)
                        ccid_ops->ccid_hc_rx_exit(sk);
                kmem_cache_free(ccid_ops->ccid_hc_rx_slab, ccid);
        } else {
                if (ccid_ops->ccid_hc_tx_exit != NULL)
                        ccid_ops->ccid_hc_tx_exit(sk);
                kmem_cache_free(ccid_ops->ccid_hc_tx_slab, ccid);
        }
        ccids_read_lock();
        if (ccids[ccid_ops->ccid_id] != NULL)
                module_put(ccid_ops->ccid_owner);
        ccids_read_unlock();
}

void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk)
{
        ccid_delete(ccid, sk, 1);
}

EXPORT_SYMBOL_GPL(ccid_hc_rx_delete);

void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk)
{
        ccid_delete(ccid, sk, 0);
}

EXPORT_SYMBOL_GPL(ccid_hc_tx_delete);
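
For context, a CCID implementation registers itself against this
infrastructure at module init time. Below is a minimal sketch of the
calling convention only; the id, the two state structs, the hook comments
and the function names are illustrative, not taken from this tree:

#include <linux/module.h>
#include "ccid.h"

/* Hypothetical per-socket state; a real CCID defines its own. */
struct example_tx_state { u32 cwnd; };
struct example_rx_state { u32 rtt; };

static struct ccid_operations ccid_example_ops = {
        .ccid_id             = 250,            /* made-up id */
        .ccid_name           = "example",
        .ccid_owner          = THIS_MODULE,
        .ccid_hc_tx_obj_size = sizeof(struct example_tx_state),
        .ccid_hc_rx_obj_size = sizeof(struct example_rx_state),
        /* a real CCID would also fill in the ccid_hc_{rx,tx}_init/exit
         * hooks and its packet-handling callbacks */
};

static int __init ccid_example_init(void)
{
        return ccid_register(&ccid_example_ops);
}
module_init(ccid_example_init);

static void __exit ccid_example_exit(void)
{
        ccid_unregister(&ccid_example_ops);
}
module_exit(ccid_example_exit);

MODULE_ALIAS("net-dccp-ccid-250");
MODULE_LICENSE("GPL");

The MODULE_ALIAS line matches the "net-dccp-ccid-<id>" string that
ccid_new() passes to request_module(), so such a module would also be
demand-loaded the first time a socket asks for its id.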