ia64/linux-2.6.18-xen.hg

view net/rxrpc/krxiod.c @ 871:9cbcc9008446

xen/x86: don't initialize cpu_data[]'s apicid field on generic code

Afaict, this is not only redundant with the initialization done in
drivers/xen/core/smpboot.c, but actually results - at least for
secondary CPUs - in the Xen-specific value written to be later
overwritten with whatever the generic code determines (with no
guarantee that the two values are identical).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu May 14 10:09:15 2009 +0100 (2009-05-14)
parents 831230e53067
children
line source
1 /* krxiod.c: Rx I/O daemon
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
12 #include <linux/sched.h>
13 #include <linux/completion.h>
14 #include <linux/spinlock.h>
15 #include <linux/init.h>
16 #include <rxrpc/krxiod.h>
17 #include <rxrpc/transport.h>
18 #include <rxrpc/peer.h>
19 #include <rxrpc/call.h>
20 #include "internal.h"
/* sleep queue the daemon blocks on, and the completion used by
 * rxrpc_krxiod_kill() to synchronise with the daemon's exit */
static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxiod_sleepq);
static DECLARE_COMPLETION(rxrpc_krxiod_dead);

/* total number of items (transports + calls) currently awaiting attention */
static atomic_t rxrpc_krxiod_qcount = ATOMIC_INIT(0);

/* queue of transports awaiting attention, and its guard lock */
static LIST_HEAD(rxrpc_krxiod_transportq);
static DEFINE_SPINLOCK(rxrpc_krxiod_transportq_lock);

/* queue of calls awaiting attention, and its guard lock */
static LIST_HEAD(rxrpc_krxiod_callq);
static DEFINE_SPINLOCK(rxrpc_krxiod_callq_lock);

/* set by rxrpc_krxiod_kill() to tell the daemon loop to terminate;
 * volatile because it is polled lock-free by the daemon */
static volatile int rxrpc_krxiod_die;
/*****************************************************************************/
/*
 * Rx I/O daemon
 *
 * Sleeps until rxrpc_krxiod_qcount indicates queued work (or until told to
 * die), then services at most one transport and one call per loop iteration,
 * taking a usage reference on each item before working on it.
 */
static int rxrpc_krxiod(void *arg)
{
	DECLARE_WAITQUEUE(krxiod,current);

	printk("Started krxiod %d\n",current->pid);

	daemonize("krxiod");

	/* loop around waiting for work to do */
	do {
		/* wait for work or to be told to exit */
		_debug("### Begin Wait");
		if (!atomic_read(&rxrpc_krxiod_qcount)) {
			set_current_state(TASK_INTERRUPTIBLE);

			add_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);

			/* re-test the wake conditions after going
			 * interruptible each time round so a wake-up between
			 * test and schedule() is not lost */
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (atomic_read(&rxrpc_krxiod_qcount) ||
				    rxrpc_krxiod_die ||
				    signal_pending(current))
					break;

				schedule();
			}

			remove_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
			set_current_state(TASK_RUNNING);
		}
		_debug("### End Wait");

		/* do work if been given some to do */
		_debug("### Begin Work");

		/* see if there's a transport in need of attention */
		if (!list_empty(&rxrpc_krxiod_transportq)) {
			struct rxrpc_transport *trans = NULL;

			spin_lock_irq(&rxrpc_krxiod_transportq_lock);

			/* recheck under the lock - the unlocked test above is
			 * only an optimisation */
			if (!list_empty(&rxrpc_krxiod_transportq)) {
				trans = list_entry(
					rxrpc_krxiod_transportq.next,
					struct rxrpc_transport,
					krxiodq_link);

				list_del_init(&trans->krxiodq_link);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&trans->usage)>0)
					rxrpc_get_transport(trans);
				else
					trans = NULL;
			}

			spin_unlock_irq(&rxrpc_krxiod_transportq_lock);

			if (trans) {
				rxrpc_trans_receive_packet(trans);
				rxrpc_put_transport(trans);
			}
		}

		/* see if there's a call in need of attention */
		if (!list_empty(&rxrpc_krxiod_callq)) {
			struct rxrpc_call *call = NULL;

			spin_lock_irq(&rxrpc_krxiod_callq_lock);

			if (!list_empty(&rxrpc_krxiod_callq)) {
				call = list_entry(rxrpc_krxiod_callq.next,
						  struct rxrpc_call,
						  rcv_krxiodq_lk);
				list_del_init(&call->rcv_krxiodq_lk);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&call->usage) > 0) {
					_debug("@@@ KRXIOD"
					       " Begin Attend Call %p", call);
					rxrpc_get_call(call);
				}
				else {
					call = NULL;
				}
			}

			spin_unlock_irq(&rxrpc_krxiod_callq_lock);

			if (call) {
				rxrpc_call_do_stuff(call);
				rxrpc_put_call(call);
				_debug("@@@ KRXIOD End Attend Call %p", call);
			}
		}

		_debug("### End Work");

		try_to_freeze();

		/* discard pending signals */
		rxrpc_discard_my_signals();

	} while (!rxrpc_krxiod_die);

	/* and that's all */
	complete_and_exit(&rxrpc_krxiod_dead, 0);

} /* end rxrpc_krxiod() */
153 /*****************************************************************************/
154 /*
155 * start up a krxiod daemon
156 */
157 int __init rxrpc_krxiod_init(void)
158 {
159 return kernel_thread(rxrpc_krxiod, NULL, 0);
161 } /* end rxrpc_krxiod_init() */
163 /*****************************************************************************/
164 /*
165 * kill the krxiod daemon and wait for it to complete
166 */
167 void rxrpc_krxiod_kill(void)
168 {
169 rxrpc_krxiod_die = 1;
170 wake_up_all(&rxrpc_krxiod_sleepq);
171 wait_for_completion(&rxrpc_krxiod_dead);
173 } /* end rxrpc_krxiod_kill() */
175 /*****************************************************************************/
176 /*
177 * queue a transport for attention by krxiod
178 */
179 void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans)
180 {
181 unsigned long flags;
183 _enter("");
185 if (list_empty(&trans->krxiodq_link)) {
186 spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
188 if (list_empty(&trans->krxiodq_link)) {
189 if (atomic_read(&trans->usage) > 0) {
190 list_add_tail(&trans->krxiodq_link,
191 &rxrpc_krxiod_transportq);
192 atomic_inc(&rxrpc_krxiod_qcount);
193 }
194 }
196 spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
197 wake_up_all(&rxrpc_krxiod_sleepq);
198 }
200 _leave("");
202 } /* end rxrpc_krxiod_queue_transport() */
204 /*****************************************************************************/
205 /*
206 * dequeue a transport from krxiod's attention queue
207 */
208 void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans)
209 {
210 unsigned long flags;
212 _enter("");
214 spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
215 if (!list_empty(&trans->krxiodq_link)) {
216 list_del_init(&trans->krxiodq_link);
217 atomic_dec(&rxrpc_krxiod_qcount);
218 }
219 spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
221 _leave("");
223 } /* end rxrpc_krxiod_dequeue_transport() */
225 /*****************************************************************************/
226 /*
227 * queue a call for attention by krxiod
228 */
229 void rxrpc_krxiod_queue_call(struct rxrpc_call *call)
230 {
231 unsigned long flags;
233 if (list_empty(&call->rcv_krxiodq_lk)) {
234 spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
235 if (atomic_read(&call->usage) > 0) {
236 list_add_tail(&call->rcv_krxiodq_lk,
237 &rxrpc_krxiod_callq);
238 atomic_inc(&rxrpc_krxiod_qcount);
239 }
240 spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
241 }
242 wake_up_all(&rxrpc_krxiod_sleepq);
244 } /* end rxrpc_krxiod_queue_call() */
246 /*****************************************************************************/
247 /*
248 * dequeue a call from krxiod's attention queue
249 */
250 void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call)
251 {
252 unsigned long flags;
254 spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
255 if (!list_empty(&call->rcv_krxiodq_lk)) {
256 list_del_init(&call->rcv_krxiodq_lk);
257 atomic_dec(&rxrpc_krxiod_qcount);
258 }
259 spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
261 } /* end rxrpc_krxiod_dequeue_call() */