linux-2.6.11-xen-sparse/arch/xen/kernel/ctrl_if.c
/******************************************************************************
 * ctrl_if.c
 *
 * Management functions for special interface to the domain controller.
 *
 * Copyright (c) 2004, K A Fraser
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm-xen/ctrl_if.h>
#include <asm-xen/evtchn.h>

#if 0
#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
                                  __FILE__ , __LINE__ , ## _a )
#else
#define DPRINTK(_f, _a...) ((void)0)
#endif

/*
 * Extra ring macros to sync a consumer index up to the public producer index.
 * Generally UNSAFE, but we use it for recovery and shutdown in some cases.
 */
#define RING_DROP_PENDING_REQUESTS(_r)                                 \
    do {                                                               \
        (_r)->req_cons = (_r)->sring->req_prod;                        \
    } while (0)
#define RING_DROP_PENDING_RESPONSES(_r)                                \
    do {                                                               \
        (_r)->rsp_cons = (_r)->sring->rsp_prod;                        \
    } while (0)

/*
 * Only used by initial domain which must create its own control-interface
 * event channel. This value is picked up by the user-space domain controller
 * via an ioctl.
 */
int initdom_ctrlif_domcontroller_port = -1;

static int        ctrl_if_evtchn;
static int        ctrl_if_irq;
static spinlock_t ctrl_if_lock;

static struct irqaction ctrl_if_irq_action;

static ctrl_front_ring_t ctrl_if_tx_ring;
static ctrl_back_ring_t  ctrl_if_rx_ring;

/* Incoming message requests. */
/* Primary message type -> message handler. */
static ctrl_msg_handler_t ctrl_if_rxmsg_handler[256];
/* Primary message type -> callback in process context? */
static unsigned long ctrl_if_rxmsg_blocking_context[256/sizeof(unsigned long)];
/* Is it late enough during bootstrap to use schedule_task()? */
static int safe_to_schedule_task;
/* Queue up messages to be handled in process context. */
static ctrl_msg_t ctrl_if_rxmsg_deferred[CONTROL_RING_SIZE];
static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_prod;
static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_cons;

/* Incoming message responses: message identifier -> message handler/id. */
static struct {
    ctrl_msg_handler_t fn;
    unsigned long      id;
} ctrl_if_txmsg_id_mapping[CONTROL_RING_SIZE];

/* For received messages that must be deferred to process context. */
static void __ctrl_if_rxmsg_deferred(void *unused);
static DECLARE_WORK(ctrl_if_rxmsg_deferred_work,
                    __ctrl_if_rxmsg_deferred,
                    NULL);

/* Deferred callbacks for people waiting for space in the transmit ring. */
static DECLARE_TASK_QUEUE(ctrl_if_tx_tq);

static DECLARE_WAIT_QUEUE_HEAD(ctrl_if_tx_wait);
static void __ctrl_if_tx_tasklet(unsigned long data);
static DECLARE_TASKLET(ctrl_if_tx_tasklet, __ctrl_if_tx_tasklet, 0);

static void __ctrl_if_rx_tasklet(unsigned long data);
static DECLARE_TASKLET(ctrl_if_rx_tasklet, __ctrl_if_rx_tasklet, 0);

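/* The shared control interface lives 2048 bytes into the shared-info page. */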
#define get_ctrl_if() ((control_if_t *)((char *)HYPERVISOR_shared_info + 2048))

static void ctrl_if_notify_controller(void)
{
    notify_via_evtchn(ctrl_if_evtchn);
}

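/* Default handler: acknowledge an unclaimed message with an empty response. */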
static void ctrl_if_rxmsg_default_handler(ctrl_msg_t *msg, unsigned long id)
{
    msg->length = 0;
    ctrl_if_send_response(msg);
}

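/*
 * Transmit-ring tasklet: consume responses from the domain controller,
 * invoke the per-message callback registered at send time, and wake anyone
 * waiting for space in the transmit ring.
 */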
static void __ctrl_if_tx_tasklet(unsigned long data)
{
    ctrl_msg_t *msg;
    int         was_full = RING_FULL(&ctrl_if_tx_ring);
    RING_IDX    i, rp;

    i  = ctrl_if_tx_ring.rsp_cons;
    rp = ctrl_if_tx_ring.sring->rsp_prod;
    rmb(); /* Ensure we see all responses up to 'rp'. */

    for ( ; i != rp; i++ )
    {
        msg = RING_GET_RESPONSE(&ctrl_if_tx_ring, i);

        DPRINTK("Rx-Rsp %u/%u :: %d/%d\n", i,
                ctrl_if_tx_ring.sring->rsp_prod,
                msg->type, msg->subtype);

        /* Execute the callback handler, if one was specified. */
        if ( msg->id != 0xFF )
        {
            (*ctrl_if_txmsg_id_mapping[msg->id].fn)(
                msg, ctrl_if_txmsg_id_mapping[msg->id].id);
            smp_mb(); /* Execute, /then/ free. */
            ctrl_if_txmsg_id_mapping[msg->id].fn = NULL;
        }
    }

    /*
     * Step over messages in the ring /after/ finishing reading them. As soon
     * as the index is updated then the message may get blown away.
     */
    smp_mb();
    ctrl_if_tx_ring.rsp_cons = i;

    if ( was_full && !RING_FULL(&ctrl_if_tx_ring) )
    {
        wake_up(&ctrl_if_tx_wait);
        run_task_queue(&ctrl_if_tx_tq);
    }
}

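/*
 * Process-context worker: run handlers registered with
 * CALLBACK_IN_BLOCKING_CONTEXT on messages that the rx tasklet deferred.
 */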
static void __ctrl_if_rxmsg_deferred(void *unused)
{
    ctrl_msg_t *msg;
    CONTROL_RING_IDX dp;

    dp = ctrl_if_rxmsg_deferred_prod;
    rmb(); /* Ensure we see all deferred requests up to 'dp'. */

    while ( ctrl_if_rxmsg_deferred_cons != dp )
    {
        msg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
            ctrl_if_rxmsg_deferred_cons++)];
        (*ctrl_if_rxmsg_handler[msg->type])(msg, 0);
    }
}

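/*
 * Receive-ring tasklet: copy each request out of the shared ring, then
 * either dispatch it immediately or defer it to the process-context worker.
 */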
static void __ctrl_if_rx_tasklet(unsigned long data)
{
    ctrl_msg_t msg, *pmsg;
    CONTROL_RING_IDX dp;
    RING_IDX rp, i;

    i  = ctrl_if_rx_ring.req_cons;
    rp = ctrl_if_rx_ring.sring->req_prod;
    dp = ctrl_if_rxmsg_deferred_prod;
    rmb(); /* Ensure we see all requests up to 'rp'. */

    for ( ; i != rp; i++ )
    {
        pmsg = RING_GET_REQUEST(&ctrl_if_rx_ring, i);
        memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));

        DPRINTK("Rx-Req %u/%u :: %d/%d\n", i,
                ctrl_if_rx_ring.sring->req_prod,
                msg.type, msg.subtype);

        if ( msg.length > sizeof(msg.msg) )
            msg.length = sizeof(msg.msg);

        if ( msg.length != 0 )
            memcpy(msg.msg, pmsg->msg, msg.length);

        if ( test_bit(msg.type,
                      (unsigned long *)&ctrl_if_rxmsg_blocking_context) )
            memcpy(&ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(dp++)],
                   &msg, offsetof(ctrl_msg_t, msg) + msg.length);
        else
            (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
    }

    ctrl_if_rx_ring.req_cons = i;

    if ( dp != ctrl_if_rxmsg_deferred_prod )
    {
        wmb();
        ctrl_if_rxmsg_deferred_prod = dp;
        schedule_work(&ctrl_if_rxmsg_deferred_work);
    }
}

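/* Event-channel upcall: kick whichever tasklets have work pending. */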
static irqreturn_t ctrl_if_interrupt(int irq, void *dev_id,
                                     struct pt_regs *regs)
{
    if ( RING_HAS_UNCONSUMED_RESPONSES(&ctrl_if_tx_ring) )
        tasklet_schedule(&ctrl_if_tx_tasklet);

    if ( RING_HAS_UNCONSUMED_REQUESTS(&ctrl_if_rx_ring) )
        tasklet_schedule(&ctrl_if_rx_tasklet);

    return IRQ_HANDLED;
}

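/*
 * Queue a message on the transmit ring without blocking; returns -EAGAIN if
 * the ring is full. If @hnd is non-NULL, it is recorded in a free mapping
 * slot (whose index is stored in msg->id) and invoked from tasklet context
 * when the response arrives; msg->id == 0xFF marks "no response callback".
 */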
int
ctrl_if_send_message_noblock(
    ctrl_msg_t *msg,
    ctrl_msg_handler_t hnd,
    unsigned long id)
{
    unsigned long flags;
    ctrl_msg_t   *dmsg;
    int           i;

    spin_lock_irqsave(&ctrl_if_lock, flags);

    if ( RING_FULL(&ctrl_if_tx_ring) )
    {
        spin_unlock_irqrestore(&ctrl_if_lock, flags);
        return -EAGAIN;
    }

    msg->id = 0xFF;
    if ( hnd != NULL )
    {
        for ( i = 0; ctrl_if_txmsg_id_mapping[i].fn != NULL; i++ )
            continue;
        ctrl_if_txmsg_id_mapping[i].fn = hnd;
        ctrl_if_txmsg_id_mapping[i].id = id;
        msg->id = i;
    }

    DPRINTK("Tx-Req %u/%u :: %d/%d\n",
            ctrl_if_tx_ring.req_prod_pvt,
            ctrl_if_tx_ring.rsp_cons,
            msg->type, msg->subtype);

    dmsg = RING_GET_REQUEST(&ctrl_if_tx_ring,
                            ctrl_if_tx_ring.req_prod_pvt);
    memcpy(dmsg, msg, sizeof(*msg));
    ctrl_if_tx_ring.req_prod_pvt++;
    RING_PUSH_REQUESTS(&ctrl_if_tx_ring);

    spin_unlock_irqrestore(&ctrl_if_lock, flags);

    ctrl_if_notify_controller();

    return 0;
}

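/*
 * As ctrl_if_send_message_noblock(), but sleep in @wait_state until ring
 * space appears. Returns -ERESTARTSYS if a signal arrives while waiting in
 * TASK_INTERRUPTIBLE.
 */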
int
ctrl_if_send_message_block(
    ctrl_msg_t *msg,
    ctrl_msg_handler_t hnd,
    unsigned long id,
    long wait_state)
{
    DECLARE_WAITQUEUE(wait, current);
    int rc;

    /* Fast path. */
    if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
        return rc;

    add_wait_queue(&ctrl_if_tx_wait, &wait);

    for ( ; ; )
    {
        set_current_state(wait_state);

        if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
            break;

        rc = -ERESTARTSYS;
        if ( signal_pending(current) && (wait_state == TASK_INTERRUPTIBLE) )
            break;

        schedule();
    }

    set_current_state(TASK_RUNNING);
    remove_wait_queue(&ctrl_if_tx_wait, &wait);

    return rc;
}

/* Allow a response-callback handler to find context of a blocked requester. */
struct rsp_wait {
    ctrl_msg_t         *msg;  /* Buffer for the response message.            */
    struct task_struct *task; /* The task that is blocked on the response.   */
    int                 done; /* Indicate to 'task' that response is rcv'ed. */
};

static void __ctrl_if_get_response(ctrl_msg_t *msg, unsigned long id)
{
    struct rsp_wait    *wait = (struct rsp_wait *)id;
    struct task_struct *task = wait->task;

    memcpy(wait->msg, msg, sizeof(*msg));
    wmb();
    wait->done = 1;

    wake_up_process(task);
}

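/*
 * Synchronous round trip: send @msg, then sleep uninterruptibly until the
 * response has been copied into @rmsg by __ctrl_if_get_response().
 */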
int
ctrl_if_send_message_and_get_response(
    ctrl_msg_t *msg,
    ctrl_msg_t *rmsg,
    long wait_state)
{
    struct rsp_wait wait;
    int             rc;

    wait.msg  = rmsg;
    wait.done = 0;
    wait.task = current;

    if ( (rc = ctrl_if_send_message_block(msg, __ctrl_if_get_response,
                                          (unsigned long)&wait,
                                          wait_state)) != 0 )
        return rc;

    for ( ; ; )
    {
        /* NB. Can't easily support TASK_INTERRUPTIBLE here. */
        set_current_state(TASK_UNINTERRUPTIBLE);
        if ( wait.done )
            break;
        schedule();
    }

    set_current_state(TASK_RUNNING);
    return 0;
}

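/*
 * Arrange for @task to run when space next appears in the transmit ring.
 * Returns zero if the ring already has space (@task is not queued on the
 * fast path), nonzero if the ring is still full and @task has been queued.
 */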
int
ctrl_if_enqueue_space_callback(
    struct tq_struct *task)
{
    /* Fast path. */
    if ( !RING_FULL(&ctrl_if_tx_ring) )
        return 0;

    (void)queue_task(task, &ctrl_if_tx_tq);

    /*
     * We may race execution of the task queue, so return the re-checked
     * status: if the task has not yet run even though the ring is non-full,
     * we will certainly return 'not full' here.
     */
    smp_mb();
    return RING_FULL(&ctrl_if_tx_ring);
}

void
ctrl_if_send_response(
    ctrl_msg_t *msg)
{
    unsigned long flags;
    ctrl_msg_t   *dmsg;

    /*
     * NB. The response may be the original request message, modified
     * in-place. In this situation we may have src==dst, so no copying is
     * required.
     */
    spin_lock_irqsave(&ctrl_if_lock, flags);

    DPRINTK("Tx-Rsp %u :: %d/%d\n",
            ctrl_if_rx_ring.rsp_prod_pvt,
            msg->type, msg->subtype);

    dmsg = RING_GET_RESPONSE(&ctrl_if_rx_ring,
                             ctrl_if_rx_ring.rsp_prod_pvt);
    if ( dmsg != msg )
        memcpy(dmsg, msg, sizeof(*msg));

    ctrl_if_rx_ring.rsp_prod_pvt++;
    RING_PUSH_RESPONSES(&ctrl_if_rx_ring);

    spin_unlock_irqrestore(&ctrl_if_lock, flags);

    ctrl_if_notify_controller();
}

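/*
 * Install @hnd for messages of primary type @type. Returns nonzero on
 * success, zero if a non-default receiver is already registered. Passing
 * CALLBACK_IN_BLOCKING_CONTEXT requests process-context dispatch, which is
 * only legal once schedule_task() is available (see ctrl_if_late_setup()).
 */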
int
ctrl_if_register_receiver(
    u8 type,
    ctrl_msg_handler_t hnd,
    unsigned int flags)
{
    unsigned long _flags;
    int inuse;

    spin_lock_irqsave(&ctrl_if_lock, _flags);

    inuse = (ctrl_if_rxmsg_handler[type] != ctrl_if_rxmsg_default_handler);

    if ( inuse )
    {
        printk(KERN_INFO "Receiver %p already established for control "
               "messages of type %d.\n", ctrl_if_rxmsg_handler[type], type);
    }
    else
    {
        ctrl_if_rxmsg_handler[type] = hnd;
        clear_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
        if ( flags == CALLBACK_IN_BLOCKING_CONTEXT )
        {
            set_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
            if ( !safe_to_schedule_task )
                BUG();
        }
    }

    spin_unlock_irqrestore(&ctrl_if_lock, _flags);

    return !inuse;
}

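/*
 * Illustrative usage sketch (not part of the original file): a driver claims
 * a message type and performs a synchronous round trip. CMSG_FOO and foo_rx
 * are hypothetical names; the ctrl_if_* calls are the ones defined above.
 *
 *     static void foo_rx(ctrl_msg_t *msg, unsigned long id)
 *     {
 *         ... inspect msg->subtype and msg->msg[] ...
 *         msg->length = 0;
 *         ctrl_if_send_response(msg);   (the ack may reuse msg in-place)
 *     }
 *
 *     (void)ctrl_if_register_receiver(CMSG_FOO, foo_rx, 0);
 *
 *     ctrl_msg_t req, rsp;
 *     memset(&req, 0, sizeof(req));
 *     req.type = CMSG_FOO;
 *     ctrl_if_send_message_and_get_response(&req, &rsp,
 *                                           TASK_UNINTERRUPTIBLE);
 */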
void
ctrl_if_unregister_receiver(
    u8 type,
    ctrl_msg_handler_t hnd)
{
    unsigned long flags;

    spin_lock_irqsave(&ctrl_if_lock, flags);

    if ( ctrl_if_rxmsg_handler[type] != hnd )
        printk(KERN_INFO "Receiver %p is not registered for control "
               "messages of type %d.\n", hnd, type);
    else
        ctrl_if_rxmsg_handler[type] = ctrl_if_rxmsg_default_handler;

    spin_unlock_irqrestore(&ctrl_if_lock, flags);

    /* Ensure that @hnd will not be executed after this function returns. */
    tasklet_unlock_wait(&ctrl_if_rx_tasklet);
}

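/* Disconnect before save/suspend: tear down the IRQ and event channel. */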
void ctrl_if_suspend(void)
{
    teardown_irq(ctrl_if_irq, &ctrl_if_irq_action);
    unbind_evtchn_from_irq(ctrl_if_evtchn);
}

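/* (Re)connect to the domain controller: attach rings and rebind the IRQ. */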
void ctrl_if_resume(void)
{
    control_if_t *ctrl_if = get_ctrl_if();

    if ( xen_start_info.flags & SIF_INITDOMAIN )
    {
        /*
         * The initial domain must create its own domain-controller link.
         * The controller is probably not running at this point, but will
         * pick up its end of the event channel from
         * initdom_ctrlif_domcontroller_port, which is exported to the
         * user-space domain controller via an ioctl.
         */
        evtchn_op_t op;
        extern void bind_evtchn_to_cpu(unsigned port, unsigned cpu);

        op.cmd = EVTCHNOP_bind_interdomain;
        op.u.bind_interdomain.dom1  = DOMID_SELF;
        op.u.bind_interdomain.dom2  = DOMID_SELF;
        op.u.bind_interdomain.port1 = 0;
        op.u.bind_interdomain.port2 = 0;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            BUG();
        xen_start_info.domain_controller_evtchn = op.u.bind_interdomain.port1;
        initdom_ctrlif_domcontroller_port = op.u.bind_interdomain.port2;
        bind_evtchn_to_cpu(op.u.bind_interdomain.port1, 0);
    }

    /* Sync up with shared indexes. */
    FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring, CONTROL_RING_MEM);
    BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring, CONTROL_RING_MEM);

    ctrl_if_evtchn = xen_start_info.domain_controller_evtchn;
    ctrl_if_irq    = bind_evtchn_to_irq(ctrl_if_evtchn);

    memset(&ctrl_if_irq_action, 0, sizeof(ctrl_if_irq_action));
    ctrl_if_irq_action.handler = ctrl_if_interrupt;
    ctrl_if_irq_action.name    = "ctrl-if";
    (void)setup_irq(ctrl_if_irq, &ctrl_if_irq_action);
}

void __init ctrl_if_init(void)
{
    control_if_t *ctrl_if = get_ctrl_if();
    int i;

    for ( i = 0; i < 256; i++ )
        ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;

    FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring, CONTROL_RING_MEM);
    BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring, CONTROL_RING_MEM);

    spin_lock_init(&ctrl_if_lock);

    ctrl_if_resume();
}

/* This is called after it is safe to call schedule_task(). */
static int __init ctrl_if_late_setup(void)
{
    safe_to_schedule_task = 1;
    return 0;
}
__initcall(ctrl_if_late_setup);

/*
 * !! The following are DANGEROUS FUNCTIONS !!
 * Use with care [for example, see xencons_force_flush()].
 */

int ctrl_if_transmitter_empty(void)
{
    return (ctrl_if_tx_ring.sring->req_prod == ctrl_if_tx_ring.rsp_cons);
}

void ctrl_if_discard_responses(void)
{
    RING_DROP_PENDING_RESPONSES(&ctrl_if_tx_ring);
}

EXPORT_SYMBOL(ctrl_if_send_message_noblock);
EXPORT_SYMBOL(ctrl_if_send_message_block);
EXPORT_SYMBOL(ctrl_if_send_message_and_get_response);
EXPORT_SYMBOL(ctrl_if_enqueue_space_callback);
EXPORT_SYMBOL(ctrl_if_send_response);
EXPORT_SYMBOL(ctrl_if_register_receiver);
EXPORT_SYMBOL(ctrl_if_unregister_receiver);