ia64/xen-unstable

view linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c @ 8786:e9d78d8bd568

Clean up the TPM stack a bit.

Signed-off-by: Stefan Berger <stefanb@us.ibm.com>
author cl349@firebug.cl.cam.ac.uk
date Tue Feb 07 20:38:44 2006 +0000 (2006-02-07)
parents fd9b2c1bb577
children 201d48272a57
line source
1 /*
2 * Copyright (c) 2005, IBM Corporation
3 *
4 * Author: Stefan Berger, stefanb@us.ibm.com
5 * Grant table support: Mahadevan Gomathisankaran
6 *
7 * This code has been derived from drivers/xen/netfront/netfront.c
8 *
9 * Copyright (c) 2002-2004, K A Fraser
10 *
11 * This file may be distributed separately from the Linux kernel, or
12 * incorporated into other software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
33 #include <linux/config.h>
34 #include <linux/module.h>
35 #include <linux/version.h>
36 #include <linux/kernel.h>
37 #include <linux/slab.h>
38 #include <linux/errno.h>
39 #include <linux/interrupt.h>
40 #include <linux/init.h>
41 #include <xen/tpmfe.h>
42 #include <linux/err.h>
44 #include <asm/semaphore.h>
45 #include <asm/io.h>
46 #include <xen/evtchn.h>
47 #include <xen/interface/grant_table.h>
48 #include <xen/interface/io/tpmif.h>
49 #include <asm/uaccess.h>
50 #include <xen/xenbus.h>
51 #include <xen/interface/grant_table.h>
53 #include "tpmfront.h"
55 #undef DEBUG
57 /* locally visible variables */
58 static grant_ref_t gref_head;
59 static struct tpm_private *my_priv;
61 /* local function prototypes */
62 static irqreturn_t tpmif_int(int irq,
63 void *tpm_priv,
64 struct pt_regs *ptregs);
65 static void tpmif_rx_action(unsigned long unused);
66 static void tpmif_connect(struct tpm_private *tp, domid_t domid);
67 static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
68 static int tpm_allocate_buffers(struct tpm_private *tp);
69 static void tpmif_set_connected_state(struct tpm_private *tp,
70 u8 newstate);
71 static int tpm_xmit(struct tpm_private *tp,
72 const u8 * buf, size_t count, int userbuffer,
73 void *remember);
75 #define DPRINTK(fmt, args...) \
76 pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
77 #define IPRINTK(fmt, args...) \
78 printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
79 #define WPRINTK(fmt, args...) \
80 printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
83 static inline int
84 tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len,
85 int isuserbuffer)
86 {
87 int copied = len;
89 if (len > txb->size) {
90 copied = txb->size;
91 }
92 if (isuserbuffer) {
93 if (copy_from_user(txb->data, src, copied))
94 return -EFAULT;
95 } else {
96 memcpy(txb->data, src, copied);
97 }
98 txb->len = len;
99 return copied;
100 }
102 static inline struct tx_buffer *tx_buffer_alloc(void)
103 {
104 struct tx_buffer *txb = kzalloc(sizeof (struct tx_buffer),
105 GFP_KERNEL);
107 if (txb) {
108 txb->len = 0;
109 txb->size = PAGE_SIZE;
110 txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
111 if (txb->data == NULL) {
112 kfree(txb);
113 txb = NULL;
114 }
115 }
116 return txb;
117 }
120 /**************************************************************
121 Utility function for the tpm_private structure
122 **************************************************************/
123 static inline void tpm_private_init(struct tpm_private *tp)
124 {
125 spin_lock_init(&tp->tx_lock);
126 init_waitqueue_head(&tp->wait_q);
127 }
129 static struct tpm_private *tpm_private_get(void)
130 {
131 if (!my_priv) {
132 my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
133 if (my_priv) {
134 tpm_private_init(my_priv);
135 }
136 }
137 return my_priv;
138 }
140 static inline void tpm_private_free(struct tpm_private *tp)
141 {
142 kfree(tp);
143 }
145 /**************************************************************
147 The interface to let the tpm plugin register its callback
148 function and send data to another partition using this module
150 **************************************************************/
/* Serializes registration/use of the upper-layer receiver. */
static DECLARE_MUTEX(upperlayer_lock);
/* Held across suspend; blocks senders until the connection resumes. */
static DECLARE_MUTEX(suspend_lock);
static struct tpmfe_device *upperlayer_tpmfe;
156 /*
157 * Send data via this module by calling this function
158 */
159 int tpm_fe_send(struct tpm_private *tp, const u8 * buf, size_t count, void *ptr)
160 {
161 int sent = 0;
163 down(&suspend_lock);
164 sent = tpm_xmit(tp, buf, count, 0, ptr);
165 up(&suspend_lock);
167 return sent;
168 }
169 EXPORT_SYMBOL(tpm_fe_send);
171 /*
172 * Register a callback for receiving data from this module
173 */
174 int tpm_fe_register_receiver(struct tpmfe_device *tpmfe_dev)
175 {
176 int rc = 0;
178 down(&upperlayer_lock);
179 if (NULL == upperlayer_tpmfe) {
180 upperlayer_tpmfe = tpmfe_dev;
181 tpmfe_dev->max_tx_size = TPMIF_TX_RING_SIZE * PAGE_SIZE;
182 tpmfe_dev->tpm_private = tpm_private_get();
183 } else {
184 rc = -EBUSY;
185 }
186 up(&upperlayer_lock);
187 return rc;
188 }
189 EXPORT_SYMBOL(tpm_fe_register_receiver);
191 /*
192 * Unregister the callback for receiving data from this module
193 */
194 void tpm_fe_unregister_receiver(void)
195 {
196 down(&upperlayer_lock);
197 upperlayer_tpmfe = NULL;
198 up(&upperlayer_lock);
199 }
200 EXPORT_SYMBOL(tpm_fe_unregister_receiver);
202 /*
203 * Call this function to send data to the upper layer's
204 * registered receiver function.
205 */
206 static int tpm_fe_send_upperlayer(const u8 * buf, size_t count,
207 const void *ptr)
208 {
209 int rc = 0;
211 down(&upperlayer_lock);
213 if (upperlayer_tpmfe && upperlayer_tpmfe->receive)
214 rc = upperlayer_tpmfe->receive(buf, count, ptr);
216 up(&upperlayer_lock);
217 return rc;
218 }
220 /**************************************************************
221 XENBUS support code
222 **************************************************************/
224 static int setup_tpmring(struct xenbus_device *dev,
225 struct tpm_private *tp)
226 {
227 tpmif_tx_interface_t *sring;
228 int err;
230 sring = (void *)__get_free_page(GFP_KERNEL);
231 if (!sring) {
232 xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
233 return -ENOMEM;
234 }
235 tp->tx = sring;
237 tpm_allocate_buffers(tp);
239 err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
240 if (err < 0) {
241 free_page((unsigned long)sring);
242 tp->tx = NULL;
243 xenbus_dev_fatal(dev, err, "allocating grant reference");
244 goto fail;
245 }
246 tp->ring_ref = err;
248 err = xenbus_alloc_evtchn(dev, &tp->evtchn);
249 if (err)
250 goto fail;
252 tpmif_connect(tp, dev->otherend_id);
254 return 0;
255 fail:
256 return err;
257 }
260 static void destroy_tpmring(struct tpm_private *tp)
261 {
262 tpmif_set_connected_state(tp, 0);
263 if (tp->tx != NULL) {
264 gnttab_end_foreign_access(tp->ring_ref, 0,
265 (unsigned long)tp->tx);
266 tp->tx = NULL;
267 }
269 if (tp->irq)
270 unbind_from_irqhandler(tp->irq, NULL);
271 tp->evtchn = tp->irq = 0;
272 }
275 static int talk_to_backend(struct xenbus_device *dev,
276 struct tpm_private *tp)
277 {
278 const char *message = NULL;
279 int err;
280 xenbus_transaction_t xbt;
282 err = setup_tpmring(dev, tp);
283 if (err) {
284 xenbus_dev_fatal(dev, err, "setting up ring");
285 goto out;
286 }
288 again:
289 err = xenbus_transaction_start(&xbt);
290 if (err) {
291 xenbus_dev_fatal(dev, err, "starting transaction");
292 goto destroy_tpmring;
293 }
295 err = xenbus_printf(xbt, dev->nodename,
296 "ring-ref","%u", tp->ring_ref);
297 if (err) {
298 message = "writing ring-ref";
299 goto abort_transaction;
300 }
302 err = xenbus_printf(xbt, dev->nodename,
303 "event-channel", "%u", tp->evtchn);
304 if (err) {
305 message = "writing event-channel";
306 goto abort_transaction;
307 }
309 err = xenbus_switch_state(dev, xbt, XenbusStateInitialised);
310 if (err) {
311 goto abort_transaction;
312 }
314 err = xenbus_transaction_end(xbt, 0);
315 if (err == -EAGAIN)
316 goto again;
317 if (err) {
318 xenbus_dev_fatal(dev, err, "completing transaction");
319 goto destroy_tpmring;
320 }
321 return 0;
323 abort_transaction:
324 xenbus_transaction_end(xbt, 1);
325 if (message)
326 xenbus_dev_error(dev, err, "%s", message);
327 destroy_tpmring:
328 destroy_tpmring(tp);
329 out:
330 return err;
331 }
333 /**
334 * Callback received when the backend's state changes.
335 */
336 static void backend_changed(struct xenbus_device *dev,
337 XenbusState backend_state)
338 {
339 struct tpm_private *tp = dev->data;
340 DPRINTK("\n");
342 switch (backend_state) {
343 case XenbusStateInitialising:
344 case XenbusStateInitWait:
345 case XenbusStateInitialised:
346 case XenbusStateUnknown:
347 break;
349 case XenbusStateConnected:
350 tpmif_set_connected_state(tp, 1);
351 break;
353 case XenbusStateClosing:
354 tpmif_set_connected_state(tp, 0);
355 break;
357 case XenbusStateClosed:
358 if (tp->is_suspended == 0) {
359 device_unregister(&dev->dev);
360 }
361 break;
362 }
363 }
366 static int tpmfront_probe(struct xenbus_device *dev,
367 const struct xenbus_device_id *id)
368 {
369 int err;
370 int handle;
371 struct tpm_private *tp = tpm_private_get();
373 err = xenbus_scanf(XBT_NULL, dev->nodename,
374 "handle", "%i", &handle);
375 if (XENBUS_EXIST_ERR(err))
376 return err;
378 if (err < 0) {
379 xenbus_dev_fatal(dev,err,"reading virtual-device");
380 return err;
381 }
383 tp->dev = dev;
384 dev->data = tp;
386 err = talk_to_backend(dev, tp);
387 if (err) {
388 tpm_private_free(tp);
389 dev->data = NULL;
390 return err;
391 }
392 return 0;
393 }
396 static int tpmfront_remove(struct xenbus_device *dev)
397 {
398 struct tpm_private *tp = dev->data;
399 destroy_tpmring(tp);
400 return 0;
401 }
403 static int
404 tpmfront_suspend(struct xenbus_device *dev)
405 {
406 struct tpm_private *tp = dev->data;
407 u32 ctr;
409 /* lock, so no app can send */
410 down(&suspend_lock);
411 tp->is_suspended = 1;
413 for (ctr = 0; atomic_read(&tp->tx_busy) && ctr <= 25; ctr++) {
414 if ((ctr % 10) == 0)
415 printk("TPM-FE [INFO]: Waiting for outstanding request.\n");
416 /*
417 * Wait for a request to be responded to.
418 */
419 interruptible_sleep_on_timeout(&tp->wait_q, 100);
420 }
422 if (atomic_read(&tp->tx_busy)) {
423 /*
424 * A temporary work-around.
425 */
426 printk("TPM-FE [WARNING]: Resetting busy flag.");
427 atomic_set(&tp->tx_busy, 0);
428 }
430 return 0;
431 }
433 static int
434 tpmfront_resume(struct xenbus_device *dev)
435 {
436 struct tpm_private *tp = dev->data;
437 return talk_to_backend(dev, tp);
438 }
440 static void
441 tpmif_connect(struct tpm_private *tp, domid_t domid)
442 {
443 int err;
445 tp->backend_id = domid;
447 err = bind_evtchn_to_irqhandler(tp->evtchn,
448 tpmif_int, SA_SAMPLE_RANDOM, "tpmif",
449 tp);
450 if (err <= 0) {
451 WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
452 return;
453 }
455 tp->irq = err;
456 }
458 static struct xenbus_device_id tpmfront_ids[] = {
459 { "vtpm" },
460 { "" }
461 };
463 static struct xenbus_driver tpmfront = {
464 .name = "vtpm",
465 .owner = THIS_MODULE,
466 .ids = tpmfront_ids,
467 .probe = tpmfront_probe,
468 .remove = tpmfront_remove,
469 .resume = tpmfront_resume,
470 .otherend_changed = backend_changed,
471 .suspend = tpmfront_suspend,
472 };
474 static void __init init_tpm_xenbus(void)
475 {
476 xenbus_register_frontend(&tpmfront);
477 }
480 static int
481 tpm_allocate_buffers(struct tpm_private *tp)
482 {
483 unsigned int i;
485 for (i = 0; i < TPMIF_TX_RING_SIZE; i++)
486 tp->tx_buffers[i] = tx_buffer_alloc();
487 return 1;
488 }
490 static void
491 tpmif_rx_action(unsigned long priv)
492 {
493 struct tpm_private *tp = (struct tpm_private *)priv;
495 int i = 0;
496 unsigned int received;
497 unsigned int offset = 0;
498 u8 *buffer;
499 tpmif_tx_request_t *tx;
500 tx = &tp->tx->ring[i].req;
502 received = tx->size;
504 buffer = kmalloc(received, GFP_KERNEL);
505 if (NULL == buffer) {
506 goto exit;
507 }
509 for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
510 struct tx_buffer *txb = tp->tx_buffers[i];
511 tpmif_tx_request_t *tx;
512 unsigned int tocopy;
514 tx = &tp->tx->ring[i].req;
515 tocopy = tx->size;
516 if (tocopy > PAGE_SIZE) {
517 tocopy = PAGE_SIZE;
518 }
520 memcpy(&buffer[offset], txb->data, tocopy);
522 gnttab_release_grant_reference(&gref_head, tx->ref);
524 offset += tocopy;
525 }
527 tpm_fe_send_upperlayer(buffer, received, tp->tx_remember);
528 kfree(buffer);
530 exit:
531 atomic_set(&tp->tx_busy, 0);
532 wake_up_interruptible(&tp->wait_q);
533 }
536 static irqreturn_t
537 tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
538 {
539 struct tpm_private *tp = tpm_priv;
540 unsigned long flags;
542 spin_lock_irqsave(&tp->tx_lock, flags);
543 tpmif_rx_tasklet.data = (unsigned long)tp;
544 tasklet_schedule(&tpmif_rx_tasklet);
545 spin_unlock_irqrestore(&tp->tx_lock, flags);
547 return IRQ_HANDLED;
548 }
551 static int
552 tpm_xmit(struct tpm_private *tp,
553 const u8 * buf, size_t count, int isuserbuffer,
554 void *remember)
555 {
556 tpmif_tx_request_t *tx;
557 TPMIF_RING_IDX i;
558 unsigned int offset = 0;
560 spin_lock_irq(&tp->tx_lock);
562 if (unlikely(atomic_read(&tp->tx_busy))) {
563 printk("tpm_xmit: There's an outstanding request/response "
564 "on the way!\n");
565 spin_unlock_irq(&tp->tx_lock);
566 return -EBUSY;
567 }
569 if (tp->is_connected != 1) {
570 spin_unlock_irq(&tp->tx_lock);
571 return -EIO;
572 }
574 for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
575 struct tx_buffer *txb = tp->tx_buffers[i];
576 int copied;
578 if (NULL == txb) {
579 DPRINTK("txb (i=%d) is NULL. buffers initilized?\n"
580 "Not transmitting anything!\n", i);
581 spin_unlock_irq(&tp->tx_lock);
582 return -EFAULT;
583 }
584 copied = tx_buffer_copy(txb, &buf[offset], count,
585 isuserbuffer);
586 if (copied < 0) {
587 /* An error occurred */
588 spin_unlock_irq(&tp->tx_lock);
589 return copied;
590 }
591 count -= copied;
592 offset += copied;
594 tx = &tp->tx->ring[i].req;
596 tx->id = i;
597 tx->addr = virt_to_machine(txb->data);
598 tx->size = txb->len;
600 DPRINTK("First 4 characters sent by TPM-FE are 0x%02x 0x%02x 0x%02x 0x%02x\n",
601 txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
603 /* get the granttable reference for this page */
604 tx->ref = gnttab_claim_grant_reference(&gref_head);
606 if (-ENOSPC == tx->ref) {
607 spin_unlock_irq(&tp->tx_lock);
608 DPRINTK(" Grant table claim reference failed in func:%s line:%d file:%s\n", __FUNCTION__, __LINE__, __FILE__);
609 return -ENOSPC;
610 }
611 gnttab_grant_foreign_access_ref( tx->ref,
612 tp->backend_id,
613 (tx->addr >> PAGE_SHIFT),
614 0 /*RW*/);
615 wmb();
616 }
618 atomic_set(&tp->tx_busy, 1);
619 tp->tx_remember = remember;
620 mb();
622 DPRINTK("Notifying backend via event channel %d\n",
623 tp->evtchn);
625 notify_remote_via_irq(tp->irq);
627 spin_unlock_irq(&tp->tx_lock);
628 return offset;
629 }
632 static void tpmif_notify_upperlayer(struct tpm_private *tp)
633 {
634 /*
635 * Notify upper layer about the state of the connection
636 * to the BE.
637 */
638 down(&upperlayer_lock);
640 if (upperlayer_tpmfe != NULL) {
641 if (tp->is_connected) {
642 upperlayer_tpmfe->status(TPMFE_STATUS_CONNECTED);
643 } else {
644 upperlayer_tpmfe->status(0);
645 }
646 }
647 up(&upperlayer_lock);
648 }
651 static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
652 {
653 /*
654 * Don't notify upper layer if we are in suspend mode and
655 * should disconnect - assumption is that we will resume
656 * The semaphore keeps apps from sending.
657 */
658 if (is_connected == 0 && tp->is_suspended == 1) {
659 return;
660 }
662 /*
663 * Unlock the semaphore if we are connected again
664 * after being suspended - now resuming.
665 * This also removes the suspend state.
666 */
667 if (is_connected == 1 && tp->is_suspended == 1) {
668 tp->is_suspended = 0;
669 /* unlock, so apps can resume sending */
670 up(&suspend_lock);
671 }
673 if (is_connected != tp->is_connected) {
674 tp->is_connected = is_connected;
675 tpmif_notify_upperlayer(tp);
676 }
677 }
680 /* =================================================================
681 * Initialization function.
682 * =================================================================
683 */
685 static int __init
686 tpmif_init(void)
687 {
688 IPRINTK("Initialising the vTPM driver.\n");
689 if ( gnttab_alloc_grant_references ( TPMIF_TX_RING_SIZE,
690 &gref_head ) < 0) {
691 return -EFAULT;
692 }
694 init_tpm_xenbus();
696 return 0;
697 }
699 __initcall(tpmif_init);
701 /*
702 * Local variables:
703 * c-file-style: "linux"
704 * indent-tabs-mode: t
705 * c-indent-level: 8
706 * c-basic-offset: 8
707 * tab-width: 8
708 * End:
709 */