ia64/xen-unstable

view linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c @ 14221:cc18ea7309b3

[Linux] [TPM driver] Resume frontend after checkpointing

This patch implements TPM frontend driver resume after checkpointing a
domain. I also now let the front-end wait for the response from the
TPM until the response is there. A response must come due to the
strict request/response nature of the protocol, but the time until the
response arrives depends on the type of operation that is executed by
the vTPM. Resending a request is not possible with this type of device.

Signed-off-by: Stefan Berger <stefanb@us.ibm.com>
author kfraser@localhost.localdomain
date Fri Mar 02 16:03:21 2007 +0000 (2007-03-02)
parents 58086aa7c70a
children bc265a79dd32
line source
1 /*
2 * Copyright (c) 2005, IBM Corporation
3 *
4 * Author: Stefan Berger, stefanb@us.ibm.com
5 * Grant table support: Mahadevan Gomathisankaran
6 *
7 * This code has been derived from drivers/xen/netfront/netfront.c
8 *
9 * Copyright (c) 2002-2004, K A Fraser
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License version 2
13 * as published by the Free Software Foundation; or, when distributed
14 * separately from the Linux kernel or incorporated into other
15 * software packages, subject to the following license:
16 *
17 * Permission is hereby granted, free of charge, to any person obtaining a copy
18 * of this source file (the "Software"), to deal in the Software without
19 * restriction, including without limitation the rights to use, copy, modify,
20 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
21 * and to permit persons to whom the Software is furnished to do so, subject to
22 * the following conditions:
23 *
24 * The above copyright notice and this permission notice shall be included in
25 * all copies or substantial portions of the Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
30 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
31 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
32 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
33 * IN THE SOFTWARE.
34 */
36 #include <linux/errno.h>
37 #include <linux/err.h>
38 #include <linux/interrupt.h>
39 #include <linux/mutex.h>
40 #include <asm/uaccess.h>
41 #include <xen/evtchn.h>
42 #include <xen/interface/grant_table.h>
43 #include <xen/interface/io/tpmif.h>
44 #include <xen/gnttab.h>
45 #include <xen/xenbus.h>
46 #include "tpm.h"
47 #include "tpm_vtpm.h"
49 #undef DEBUG
51 /* local structures */
/*
 * Per-frontend state: the shared ring, its grant reference, the
 * event-channel IRQ and the per-slot transmit buffers.  A single
 * instance (my_priv) is shared and reference counted via refcnt.
 */
struct tpm_private {
	struct tpm_chip *chip;		/* chip handle from init_vtpm() */

	tpmif_tx_interface_t *tx;	/* shared ring page granted to the backend */
	atomic_t refcnt;		/* users of this structure (tpm_private_get/put) */
	unsigned int irq;		/* event-channel IRQ; 0 when not bound */
	u8 is_connected;		/* 1 while the backend is connected */
	u8 is_suspended;		/* 1 between suspend and resume/cancel */

	spinlock_t tx_lock;		/* serializes tpm_xmit() against tpmif_int() */

	struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];	/* one page per ring slot */

	atomic_t tx_busy;		/* 1 while a request awaits its response */
	void *tx_remember;		/* opaque cookie handed back in vtpm_vd_recv() */

	domid_t backend_id;		/* backend domain id (grant target) */
	wait_queue_head_t wait_q;	/* woken when the response arrives */

	struct xenbus_device *dev;	/* associated xenbus device */
	int ring_ref;			/* grant reference of the ring page */
};
/* One transmit buffer: a single page granted to the backend per ring slot. */
struct tx_buffer {
	unsigned int size;	// available space in data (PAGE_SIZE, see tx_buffer_alloc)
	unsigned int len;	// used space in data
	unsigned char *data;	// pointer to a page
};
/* locally visible variables */
static grant_ref_t gref_head;		/* pool of grant references for tx pages */
static struct tpm_private *my_priv;	/* singleton frontend instance */

/* local function prototypes */
static irqreturn_t tpmif_int(int irq,
                             void *tpm_priv,
                             struct pt_regs *ptregs);
static void tpmif_rx_action(unsigned long unused);
static int tpmif_connect(struct xenbus_device *dev,
                         struct tpm_private *tp,
                         domid_t domid);
static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
static void tpmif_free_tx_buffers(struct tpm_private *tp);
static void tpmif_set_connected_state(struct tpm_private *tp,
                                      u8 newstate);
static int tpm_xmit(struct tpm_private *tp,
                    const u8 * buf, size_t count, int userbuffer,
                    void *remember);
static void destroy_tpmring(struct tpm_private *tp);
void __exit tpmif_exit(void);

/* Debug / info / warning printk wrappers used throughout this file. */
#define DPRINTK(fmt, args...) \
	pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
#define IPRINTK(fmt, args...) \
	printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
#define WPRINTK(fmt, args...) \
	printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)

/* Sentinel meaning "no grant reference allocated" (see tp->ring_ref). */
#define GRANT_INVALID_REF 0
115 static inline int
116 tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len,
117 int isuserbuffer)
118 {
119 int copied = len;
121 if (len > txb->size) {
122 copied = txb->size;
123 }
124 if (isuserbuffer) {
125 if (copy_from_user(txb->data, src, copied))
126 return -EFAULT;
127 } else {
128 memcpy(txb->data, src, copied);
129 }
130 txb->len = len;
131 return copied;
132 }
134 static inline struct tx_buffer *tx_buffer_alloc(void)
135 {
136 struct tx_buffer *txb = kzalloc(sizeof (struct tx_buffer),
137 GFP_KERNEL);
139 if (txb) {
140 txb->len = 0;
141 txb->size = PAGE_SIZE;
142 txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
143 if (txb->data == NULL) {
144 kfree(txb);
145 txb = NULL;
146 }
147 }
148 return txb;
149 }
152 static inline void tx_buffer_free(struct tx_buffer *txb)
153 {
154 if (txb) {
155 free_page((long)txb->data);
156 kfree(txb);
157 }
158 }
160 /**************************************************************
161 Utility function for the tpm_private structure
162 **************************************************************/
163 static inline void tpm_private_init(struct tpm_private *tp)
164 {
165 spin_lock_init(&tp->tx_lock);
166 init_waitqueue_head(&tp->wait_q);
167 atomic_set(&tp->refcnt, 1);
168 }
170 static inline void tpm_private_put(void)
171 {
172 if ( atomic_dec_and_test(&my_priv->refcnt)) {
173 tpmif_free_tx_buffers(my_priv);
174 kfree(my_priv);
175 my_priv = NULL;
176 }
177 }
179 static struct tpm_private *tpm_private_get(void)
180 {
181 int err;
182 if (!my_priv) {
183 my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
184 if (my_priv) {
185 tpm_private_init(my_priv);
186 err = tpmif_allocate_tx_buffers(my_priv);
187 if (err < 0) {
188 tpm_private_put();
189 }
190 }
191 } else {
192 atomic_inc(&my_priv->refcnt);
193 }
194 return my_priv;
195 }
197 /**************************************************************
199 The interface to let the tpm plugin register its callback
200 function and send data to another partition using this module
202 **************************************************************/
/* Serializes transmissions against suspend/resume (see tpmfront_suspend). */
static DEFINE_MUTEX(suspend_lock);
/*
 * Send data via this module by calling this function
 */
/*
 * Queue a request for the backend on behalf of the vTPM layer.
 * Returns the number of bytes queued, or a negative errno from
 * tpm_xmit().  Blocks on suspend_lock while a suspend is in progress.
 */
int vtpm_vd_send(struct tpm_private *tp,
                 const u8 * buf, size_t count, void *ptr)
{
	int sent;

	mutex_lock(&suspend_lock);
	sent = tpm_xmit(tp, buf, count, 0, ptr);
	mutex_unlock(&suspend_lock);

	return sent;
}
220 /**************************************************************
221 XENBUS support code
222 **************************************************************/
/*
 * Allocate the shared ring page, grant the backend access to it and
 * bind the event channel.  Returns 0 on success; on failure the
 * partially set-up ring is torn down and a negative errno returned.
 */
static int setup_tpmring(struct xenbus_device *dev,
                         struct tpm_private *tp)
{
	tpmif_tx_interface_t *sring;
	int err;

	tp->ring_ref = GRANT_INVALID_REF;

	sring = (void *)__get_free_page(GFP_KERNEL);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	tp->tx = sring;

	err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
	if (err < 0) {
		/* Page was never granted, so free it directly here
		 * rather than via destroy_tpmring(). */
		free_page((unsigned long)sring);
		tp->tx = NULL;
		xenbus_dev_fatal(dev, err, "allocating grant reference");
		goto fail;
	}
	/* On success xenbus_grant_ring() returns the grant reference. */
	tp->ring_ref = err;

	err = tpmif_connect(dev, tp, dev->otherend_id);
	if (err)
		goto fail;

	return 0;
fail:
	destroy_tpmring(tp);
	return err;
}
/*
 * Undo setup_tpmring(): tell the upper layer we are disconnected,
 * revoke the backend's access to the ring page (which also frees the
 * page) and unbind the event-channel IRQ.  Safe to call on a ring
 * that was only partially set up.
 */
static void destroy_tpmring(struct tpm_private *tp)
{
	tpmif_set_connected_state(tp, 0);

	if (tp->ring_ref != GRANT_INVALID_REF) {
		/* Ends foreign access and frees the ring page. */
		gnttab_end_foreign_access(tp->ring_ref, 0,
					  (unsigned long)tp->tx);
		tp->ring_ref = GRANT_INVALID_REF;
		tp->tx = NULL;
	}

	if (tp->irq)
		unbind_from_irqhandler(tp->irq, tp);

	tp->irq = 0;
}
/*
 * Set up the ring, then publish ring-ref and event-channel to
 * xenstore in a single transaction and switch the frontend to
 * Connected.  The transaction is retried on -EAGAIN; on any other
 * failure the ring is torn down again and a negative errno returned.
 */
static int talk_to_backend(struct xenbus_device *dev,
                           struct tpm_private *tp)
{
	const char *message = NULL;
	int err;
	struct xenbus_transaction xbt;

	err = setup_tpmring(dev, tp);
	if (err) {
		xenbus_dev_fatal(dev, err, "setting up ring");
		goto out;
	}

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_tpmring;
	}

	err = xenbus_printf(xbt, dev->nodename,
	                    "ring-ref","%u", tp->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
	                    irq_to_evtchn_port(tp->irq));
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;	/* transaction raced with another writer; retry */
	if (err) {
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_tpmring;
	}

	xenbus_switch_state(dev, XenbusStateConnected);

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_error(dev, err, "%s", message);
destroy_tpmring:
	destroy_tpmring(tp);
out:
	return err;
}
333 /**
334 * Callback received when the backend's state changes.
335 */
static void backend_changed(struct xenbus_device *dev,
                            enum xenbus_state backend_state)
{
	struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
	DPRINTK("\n");

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
		/* Nothing to do until the backend is fully up. */
		break;

	case XenbusStateConnected:
		tpmif_set_connected_state(tp, 1);
		break;

	case XenbusStateClosing:
		tpmif_set_connected_state(tp, 0);
		xenbus_frontend_closed(dev);
		break;

	case XenbusStateClosed:
		tpmif_set_connected_state(tp, 0);
		/* Keep the device around across a checkpoint/suspend;
		 * only drop it on a real shutdown of the backend. */
		if (tp->is_suspended == 0)
			device_unregister(&dev->dev);
		/* NOTE(review): device_unregister() above may release
		 * dev; confirm dev is still valid for this call. */
		xenbus_frontend_closed(dev);
		break;
	}
}
/* Capabilities advertised to the generic vTPM layer: the largest
 * request we can queue is the whole transmit ring. */
struct tpm_virtual_device tvd = {
	.max_tx_size = PAGE_SIZE * TPMIF_TX_RING_SIZE,
};
371 static int tpmfront_probe(struct xenbus_device *dev,
372 const struct xenbus_device_id *id)
373 {
374 int err;
375 int handle;
376 struct tpm_private *tp = tpm_private_get();
378 if (!tp)
379 return -ENOMEM;
381 tp->chip = init_vtpm(&dev->dev, &tvd, tp);
383 if (IS_ERR(tp->chip)) {
384 return PTR_ERR(tp->chip);
385 }
387 err = xenbus_scanf(XBT_NIL, dev->nodename,
388 "handle", "%i", &handle);
389 if (XENBUS_EXIST_ERR(err))
390 return err;
392 if (err < 0) {
393 xenbus_dev_fatal(dev,err,"reading virtual-device");
394 return err;
395 }
397 tp->dev = dev;
399 err = talk_to_backend(dev, tp);
400 if (err) {
401 tpm_private_put();
402 return err;
403 }
404 return 0;
405 }
408 static int tpmfront_remove(struct xenbus_device *dev)
409 {
410 struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
411 destroy_tpmring(tp);
412 cleanup_vtpm(&dev->dev);
413 return 0;
414 }
416 static int tpmfront_suspend(struct xenbus_device *dev)
417 {
418 struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
419 u32 ctr;
420 /* lock, so no app can send */
421 mutex_lock(&suspend_lock);
422 tp->is_suspended = 1;
424 for (ctr = 0; atomic_read(&tp->tx_busy); ctr++) {
425 if ((ctr % 10) == 0)
426 printk("TPM-FE [INFO]: Waiting for outstanding "
427 "request.\n");
428 /*
429 * Wait for a request to be responded to.
430 */
431 interruptible_sleep_on_timeout(&tp->wait_q, 100);
432 }
434 return 0;
435 }
437 static int __tpmfront_suspend_cancel(struct tpm_private *tp)
438 {
439 tp->is_suspended = 0;
440 /* unlock, so apps can send again */
441 mutex_unlock(&suspend_lock);
442 return 0;
443 }
445 static int tpmfront_suspend_cancel(struct xenbus_device *dev)
446 {
447 struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
448 return __tpmfront_suspend_cancel(tp);
449 }
451 static int tpmfront_resume(struct xenbus_device *dev)
452 {
453 struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
454 destroy_tpmring(tp);
455 return talk_to_backend(dev, tp);
456 }
458 static int tpmif_connect(struct xenbus_device *dev,
459 struct tpm_private *tp,
460 domid_t domid)
461 {
462 int err;
464 tp->backend_id = domid;
466 err = bind_listening_port_to_irqhandler(
467 domid, tpmif_int, SA_SAMPLE_RANDOM, "tpmif", tp);
468 if (err <= 0) {
469 WPRINTK("bind_listening_port_to_irqhandler failed "
470 "(err=%d)\n", err);
471 return err;
472 }
473 tp->irq = err;
475 return 0;
476 }
/* Device types this frontend binds to (xenstore type "vtpm"). */
static struct xenbus_device_id tpmfront_ids[] = {
	{ "vtpm" },
	{ "" }
};

/* xenbus driver glue: probe/remove plus suspend/resume handling. */
static struct xenbus_driver tpmfront = {
	.name = "vtpm",
	.owner = THIS_MODULE,
	.ids = tpmfront_ids,
	.probe = tpmfront_probe,
	.remove = tpmfront_remove,
	.resume = tpmfront_resume,
	.otherend_changed = backend_changed,
	.suspend = tpmfront_suspend,
	.suspend_cancel = tpmfront_suspend_cancel,
};

/* Register the frontend driver with xenbus.
 * NOTE(review): the return value of xenbus_register_frontend() is
 * ignored, so a registration failure goes unnoticed — consider
 * propagating it to tpmif_init(). */
static void __init init_tpm_xenbus(void)
{
	xenbus_register_frontend(&tpmfront);
}

/* Unregister the frontend driver from xenbus. */
static void __exit exit_tpm_xenbus(void)
{
	xenbus_unregister_driver(&tpmfront);
}
505 static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
506 {
507 unsigned int i;
509 for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
510 tp->tx_buffers[i] = tx_buffer_alloc();
511 if (!tp->tx_buffers[i]) {
512 tpmif_free_tx_buffers(tp);
513 return -ENOMEM;
514 }
515 }
516 return 0;
517 }
519 static void tpmif_free_tx_buffers(struct tpm_private *tp)
520 {
521 unsigned int i;
523 for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
524 tx_buffer_free(tp->tx_buffers[i]);
525 }
526 }
/*
 * Tasklet: gather the backend's response out of the ring, release the
 * grant references claimed in tpm_xmit() and pass the data up to the
 * generic vTPM layer.  Runs after tpmif_int() scheduled it.
 */
static void tpmif_rx_action(unsigned long priv)
{
	struct tpm_private *tp = (struct tpm_private *)priv;

	int i = 0;
	unsigned int received;
	unsigned int offset = 0;
	u8 *buffer;
	tpmif_tx_request_t *tx;
	tx = &tp->tx->ring[i].req;

	/* The response is in: clear the busy flag and wake any waiter
	 * (tpmfront_suspend) before copying the data out. */
	atomic_set(&tp->tx_busy, 0);
	wake_up_interruptible(&tp->wait_q);

	/* NOTE(review): assumes the backend stored the total response
	 * length in ring slot 0's size field — confirm against the
	 * backend implementation. */
	received = tx->size;

	buffer = kmalloc(received, GFP_ATOMIC);
	if (NULL == buffer) {
		/* NOTE(review): on allocation failure the grant
		 * references claimed in tpm_xmit() are not released
		 * here — verify whether this leaks them. */
		goto exit;
	}

	/* Reassemble the response, one page-sized chunk per ring slot. */
	for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
		struct tx_buffer *txb = tp->tx_buffers[i];
		tpmif_tx_request_t *tx;
		unsigned int tocopy;

		tx = &tp->tx->ring[i].req;
		tocopy = tx->size;
		if (tocopy > PAGE_SIZE) {
			tocopy = PAGE_SIZE;	/* never read past the granted page */
		}

		memcpy(&buffer[offset], txb->data, tocopy);

		/* Return this slot's grant reference to the pool. */
		gnttab_release_grant_reference(&gref_head, tx->ref);

		offset += tocopy;
	}

	/* Hand the complete response to the generic vTPM layer, along
	 * with the cookie remembered in tpm_xmit(). */
	vtpm_vd_recv(tp->chip, buffer, received, tp->tx_remember);
	kfree(buffer);

exit:

	return;
}
576 static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
577 {
578 struct tpm_private *tp = tpm_priv;
579 unsigned long flags;
581 spin_lock_irqsave(&tp->tx_lock, flags);
582 tpmif_rx_tasklet.data = (unsigned long)tp;
583 tasklet_schedule(&tpmif_rx_tasklet);
584 spin_unlock_irqrestore(&tp->tx_lock, flags);
586 return IRQ_HANDLED;
587 }
/*
 * Copy a request into the transmit buffers, grant the backend access
 * to each page, fill in the ring slots and kick the backend.
 *
 * @tp:           frontend instance
 * @buf:          request data (kernel or user space)
 * @count:        request length in bytes
 * @isuserbuffer: non-zero if buf is a user-space pointer
 * @remember:     opaque cookie returned to the caller in vtpm_vd_recv()
 *
 * Returns the number of bytes queued, or a negative errno (-EBUSY
 * while a previous request is still outstanding, -EIO when not
 * connected, -EFAULT/-ENOSPC on buffer or grant failures).  Only one
 * request may be in flight at a time (tx_busy).
 */
static int tpm_xmit(struct tpm_private *tp,
                    const u8 * buf, size_t count, int isuserbuffer,
                    void *remember)
{
	tpmif_tx_request_t *tx;
	TPMIF_RING_IDX i;
	unsigned int offset = 0;

	spin_lock_irq(&tp->tx_lock);

	if (unlikely(atomic_read(&tp->tx_busy))) {
		printk("tpm_xmit: There's an outstanding request/response "
		       "on the way!\n");
		spin_unlock_irq(&tp->tx_lock);
		return -EBUSY;
	}

	if (tp->is_connected != 1) {
		spin_unlock_irq(&tp->tx_lock);
		return -EIO;
	}

	/* Split the request into page-sized chunks, one ring slot each. */
	for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
		struct tx_buffer *txb = tp->tx_buffers[i];
		int copied;

		if (NULL == txb) {
			DPRINTK("txb (i=%d) is NULL. buffers initilized?\n"
				"Not transmitting anything!\n", i);
			spin_unlock_irq(&tp->tx_lock);
			return -EFAULT;
		}
		copied = tx_buffer_copy(txb, &buf[offset], count,
		                        isuserbuffer);
		if (copied < 0) {
			/* An error occurred */
			spin_unlock_irq(&tp->tx_lock);
			return copied;
		}
		count -= copied;
		offset += copied;

		tx = &tp->tx->ring[i].req;

		tx->addr = virt_to_machine(txb->data);
		tx->size = txb->len;

		DPRINTK("First 4 characters sent by TPM-FE are 0x%02x 0x%02x 0x%02x 0x%02x\n",
		        txb->data[0],txb->data[1],txb->data[2],txb->data[3]);

		/* get the granttable reference for this page */
		tx->ref = gnttab_claim_grant_reference(&gref_head);

		if (-ENOSPC == tx->ref) {
			spin_unlock_irq(&tp->tx_lock);
			DPRINTK(" Grant table claim reference failed in func:%s line:%d file:%s\n", __FUNCTION__, __LINE__, __FILE__);
			/* NOTE(review): references claimed for earlier
			 * slots in this loop are not released on this
			 * path — verify whether they leak. */
			return -ENOSPC;
		}
		gnttab_grant_foreign_access_ref( tx->ref,
						 tp->backend_id,
						 virt_to_mfn(txb->data),
						 0 /*RW*/);
		/* Slot must be fully visible before the next is written. */
		wmb();
	}

	/* Mark the request outstanding; cleared by tpmif_rx_action(). */
	atomic_set(&tp->tx_busy, 1);
	tp->tx_remember = remember;

	/* All ring updates must be visible before kicking the backend. */
	mb();

	notify_remote_via_irq(tp->irq);

	spin_unlock_irq(&tp->tx_lock);
	return offset;
}
667 static void tpmif_notify_upperlayer(struct tpm_private *tp)
668 {
669 /*
670 * Notify upper layer about the state of the connection
671 * to the BE.
672 */
673 if (tp->is_connected) {
674 vtpm_vd_status(tp->chip, TPM_VD_STATUS_CONNECTED);
675 } else {
676 vtpm_vd_status(tp->chip, TPM_VD_STATUS_DISCONNECTED);
677 }
678 }
/*
 * Track the connection state toward the backend and notify the upper
 * layer about changes, with special handling around suspend/resume.
 */
static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
{
	/*
	 * Don't notify upper layer if we are in suspend mode and
	 * should disconnect - assumption is that we will resume
	 * The mutex keeps apps from sending.
	 */
	if (is_connected == 0 && tp->is_suspended == 1) {
		return;
	}

	/*
	 * Unlock the mutex if we are connected again
	 * after being suspended - now resuming.
	 * This also removes the suspend state.
	 */
	if (is_connected == 1 && tp->is_suspended == 1) {
		__tpmfront_suspend_cancel(tp);
	}

	/* Only notify the upper layer on actual state transitions. */
	if (is_connected != tp->is_connected) {
		tp->is_connected = is_connected;
		tpmif_notify_upperlayer(tp);
	}
}
709 /* =================================================================
710 * Initialization function.
711 * =================================================================
712 */
/*
 * Module init: refuse to load in the initial domain, create the
 * shared tpm_private instance, pre-allocate grant references for the
 * transmit ring and register the xenbus frontend.
 */
static int __init tpmif_init(void)
{
	long rc = 0;
	struct tpm_private *tp;

	/* The frontend has no purpose in the backend (initial) domain. */
	if (is_initial_xendomain())
		return -EPERM;

	tp = tpm_private_get();
	if (!tp) {
		rc = -ENOMEM;
		goto failexit;
	}

	IPRINTK("Initialising the vTPM driver.\n");
	if ( gnttab_alloc_grant_references ( TPMIF_TX_RING_SIZE,
	                                     &gref_head ) < 0) {
		rc = -EFAULT;
		goto gnttab_alloc_failed;
	}

	init_tpm_xenbus();
	return 0;

gnttab_alloc_failed:
	/* Drop the reference taken by tpm_private_get() above. */
	tpm_private_put();
failexit:

	return (int)rc;
}
/*
 * Module exit: unregister the frontend, drop the tpm_private
 * reference taken in tpmif_init() and return the grant references.
 */
void __exit tpmif_exit(void)
{
	exit_tpm_xenbus();
	tpm_private_put();
	gnttab_free_grant_references(gref_head);
}

module_init(tpmif_init);
/* NOTE(review): there is no module_exit(tpmif_exit) in this file —
 * verify tpmif_exit() is wired up elsewhere. */

MODULE_LICENSE("Dual BSD/GPL");