ia64/xen-unstable

view linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c @ 10076:18c3da3ad6f7

When doing local migration, a timing-related problem occurred because the
frontend switched to the Closed state, which could result in the .remove
function being called after the backend had already shut down. This patch
fixes the problem by switching to the Closing state instead.
The other part of the patch cleans up the freeing of memory.

Signed-off-by: Stefan Berger <stefanb@us.ibm.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed May 17 23:28:22 2006 +0100 (2006-05-17)
parents f6ebf236faee
children 48c0f5489d44
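For context, the Closing transition referred to in the changeset message appears in tpmfront_suspend() in the listing that follows. A minimal excerpt (abridged; the comment is added here for orientation only):

static int tpmfront_suspend(struct xenbus_device *dev)
{
        struct tpm_private *tp = (struct tpm_private *)dev->data;
        ...
        /* Announce Closing rather than Closed, so the frontend's .remove
           handler is not invoked after the backend has already shut down. */
        xenbus_switch_state(dev, XenbusStateClosing);
        ...
        return 0;
}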
line source
/*
 * Copyright (c) 2005, IBM Corporation
 *
 * Author: Stefan Berger, stefanb@us.ibm.com
 * Grant table support: Mahadevan Gomathisankaran
 *
 * This code has been derived from drivers/xen/netfront/netfront.c
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <xen/evtchn.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/io/tpmif.h>
#include <xen/xenbus.h>
#include "tpm_vtpm.h"

#undef DEBUG
/* local structures */
struct tpm_private {
        tpmif_tx_interface_t *tx;
        atomic_t refcnt;
        unsigned int evtchn;
        unsigned int irq;
        u8 is_connected;
        u8 is_suspended;

        spinlock_t tx_lock;

        struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];

        atomic_t tx_busy;
        void *tx_remember;
        domid_t backend_id;
        wait_queue_head_t wait_q;

        struct xenbus_device *dev;
        int ring_ref;
};

struct tx_buffer {
        unsigned int size;      /* available space in data */
        unsigned int len;       /* used space in data */
        unsigned char *data;    /* pointer to a page */
};
/* locally visible variables */
static grant_ref_t gref_head;
static struct tpm_private *my_priv;

/* local function prototypes */
static irqreturn_t tpmif_int(int irq,
                             void *tpm_priv,
                             struct pt_regs *ptregs);
static void tpmif_rx_action(unsigned long unused);
static int tpmif_connect(struct xenbus_device *dev,
                         struct tpm_private *tp,
                         domid_t domid);
static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
static void tpmif_free_tx_buffers(struct tpm_private *tp);
static void tpmif_set_connected_state(struct tpm_private *tp,
                                      u8 newstate);
static int tpm_xmit(struct tpm_private *tp,
                    const u8 *buf, size_t count, int userbuffer,
                    void *remember);
static void destroy_tpmring(struct tpm_private *tp);

#define DPRINTK(fmt, args...) \
        pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
#define IPRINTK(fmt, args...) \
        printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
#define WPRINTK(fmt, args...) \
        printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)

#define GRANT_INVALID_REF 0
static inline int
tx_buffer_copy(struct tx_buffer *txb, const u8 *src, int len,
               int isuserbuffer)
{
        int copied = len;

        if (len > txb->size) {
                copied = txb->size;
        }
        if (isuserbuffer) {
                if (copy_from_user(txb->data, src, copied))
                        return -EFAULT;
        } else {
                memcpy(txb->data, src, copied);
        }
        /* Only the bytes actually copied are valid in the buffer. */
        txb->len = copied;
        return copied;
}
static inline struct tx_buffer *tx_buffer_alloc(void)
{
        struct tx_buffer *txb = kzalloc(sizeof(struct tx_buffer),
                                        GFP_KERNEL);

        if (txb) {
                txb->len = 0;
                txb->size = PAGE_SIZE;
                txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
                if (txb->data == NULL) {
                        kfree(txb);
                        txb = NULL;
                }
        }
        return txb;
}
static inline void tx_buffer_free(struct tx_buffer *txb)
{
        if (txb) {
                free_page((unsigned long)txb->data);
                kfree(txb);
        }
}
/**************************************************************
 Utility function for the tpm_private structure
**************************************************************/
static inline void tpm_private_init(struct tpm_private *tp)
{
        spin_lock_init(&tp->tx_lock);
        init_waitqueue_head(&tp->wait_q);
        atomic_set(&tp->refcnt, 1);
}

static inline void tpm_private_put(void)
{
        if (atomic_dec_and_test(&my_priv->refcnt)) {
                tpmif_free_tx_buffers(my_priv);
                kfree(my_priv);
                my_priv = NULL;
        }
}
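/*
 * Return the single tpm_private instance, allocating and initializing it
 * (including its tx buffers) on first use.  Callers drop their reference
 * with tpm_private_put().
 */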
static struct tpm_private *tpm_private_get(void)
{
        int err;

        if (!my_priv) {
                my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
                if (my_priv) {
                        tpm_private_init(my_priv);
                        err = tpmif_allocate_tx_buffers(my_priv);
                        if (err < 0) {
                                tpm_private_put();
                        }
                }
        } else {
                atomic_inc(&my_priv->refcnt);
        }
        return my_priv;
}
/**************************************************************

 The interface to let the tpm plugin register its callback
 function and send data to another partition using this module

**************************************************************/

static DEFINE_MUTEX(suspend_lock);
/*
 * Send data via this module by calling this function
 */
int vtpm_vd_send(struct tpm_chip *chip,
                 struct tpm_private *tp,
                 const u8 *buf, size_t count, void *ptr)
{
        int sent;

        mutex_lock(&suspend_lock);
        sent = tpm_xmit(tp, buf, count, 0, ptr);
        mutex_unlock(&suspend_lock);

        return sent;
}
/**************************************************************
 XENBUS support code
**************************************************************/
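/*
 * Allocate a page for the shared transmit ring, grant the backend access
 * to it, and bind the event channel used to signal the backend.
 */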
static int setup_tpmring(struct xenbus_device *dev,
                         struct tpm_private *tp)
{
        tpmif_tx_interface_t *sring;
        int err;

        tp->ring_ref = GRANT_INVALID_REF;

        sring = (void *)__get_free_page(GFP_KERNEL);
        if (!sring) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
                return -ENOMEM;
        }
        tp->tx = sring;

        err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
        if (err < 0) {
                free_page((unsigned long)sring);
                tp->tx = NULL;
                xenbus_dev_fatal(dev, err, "allocating grant reference");
                goto fail;
        }
        tp->ring_ref = err;

        err = tpmif_connect(dev, tp, dev->otherend_id);
        if (err)
                goto fail;

        return 0;
fail:
        destroy_tpmring(tp);
        return err;
}
static void destroy_tpmring(struct tpm_private *tp)
{
        tpmif_set_connected_state(tp, 0);

        if (tp->ring_ref != GRANT_INVALID_REF) {
                gnttab_end_foreign_access(tp->ring_ref, 0,
                                          (unsigned long)tp->tx);
                tp->ring_ref = GRANT_INVALID_REF;
                tp->tx = NULL;
        }

        if (tp->irq)
                unbind_from_irqhandler(tp->irq, tp);

        tp->evtchn = tp->irq = 0;
}
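/*
 * Set up the ring and publish its grant reference and event channel to
 * xenstore in a single transaction, then announce the Connected state.
 */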
static int talk_to_backend(struct xenbus_device *dev,
                           struct tpm_private *tp)
{
        const char *message = NULL;
        int err;
        xenbus_transaction_t xbt;

        err = setup_tpmring(dev, tp);
        if (err) {
                xenbus_dev_fatal(dev, err, "setting up ring");
                goto out;
        }

again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                xenbus_dev_fatal(dev, err, "starting transaction");
                goto destroy_tpmring;
        }

        err = xenbus_printf(xbt, dev->nodename,
                            "ring-ref", "%u", tp->ring_ref);
        if (err) {
                message = "writing ring-ref";
                goto abort_transaction;
        }

        err = xenbus_printf(xbt, dev->nodename,
                            "event-channel", "%u", tp->evtchn);
        if (err) {
                message = "writing event-channel";
                goto abort_transaction;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err) {
                xenbus_dev_fatal(dev, err, "completing transaction");
                goto destroy_tpmring;
        }

        xenbus_switch_state(dev, XenbusStateConnected);

        return 0;

abort_transaction:
        xenbus_transaction_end(xbt, 1);
        if (message)
                xenbus_dev_error(dev, err, "%s", message);
destroy_tpmring:
        destroy_tpmring(tp);
out:
        return err;
}
/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
                            XenbusState backend_state)
{
        struct tpm_private *tp = dev->data;
        DPRINTK("\n");

        switch (backend_state) {
        case XenbusStateInitialising:
        case XenbusStateInitWait:
        case XenbusStateInitialised:
        case XenbusStateUnknown:
                break;

        case XenbusStateConnected:
                tpmif_set_connected_state(tp, 1);
                break;

        case XenbusStateClosing:
                tpmif_set_connected_state(tp, 0);
                break;

        case XenbusStateClosed:
                if (tp->is_suspended == 0) {
                        device_unregister(&dev->dev);
                }
                xenbus_switch_state(dev, XenbusStateClosed);
                break;
        }
}
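/*
 * Probe: read the "handle" node for this device, obtain the shared
 * tpm_private instance, and connect to the backend.
 */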
static int tpmfront_probe(struct xenbus_device *dev,
                          const struct xenbus_device_id *id)
{
        int err;
        int handle;
        struct tpm_private *tp = tpm_private_get();

        if (!tp)
                return -ENOMEM;

        err = xenbus_scanf(XBT_NULL, dev->nodename,
                           "handle", "%i", &handle);
        if (XENBUS_EXIST_ERR(err))
                return err;

        if (err < 0) {
                xenbus_dev_fatal(dev, err, "reading handle");
                return err;
        }

        tp->dev = dev;
        dev->data = tp;

        err = talk_to_backend(dev, tp);
        if (err) {
                tpm_private_put();
                dev->data = NULL;
                return err;
        }
        return 0;
}
static int tpmfront_remove(struct xenbus_device *dev)
{
        struct tpm_private *tp = (struct tpm_private *)dev->data;
        destroy_tpmring(tp);
        return 0;
}
static int tpmfront_suspend(struct xenbus_device *dev)
{
        struct tpm_private *tp = (struct tpm_private *)dev->data;
        u32 ctr;

        /* lock, so no app can send */
        mutex_lock(&suspend_lock);
        tp->is_suspended = 1;

        for (ctr = 0; atomic_read(&tp->tx_busy) && ctr <= 25; ctr++) {
                if ((ctr % 10) == 0)
                        printk(KERN_INFO "TPM-FE [INFO]: Waiting for outstanding request.\n");
                /*
                 * Wait for a request to be responded to.
                 */
                interruptible_sleep_on_timeout(&tp->wait_q, 100);
        }
        xenbus_switch_state(dev, XenbusStateClosing);

        if (atomic_read(&tp->tx_busy)) {
                /*
                 * A temporary work-around.
                 */
                printk(KERN_WARNING "TPM-FE [WARNING]: Resetting busy flag.\n");
                atomic_set(&tp->tx_busy, 0);
        }

        return 0;
}
static int tpmfront_resume(struct xenbus_device *dev)
{
        struct tpm_private *tp = (struct tpm_private *)dev->data;
        destroy_tpmring(tp);
        return talk_to_backend(dev, tp);
}
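/*
 * Allocate an event channel for the device and bind it to the interrupt
 * handler that schedules the receive tasklet.
 */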
static int tpmif_connect(struct xenbus_device *dev,
                         struct tpm_private *tp,
                         domid_t domid)
{
        int err;

        tp->backend_id = domid;

        err = xenbus_alloc_evtchn(dev, &tp->evtchn);
        if (err)
                return err;

        err = bind_evtchn_to_irqhandler(tp->evtchn,
                                        tpmif_int, SA_SAMPLE_RANDOM, "tpmif",
                                        tp);
        if (err <= 0) {
                WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
                return err;
        }

        tp->irq = err;
        return 0;
}
static struct xenbus_device_id tpmfront_ids[] = {
        { "vtpm" },
        { "" }
};

static struct xenbus_driver tpmfront = {
        .name = "vtpm",
        .owner = THIS_MODULE,
        .ids = tpmfront_ids,
        .probe = tpmfront_probe,
        .remove = tpmfront_remove,
        .resume = tpmfront_resume,
        .otherend_changed = backend_changed,
        .suspend = tpmfront_suspend,
};

static void __init init_tpm_xenbus(void)
{
        xenbus_register_frontend(&tpmfront);
}

static void __exit exit_tpm_xenbus(void)
{
        xenbus_unregister_driver(&tpmfront);
}
static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
{
        unsigned int i;

        for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
                tp->tx_buffers[i] = tx_buffer_alloc();
                if (!tp->tx_buffers[i]) {
                        tpmif_free_tx_buffers(tp);
                        return -ENOMEM;
                }
        }
        return 0;
}

static void tpmif_free_tx_buffers(struct tpm_private *tp)
{
        unsigned int i;

        for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
                tx_buffer_free(tp->tx_buffers[i]);
        }
}
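/*
 * Tasklet run after the backend signals completion: gather the response
 * from the per-slot tx buffers into one linear buffer, release the grant
 * references, and pass the result up to the vTPM layer.
 */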
static void tpmif_rx_action(unsigned long priv)
{
        struct tpm_private *tp = (struct tpm_private *)priv;

        int i = 0;
        unsigned int received;
        unsigned int offset = 0;
        u8 *buffer;
        tpmif_tx_request_t *tx;
        tx = &tp->tx->ring[i].req;

        atomic_set(&tp->tx_busy, 0);
        wake_up_interruptible(&tp->wait_q);

        received = tx->size;

        buffer = kmalloc(received, GFP_ATOMIC);
        if (NULL == buffer) {
                goto exit;
        }

        for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
                struct tx_buffer *txb = tp->tx_buffers[i];
                tpmif_tx_request_t *tx;
                unsigned int tocopy;

                tx = &tp->tx->ring[i].req;
                tocopy = tx->size;
                if (tocopy > PAGE_SIZE) {
                        tocopy = PAGE_SIZE;
                }

                memcpy(&buffer[offset], txb->data, tocopy);

                gnttab_release_grant_reference(&gref_head, tx->ref);

                offset += tocopy;
        }

        vtpm_vd_recv(buffer, received, tp->tx_remember);
        kfree(buffer);

exit:
        return;
}
static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
{
        struct tpm_private *tp = tpm_priv;
        unsigned long flags;

        spin_lock_irqsave(&tp->tx_lock, flags);
        tpmif_rx_tasklet.data = (unsigned long)tp;
        tasklet_schedule(&tpmif_rx_tasklet);
        spin_unlock_irqrestore(&tp->tx_lock, flags);

        return IRQ_HANDLED;
}
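/*
 * Copy the request into the per-slot tx buffers, grant the backend access
 * to each page through the shared ring, and notify it via the event
 * channel.  Returns the number of bytes queued, or a negative error.
 */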
static int tpm_xmit(struct tpm_private *tp,
                    const u8 *buf, size_t count, int isuserbuffer,
                    void *remember)
{
        tpmif_tx_request_t *tx;
        TPMIF_RING_IDX i;
        unsigned int offset = 0;

        spin_lock_irq(&tp->tx_lock);

        if (unlikely(atomic_read(&tp->tx_busy))) {
                printk(KERN_WARNING "tpm_xmit: There's an outstanding "
                       "request/response on the way!\n");
                spin_unlock_irq(&tp->tx_lock);
                return -EBUSY;
        }

        if (tp->is_connected != 1) {
                spin_unlock_irq(&tp->tx_lock);
                return -EIO;
        }

        for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
                struct tx_buffer *txb = tp->tx_buffers[i];
                int copied;

                if (NULL == txb) {
                        DPRINTK("txb (i=%d) is NULL. Buffers initialized?\n"
                                "Not transmitting anything!\n", i);
                        spin_unlock_irq(&tp->tx_lock);
                        return -EFAULT;
                }
                copied = tx_buffer_copy(txb, &buf[offset], count,
                                        isuserbuffer);
                if (copied < 0) {
                        /* An error occurred */
                        spin_unlock_irq(&tp->tx_lock);
                        return copied;
                }
                count -= copied;
                offset += copied;

                tx = &tp->tx->ring[i].req;

                tx->addr = virt_to_machine(txb->data);
                tx->size = txb->len;

                DPRINTK("First 4 characters sent by TPM-FE are "
                        "0x%02x 0x%02x 0x%02x 0x%02x\n",
                        txb->data[0], txb->data[1], txb->data[2], txb->data[3]);

                /* Get the grant table reference for this page. */
                tx->ref = gnttab_claim_grant_reference(&gref_head);

                if (-ENOSPC == tx->ref) {
                        spin_unlock_irq(&tp->tx_lock);
                        DPRINTK("Grant table claim reference failed in "
                                "func:%s line:%d file:%s\n",
                                __FUNCTION__, __LINE__, __FILE__);
                        return -ENOSPC;
                }
                gnttab_grant_foreign_access_ref(tx->ref,
                                                tp->backend_id,
                                                (tx->addr >> PAGE_SHIFT),
                                                0 /*RW*/);
                wmb();
        }

        atomic_set(&tp->tx_busy, 1);
        tp->tx_remember = remember;
        mb();

        DPRINTK("Notifying backend via event channel %d\n",
                tp->evtchn);

        notify_remote_via_irq(tp->irq);

        spin_unlock_irq(&tp->tx_lock);
        return offset;
}
static void tpmif_notify_upperlayer(struct tpm_private *tp)
{
        /*
         * Notify upper layer about the state of the connection
         * to the BE.
         */
        if (tp->is_connected) {
                vtpm_vd_status(TPM_VD_STATUS_CONNECTED);
        } else {
                vtpm_vd_status(TPM_VD_STATUS_DISCONNECTED);
        }
}
static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
{
        /*
         * Don't notify the upper layer if we are in suspend mode and
         * should disconnect - the assumption is that we will resume.
         * The mutex keeps apps from sending.
         */
        if (is_connected == 0 && tp->is_suspended == 1) {
                return;
        }

        /*
         * Unlock the mutex if we are connected again
         * after being suspended - now resuming.
         * This also removes the suspend state.
         */
        if (is_connected == 1 && tp->is_suspended == 1) {
                tp->is_suspended = 0;
                /* unlock, so apps can resume sending */
                mutex_unlock(&suspend_lock);
        }

        if (is_connected != tp->is_connected) {
                tp->is_connected = is_connected;
                tpmif_notify_upperlayer(tp);
        }
}
/* =================================================================
 * Initialization function.
 * =================================================================
 */

struct tpm_virtual_device tvd = {
        .max_tx_size = PAGE_SIZE * TPMIF_TX_RING_SIZE,
};

static int __init tpmif_init(void)
{
        int rc;
        struct tpm_private *tp;

        if ((xen_start_info->flags & SIF_INITDOMAIN)) {
                return -EPERM;
        }

        tp = tpm_private_get();
        if (!tp) {
                rc = -ENOMEM;
                goto failexit;
        }

        tvd.tpm_private = tp;
        rc = init_vtpm(&tvd);
        if (rc)
                goto init_vtpm_failed;

        IPRINTK("Initialising the vTPM driver.\n");
        if (gnttab_alloc_grant_references(TPMIF_TX_RING_SIZE,
                                          &gref_head) < 0) {
                rc = -EFAULT;
                goto gnttab_alloc_failed;
        }

        init_tpm_xenbus();
        return 0;

gnttab_alloc_failed:
        cleanup_vtpm();
init_vtpm_failed:
        tpm_private_put();
failexit:

        return rc;
}
static void __exit tpmif_exit(void)
{
        exit_tpm_xenbus();
        cleanup_vtpm();
        tpm_private_put();
        gnttab_free_grant_references(gref_head);
}

module_init(tpmif_init);

MODULE_LICENSE("Dual BSD/GPL");

/*
 * Local variables:
 * c-file-style: "linux"
 * indent-tabs-mode: t
 * c-indent-level: 8
 * c-basic-offset: 8
 * tab-width: 8
 * End:
 */