ia64/xen-unstable

linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c @ 6989:7a45b8ccef01

Some cleanup in tpm-related files.

Signed-off-by: Stefan Berger <stefanb@us.ibm.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Sep 21 10:13:11 2005 +0000 (2005-09-21)
parents 872cf6ee0594
children 55fc0ecc19c3
/*
 * Copyright (c) 2005, IBM Corporation
 *
 * Author: Stefan Berger, stefanb@us.ibm.com
 * Grant table support: Mahadevan Gomathisankaran
 *
 * This code has been derived from drivers/xen/netfront/netfront.c
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tpmfe.h>

#include <asm/semaphore.h>
#include <asm/io.h>
#include <asm-xen/evtchn.h>
#include <asm-xen/xen-public/io/tpmif.h>
#include <asm/uaccess.h>
#include <asm-xen/xenbus.h>
#include <asm-xen/xen-public/grant_table.h>

#include "tpmfront.h"

#undef DEBUG

#if 1
#define ASSERT(_p) \
        if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
                __LINE__, __FILE__); *(int*)0=0; }
#else
#define ASSERT(_p)
#endif
/* locally visible variables */
static grant_ref_t gref_head;
static struct tpm_private my_private;

/* local function prototypes */
static irqreturn_t tpmif_int(int irq,
                             void *tpm_priv,
                             struct pt_regs *ptregs);
static void tpmif_rx_action(unsigned long unused);
static void tpmif_connect(u16 evtchn, domid_t domid);
static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
static int tpm_allocate_buffers(struct tpm_private *tp);
static void tpmif_set_connected_state(struct tpm_private *tp, int newstate);
static int tpm_xmit(struct tpm_private *tp,
                    const u8 * buf, size_t count, int userbuffer,
                    void *remember);

#if DEBUG
#define DPRINTK(fmt, args...) \
        printk(KERN_ALERT "xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define DPRINTK(fmt, args...) ((void)0)
#endif
#define IPRINTK(fmt, args...) \
        printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
#define WPRINTK(fmt, args...) \
        printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
static inline int
tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len,
               int isuserbuffer)
{
        int copied = len;

        if (len > txb->size) {
                copied = txb->size;
        }
        if (isuserbuffer) {
                if (copy_from_user(txb->data,
                                   src,
                                   copied)) {
                        return -EFAULT;
                }
        } else {
                memcpy(txb->data, src, copied);
        }
        txb->len = len;
        return copied;
}

static inline struct tx_buffer *tx_buffer_alloc(void)
{
        struct tx_buffer *txb = kmalloc(sizeof (struct tx_buffer),
                                        GFP_KERNEL);

        if (txb) {
                txb->len = 0;
                txb->size = PAGE_SIZE;
                txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
                if (txb->data == NULL) {
                        kfree(txb);
                        txb = NULL;
                }
        }
        return txb;
}
/**************************************************************

 The interface to let the tpm plugin register its callback
 function and send data to another partition using this module

**************************************************************/

static DECLARE_MUTEX(upperlayer_lock);
static DECLARE_MUTEX(suspend_lock);
static struct tpmfe_device *upperlayer_tpmfe;

/*
 * Send data via this module by calling this function
 */
int tpm_fe_send(const u8 * buf, size_t count, void *ptr)
{
        int sent = 0;
        struct tpm_private *tp = &my_private;

        down(&suspend_lock);
        sent = tpm_xmit(tp, buf, count, 0, ptr);
        up(&suspend_lock);

        return sent;
}
EXPORT_SYMBOL(tpm_fe_send);

/*
 * Register a callback for receiving data from this module
 */
int tpm_fe_register_receiver(struct tpmfe_device *tpmfe_dev)
{
        int rc = 0;

        down(&upperlayer_lock);
        if (NULL == upperlayer_tpmfe) {
                upperlayer_tpmfe = tpmfe_dev;
                tpmfe_dev->max_tx_size = TPMIF_TX_RING_SIZE * PAGE_SIZE;
        } else {
                rc = -EBUSY;
        }
        up(&upperlayer_lock);
        return rc;
}
EXPORT_SYMBOL(tpm_fe_register_receiver);

/*
 * Unregister the callback for receiving data from this module
 */
void tpm_fe_unregister_receiver(void)
{
        down(&upperlayer_lock);
        upperlayer_tpmfe = NULL;
        up(&upperlayer_lock);
}
EXPORT_SYMBOL(tpm_fe_unregister_receiver);
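
/*
 * Usage sketch (illustrative only, not part of this driver): a minimal
 * upper-layer TPM driver hooking into the three exported functions above.
 * The names my_tpm_receive(), my_tpm_status() and the command blob are
 * hypothetical; the callback prototypes are inferred from how this file
 * invokes them, and the authoritative definition of struct tpmfe_device
 * lives in <linux/tpmfe.h>.
 */
#if 0
static int my_tpm_receive(const u8 *buf, size_t count, const void *ptr)
{
        /* Called from tpm_fe_send_upperlayer() with the backend's response;
         * 'ptr' is the cookie that was passed to tpm_fe_send(). */
        return 0;
}

static void my_tpm_status(unsigned int flags)
{
        /* Told about backend state changes, e.g. TPMFE_STATUS_CONNECTED. */
}

static struct tpmfe_device my_tpmfe_dev = {
        .receive = my_tpm_receive,
        .status  = my_tpm_status,
};

static int __init my_tpm_init(void)
{
        static const u8 cmd[] = { 0 };  /* placeholder TPM command blob */
        int rc;

        rc = tpm_fe_register_receiver(&my_tpmfe_dev);
        if (rc)
                return rc;              /* -EBUSY if a receiver is registered */

        rc = tpm_fe_send(cmd, sizeof(cmd), &my_tpmfe_dev);
        return (rc < 0) ? rc : 0;
}

static void __exit my_tpm_exit(void)
{
        tpm_fe_unregister_receiver();
}

module_init(my_tpm_init);
module_exit(my_tpm_exit);
#endif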
/*
 * Call this function to send data to the upper layer's
 * registered receiver function.
 */
static int tpm_fe_send_upperlayer(const u8 * buf, size_t count,
                                  const void *ptr)
{
        int rc;

        down(&upperlayer_lock);

        if (upperlayer_tpmfe && upperlayer_tpmfe->receive) {
                rc = upperlayer_tpmfe->receive(buf, count, ptr);
        } else {
                rc = 0;
        }

        up(&upperlayer_lock);
        return rc;
}

/**************************************************************
 XENBUS support code
**************************************************************/

static void watch_for_status(struct xenbus_watch *watch, const char *node)
{
        struct tpmfront_info *info;
        int err;
        unsigned long ready;
        struct tpm_private *tp = &my_private;

        info = container_of(watch, struct tpmfront_info, watch);
        node += strlen(watch->node);

        if (tp->connected)
                return;

        err = xenbus_gather(watch->node,
                            "ready", "%lu", &ready,
                            NULL);
        if (err) {
                xenbus_dev_error(info->dev, err, "reading 'ready' field");
                return;
        }

        tpmif_set_connected_state(tp, 1);

        xenbus_dev_ok(info->dev);
}
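
/*
 * Set up the frontend side of the split device: allocate one page for the
 * shared tx ring and the per-slot tx buffers, grant the backend domain
 * access to the ring page, allocate an unbound event channel towards the
 * backend and bind it to tpmif_int() via tpmif_connect().
 */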
static int setup_tpmring(struct xenbus_device *dev,
                         struct tpmfront_info * info,
                         domid_t backend_id)
{
        tpmif_tx_interface_t *sring;
        struct tpm_private *tp = &my_private;

        evtchn_op_t op = { .cmd = EVTCHNOP_alloc_unbound };
        int err;

        sring = (void *)__get_free_page(GFP_KERNEL);
        if (!sring) {
                xenbus_dev_error(dev, -ENOMEM, "allocating shared ring");
                return -ENOMEM;
        }
        tp->tx = sring;

        tpm_allocate_buffers(tp);

        err = gnttab_grant_foreign_access(backend_id,
                                          (virt_to_machine(tp->tx) >> PAGE_SHIFT),
                                          0);

        if (err == -ENOSPC) {
                free_page((unsigned long)sring);
                tp->tx = NULL;
                xenbus_dev_error(dev, err, "allocating grant reference");
                return err;
        }
        info->ring_ref = err;

        op.u.alloc_unbound.dom = backend_id;
        err = HYPERVISOR_event_channel_op(&op);
        if (err) {
                gnttab_end_foreign_access(info->ring_ref, 0);
                free_page((unsigned long)sring);
                tp->tx = NULL;
                xenbus_dev_error(dev, err, "allocating event channel");
                return err;
        }
        tpmif_connect(op.u.alloc_unbound.port, backend_id);
        return 0;
}


static void destroy_tpmring(struct tpmfront_info *info, struct tpm_private *tp)
{
        tpmif_set_connected_state(tp, 0);

        if ( tp->tx != NULL ) {
                gnttab_end_foreign_access(info->ring_ref, 0);
                free_page((unsigned long)tp->tx);
                tp->tx = NULL;
        }
        unbind_evtchn_from_irqhandler(tp->evtchn, NULL);
        tp->evtchn = 0;
}
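
/*
 * Complete the xenbus handshake with the backend: read backend-id and the
 * backend path, set up the ring and event channel, then publish ring-ref
 * and event-channel in the store (inside a transaction) and register a
 * watch on the backend node so watch_for_status() sees it become ready.
 */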
static int talk_to_backend(struct xenbus_device *dev,
                           struct tpmfront_info *info)
{
        char *backend;
        const char *message;
        int err;
        int backend_id;

        backend = NULL;
        err = xenbus_gather(dev->nodename,
                            "backend-id", "%i", &backend_id,
                            "backend", NULL, &backend,
                            NULL);
        if (XENBUS_EXIST_ERR(err))
                goto out;
        if (backend && strlen(backend) == 0) {
                err = -ENOENT;
                goto out;
        }
        if (err < 0) {
                xenbus_dev_error(dev, err, "reading %s/backend or backend-id",
                                 dev->nodename);
                goto out;
        }

        info->backend_id = backend_id;
        my_private.backend_id = backend_id;

        err = setup_tpmring(dev, info, backend_id);
        if (err) {
                xenbus_dev_error(dev, err, "setting up ring");
                goto out;
        }

        err = xenbus_transaction_start(dev->nodename);
        if (err) {
                xenbus_dev_error(dev, err, "starting transaction");
                goto destroy_tpmring;
        }

        err = xenbus_printf(dev->nodename,
                            "ring-ref", "%u", info->ring_ref);
        if (err) {
                message = "writing ring-ref";
                goto abort_transaction;
        }

        err = xenbus_printf(dev->nodename,
                            "event-channel", "%u", my_private.evtchn);
        if (err) {
                message = "writing event-channel";
                goto abort_transaction;
        }

        info->backend = backend;
        backend = NULL;

        info->watch.node = info->backend;
        info->watch.callback = watch_for_status;
        err = register_xenbus_watch(&info->watch);
        if (err) {
                message = "registering watch on backend";
                goto abort_transaction;
        }

        err = xenbus_transaction_end(0);
        if (err) {
                xenbus_dev_error(dev, err, "completing transaction");
                goto destroy_tpmring;
        }

 out:
        if (backend)
                kfree(backend);
        return err;

 abort_transaction:
        xenbus_transaction_end(1);
        /* Have to do this *outside* transaction. */
        xenbus_dev_error(dev, err, "%s", message);
 destroy_tpmring:
        destroy_tpmring(info, &my_private);
        goto out;
}
static int tpmfront_probe(struct xenbus_device *dev,
                          const struct xenbus_device_id *id)
{
        int err;
        struct tpmfront_info *info;
        int handle;

        err = xenbus_scanf(dev->nodename,
                           "handle", "%i", &handle);
        if (XENBUS_EXIST_ERR(err))
                return err;

        if (err < 0) {
                xenbus_dev_error(dev, err, "reading handle");
                return err;
        }

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                err = -ENOMEM;
                xenbus_dev_error(dev, err, "allocating info structure");
                return err;
        }
        memset(info, 0x0, sizeof(*info));

        info->dev = dev;
        info->handle = handle;
        dev->data = info;

        err = talk_to_backend(dev, info);
        if (err) {
                kfree(info);
                dev->data = NULL;
                return err;
        }

        return 0;
}
static int tpmfront_remove(struct xenbus_device *dev)
{
        struct tpmfront_info *info = dev->data;
        if (info->backend)
                unregister_xenbus_watch(&info->watch);

        destroy_tpmring(info, &my_private);

        kfree(info->backend);
        kfree(info);

        return 0;
}
static int tpmfront_suspend(struct xenbus_device *dev)
{
        struct tpmfront_info *info = dev->data;
        struct tpm_private *tp = &my_private;

        /* lock so no app can send */
        down(&suspend_lock);

        while (atomic_read(&tp->tx_busy)) {
                printk("---- TPMIF: Outstanding request.\n");
#if 0
                /*
                 * Would like to wait until the outstanding request
                 * has come back, but this does not work properly, yet.
                 */
                interruptible_sleep_on_timeout(&tp->wait_q,
                                               100);
#else
                break;
#endif
        }

        unregister_xenbus_watch(&info->watch);

        kfree(info->backend);
        info->backend = NULL;

        destroy_tpmring(info, tp);

        return 0;
}
static int tpmif_recover(void)
{
        return 0;
}

static int tpmfront_resume(struct xenbus_device *dev)
{
        struct tpmfront_info *info = dev->data;
        int err;

        err = talk_to_backend(dev, info);
        if (!err) {
                tpmif_recover();
        }

        /* unlock so apps can resume */
        up(&suspend_lock);

        return err;
}
static void tpmif_connect(u16 evtchn, domid_t domid)
{
        int err = 0;
        struct tpm_private *tp = &my_private;

        tp->evtchn = evtchn;
        tp->backend_id = domid;

        err = bind_evtchn_to_irqhandler(
                tp->evtchn,
                tpmif_int, SA_SAMPLE_RANDOM, "tpmif", tp);
        if ( err != 0 ) {
                WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
                return;
        }
}

static struct xenbus_device_id tpmfront_ids[] = {
        { "vtpm" },
        { "" }
};
static struct xenbus_driver tpmfront = {
        .name = "vtpm",
        .owner = THIS_MODULE,
        .ids = tpmfront_ids,
        .probe = tpmfront_probe,
        .remove = tpmfront_remove,
        .resume = tpmfront_resume,
        .suspend = tpmfront_suspend,
};

static void __init init_tpm_xenbus(void)
{
        xenbus_register_device(&tpmfront);
}
static int
tpm_allocate_buffers(struct tpm_private *tp)
{
        unsigned int i;

        i = 0;
        while (i < TPMIF_TX_RING_SIZE) {
                tp->tx_buffers[i] = tx_buffer_alloc();
                i++;
        }

        return 1;
}
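
/*
 * Tasklet run after the backend has signalled the event channel: the
 * backend writes the response sizes back into the same tx ring, so walk
 * the ring entries, copy the data out of the granted tx buffers into one
 * contiguous buffer, release the grant references and hand the result to
 * the upper layer together with the cookie saved in tp->tx_remember.
 */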
static void
tpmif_rx_action(unsigned long unused)
{
        struct tpm_private *tp = &my_private;

        int i = 0;
        unsigned int received;
        unsigned int offset = 0;
        u8 *buffer;
        tpmif_tx_request_t *tx;
        tx = &tp->tx->ring[i].req;

        received = tx->size;

        /* tasklet (softirq) context: the allocation must not sleep */
        buffer = kmalloc(received, GFP_ATOMIC);
        if (NULL == buffer) {
                goto exit;
        }

        i = 0;
        while (i < TPMIF_TX_RING_SIZE &&
               offset < received) {
                struct tx_buffer *txb = tp->tx_buffers[i];
                tpmif_tx_request_t *tx;
                unsigned int tocopy;

                tx = &tp->tx->ring[i].req;
                tocopy = tx->size;
                if (tocopy > PAGE_SIZE) {
                        tocopy = PAGE_SIZE;
                }

                memcpy(&buffer[offset], txb->data, tocopy);

                gnttab_release_grant_reference(&gref_head, tx->ref);

                offset += tocopy;
                i++;
        }

        tpm_fe_send_upperlayer(buffer, received, tp->tx_remember);
        kfree(buffer);

exit:
        atomic_set(&tp->tx_busy, 0);
        wake_up_interruptible(&tp->wait_q);
}
static irqreturn_t
tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
{
        struct tpm_private *tp = tpm_priv;
        unsigned long flags;

        spin_lock_irqsave(&tp->tx_lock, flags);
        tasklet_schedule(&tpmif_rx_tasklet);
        spin_unlock_irqrestore(&tp->tx_lock, flags);

        return IRQ_HANDLED;
}
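
/*
 * Transmit path: split the request across the per-slot tx buffers, fill
 * in one ring request (machine address, size, grant reference) per slot,
 * mark the transfer as busy, remember the caller's cookie and notify the
 * backend through the event channel.  Returns the number of bytes queued
 * or a negative error code.
 */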
static int
tpm_xmit(struct tpm_private *tp,
         const u8 * buf, size_t count, int isuserbuffer,
         void *remember)
{
        tpmif_tx_request_t *tx;
        TPMIF_RING_IDX i;
        unsigned int offset = 0;

        spin_lock_irq(&tp->tx_lock);

        if (unlikely(atomic_read(&tp->tx_busy))) {
                printk("There's an outstanding request/response on the way!\n");
                spin_unlock_irq(&tp->tx_lock);
                return -EBUSY;
        }

        if (tp->connected != 1) {
                spin_unlock_irq(&tp->tx_lock);
                return -EIO;
        }

        i = 0;
        while (count > 0 && i < TPMIF_TX_RING_SIZE) {
                struct tx_buffer *txb = tp->tx_buffers[i];
                int copied;

                if (NULL == txb) {
                        DPRINTK("txb (i=%d) is NULL. buffers initialized?\n", i);
                        DPRINTK("Not transmitting anything!\n");
                        spin_unlock_irq(&tp->tx_lock);
                        return -EFAULT;
                }
                copied = tx_buffer_copy(txb, &buf[offset], count,
                                        isuserbuffer);
                if (copied < 0) {
                        /* An error occurred */
                        spin_unlock_irq(&tp->tx_lock);
                        return copied;
                }
                count -= copied;
                offset += copied;

                tx = &tp->tx->ring[i].req;

                tx->id = i;
                tx->addr = virt_to_machine(txb->data);
                tx->size = txb->len;

                DPRINTK("First 4 characters sent by TPM-FE are 0x%02x 0x%02x 0x%02x 0x%02x\n",
                        txb->data[0], txb->data[1], txb->data[2], txb->data[3]);

                /* get the granttable reference for this page */
                tx->ref = gnttab_claim_grant_reference(&gref_head);

                if (-ENOSPC == tx->ref) {
                        DPRINTK("Grant table claim reference failed in func:%s line:%d file:%s\n",
                                __FUNCTION__, __LINE__, __FILE__);
                        spin_unlock_irq(&tp->tx_lock);
                        return -ENOSPC;
                }
                gnttab_grant_foreign_access_ref(tx->ref,
                                                tp->backend_id,
                                                (tx->addr >> PAGE_SHIFT),
                                                0 /*RW*/);
                i++;
                wmb();
        }

        atomic_set(&tp->tx_busy, 1);
        tp->tx_remember = remember;
        mb();

        DPRINTK("Notifying backend via event channel %d\n",
                tp->evtchn);

        notify_via_evtchn(tp->evtchn);

        spin_unlock_irq(&tp->tx_lock);
        return offset;
}
static void tpmif_notify_upperlayer(struct tpm_private *tp)
{
        /*
         * Notify upper layer about the state of the connection
         * to the BE.
         */
        down(&upperlayer_lock);

        if (upperlayer_tpmfe != NULL) {
                switch (tp->connected) {
                case 1:
                        upperlayer_tpmfe->status(TPMFE_STATUS_CONNECTED);
                        break;

                default:
                        upperlayer_tpmfe->status(0);
                        break;
                }
        }
        up(&upperlayer_lock);
}


static void tpmif_set_connected_state(struct tpm_private *tp, int newstate)
{
        if (newstate != tp->connected) {
                tp->connected = newstate;
                tpmif_notify_upperlayer(tp);
        }
}
/* =================================================================
 * Initialization function.
 * =================================================================
 */

static int __init
tpmif_init(void)
{
        IPRINTK("Initialising the vTPM driver.\n");
        if ( gnttab_alloc_grant_references ( TPMIF_TX_RING_SIZE,
                                             &gref_head ) < 0) {
                return -EFAULT;
        }
        /*
         * Only don't send the driver status when we are in the
         * INIT domain.
         */
        spin_lock_init(&my_private.tx_lock);
        init_waitqueue_head(&my_private.wait_q);

        init_tpm_xenbus();

        return 0;
}

__initcall(tpmif_init);