ia64/xen-unstable

view linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c @ 6689:7d0fb56b4a91

merge?
author cl349@firebug.cl.cam.ac.uk
date Wed Sep 07 19:01:31 2005 +0000 (2005-09-07)
parents 549f4256ab3c dd668f7527cb
children b2f4823b6ff0 ac6605bceb9d 4d899a738d59 e7c7196fa329
line source
1 /*
2 * Copyright (c) 2005, IBM Corporation
3 *
4 * Author: Stefan Berger, stefanb@us.ibm.com
5 * Grant table support: Mahadevan Gomathisankaran
6 *
7 * This code has been derived from drivers/xen/netfront/netfront.c
8 *
9 * Copyright (c) 2002-2004, K A Fraser
10 *
11 * This file may be distributed separately from the Linux kernel, or
12 * incorporated into other software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
33 #include <linux/config.h>
34 #include <linux/module.h>
35 #include <linux/version.h>
36 #include <linux/kernel.h>
37 #include <linux/slab.h>
38 #include <linux/errno.h>
39 #include <linux/interrupt.h>
40 #include <linux/init.h>
41 #include <linux/tpmfe.h>
43 #include <asm/semaphore.h>
44 #include <asm/io.h>
45 #include <asm-xen/evtchn.h>
46 #include <asm-xen/xen-public/io/tpmif.h>
47 #include <asm/uaccess.h>
48 #include <asm-xen/xenbus.h>
49 #include <asm-xen/xen-public/io/domain_controller.h>
50 #include <asm-xen/xen-public/grant_table.h>
52 #include "tpmfront.h"
#undef DEBUG

/*
 * Poor man's BUG_ON(): print the failed expression, file and line, then
 * force an oops by writing through a NULL pointer.  Wrapped in
 * do { ... } while (0) so ASSERT() expands to a single statement and is
 * safe inside unbraced if/else bodies (the original bare-if form was a
 * dangling-else hazard).
 */
#if 1
#define ASSERT(_p)							\
	do {								\
		if (!(_p)) {						\
			printk("Assertion '%s' failed, line %d, file %s", \
			       #_p, __LINE__, __FILE__);		\
			*(int *)0 = 0;					\
		}							\
	} while (0)
#else
#define ASSERT(_p) do { } while (0)
#endif
/* locally visible variables */
static grant_ref_t gref_head;         /* head of the pre-allocated grant-reference pool */
static struct tpm_private my_private; /* single front-end instance state */

/* local function prototypes */
static irqreturn_t tpmif_int(int irq,
                             void *tpm_priv,
                             struct pt_regs *ptregs);
static void tpmif_rx_action(unsigned long unused);
static void tpmif_connect(u16 evtchn, domid_t domid);
static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
static int tpm_allocate_buffers(struct tpm_private *tp);
static void tpmif_set_connected_state(struct tpm_private *tp, int newstate);
static int tpm_xmit(struct tpm_private *tp,
                    const u8 * buf, size_t count, int userbuffer,
                    void *remember);
/*
 * NOTE(review): DEBUG is #undef'ed above and "#if DEBUG" evaluates an
 * undefined macro as 0, so DPRINTK always compiles out here.  To enable
 * debug output, replace the #undef with #define DEBUG 1.
 */
#if DEBUG
#define DPRINTK(fmt, args...) \
    printk(KERN_ALERT "xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define DPRINTK(fmt, args...) ((void)0)
#endif
#define IPRINTK(fmt, args...) \
    printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
#define WPRINTK(fmt, args...) \
    printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
93 static inline int
94 tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len,
95 int isuserbuffer)
96 {
97 int copied = len;
99 if (len > txb->size) {
100 copied = txb->size;
101 }
102 if (isuserbuffer) {
103 if (copy_from_user(txb->data,
104 src,
105 copied)) {
106 return -EFAULT;
107 }
108 } else {
109 memcpy(txb->data, src, copied);
110 }
111 txb->len = len;
112 return copied;
113 }
115 static inline struct tx_buffer *tx_buffer_alloc(void)
116 {
117 struct tx_buffer *txb = kmalloc(sizeof (struct tx_buffer),
118 GFP_KERNEL);
120 if (txb) {
121 txb->len = 0;
122 txb->size = PAGE_SIZE;
123 txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
124 if (txb->data == NULL) {
125 kfree(txb);
126 txb = NULL;
127 }
128 }
129 return txb;
130 }
/**************************************************************

 The interface to let the tpm plugin register its callback
 function and send data to another partition using this module

**************************************************************/

/* serializes all access to upperlayer_tpmfe below */
static DECLARE_MUTEX(upperlayer_lock);
/* taken around transmission; held across the whole suspend/resume cycle */
static DECLARE_MUTEX(suspend_lock);
/* the single registered upper-layer (TPM driver) client, or NULL */
static struct tpmfe_device *upperlayer_tpmfe;
144 /*
145 * Send data via this module by calling this function
146 */
147 int tpm_fe_send(const u8 * buf, size_t count, void *ptr)
148 {
149 int sent = 0;
150 struct tpm_private *tp = &my_private;
152 down(&suspend_lock);
153 sent = tpm_xmit(tp, buf, count, 0, ptr);
154 up(&suspend_lock);
156 return sent;
157 }
158 EXPORT_SYMBOL(tpm_fe_send);
160 /*
161 * Register a callback for receiving data from this module
162 */
163 int tpm_fe_register_receiver(struct tpmfe_device *tpmfe_dev)
164 {
165 int rc = 0;
167 down(&upperlayer_lock);
168 if (NULL == upperlayer_tpmfe) {
169 upperlayer_tpmfe = tpmfe_dev;
170 tpmfe_dev->max_tx_size = TPMIF_TX_RING_SIZE * PAGE_SIZE;
171 } else {
172 rc = -EBUSY;
173 }
174 up(&upperlayer_lock);
175 return rc;
176 }
177 EXPORT_SYMBOL(tpm_fe_register_receiver);
/*
 * Unregister the callback for receiving data from this module.
 * Responses arriving afterwards are silently dropped by
 * tpm_fe_send_upperlayer().
 */
void tpm_fe_unregister_receiver(void)
{
	down(&upperlayer_lock);
	upperlayer_tpmfe = NULL;
	up(&upperlayer_lock);
}
EXPORT_SYMBOL(tpm_fe_unregister_receiver);
190 /*
191 * Call this function to send data to the upper layer's
192 * registered receiver function.
193 */
194 static int tpm_fe_send_upperlayer(const u8 * buf, size_t count,
195 const void *ptr)
196 {
197 int rc;
199 down(&upperlayer_lock);
201 if (upperlayer_tpmfe && upperlayer_tpmfe->receive) {
202 rc = upperlayer_tpmfe->receive(buf, count, ptr);
203 } else {
204 rc = 0;
205 }
207 up(&upperlayer_lock);
208 return rc;
209 }
/**************************************************************
 XENBUS support code
**************************************************************/

/*
 * Xenbus watch callback: fires when anything under the backend's node
 * changes.  Once the backend has published a "ready" field, mark the
 * connection up and report success on the device.
 */
static void watch_for_status(struct xenbus_watch *watch, const char *node)
{
	struct tpmfront_info *info;
	int err;
	unsigned long ready;
	struct tpm_private *tp = &my_private;

	info = container_of(watch, struct tpmfront_info, watch);
	/* advance past the watched prefix; 'node' is not used afterwards */
	node += strlen(watch->node);

	/* already connected: nothing more to do */
	if (tp->connected)
		return;

	err = xenbus_gather(watch->node,
	                    "ready", "%lu", &ready,
	                    NULL);
	if (err) {
		xenbus_dev_error(info->dev, err, "reading 'ready' field");
		return;
	}
	/* NOTE(review): the value of 'ready' is never checked — apparently
	 * the mere presence of the field implies readiness; confirm. */
	tpmif_set_connected_state(tp, 1);

	xenbus_dev_ok(info->dev);
}
242 static int setup_tpmring(struct xenbus_device *dev,
243 struct tpmfront_info * info,
244 domid_t backend_id)
245 {
246 tpmif_tx_interface_t *sring;
247 struct tpm_private *tp = &my_private;
249 evtchn_op_t op = { .cmd = EVTCHNOP_alloc_unbound };
250 int err;
252 sring = (void *)__get_free_page(GFP_KERNEL);
253 if (!sring) {
254 xenbus_dev_error(dev, -ENOMEM, "allocating shared ring");
255 return -ENOMEM;
256 }
257 tp->tx = sring;
259 tpm_allocate_buffers(tp);
261 info->ring_ref = gnttab_claim_grant_reference(&gref_head);
262 ASSERT(info->ring_ref != -ENOSPC);
263 gnttab_grant_foreign_access_ref(info->ring_ref,
264 backend_id,
265 (virt_to_machine(tp->tx) >> PAGE_SHIFT),
266 0);
268 op.u.alloc_unbound.dom = backend_id;
269 err = HYPERVISOR_event_channel_op(&op);
270 if (err) {
271 free_page((unsigned long)sring);
272 tp->tx = 0;
273 xenbus_dev_error(dev, err, "allocating event channel");
274 return err;
275 }
276 tpmif_connect(op.u.alloc_unbound.port, backend_id);
277 return 0;
278 }
281 static void destroy_tpmring(struct tpmfront_info *info, struct tpm_private *tp)
282 {
283 tpmif_set_connected_state(tp,0);
285 if ( tp->tx != NULL ) {
286 free_page((unsigned long)tp->tx);
287 tp->tx = NULL;
288 }
289 unbind_evtchn_from_irqhandler(tp->evtchn, NULL);
290 tp->evtchn = 0;
291 }
/*
 * Exchange setup information with the backend through xenstore: read the
 * backend id/path, create the shared ring and event channel, publish
 * their references inside a transaction, and register a watch on the
 * backend node.  Returns 0 on success or a negative errno.
 */
static int talk_to_backend(struct xenbus_device *dev,
                           struct tpmfront_info *info)
{
	char *backend;
	const char *message;
	int err;
	int backend_id;

	backend = NULL;
	err = xenbus_gather(dev->nodename,
	                    "backend-id", "%i", &backend_id,
	                    "backend", NULL, &backend,
	                    NULL);
	if (XENBUS_EXIST_ERR(err))
		goto out;
	if (backend && strlen(backend) == 0) {
		/* backend node exists but is empty: treat as not present */
		err = -ENOENT;
		goto out;
	}
	if (err < 0) {
		xenbus_dev_error(dev, err, "reading %s/backend or backend-id",
		                 dev->nodename);
		goto out;
	}

	info->backend_id    = backend_id;
	my_private.backend_id = backend_id;

	err = setup_tpmring(dev, info, backend_id);
	if (err) {
		xenbus_dev_error(dev, err, "setting up ring");
		goto out;
	}

	err = xenbus_transaction_start(dev->nodename);
	if (err) {
		xenbus_dev_error(dev, err, "starting transaction");
		goto destroy_tpmring;
	}

	/* publish the grant reference of the shared ring */
	err = xenbus_printf(dev->nodename,
	                    "ring-ref","%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}

	/* publish the event channel the backend should bind to */
	err = xenbus_printf(dev->nodename,
	                    "event-channel", "%u", my_private.evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	/* ownership of the 'backend' string passes to info; clear the
	 * local so the common exit path does not free it */
	info->backend = backend;
	backend = NULL;

	/* watch the backend node so watch_for_status() sees "ready" */
	info->watch.node = info->backend;
	info->watch.callback = watch_for_status;
	err = register_xenbus_watch(&info->watch);
	if (err) {
		message = "registering watch on backend";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(0);
	if (err) {
		xenbus_dev_error(dev, err, "completing transaction");
		goto destroy_tpmring;
	}

 out:
	if (backend)
		kfree(backend);
	return err;

 abort_transaction:
	xenbus_transaction_end(1);
	/* Have to do this *outside* transaction. */
	xenbus_dev_error(dev, err, "%s", message);
 destroy_tpmring:
	destroy_tpmring(info, &my_private);
	goto out;
}
380 static int tpmfront_probe(struct xenbus_device *dev,
381 const struct xenbus_device_id *id)
382 {
383 int err;
384 struct tpmfront_info *info;
385 int handle;
387 err = xenbus_scanf(dev->nodename,
388 "handle", "%i", &handle);
389 if (XENBUS_EXIST_ERR(err))
390 return err;
392 if (err < 0) {
393 xenbus_dev_error(dev,err,"reading virtual-device");
394 return err;
395 }
397 info = kmalloc(sizeof(*info), GFP_KERNEL);
398 if (!info) {
399 xenbus_dev_error(dev,err,"allocating info structure");
400 return err;
401 }
402 memset(info, 0x0, sizeof(*info));
404 info->dev = dev;
405 info->handle = handle;
406 dev->data = info;
408 err = talk_to_backend(dev, info);
409 if (err) {
410 kfree(info);
411 dev->data = NULL;
412 return err;
413 }
415 watch_for_status(&info->watch, info->watch.node);
416 return 0;
417 }
419 static int tpmfront_remove(struct xenbus_device *dev)
420 {
421 struct tpmfront_info *info = dev->data;
422 if (info->backend)
423 unregister_xenbus_watch(&info->watch);
425 destroy_tpmring(info, &my_private);
427 kfree(info->backend);
428 kfree(info);
430 return 0;
431 }
/*
 * Suspend handler.  Takes suspend_lock to stop the upper layer from
 * transmitting; the matching up() is in tpmfront_resume(), so the lock
 * is intentionally held across the whole suspend/resume cycle.
 */
static int tpmfront_suspend(struct xenbus_device *dev)
{
	struct tpmfront_info *info = dev->data;
	struct tpm_private *tp = &my_private;

	/* lock so no app can send */
	down(&suspend_lock);

	while (atomic_read(&tp->tx_busy)) {
		printk("---- TPMIF: Outstanding request.\n");
#if 0
		/*
		 * Would like to wait until the outstanding request
		 * has come back, but this does not work properly, yet.
		 */
		interruptible_sleep_on_timeout(&tp->wait_q,
		                               100);
#else
		/* give up: suspend with the request still in flight */
		break;
#endif
	}

	unregister_xenbus_watch(&info->watch);

	kfree(info->backend);
	info->backend = NULL;

	destroy_tpmring(info, tp);

	return 0;
}
/* Placeholder for post-resume recovery of in-flight state; currently a no-op. */
static int tpmif_recover(void)
{
	return 0;
}
470 static int tpmfront_resume(struct xenbus_device *dev)
471 {
472 struct tpmfront_info *info = dev->data;
473 int err;
475 err = talk_to_backend(dev, info);
476 if (!err) {
477 tpmif_recover();
478 }
480 /* unlock so apps can resume */
481 up(&suspend_lock);
483 return err;
484 }
486 static void tpmif_connect(u16 evtchn, domid_t domid)
487 {
488 int err = 0;
489 struct tpm_private *tp = &my_private;
491 tp->evtchn = evtchn;
492 tp->backend_id = domid;
494 err = bind_evtchn_to_irqhandler(
495 tp->evtchn,
496 tpmif_int, SA_SAMPLE_RANDOM, "tpmif", tp);
497 if ( err != 0 ) {
498 WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
499 return;
500 }
501 }
/* xenstore device class(es) handled by this driver; empty string terminates */
static struct xenbus_device_id tpmfront_ids[] = {
	{ "vtpm" },
	{ "" }
};

/* xenbus driver hooks for the vTPM front end */
static struct xenbus_driver tpmfront = {
	.name = "vtpm",
	.owner = THIS_MODULE,
	.ids = tpmfront_ids,
	.probe = tpmfront_probe,
	.remove = tpmfront_remove,
	.resume = tpmfront_resume,
	.suspend = tpmfront_suspend,
};
/* Register the front-end driver with the xenbus core. */
static void __init init_tpm_xenbus(void)
{
	xenbus_register_device(&tpmfront);
}
524 static int
525 tpm_allocate_buffers(struct tpm_private *tp)
526 {
527 unsigned int i;
529 i = 0;
530 while (i < TPMIF_TX_RING_SIZE) {
531 tp->tx_buffers[i] = tx_buffer_alloc();
532 i++;
533 }
535 return 1;
536 }
538 static void
539 tpmif_rx_action(unsigned long unused)
540 {
541 struct tpm_private *tp = &my_private;
543 int i = 0;
544 unsigned int received;
545 unsigned int offset = 0;
546 u8 *buffer;
547 tpmif_tx_request_t *tx;
548 tx = &tp->tx->ring[i].req;
550 received = tx->size;
552 buffer = kmalloc(received, GFP_KERNEL);
553 if (NULL == buffer) {
554 goto exit;
555 }
557 i = 0;
558 while (i < TPMIF_TX_RING_SIZE &&
559 offset < received) {
560 struct tx_buffer *txb = tp->tx_buffers[i];
561 tpmif_tx_request_t *tx;
562 unsigned int tocopy;
564 tx = &tp->tx->ring[i].req;
565 tocopy = tx->size;
566 if (tocopy > PAGE_SIZE) {
567 tocopy = PAGE_SIZE;
568 }
570 memcpy(&buffer[offset], txb->data, tocopy);
572 gnttab_release_grant_reference(&gref_head, tx->ref);
574 offset += tocopy;
575 i++;
576 }
578 tpm_fe_send_upperlayer(buffer, received, tp->tx_remember);
579 kfree(buffer);
581 exit:
582 atomic_set(&tp->tx_busy, 0);
583 wake_up_interruptible(&tp->wait_q);
584 }
/*
 * Event-channel interrupt handler: defer all response processing to the
 * tasklet.  The tx_lock round-trip orders the schedule against a
 * concurrent tpm_xmit().
 */
static irqreturn_t
tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
{
	struct tpm_private *tp = tpm_priv;
	unsigned long flags;

	spin_lock_irqsave(&tp->tx_lock, flags);
	tasklet_schedule(&tpmif_rx_tasklet);
	spin_unlock_irqrestore(&tp->tx_lock, flags);

	return IRQ_HANDLED;
}
601 static int
602 tpm_xmit(struct tpm_private *tp,
603 const u8 * buf, size_t count, int isuserbuffer,
604 void *remember)
605 {
606 tpmif_tx_request_t *tx;
607 TPMIF_RING_IDX i;
608 unsigned int offset = 0;
610 spin_lock_irq(&tp->tx_lock);
612 if (unlikely(atomic_read(&tp->tx_busy))) {
613 printk("There's an outstanding request/response on the way!\n");
614 spin_unlock_irq(&tp->tx_lock);
615 return -EBUSY;
616 }
618 if (tp->connected != 1) {
619 spin_unlock_irq(&tp->tx_lock);
620 return -EIO;
621 }
623 i = 0;
624 while (count > 0 && i < TPMIF_TX_RING_SIZE) {
625 struct tx_buffer *txb = tp->tx_buffers[i];
626 int copied;
628 if (NULL == txb) {
629 DPRINTK("txb (i=%d) is NULL. buffers initilized?\n", i);
630 DPRINTK("Not transmittin anything!\n");
631 spin_unlock_irq(&tp->tx_lock);
632 return -EFAULT;
633 }
634 copied = tx_buffer_copy(txb, &buf[offset], count,
635 isuserbuffer);
636 if (copied < 0) {
637 /* An error occurred */
638 return copied;
639 }
640 count -= copied;
641 offset += copied;
643 tx = &tp->tx->ring[i].req;
645 tx->id = i;
646 tx->addr = virt_to_machine(txb->data);
647 tx->size = txb->len;
649 DPRINTK("First 4 characters sent by TPM-FE are 0x%02x 0x%02x 0x%02x 0x%02x\n",
650 txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
652 /* get the granttable reference for this page */
653 tx->ref = gnttab_claim_grant_reference( &gref_head );
655 if(-ENOSPC == tx->ref ) {
656 DPRINTK(" Grant table claim reference failed in func:%s line:%d file:%s\n", __FUNCTION__, __LINE__, __FILE__);
657 return -ENOSPC;
658 }
659 gnttab_grant_foreign_access_ref( tx->ref,
660 tp->backend_id,
661 (tx->addr >> PAGE_SHIFT),
662 0 /*RW*/);
663 i++;
664 wmb();
665 }
667 atomic_set(&tp->tx_busy, 1);
668 tp->tx_remember = remember;
669 mb();
671 DPRINTK("Notifying backend via event channel %d\n",
672 tp->evtchn);
674 notify_via_evtchn(tp->evtchn);
676 spin_unlock_irq(&tp->tx_lock);
677 return offset;
678 }
681 static void tpmif_notify_upperlayer(struct tpm_private *tp)
682 {
683 /*
684 * Notify upper layer about the state of the connection
685 * to the BE.
686 */
687 down(&upperlayer_lock);
689 if (upperlayer_tpmfe != NULL) {
690 switch (tp->connected) {
691 case 1:
692 upperlayer_tpmfe->status(TPMFE_STATUS_CONNECTED);
693 break;
695 default:
696 upperlayer_tpmfe->status(0);
697 break;
698 }
699 }
700 up(&upperlayer_lock);
701 }
704 static void tpmif_set_connected_state(struct tpm_private *tp, int newstate)
705 {
706 if (newstate != tp->connected) {
707 tp->connected = newstate;
708 tpmif_notify_upperlayer(tp);
709 }
710 }
/* =================================================================
 * Initialization function.
 * =================================================================
 */
/*
 * Module init: pre-allocate a pool of grant references (one per ring
 * slot), initialize the lock and wait queue, and register with xenbus.
 */
static int __init
tpmif_init(void)
{
	IPRINTK("Initialising the vTPM driver.\n");
	/* NOTE(review): -ENOMEM would describe this failure better than
	 * -EFAULT, but callers only test for non-zero; confirm before
	 * changing. */
	if ( gnttab_alloc_grant_references ( TPMIF_TX_RING_SIZE,
	                                     &gref_head ) < 0) {
		return -EFAULT;
	}
	/*
	 * Only don't send the driver status when we are in the
	 * INIT domain.
	 */
	spin_lock_init(&my_private.tx_lock);
	init_waitqueue_head(&my_private.wait_q);

	init_tpm_xenbus();

	return 0;
}

__initcall(tpmif_init);