ia64/xen-unstable

view linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c @ 6742:ac6605bceb9d

remove pointless NULL check before calling kfree

Signed-off-by: Vincent Hanquez <vincent@xensource.com>
author vh249@arcadians.cl.cam.ac.uk
date Sat Sep 10 14:41:16 2005 +0000 (2005-09-10)
parents 7d0fb56b4a91
children 9ead08216805
line source
1 /*
2 * Copyright (c) 2005, IBM Corporation
3 *
4 * Author: Stefan Berger, stefanb@us.ibm.com
5 * Grant table support: Mahadevan Gomathisankaran
6 *
7 * This code has been derived from drivers/xen/netfront/netfront.c
8 *
9 * Copyright (c) 2002-2004, K A Fraser
10 *
11 * This file may be distributed separately from the Linux kernel, or
12 * incorporated into other software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
33 #include <linux/config.h>
34 #include <linux/module.h>
35 #include <linux/version.h>
36 #include <linux/kernel.h>
37 #include <linux/slab.h>
38 #include <linux/errno.h>
39 #include <linux/interrupt.h>
40 #include <linux/init.h>
41 #include <linux/tpmfe.h>
43 #include <asm/semaphore.h>
44 #include <asm/io.h>
45 #include <asm-xen/evtchn.h>
46 #include <asm-xen/xen-public/io/tpmif.h>
47 #include <asm/uaccess.h>
48 #include <asm-xen/xenbus.h>
49 #include <asm-xen/xen-public/io/domain_controller.h>
50 #include <asm-xen/xen-public/grant_table.h>
52 #include "tpmfront.h"
#undef DEBUG

#if 1
/*
 * Poor man's BUG(): print the failed assertion, then force an oops by
 * writing through a NULL pointer.
 *
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement; the original bare "if (...) { ... }" form breaks inside
 * an unbraced if/else in the caller (dangling-else hazard).
 */
#define ASSERT(_p) \
    do { \
        if (!(_p)) { \
            printk("Assertion '%s' failed, line %d, file %s", #_p, \
                   __LINE__, __FILE__); \
            *(int *)0 = 0; \
        } \
    } while (0)
#else
#define ASSERT(_p) do { } while (0)
#endif
/* locally visible variables */
static grant_ref_t gref_head;          /* pool of pre-allocated grant references */
static struct tpm_private my_private;  /* the single device instance this driver supports */

/* local function prototypes */
static irqreturn_t tpmif_int(int irq,
                             void *tpm_priv,
                             struct pt_regs *ptregs);
static void tpmif_rx_action(unsigned long unused);
static void tpmif_connect(u16 evtchn, domid_t domid);
static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
static int tpm_allocate_buffers(struct tpm_private *tp);
static void tpmif_set_connected_state(struct tpm_private *tp, int newstate);
static int tpm_xmit(struct tpm_private *tp,
                    const u8 * buf, size_t count, int userbuffer,
                    void *remember);

/*
 * DEBUG is #undef'd above; an undefined macro evaluates to 0 in #if,
 * so DPRINTK compiles to a no-op here.  IPRINTK/WPRINTK always print.
 */
#if DEBUG
#define DPRINTK(fmt, args...) \
    printk(KERN_ALERT "xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define DPRINTK(fmt, args...) ((void)0)
#endif
#define IPRINTK(fmt, args...) \
    printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
#define WPRINTK(fmt, args...) \
    printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
93 static inline int
94 tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len,
95 int isuserbuffer)
96 {
97 int copied = len;
99 if (len > txb->size) {
100 copied = txb->size;
101 }
102 if (isuserbuffer) {
103 if (copy_from_user(txb->data,
104 src,
105 copied)) {
106 return -EFAULT;
107 }
108 } else {
109 memcpy(txb->data, src, copied);
110 }
111 txb->len = len;
112 return copied;
113 }
115 static inline struct tx_buffer *tx_buffer_alloc(void)
116 {
117 struct tx_buffer *txb = kmalloc(sizeof (struct tx_buffer),
118 GFP_KERNEL);
120 if (txb) {
121 txb->len = 0;
122 txb->size = PAGE_SIZE;
123 txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
124 if (txb->data == NULL) {
125 kfree(txb);
126 txb = NULL;
127 }
128 }
129 return txb;
130 }
133 /**************************************************************
135 The interface to let the tpm plugin register its callback
136 function and send data to another partition using this module
138 **************************************************************/
/* Serializes all access to upperlayer_tpmfe (register/unregister/deliver). */
static DECLARE_MUTEX(upperlayer_lock);
/* Blocks new transmissions across suspend; released again in resume. */
static DECLARE_MUTEX(suspend_lock);
/* The (at most one) upper-layer driver registered to receive responses. */
static struct tpmfe_device *upperlayer_tpmfe;
144 /*
145 * Send data via this module by calling this function
146 */
147 int tpm_fe_send(const u8 * buf, size_t count, void *ptr)
148 {
149 int sent = 0;
150 struct tpm_private *tp = &my_private;
152 down(&suspend_lock);
153 sent = tpm_xmit(tp, buf, count, 0, ptr);
154 up(&suspend_lock);
156 return sent;
157 }
158 EXPORT_SYMBOL(tpm_fe_send);
160 /*
161 * Register a callback for receiving data from this module
162 */
163 int tpm_fe_register_receiver(struct tpmfe_device *tpmfe_dev)
164 {
165 int rc = 0;
167 down(&upperlayer_lock);
168 if (NULL == upperlayer_tpmfe) {
169 upperlayer_tpmfe = tpmfe_dev;
170 tpmfe_dev->max_tx_size = TPMIF_TX_RING_SIZE * PAGE_SIZE;
171 } else {
172 rc = -EBUSY;
173 }
174 up(&upperlayer_lock);
175 return rc;
176 }
177 EXPORT_SYMBOL(tpm_fe_register_receiver);
/*
 * Unregister the callback for receiving data from this module.
 * After this returns, tpm_fe_send_upperlayer() silently drops data.
 */
void tpm_fe_unregister_receiver(void)
{
	down(&upperlayer_lock);
	upperlayer_tpmfe = NULL;
	up(&upperlayer_lock);
}
EXPORT_SYMBOL(tpm_fe_unregister_receiver);
190 /*
191 * Call this function to send data to the upper layer's
192 * registered receiver function.
193 */
194 static int tpm_fe_send_upperlayer(const u8 * buf, size_t count,
195 const void *ptr)
196 {
197 int rc;
199 down(&upperlayer_lock);
201 if (upperlayer_tpmfe && upperlayer_tpmfe->receive) {
202 rc = upperlayer_tpmfe->receive(buf, count, ptr);
203 } else {
204 rc = 0;
205 }
207 up(&upperlayer_lock);
208 return rc;
209 }
211 /**************************************************************
212 XENBUS support code
213 **************************************************************/
/*
 * xenbus watch callback: fires when the backend directory changes.
 * Once the backend publishes its 'ready' flag, mark the device
 * connected and report success to the toolstack.
 */
static void watch_for_status(struct xenbus_watch *watch, const char *node)
{
	struct tpmfront_info *info;
	int err;
	unsigned long ready;
	struct tpm_private *tp = &my_private;

	info = container_of(watch, struct tpmfront_info, watch);
	/* Skip past the watched prefix; the remainder is not used below. */
	node += strlen(watch->node);

	/* Already connected — nothing more to do. */
	if (tp->connected)
		return;

	err = xenbus_gather(watch->node,
	                    "ready", "%lu", &ready,
	                    NULL);
	if (err) {
		xenbus_dev_error(info->dev, err, "reading 'ready' field");
		return;
	}

	/* Backend is ready: flip to connected and notify the upper layer. */
	tpmif_set_connected_state(tp, 1);

	xenbus_dev_ok(info->dev);
}
242 static int setup_tpmring(struct xenbus_device *dev,
243 struct tpmfront_info * info,
244 domid_t backend_id)
245 {
246 tpmif_tx_interface_t *sring;
247 struct tpm_private *tp = &my_private;
249 evtchn_op_t op = { .cmd = EVTCHNOP_alloc_unbound };
250 int err;
252 sring = (void *)__get_free_page(GFP_KERNEL);
253 if (!sring) {
254 xenbus_dev_error(dev, -ENOMEM, "allocating shared ring");
255 return -ENOMEM;
256 }
257 tp->tx = sring;
259 tpm_allocate_buffers(tp);
261 info->ring_ref = gnttab_claim_grant_reference(&gref_head);
262 ASSERT(info->ring_ref != -ENOSPC);
263 gnttab_grant_foreign_access_ref(info->ring_ref,
264 backend_id,
265 (virt_to_machine(tp->tx) >> PAGE_SHIFT),
266 0);
268 op.u.alloc_unbound.dom = backend_id;
269 err = HYPERVISOR_event_channel_op(&op);
270 if (err) {
271 free_page((unsigned long)sring);
272 tp->tx = 0;
273 xenbus_dev_error(dev, err, "allocating event channel");
274 return err;
275 }
276 tpmif_connect(op.u.alloc_unbound.port, backend_id);
277 return 0;
278 }
281 static void destroy_tpmring(struct tpmfront_info *info, struct tpm_private *tp)
282 {
283 tpmif_set_connected_state(tp,0);
285 if ( tp->tx != NULL ) {
286 free_page((unsigned long)tp->tx);
287 tp->tx = NULL;
288 }
289 unbind_evtchn_from_irqhandler(tp->evtchn, NULL);
290 tp->evtchn = 0;
291 }
/*
 * Negotiate the connection with the backend over xenbus: read
 * backend-id and the backend path, set up the shared ring and event
 * channel, publish ring-ref/event-channel inside a transaction, and
 * register a watch on the backend directory.
 *
 * Returns 0 on success or a negative errno; after a failure beyond
 * ring setup the ring is torn down again.
 */
static int talk_to_backend(struct xenbus_device *dev,
                           struct tpmfront_info *info)
{
	char *backend;
	const char *message;
	int err;
	int backend_id;

	backend = NULL;
	err = xenbus_gather(dev->nodename,
	                    "backend-id", "%i", &backend_id,
	                    "backend", NULL, &backend,
	                    NULL);
	if (XENBUS_EXIST_ERR(err))
		goto out;
	/* An empty backend path means the toolstack has not set one yet. */
	if (backend && strlen(backend) == 0) {
		err = -ENOENT;
		goto out;
	}
	if (err < 0) {
		xenbus_dev_error(dev, err, "reading %s/backend or backend-id",
		                 dev->nodename);
		goto out;
	}

	info->backend_id = backend_id;
	my_private.backend_id = backend_id;

	err = setup_tpmring(dev, info, backend_id);
	if (err) {
		xenbus_dev_error(dev, err, "setting up ring");
		goto out;
	}

	err = xenbus_transaction_start(dev->nodename);
	if (err) {
		xenbus_dev_error(dev, err, "starting transaction");
		goto destroy_tpmring;
	}

	err = xenbus_printf(dev->nodename,
	                    "ring-ref","%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}

	err = xenbus_printf(dev->nodename,
	                    "event-channel", "%u", my_private.evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	/* Ownership of the 'backend' string moves to info here, so the
	 * kfree at 'out' must not see it any more. */
	info->backend = backend;
	backend = NULL;

	info->watch.node = info->backend;
	info->watch.callback = watch_for_status;
	err = register_xenbus_watch(&info->watch);
	if (err) {
		message = "registering watch on backend";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(0);
	if (err) {
		xenbus_dev_error(dev, err, "completing transaction");
		goto destroy_tpmring;
	}

 out:
	/* kfree(NULL) is a no-op, so this is safe on every path. */
	kfree(backend);
	return err;

 abort_transaction:
	xenbus_transaction_end(1);
	/* Have to do this *outside* transaction. */
	xenbus_dev_error(dev, err, "%s", message);
 destroy_tpmring:
	/* NOTE(review): label shares its name with the destroy_tpmring()
	 * function it calls — legal C, but easy to misread. */
	destroy_tpmring(info, &my_private);
	goto out;
}
379 static int tpmfront_probe(struct xenbus_device *dev,
380 const struct xenbus_device_id *id)
381 {
382 int err;
383 struct tpmfront_info *info;
384 int handle;
386 err = xenbus_scanf(dev->nodename,
387 "handle", "%i", &handle);
388 if (XENBUS_EXIST_ERR(err))
389 return err;
391 if (err < 0) {
392 xenbus_dev_error(dev,err,"reading virtual-device");
393 return err;
394 }
396 info = kmalloc(sizeof(*info), GFP_KERNEL);
397 if (!info) {
398 xenbus_dev_error(dev,err,"allocating info structure");
399 return err;
400 }
401 memset(info, 0x0, sizeof(*info));
403 info->dev = dev;
404 info->handle = handle;
405 dev->data = info;
407 err = talk_to_backend(dev, info);
408 if (err) {
409 kfree(info);
410 dev->data = NULL;
411 return err;
412 }
414 watch_for_status(&info->watch, info->watch.node);
415 return 0;
416 }
/*
 * Remove: drop the backend watch (only registered once a backend path
 * was recorded), tear down the ring, and free the per-device state.
 */
static int tpmfront_remove(struct xenbus_device *dev)
{
	struct tpmfront_info *info = dev->data;
	if (info->backend)
		unregister_xenbus_watch(&info->watch);

	destroy_tpmring(info, &my_private);

	/* kfree(NULL) is safe, so no NULL check is needed. */
	kfree(info->backend);
	kfree(info);

	return 0;
}
/*
 * Suspend: block new transmissions, then tear down the watch and ring.
 *
 * NOTE: suspend_lock is taken here and deliberately NOT released; the
 * matching up() happens in tpmfront_resume(), so applications cannot
 * transmit while the domain is suspended.
 */
static int tpmfront_suspend(struct xenbus_device *dev)
{
	struct tpmfront_info *info = dev->data;
	struct tpm_private *tp = &my_private;

	/* lock so no app can send */
	down(&suspend_lock);

	/* An in-flight request is only reported, not waited for (see #if 0). */
	while (atomic_read(&tp->tx_busy)) {
		printk("---- TPMIF: Outstanding request.\n");
#if 0
		/*
		 * Would like to wait until the outstanding request
		 * has come back, but this does not work properly, yet.
		 */
		interruptible_sleep_on_timeout(&tp->wait_q,
		                               100);
#else
		break;
#endif
	}

	unregister_xenbus_watch(&info->watch);

	kfree(info->backend);
	info->backend = NULL;

	destroy_tpmring(info, tp);

	return 0;
}
/* Post-resume recovery hook; nothing to recover yet, always succeeds. */
static int tpmif_recover(void)
{
	return 0;
}
469 static int tpmfront_resume(struct xenbus_device *dev)
470 {
471 struct tpmfront_info *info = dev->data;
472 int err;
474 err = talk_to_backend(dev, info);
475 if (!err) {
476 tpmif_recover();
477 }
479 /* unlock so apps can resume */
480 up(&suspend_lock);
482 return err;
483 }
485 static void tpmif_connect(u16 evtchn, domid_t domid)
486 {
487 int err = 0;
488 struct tpm_private *tp = &my_private;
490 tp->evtchn = evtchn;
491 tp->backend_id = domid;
493 err = bind_evtchn_to_irqhandler(
494 tp->evtchn,
495 tpmif_int, SA_SAMPLE_RANDOM, "tpmif", tp);
496 if ( err != 0 ) {
497 WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
498 return;
499 }
500 }
/* xenbus device types handled by this driver. */
static struct xenbus_device_id tpmfront_ids[] = {
	{ "vtpm" },
	{ "" }	/* terminating entry */
};

/* Frontend driver registration record wired to the handlers above. */
static struct xenbus_driver tpmfront = {
	.name = "vtpm",
	.owner = THIS_MODULE,
	.ids = tpmfront_ids,
	.probe = tpmfront_probe,
	.remove = tpmfront_remove,
	.resume = tpmfront_resume,
	.suspend = tpmfront_suspend,
};
/* Register this frontend driver with the xenbus core. */
static void __init init_tpm_xenbus(void)
{
	xenbus_register_device(&tpmfront);
}
523 static int
524 tpm_allocate_buffers(struct tpm_private *tp)
525 {
526 unsigned int i;
528 i = 0;
529 while (i < TPMIF_TX_RING_SIZE) {
530 tp->tx_buffers[i] = tx_buffer_alloc();
531 i++;
532 }
534 return 1;
535 }
537 static void
538 tpmif_rx_action(unsigned long unused)
539 {
540 struct tpm_private *tp = &my_private;
542 int i = 0;
543 unsigned int received;
544 unsigned int offset = 0;
545 u8 *buffer;
546 tpmif_tx_request_t *tx;
547 tx = &tp->tx->ring[i].req;
549 received = tx->size;
551 buffer = kmalloc(received, GFP_KERNEL);
552 if (NULL == buffer) {
553 goto exit;
554 }
556 i = 0;
557 while (i < TPMIF_TX_RING_SIZE &&
558 offset < received) {
559 struct tx_buffer *txb = tp->tx_buffers[i];
560 tpmif_tx_request_t *tx;
561 unsigned int tocopy;
563 tx = &tp->tx->ring[i].req;
564 tocopy = tx->size;
565 if (tocopy > PAGE_SIZE) {
566 tocopy = PAGE_SIZE;
567 }
569 memcpy(&buffer[offset], txb->data, tocopy);
571 gnttab_release_grant_reference(&gref_head, tx->ref);
573 offset += tocopy;
574 i++;
575 }
577 tpm_fe_send_upperlayer(buffer, received, tp->tx_remember);
578 kfree(buffer);
580 exit:
581 atomic_set(&tp->tx_busy, 0);
582 wake_up_interruptible(&tp->wait_q);
583 }
/*
 * Event-channel interrupt handler.  All real work is deferred to the
 * rx tasklet; tx_lock is taken around the schedule, apparently to
 * serialize against tpm_xmit() on another CPU.
 */
static irqreturn_t
tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
{
	struct tpm_private *tp = tpm_priv;
	unsigned long flags;

	spin_lock_irqsave(&tp->tx_lock, flags);
	tasklet_schedule(&tpmif_rx_tasklet);
	spin_unlock_irqrestore(&tp->tx_lock, flags);

	return IRQ_HANDLED;
}
600 static int
601 tpm_xmit(struct tpm_private *tp,
602 const u8 * buf, size_t count, int isuserbuffer,
603 void *remember)
604 {
605 tpmif_tx_request_t *tx;
606 TPMIF_RING_IDX i;
607 unsigned int offset = 0;
609 spin_lock_irq(&tp->tx_lock);
611 if (unlikely(atomic_read(&tp->tx_busy))) {
612 printk("There's an outstanding request/response on the way!\n");
613 spin_unlock_irq(&tp->tx_lock);
614 return -EBUSY;
615 }
617 if (tp->connected != 1) {
618 spin_unlock_irq(&tp->tx_lock);
619 return -EIO;
620 }
622 i = 0;
623 while (count > 0 && i < TPMIF_TX_RING_SIZE) {
624 struct tx_buffer *txb = tp->tx_buffers[i];
625 int copied;
627 if (NULL == txb) {
628 DPRINTK("txb (i=%d) is NULL. buffers initilized?\n", i);
629 DPRINTK("Not transmittin anything!\n");
630 spin_unlock_irq(&tp->tx_lock);
631 return -EFAULT;
632 }
633 copied = tx_buffer_copy(txb, &buf[offset], count,
634 isuserbuffer);
635 if (copied < 0) {
636 /* An error occurred */
637 return copied;
638 }
639 count -= copied;
640 offset += copied;
642 tx = &tp->tx->ring[i].req;
644 tx->id = i;
645 tx->addr = virt_to_machine(txb->data);
646 tx->size = txb->len;
648 DPRINTK("First 4 characters sent by TPM-FE are 0x%02x 0x%02x 0x%02x 0x%02x\n",
649 txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
651 /* get the granttable reference for this page */
652 tx->ref = gnttab_claim_grant_reference( &gref_head );
654 if(-ENOSPC == tx->ref ) {
655 DPRINTK(" Grant table claim reference failed in func:%s line:%d file:%s\n", __FUNCTION__, __LINE__, __FILE__);
656 return -ENOSPC;
657 }
658 gnttab_grant_foreign_access_ref( tx->ref,
659 tp->backend_id,
660 (tx->addr >> PAGE_SHIFT),
661 0 /*RW*/);
662 i++;
663 wmb();
664 }
666 atomic_set(&tp->tx_busy, 1);
667 tp->tx_remember = remember;
668 mb();
670 DPRINTK("Notifying backend via event channel %d\n",
671 tp->evtchn);
673 notify_via_evtchn(tp->evtchn);
675 spin_unlock_irq(&tp->tx_lock);
676 return offset;
677 }
680 static void tpmif_notify_upperlayer(struct tpm_private *tp)
681 {
682 /*
683 * Notify upper layer about the state of the connection
684 * to the BE.
685 */
686 down(&upperlayer_lock);
688 if (upperlayer_tpmfe != NULL) {
689 switch (tp->connected) {
690 case 1:
691 upperlayer_tpmfe->status(TPMFE_STATUS_CONNECTED);
692 break;
694 default:
695 upperlayer_tpmfe->status(0);
696 break;
697 }
698 }
699 up(&upperlayer_lock);
700 }
703 static void tpmif_set_connected_state(struct tpm_private *tp, int newstate)
704 {
705 if (newstate != tp->connected) {
706 tp->connected = newstate;
707 tpmif_notify_upperlayer(tp);
708 }
709 }
/* =================================================================
 * Initialization function.
 * =================================================================
 */
static int __init
tpmif_init(void)
{
	IPRINTK("Initialising the vTPM driver.\n");
	/* Pre-allocate one grant reference per ring slot; without the
	 * pool nothing can be transmitted, so fail the module load. */
	if ( gnttab_alloc_grant_references ( TPMIF_TX_RING_SIZE,
	                                     &gref_head ) < 0) {
		return -EFAULT;
	}
	/*
	 * Only don't send the driver status when we are in the
	 * INIT domain.
	 */
	spin_lock_init(&my_private.tx_lock);
	init_waitqueue_head(&my_private.wait_q);

	init_tpm_xenbus();

	return 0;
}

/* Run at boot/module load, after core kernel init. */
__initcall(tpmif_init);