ia64/xen-unstable: linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c @ 10754:612671bd9a54

[TPM] Discard a future response packet after a timeout has occurred instead of
removing the request right away.

Signed-off-by: Stefan Berger <stefanb@us.ibm.com>

author:   kaf24@firebug.cl.cam.ac.uk
date:     Fri Jul 14 11:48:48 2006 +0100
parents:  23136423a765
children: affe80cdb485

/******************************************************************************
 * drivers/xen/tpmback/tpmback.c
 *
 * Copyright (c) 2005, IBM Corporation
 *
 * Author: Stefan Berger, stefanb@us.ibm.com
 * Grant table support: Mahadevan Gomathisankaran
 *
 * This code has been derived from drivers/xen/netback/netback.c
 * Copyright (c) 2002-2004, K A Fraser
 *
 */

#include "common.h"
#include <xen/evtchn.h>

#include <linux/types.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <asm/uaccess.h>
#include <xen/xenbus.h>
#include <xen/interface/grant_table.h>
#include <xen/gnttab.h>

/* local data structures */
struct data_exchange {
	struct list_head pending_pak;
	struct list_head current_pak;
	unsigned int copied_so_far;
	u8 has_opener:1;
	u8 aborted:1;
	rwlock_t pak_lock;	/* protects all of the previous fields */
	wait_queue_head_t wait_queue;
};

struct vtpm_resp_hdr {
	uint32_t instance_no;
	uint16_t tag_no;
	uint32_t len_no;
	uint32_t ordinal_no;
} __attribute__ ((packed));

struct packet {
	struct list_head next;
	unsigned int data_len;
	u8 *data_buffer;
	tpmif_t *tpmif;
	u32 tpm_instance;
	u8 req_tag;
	u32 last_read;
	u8 flags;
	struct timer_list processing_timer;
};

enum {
	PACKET_FLAG_DISCARD_RESPONSE = 1,
	PACKET_FLAG_CHECK_RESPONSESTATUS = 2,
};

/* local variables */
static struct data_exchange dataex;

/* local function prototypes */
static int _packet_write(struct packet *pak,
			 const char *data, size_t size, int userbuffer);
static void processing_timeout(unsigned long ptr);
static int packet_read_shmem(struct packet *pak,
			     tpmif_t * tpmif,
			     u32 offset,
			     char *buffer, int isuserbuffer, u32 left);
static int vtpm_queue_packet(struct packet *pak);

/***************************************************************
 Buffer copying for user and kernel space buffers.
***************************************************************/
static inline int copy_from_buffer(void *to,
				   const void *from, unsigned long size,
				   int isuserbuffer)
{
	if (isuserbuffer) {
		if (copy_from_user(to, (void __user *)from, size))
			return -EFAULT;
	} else {
		memcpy(to, from, size);
	}
	return 0;
}

static inline int copy_to_buffer(void *to,
				 const void *from, unsigned long size,
				 int isuserbuffer)
{
	if (isuserbuffer) {
		if (copy_to_user((void __user *)to, from, size))
			return -EFAULT;
	} else {
		memcpy(to, from, size);
	}
	return 0;
}

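/*
 * Initialise the global packet-exchange state: the pending/current
 * packet lists, the lock protecting them, and the wait queue the
 * userspace reader sleeps on.
 */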
static void dataex_init(struct data_exchange *dataex)
{
	INIT_LIST_HEAD(&dataex->pending_pak);
	INIT_LIST_HEAD(&dataex->current_pak);
	dataex->has_opener = 0;
	rwlock_init(&dataex->pak_lock);
	init_waitqueue_head(&dataex->wait_queue);
}

/***************************************************************
 Packet-related functions
***************************************************************/

static struct packet *packet_find_instance(struct list_head *head,
					   u32 tpm_instance)
{
	struct packet *pak;
	struct list_head *p;

	/*
	 * traverse the list of packets and return the first
	 * one with the given instance number
	 */
	list_for_each(p, head) {
		pak = list_entry(p, struct packet, next);

		if (pak->tpm_instance == tpm_instance) {
			return pak;
		}
	}
	return NULL;
}

static struct packet *packet_find_packet(struct list_head *head, void *packet)
{
	struct packet *pak;
	struct list_head *p;

	/*
	 * traverse the list of packets and return the one
	 * that matches the given packet pointer
	 */
	list_for_each(p, head) {
		pak = list_entry(p, struct packet, next);

		if (pak == packet) {
			return pak;
		}
	}
	return NULL;
}

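/*
 * Allocate a new packet; if a frontend interface is given, take a
 * reference on it and record its vTPM instance number. The processing
 * timer is initialised here but not armed.
 */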
static struct packet *packet_alloc(tpmif_t * tpmif,
				   u32 size, u8 req_tag, u8 flags)
{
	struct packet *pak = NULL;
	pak = kzalloc(sizeof (struct packet), GFP_ATOMIC);
	if (NULL != pak) {
		if (tpmif) {
			pak->tpmif = tpmif;
			pak->tpm_instance = tpmback_get_instance(tpmif->bi);
			tpmif_get(tpmif);
		}
		pak->data_len = size;
		pak->req_tag = req_tag;
		pak->last_read = 0;
		pak->flags = flags;

		/*
		 * cannot do tpmif_get(tpmif); bad things happen
		 * on the last tpmif_put()
		 */
		init_timer(&pak->processing_timer);
		pak->processing_timer.function = processing_timeout;
		pak->processing_timer.data = (unsigned long)pak;
	}
	return pak;
}

static inline void packet_reset(struct packet *pak)
{
	pak->last_read = 0;
}

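/*
 * Free a packet. Its processing timer must no longer be pending; the
 * reference on the frontend interface taken in packet_alloc is dropped
 * here.
 */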
static void packet_free(struct packet *pak)
{
	if (timer_pending(&pak->processing_timer)) {
		BUG();
	}

	if (pak->tpmif)
		tpmif_put(pak->tpmif);
	kfree(pak->data_buffer);
	/*
	 * cannot do tpmif_put(pak->tpmif); bad things happen
	 * on the last tpmif_put()
	 */
	kfree(pak);
}

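/*
 * Attach a private copy of 'buffer' to the packet as its data buffer.
 */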
static int packet_set(struct packet *pak,
		      const unsigned char *buffer, u32 size)
{
	int rc = 0;
	unsigned char *buf = kmalloc(size, GFP_KERNEL);

	if (buf) {
		pak->data_buffer = buf;
		memcpy(buf, buffer, size);
		pak->data_len = size;
	} else {
		rc = -ENOMEM;
	}
	return rc;
}

/*
 * Write data to the shared memory and send it to the FE.
 */
static int packet_write(struct packet *pak,
			const char *data, size_t size, int isuserbuffer)
{
	int rc = 0;

	if ((pak->flags & PACKET_FLAG_CHECK_RESPONSESTATUS)) {
#ifdef CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS
		u32 res;

		if (copy_from_buffer(&res,
				     &data[2 + 4], sizeof (res),
				     isuserbuffer)) {
			return -EFAULT;
		}

		if (res != 0) {
			/*
			 * Close down this device. The FE should be
			 * notified about the closure.
			 */
			if (!pak->tpmif) {
				return -EFAULT;
			}
			pak->tpmif->status = DISCONNECTING;
		}
#endif
	}

	if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) {
		/* Don't send a response to this packet. Just acknowledge it. */
		rc = size;
	} else {
		rc = _packet_write(pak, data, size, isuserbuffer);
	}

	return rc;
}

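/*
 * Map each granted ring page of the frontend, copy up to one page of
 * the response into it, unmap it again and finally notify the frontend
 * through its event channel.
 */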
int _packet_write(struct packet *pak,
		  const char *data, size_t size, int isuserbuffer)
{
	/*
	 * Write into the shared memory pages directly
	 * and send it to the front end.
	 */
	tpmif_t *tpmif = pak->tpmif;
	grant_handle_t handle;
	int rc = 0;
	unsigned int i = 0;
	unsigned int offset = 0;

	if (tpmif == NULL) {
		return -EFAULT;
	}

	if (tpmif->status == DISCONNECTED) {
		return size;
	}

	while (offset < size && i < TPMIF_TX_RING_SIZE) {
		unsigned int tocopy;
		struct gnttab_map_grant_ref map_op;
		struct gnttab_unmap_grant_ref unmap_op;
		tpmif_tx_request_t *tx;

		tx = &tpmif->tx->ring[i].req;

		if (0 == tx->addr) {
			DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);
			return 0;
		}

		gnttab_set_map_op(&map_op, MMAP_VADDR(tpmif, i),
				  GNTMAP_host_map, tx->ref, tpmif->domid);

		if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
						       &map_op, 1))) {
			BUG();
		}

		handle = map_op.handle;

		if (map_op.status) {
			DPRINTK(" Grant table operation failure !\n");
			return 0;
		}

		tocopy = min_t(size_t, size - offset, PAGE_SIZE);

		if (copy_from_buffer((void *)(MMAP_VADDR(tpmif, i) |
					      (tx->addr & ~PAGE_MASK)),
				     &data[offset], tocopy, isuserbuffer)) {
			tpmif_put(tpmif);
			return -EFAULT;
		}
		tx->size = tocopy;

		gnttab_set_unmap_op(&unmap_op, MMAP_VADDR(tpmif, i),
				    GNTMAP_host_map, handle);

		if (unlikely
		    (HYPERVISOR_grant_table_op
		     (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
			BUG();
		}

		offset += tocopy;
		i++;
	}

	rc = offset;
	DPRINTK("Notifying frontend via irq %d\n", tpmif->irq);
	notify_remote_via_irq(tpmif->irq);

	return rc;
}

/*
 * Read data from the shared memory and copy it directly into the
 * provided buffer. Advance the last_read indicator, which tells
 * how many bytes have already been read.
 */
static int packet_read(struct packet *pak, size_t numbytes,
		       char *buffer, size_t buffersize, int isuserbuffer)
{
	tpmif_t *tpmif = pak->tpmif;

	/*
	 * Read 'numbytes' of data from the buffer. The first 4
	 * bytes are the instance number in network byte order,
	 * after that come the data from the shared memory buffer.
	 */
	u32 to_copy;
	u32 offset = 0;
	u32 room_left = buffersize;

	if (pak->last_read < 4) {
		/*
		 * copy the instance number into the buffer
		 */
		u32 instance_no = htonl(pak->tpm_instance);
		u32 last_read = pak->last_read;

		to_copy = min_t(size_t, 4 - last_read, numbytes);

		if (copy_to_buffer(&buffer[0],
				   &(((u8 *) & instance_no)[last_read]),
				   to_copy, isuserbuffer)) {
			return -EFAULT;
		}

		pak->last_read += to_copy;
		offset += to_copy;
		room_left -= to_copy;
	}

	/*
	 * If the packet has a data buffer appended, read from it...
	 */
	if (room_left > 0) {
		if (pak->data_buffer) {
			u32 to_copy = min_t(u32, pak->data_len - offset, room_left);
			u32 last_read = pak->last_read - 4;

			if (copy_to_buffer(&buffer[offset],
					   &pak->data_buffer[last_read],
					   to_copy, isuserbuffer)) {
				return -EFAULT;
			}
			pak->last_read += to_copy;
			offset += to_copy;
		} else {
			offset = packet_read_shmem(pak,
						   tpmif,
						   offset,
						   buffer,
						   isuserbuffer, room_left);
		}
	}
	return offset;
}

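/*
 * Read the request payload directly from the frontend's granted ring
 * pages, page by page, continuing at the position recorded in
 * last_read.
 */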
static int packet_read_shmem(struct packet *pak,
			     tpmif_t * tpmif,
			     u32 offset, char *buffer, int isuserbuffer,
			     u32 room_left)
{
	u32 last_read = pak->last_read - 4;
	u32 i = (last_read / PAGE_SIZE);
	u32 pg_offset = last_read & (PAGE_SIZE - 1);
	u32 to_copy;
	grant_handle_t handle;

	tpmif_tx_request_t *tx;

	tx = &tpmif->tx->ring[0].req;
	/*
	 * Start copying data at the page with index 'i'
	 * and within that page at offset 'pg_offset'.
	 * Copy a maximum of 'room_left' bytes.
	 */
	to_copy = min_t(u32, PAGE_SIZE - pg_offset, room_left);
	while (to_copy > 0) {
		void *src;
		struct gnttab_map_grant_ref map_op;
		struct gnttab_unmap_grant_ref unmap_op;

		tx = &tpmif->tx->ring[i].req;

		gnttab_set_map_op(&map_op, MMAP_VADDR(tpmif, i),
				  GNTMAP_host_map, tx->ref, tpmif->domid);

		if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
						       &map_op, 1))) {
			BUG();
		}

		if (map_op.status) {
			DPRINTK(" Grant table operation failure !\n");
			return -EFAULT;
		}

		handle = map_op.handle;

		if (to_copy > tx->size) {
			/*
			 * User requests more than what's available
			 */
			to_copy = min_t(u32, tx->size, to_copy);
		}

		DPRINTK("Copying from mapped memory at %08lx\n",
			(unsigned long)(MMAP_VADDR(tpmif, i) |
					(tx->addr & ~PAGE_MASK)));

		src = (void *)(MMAP_VADDR(tpmif, i) |
			       ((tx->addr & ~PAGE_MASK) + pg_offset));
		if (copy_to_buffer(&buffer[offset],
				   src, to_copy, isuserbuffer)) {
			return -EFAULT;
		}

		DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
			tpmif->domid, buffer[offset], buffer[offset + 1],
			buffer[offset + 2], buffer[offset + 3]);

		gnttab_set_unmap_op(&unmap_op, MMAP_VADDR(tpmif, i),
				    GNTMAP_host_map, handle);

		if (unlikely
		    (HYPERVISOR_grant_table_op
		     (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
			BUG();
		}

		offset += to_copy;
		pg_offset = 0;
		last_read += to_copy;
		room_left -= to_copy;

		to_copy = min_t(u32, PAGE_SIZE, room_left);
		i++;
	}			/* while (to_copy > 0) */
	/*
	 * Adjust the last_read pointer
	 */
	pak->last_read = last_read + 4;
	return offset;
}

/* ============================================================
 * The file layer for reading data from this device
 * ============================================================
 */
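/*
 * Only a single userspace client (the vTPM manager) may have the
 * device open at a time.
 */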
static int vtpm_op_open(struct inode *inode, struct file *f)
{
	int rc = 0;
	unsigned long flags;

	write_lock_irqsave(&dataex.pak_lock, flags);
	if (dataex.has_opener == 0) {
		dataex.has_opener = 1;
	} else {
		rc = -EPERM;
	}
	write_unlock_irqrestore(&dataex.pak_lock, flags);
	return rc;
}

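/*
 * Hand a pending request to the userspace reader: the 4-byte vTPM
 * instance number in network byte order followed by the request data.
 * Once a packet has been read completely it is moved to the
 * current_pak list and its timer is re-armed while the response is
 * awaited.
 */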
static ssize_t vtpm_op_read(struct file *file,
			    char __user * data, size_t size, loff_t * offset)
{
	int ret_size = -ENODATA;
	struct packet *pak = NULL;
	unsigned long flags;

	write_lock_irqsave(&dataex.pak_lock, flags);
	if (dataex.aborted) {
		dataex.aborted = 0;
		dataex.copied_so_far = 0;
		write_unlock_irqrestore(&dataex.pak_lock, flags);
		return -EIO;
	}

	if (list_empty(&dataex.pending_pak)) {
		write_unlock_irqrestore(&dataex.pak_lock, flags);
		wait_event_interruptible(dataex.wait_queue,
					 !list_empty(&dataex.pending_pak));
		write_lock_irqsave(&dataex.pak_lock, flags);
		dataex.copied_so_far = 0;
	}

	if (!list_empty(&dataex.pending_pak)) {
		unsigned int left;

		pak = list_entry(dataex.pending_pak.next, struct packet, next);
		left = pak->data_len - dataex.copied_so_far;
		list_del(&pak->next);
		write_unlock_irqrestore(&dataex.pak_lock, flags);

		DPRINTK("size given by app: %d, available: %d\n", size, left);

		ret_size = min_t(size_t, size, left);

		ret_size = packet_read(pak, ret_size, data, size, 1);

		write_lock_irqsave(&dataex.pak_lock, flags);

		if (ret_size < 0) {
			del_singleshot_timer_sync(&pak->processing_timer);
			packet_free(pak);
			dataex.copied_so_far = 0;
		} else {
			DPRINTK("Copied %d bytes to user buffer\n", ret_size);

			dataex.copied_so_far += ret_size;
			if (dataex.copied_so_far >= pak->data_len + 4) {
				DPRINTK("All data from this packet given to app.\n");
				/* All data given to app */

				del_singleshot_timer_sync(&pak->
							  processing_timer);
				list_add_tail(&pak->next, &dataex.current_pak);
				/*
				 * The more frontends that are handled at the same time,
				 * the more time we give the TPM to process the request.
				 */
				mod_timer(&pak->processing_timer,
					  jiffies + (num_frontends * 60 * HZ));
				dataex.copied_so_far = 0;
			} else {
				list_add(&pak->next, &dataex.pending_pak);
			}
		}
	}
	write_unlock_irqrestore(&dataex.pak_lock, flags);

	DPRINTK("Returning result from read to app: %d\n", ret_size);

	return ret_size;
}

/*
 * Write operation - only works after a previous read operation!
 */
static ssize_t vtpm_op_write(struct file *file,
			     const char __user * data, size_t size,
			     loff_t * offset)
{
	struct packet *pak;
	int rc = 0;
	unsigned int off = 4;
	unsigned long flags;
	struct vtpm_resp_hdr vrh;

	/*
	 * Minimum required packet size is:
	 * 4 bytes for instance number
	 * 2 bytes for tag
	 * 4 bytes for paramSize
	 * 4 bytes for the ordinal
	 * sum: 14 bytes
	 */
	if (size < sizeof (vrh))
		return -EFAULT;

	if (copy_from_user(&vrh, data, sizeof (vrh)))
		return -EFAULT;

	/* malformed packet? */
	if ((off + ntohl(vrh.len_no)) != size)
		return -EFAULT;

	write_lock_irqsave(&dataex.pak_lock, flags);
	pak = packet_find_instance(&dataex.current_pak,
				   ntohl(vrh.instance_no));

	if (pak == NULL) {
		write_unlock_irqrestore(&dataex.pak_lock, flags);
		DPRINTK(KERN_ALERT "No associated packet! (inst=%d)\n",
			ntohl(vrh.instance_no));
		return -EFAULT;
	}

	del_singleshot_timer_sync(&pak->processing_timer);
	list_del(&pak->next);

	write_unlock_irqrestore(&dataex.pak_lock, flags);

	/*
	 * The first 'off' bytes must be the instance number - skip them.
	 */
	size -= off;

	rc = packet_write(pak, &data[off], size, 1);

	if (rc > 0) {
		/* account for the 4 instance-number bytes skipped above */
		rc += off;
	}
	packet_free(pak);
	return rc;
}

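/*
 * On release, drop all queued packets (sending TPM_FAIL responses to
 * frontends that are still connected) and clear the opener flag so a
 * new client may open the device.
 */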
static int vtpm_op_release(struct inode *inode, struct file *file)
{
	unsigned long flags;

	vtpm_release_packets(NULL, 1);
	write_lock_irqsave(&dataex.pak_lock, flags);
	dataex.has_opener = 0;
	write_unlock_irqrestore(&dataex.pak_lock, flags);
	return 0;
}

static unsigned int vtpm_op_poll(struct file *file,
				 struct poll_table_struct *pts)
{
	unsigned int flags = POLLOUT | POLLWRNORM;

	poll_wait(file, &dataex.wait_queue, pts);
	if (!list_empty(&dataex.pending_pak)) {
		flags |= POLLIN | POLLRDNORM;
	}
	return flags;
}

static struct file_operations vtpm_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = vtpm_op_open,
	.read = vtpm_op_read,
	.write = vtpm_op_write,
	.release = vtpm_op_release,
	.poll = vtpm_op_poll,
};

static struct miscdevice vtpms_miscdevice = {
	.minor = 225,
	.name = "vtpm",
	.fops = &vtpm_ops,
};

/***************************************************************
 Utility functions
***************************************************************/
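/*
 * Build a minimal TPM response carrying the TPM_FAIL return code and
 * send it back to the frontend that issued the request.
 */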
static int tpm_send_fail_message(struct packet *pak, u8 req_tag)
{
	int rc;
	static const unsigned char tpm_error_message_fail[] = {
		0x00, 0x00,
		0x00, 0x00, 0x00, 0x0a,
		0x00, 0x00, 0x00, 0x09	/* TPM_FAIL */
	};
	unsigned char buffer[sizeof (tpm_error_message_fail)];

	memcpy(buffer, tpm_error_message_fail,
	       sizeof (tpm_error_message_fail));
	/*
	 * Insert the right response tag depending on the given tag
	 * All response tags are '+3' to the request tag.
	 */
	buffer[1] = req_tag + 3;

	/*
	 * Write the data to shared memory and notify the front-end
	 */
	rc = packet_write(pak, buffer, sizeof (buffer), 0);

	return rc;
}

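/*
 * Remove all packets belonging to 'tpmif' (or all packets if tpmif is
 * NULL) from the given list, optionally sending TPM_FAIL responses to
 * frontends that are still connected. Returns 1 if the packet at the
 * head of the list was among those released.
 */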
static int _vtpm_release_packets(struct list_head *head,
				 tpmif_t * tpmif, int send_msgs)
{
	int aborted = 0;
	int c = 0;
	struct packet *pak;
	struct list_head *pos, *tmp;

	list_for_each_safe(pos, tmp, head) {
		pak = list_entry(pos, struct packet, next);
		c += 1;

		if (tpmif == NULL || pak->tpmif == tpmif) {
			int can_send = 0;

			del_singleshot_timer_sync(&pak->processing_timer);
			list_del(&pak->next);

			if (pak->tpmif && pak->tpmif->status == CONNECTED) {
				can_send = 1;
			}

			if (send_msgs && can_send) {
				tpm_send_fail_message(pak, pak->req_tag);
			}
			packet_free(pak);
			if (c == 1)
				aborted = 1;
		}
	}
	return aborted;
}

int vtpm_release_packets(tpmif_t * tpmif, int send_msgs)
{
	unsigned long flags;

	write_lock_irqsave(&dataex.pak_lock, flags);

	dataex.aborted = _vtpm_release_packets(&dataex.pending_pak,
					       tpmif,
					       send_msgs);
	_vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);

	write_unlock_irqrestore(&dataex.pak_lock, flags);
	return 0;
}

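/*
 * Queue a received request for the userspace reader and give the TPM
 * 30 seconds to pick it up. Fails if nobody has the device open.
 */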
static int vtpm_queue_packet(struct packet *pak)
{
	int rc = 0;

	if (dataex.has_opener) {
		unsigned long flags;

		write_lock_irqsave(&dataex.pak_lock, flags);
		list_add_tail(&pak->next, &dataex.pending_pak);
		/* give the TPM some time to pick up the request */
		mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
		write_unlock_irqrestore(&dataex.pak_lock, flags);

		wake_up_interruptible(&dataex.wait_queue);
	} else {
		rc = -EFAULT;
	}
	return rc;
}

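/*
 * Take a request of 'size' bytes from the frontend's shared ring,
 * sanity-check its header and queue it for the userspace reader. On
 * failure a TPM_FAIL response is sent back to the frontend.
 */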
static int vtpm_receive(tpmif_t * tpmif, u32 size)
{
	int rc = 0;
	unsigned char buffer[10];
	__be32 *native_size;
	struct packet *pak = packet_alloc(tpmif, size, 0, 0);

	if (!pak)
		return -ENOMEM;
	/*
	 * Read 10 bytes from the received buffer to test its
	 * content for validity.
	 */
	if (sizeof (buffer) != packet_read(pak,
					   sizeof (buffer), buffer,
					   sizeof (buffer), 0)) {
		goto failexit;
	}
	/*
	 * Reset the packet read pointer so we can read all its
	 * contents again.
	 */
	packet_reset(pak);

	native_size = (__force __be32 *) (&buffer[4 + 2]);
	/*
	 * Verify that the size of the packet is correct
	 * as indicated and that there's actually someone reading packets.
	 * The minimum size of the packet is '10' for tag, size indicator
	 * and ordinal.
	 */
	if (size < 10 ||
	    be32_to_cpu(*native_size) != size ||
	    0 == dataex.has_opener || tpmif->status != CONNECTED) {
		rc = -EINVAL;
		goto failexit;
	} else {
		rc = vtpm_queue_packet(pak);
		if (rc < 0)
			goto failexit;
	}
	return 0;

failexit:
	if (pak) {
		tpm_send_fail_message(pak, buffer[4 + 1]);
		packet_free(pak);
	}
	return rc;
}

/*
 * Timeout function that gets invoked when a packet has not been processed
 * during the timeout period.
 * The packet must be on a list when this function is invoked. This
 * also means that once it's taken off a list, the timer must be
 * destroyed as well.
 */
static void processing_timeout(unsigned long ptr)
{
	struct packet *pak = (struct packet *)ptr;
	unsigned long flags;

	write_lock_irqsave(&dataex.pak_lock, flags);
	/*
	 * Check whether the packet is still on one of the lists.
	 */
	if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
	    pak == packet_find_packet(&dataex.current_pak, pak)) {
		if ((pak->flags & PACKET_FLAG_DISCARD_RESPONSE) == 0) {
			tpm_send_fail_message(pak, pak->req_tag);
		}
		/* discard future responses */
		pak->flags |= PACKET_FLAG_DISCARD_RESPONSE;
	}

	write_unlock_irqrestore(&dataex.pak_lock, flags);
}

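/*
 * Frontends with pending work are kept on tpm_schedule_list; the
 * tpm_tx_tasklet walks this list and passes each request up to the
 * userspace reader.
 */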
static void tpm_tx_action(unsigned long unused);
static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);

static struct list_head tpm_schedule_list;
static spinlock_t tpm_schedule_list_lock;

static inline void maybe_schedule_tx_action(void)
{
	smp_mb();
	tasklet_schedule(&tpm_tx_tasklet);
}

static inline int __on_tpm_schedule_list(tpmif_t * tpmif)
{
	return tpmif->list.next != NULL;
}

static void remove_from_tpm_schedule_list(tpmif_t * tpmif)
{
	spin_lock_irq(&tpm_schedule_list_lock);
	if (likely(__on_tpm_schedule_list(tpmif))) {
		list_del(&tpmif->list);
		tpmif->list.next = NULL;
		tpmif_put(tpmif);
	}
	spin_unlock_irq(&tpm_schedule_list_lock);
}

static void add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
{
	if (__on_tpm_schedule_list(tpmif))
		return;

	spin_lock_irq(&tpm_schedule_list_lock);
	if (!__on_tpm_schedule_list(tpmif) && tpmif->active) {
		list_add_tail(&tpmif->list, &tpm_schedule_list);
		tpmif_get(tpmif);
	}
	spin_unlock_irq(&tpm_schedule_list_lock);
}

void tpmif_schedule_work(tpmif_t * tpmif)
{
	add_to_tpm_schedule_list_tail(tpmif);
	maybe_schedule_tx_action();
}

void tpmif_deschedule_work(tpmif_t * tpmif)
{
	remove_from_tpm_schedule_list(tpmif);
}

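/*
 * Tasklet handler: drain the schedule list and pass the request found
 * in each frontend's first ring slot up via vtpm_receive().
 */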
static void tpm_tx_action(unsigned long unused)
{
	struct list_head *ent;
	tpmif_t *tpmif;
	tpmif_tx_request_t *tx;

	DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);

	while (!list_empty(&tpm_schedule_list)) {
		/* Get a tpmif from the list with work to do. */
		ent = tpm_schedule_list.next;
		tpmif = list_entry(ent, tpmif_t, list);
		tpmif_get(tpmif);
		remove_from_tpm_schedule_list(tpmif);

		tx = &tpmif->tx->ring[0].req;

		/* pass it up */
		vtpm_receive(tpmif, tx->size);

		tpmif_put(tpmif);
	}
}

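/*
 * Event channel interrupt handler: a frontend has posted a request, so
 * add it to the schedule list and kick the tasklet.
 */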
irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
	tpmif_t *tpmif = (tpmif_t *) dev_id;

	add_to_tpm_schedule_list_tail(tpmif);
	maybe_schedule_tx_action();
	return IRQ_HANDLED;
}

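/*
 * Module initialisation: register the 'vtpm' misc device, set up the
 * packet-exchange state and the schedule list, then initialise the
 * interface and xenbus layers.
 */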
static int __init tpmback_init(void)
{
	int rc;

	if ((rc = misc_register(&vtpms_miscdevice)) != 0) {
		printk(KERN_ALERT
		       "Could not register misc device for TPM BE.\n");
		return rc;
	}

	dataex_init(&dataex);

	spin_lock_init(&tpm_schedule_list_lock);
	INIT_LIST_HEAD(&tpm_schedule_list);

	tpmif_interface_init();
	tpmif_xenbus_init();

	printk(KERN_ALERT "Successfully initialized TPM backend driver.\n");

	return 0;
}

module_init(tpmback_init);

void __exit tpmback_exit(void)
{
	vtpm_release_packets(NULL, 0);
	tpmif_xenbus_exit();
	tpmif_interface_exit();
	misc_deregister(&vtpms_miscdevice);
}

MODULE_LICENSE("Dual BSD/GPL");