ia64/xen-unstable

linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c @ 6742:ac6605bceb9d

remove pointless NULL check before calling kfree

Signed-off-by: Vincent Hanquez <vincent@xensource.com>
author:   vh249@arcadians.cl.cam.ac.uk
date:     Sat Sep 10 14:41:16 2005 +0000
parents:  cdfa7dd00c44
children: 9ead08216805
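
The pattern removed is the redundant NULL guard around kfree(): kfree()
is a no-op when passed a NULL pointer, so the check adds nothing. An
illustrative sketch of the change (not the exact hunk from this
changeset):

    /* before: redundant guard */
    if (pak->data_buffer != NULL)
            kfree(pak->data_buffer);

    /* after: kfree(NULL) is a no-op */
    kfree(pak->data_buffer);

packet_free() below shows the resulting unguarded call.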

/******************************************************************************
 * drivers/xen/tpmback/tpmback.c
 *
 * Copyright (c) 2005, IBM Corporation
 *
 * Author: Stefan Berger, stefanb@us.ibm.com
 * Grant table support: Mahadevan Gomathisankaran
 *
 * This code has been derived from drivers/xen/netback/netback.c
 * Copyright (c) 2002-2004, K A Fraser
 *
 */

#include "common.h"
#include <asm-xen/evtchn.h>

#include <linux/types.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <asm/uaccess.h>
#include <asm-xen/xenbus.h>
#include <asm-xen/xen-public/grant_table.h>

struct data_exchange {
        struct list_head pending_pak;
        struct list_head current_pak;
        unsigned int copied_so_far;
        u8 has_opener;
        rwlock_t pak_lock;      /* protects all of the previous fields */
        wait_queue_head_t wait_queue;
};

struct packet {
        struct list_head next;
        unsigned int data_len;
        u8 *data_buffer;
        tpmif_t *tpmif;
        u32 tpm_instance;
        u8 req_tag;
        u32 last_read;
        u8 flags;
        struct timer_list processing_timer;
};

enum {
        PACKET_FLAG_DISCARD_RESPONSE = 1,
        PACKET_FLAG_SEND_CONTROLMESSAGE = 2,
};

static struct data_exchange dataex;

/* local function prototypes */
static int vtpm_queue_packet(struct packet *pak);
static int _packet_write(struct packet *pak,
                         const char *data, size_t size,
                         int userbuffer);
static void processing_timeout(unsigned long ptr);
static int packet_read_shmem(struct packet *pak,
                             tpmif_t *tpmif,
                             u32 offset,
                             char *buffer,
                             int isuserbuffer,
                             u32 left);


#define MAX_PENDING_REQS TPMIF_TX_RING_SIZE

static multicall_entry_t tx_mcl[MAX_PENDING_REQS];

/* parenthesized so that MIN() expands safely inside larger expressions */
#define MIN(x,y) ((x) < (y) ? (x) : (y))

/***************************************************************
 Packet-related functions
***************************************************************/

static struct packet *
packet_find_instance(struct list_head *head, u32 tpm_instance)
{
        struct packet *pak;
        struct list_head *p;
        /*
         * traverse the list of packets and return the first
         * one with the given instance number
         */
        list_for_each(p, head) {
                pak = list_entry(p, struct packet, next);
                if (pak->tpm_instance == tpm_instance) {
                        return pak;
                }
        }
        return NULL;
}

static struct packet *
packet_find_packet(struct list_head *head, void *packet)
{
        struct packet *pak;
        struct list_head *p;
        /*
         * traverse the list of packets and return the given
         * packet if it is still linked into this list
         */
        list_for_each(p, head) {
                pak = list_entry(p, struct packet, next);
                if (pak == packet) {
                        return pak;
                }
        }
        return NULL;
}

static struct packet *
packet_alloc(tpmif_t *tpmif, u32 size, u8 req_tag, u8 flags)
{
        struct packet *pak = NULL;
        pak = kmalloc(sizeof(struct packet),
                      GFP_KERNEL);
        if (NULL != pak) {
                memset(pak, 0x0, sizeof(*pak));
                if (tpmif) {
                        pak->tpmif = tpmif;
                        pak->tpm_instance = tpmif->tpm_instance;
                }
                pak->data_len = size;
                pak->req_tag = req_tag;
                pak->last_read = 0;
                pak->flags = flags;

                /*
                 * cannot do tpmif_get(tpmif); bad things happen
                 * on the last tpmif_put()
                 */
                init_timer(&pak->processing_timer);
                pak->processing_timer.function = processing_timeout;
                pak->processing_timer.data = (unsigned long)pak;
        }
        return pak;
}

static inline void
packet_reset(struct packet *pak)
{
        pak->last_read = 0;
}

static inline void
packet_free(struct packet *pak)
{
        del_singleshot_timer_sync(&pak->processing_timer);
        kfree(pak->data_buffer);
        /*
         * cannot do tpmif_put(pak->tpmif); bad things happen
         * on the last tpmif_put()
         */
        kfree(pak);
}

static int
packet_set(struct packet *pak,
           const unsigned char *buffer, u32 size)
{
        int rc = 0;
        unsigned char *buf = kmalloc(size, GFP_KERNEL);
        if (NULL != buf) {
                pak->data_buffer = buf;
                memcpy(buf, buffer, size);
                pak->data_len = size;
        } else {
                rc = -ENOMEM;
        }
        return rc;
}

/*
 * Write data to the shared memory and send it to the FE.
 */
static int
packet_write(struct packet *pak,
             const char *data, size_t size,
             int userbuffer)
{
        int rc = 0;

        DPRINTK("Supposed to send %d bytes to front-end!\n",
                size);

        if (0 != (pak->flags & PACKET_FLAG_SEND_CONTROLMESSAGE)) {
#ifdef CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS
                u32 res;
                memcpy(&res, &data[2 + 4], sizeof(res));
                if (res != 0) {
                        /*
                         * Will close down this device and have the
                         * FE notified about closure.
                         */
                }
#endif
        }

        if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) {
                /* Don't send a response to this packet. Just acknowledge it. */
                rc = size;
        } else {
                rc = _packet_write(pak, data, size, userbuffer);
        }

        return rc;
}

static int
_packet_write(struct packet *pak,
              const char *data, size_t size,
              int userbuffer)
{
        /*
         * Write into the shared memory pages directly
         * and send it to the front end.
         */
        tpmif_t *tpmif = pak->tpmif;
        u16 handle;
        int rc = 0;
        unsigned int i = 0;
        unsigned int offset = 0;
        multicall_entry_t *mcl;

        if (tpmif == NULL)
                return -EFAULT;

        if (tpmif->status != CONNECTED) {
                return size;
        }

        mcl = tx_mcl;
        while (offset < size && i < TPMIF_TX_RING_SIZE) {
                unsigned int tocopy;
                struct gnttab_map_grant_ref map_op;
                struct gnttab_unmap_grant_ref unmap_op;
                tpmif_tx_request_t *tx;

                tx = &tpmif->tx->ring[i].req;

                if (0 == tx->addr) {
                        DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);
                        return 0;
                }

                map_op.host_addr = MMAP_VADDR(tpmif, i);
                map_op.flags = GNTMAP_host_map;
                map_op.ref = tx->ref;
                map_op.dom = tpmif->domid;

                if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
                                                       &map_op,
                                                       1))) {
                        BUG();
                }

                handle = map_op.handle;

                if (map_op.handle < 0) {
                        DPRINTK(" Grant table operation failure !\n");
                        return 0;
                }
                phys_to_machine_mapping[__pa(MMAP_VADDR(tpmif, i)) >>
                                        PAGE_SHIFT] =
                        FOREIGN_FRAME(map_op.dev_bus_addr >> PAGE_SHIFT);

                tocopy = size - offset;
                if (tocopy > PAGE_SIZE) {
                        tocopy = PAGE_SIZE;
                }
                if (userbuffer) {
                        if (copy_from_user((void *)(MMAP_VADDR(tpmif, i) |
                                                    (tx->addr & ~PAGE_MASK)),
                                           (void __user *)&data[offset],
                                           tocopy)) {
                                tpmif_put(tpmif);
                                return -EFAULT;
                        }
                } else {
                        memcpy((void *)(MMAP_VADDR(tpmif, i) |
                                        (tx->addr & ~PAGE_MASK)),
                               &data[offset], tocopy);
                }
                tx->size = tocopy;

                unmap_op.host_addr = MMAP_VADDR(tpmif, i);
                unmap_op.handle = handle;
                unmap_op.dev_bus_addr = 0;

                if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
                                                       &unmap_op,
                                                       1))) {
                        BUG();
                }

                offset += tocopy;
                i++;
        }

        rc = offset;
        DPRINTK("Notifying frontend via event channel %d\n",
                tpmif->evtchn);
        notify_via_evtchn(tpmif->evtchn);

        return rc;
}

/*
 * Read data from the shared memory and copy it directly into the
 * provided buffer. Advance the last_read indicator which tells
 * how many bytes have already been read.
 */
static int
packet_read(struct packet *pak, size_t numbytes,
            char *buffer, size_t buffersize,
            int userbuffer)
{
        tpmif_t *tpmif = pak->tpmif;
        /*
         * I am supposed to read 'numbytes' of data from the
         * buffer.
         * The first 4 bytes that are read are the instance number in
         * network byte order, after that comes the data from the
         * shared memory buffer.
         */
        u32 to_copy;
        u32 offset = 0;
        u32 room_left = buffersize;
        /*
         * Ensure that we see the request when we copy it.
         */
        mb();

        if (pak->last_read < 4) {
                /*
                 * copy the instance number into the buffer
                 */
                u32 instance_no = htonl(pak->tpm_instance);
                u32 last_read = pak->last_read;
                to_copy = MIN(4 - last_read, numbytes);

                if (userbuffer) {
                        if (copy_to_user(&buffer[0],
                                         &(((u8 *)&instance_no)[last_read]),
                                         to_copy)) {
                                return -EFAULT;
                        }
                } else {
                        memcpy(&buffer[0],
                               &(((u8 *)&instance_no)[last_read]),
                               to_copy);
                }

                pak->last_read += to_copy;
                offset += to_copy;
                room_left -= to_copy;
        }

        /*
         * If the packet has a data buffer appended, read from it...
         */

        if (room_left > 0) {
                if (pak->data_buffer) {
                        u32 to_copy = MIN(pak->data_len - offset, room_left);
                        u32 last_read = pak->last_read - 4;
                        if (userbuffer) {
                                if (copy_to_user(&buffer[offset],
                                                 &pak->data_buffer[last_read],
                                                 to_copy)) {
                                        return -EFAULT;
                                }
                        } else {
                                memcpy(&buffer[offset],
                                       &pak->data_buffer[last_read],
                                       to_copy);
                        }
                        pak->last_read += to_copy;
                        offset += to_copy;
                } else {
                        offset = packet_read_shmem(pak,
                                                   tpmif,
                                                   offset,
                                                   buffer,
                                                   userbuffer,
                                                   room_left);
                }
        }
        return offset;
}

static int
packet_read_shmem(struct packet *pak,
                  tpmif_t *tpmif,
                  u32 offset,
                  char *buffer,
                  int isuserbuffer,
                  u32 room_left)
{
        u32 last_read = pak->last_read - 4;
        u32 i = (last_read / PAGE_SIZE);
        u32 pg_offset = last_read & (PAGE_SIZE - 1);
        u32 to_copy;
        u16 handle;

        tpmif_tx_request_t *tx;
        tx = &tpmif->tx->ring[0].req;
        /*
         * Start copying data at the page with index 'i'
         * and within that page at offset 'pg_offset'.
         * Copy a maximum of 'room_left' bytes.
         */
        to_copy = MIN(PAGE_SIZE - pg_offset, room_left);
        while (to_copy > 0) {
                void *src;
                struct gnttab_map_grant_ref map_op;
                struct gnttab_unmap_grant_ref unmap_op;

                tx = &tpmif->tx->ring[i].req;

                map_op.host_addr = MMAP_VADDR(tpmif, i);
                map_op.flags = GNTMAP_host_map;
                map_op.ref = tx->ref;
                map_op.dom = tpmif->domid;

                if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
                                                       &map_op,
                                                       1))) {
                        BUG();
                }

                if (map_op.handle < 0) {
                        DPRINTK(" Grant table operation failure !\n");
                        return -EFAULT;
                }

                handle = map_op.handle;

                if (to_copy > tx->size) {
                        /*
                         * This is the case when the user wants to read more
                         * than what we have. So we just give him what we
                         * have.
                         */
                        to_copy = MIN(tx->size, to_copy);
                }

                DPRINTK("Copying from mapped memory at %08lx\n",
                        (unsigned long)(MMAP_VADDR(tpmif, i) |
                                        (tx->addr & ~PAGE_MASK)));

                src = (void *)(MMAP_VADDR(tpmif, i) |
                               ((tx->addr & ~PAGE_MASK) + pg_offset));
                if (isuserbuffer) {
                        if (copy_to_user(&buffer[offset],
                                         src,
                                         to_copy)) {
                                return -EFAULT;
                        }
                } else {
                        memcpy(&buffer[offset],
                               src,
                               to_copy);
                }

                DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
                        tpmif->domid, buffer[offset], buffer[offset + 1],
                        buffer[offset + 2], buffer[offset + 3]);

                unmap_op.host_addr = MMAP_VADDR(tpmif, i);
                unmap_op.handle = handle;
                unmap_op.dev_bus_addr = 0;

                if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
                                                       &unmap_op,
                                                       1))) {
                        BUG();
                }

                offset += to_copy;
                pg_offset = 0;
                last_read += to_copy;
                room_left -= to_copy;

                to_copy = MIN(PAGE_SIZE, room_left);
                i++;
        } /* while (to_copy > 0) */
        /*
         * Adjust the last_read pointer
         */
        pak->last_read = last_read + 4;
        return offset;
}

/* ============================================================
 * The file layer for reading data from this device
 * ============================================================
 */
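
/*
 * Only one process (the userspace TPM handler) may have the device
 * open at a time; a second open fails with -EPERM.
 */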
static int
vtpm_op_open(struct inode *inode, struct file *f)
{
        int rc = 0;
        unsigned long flags;

        write_lock_irqsave(&dataex.pak_lock, flags);
        if (dataex.has_opener == 0) {
                dataex.has_opener = 1;
        } else {
                rc = -EPERM;
        }
        write_unlock_irqrestore(&dataex.pak_lock, flags);
        return rc;
}
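
/*
 * Deliver the next pending request to userspace: the reader gets the
 * 4-byte instance number in network byte order followed by the request
 * data. Blocks (interruptibly) while no request is pending.
 */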
static ssize_t
vtpm_op_read(struct file *file,
             char __user *data, size_t size, loff_t *offset)
{
        int ret_size = -ENODATA;
        struct packet *pak = NULL;
        unsigned long flags;

        write_lock_irqsave(&dataex.pak_lock, flags);

        if (list_empty(&dataex.pending_pak)) {
                write_unlock_irqrestore(&dataex.pak_lock, flags);
                wait_event_interruptible(dataex.wait_queue,
                                         !list_empty(&dataex.pending_pak));
                write_lock_irqsave(&dataex.pak_lock, flags);
        }

        if (!list_empty(&dataex.pending_pak)) {
                unsigned int left;
                pak = list_entry(dataex.pending_pak.next, struct packet, next);

                left = pak->data_len - dataex.copied_so_far;

                DPRINTK("size given by app: %d, available: %d\n", size, left);

                ret_size = MIN(size, left);

                ret_size = packet_read(pak, ret_size, data, size, 1);
                if (ret_size < 0) {
                        ret_size = -EFAULT;
                } else {
                        DPRINTK("Copied %d bytes to user buffer\n", ret_size);

                        dataex.copied_so_far += ret_size;
                        if (dataex.copied_so_far >= pak->data_len + 4) {
                                DPRINTK("All data from this packet given to app.\n");
                                /* All data given to app */

                                del_singleshot_timer_sync(&pak->processing_timer);
                                list_del(&pak->next);
                                list_add_tail(&pak->next, &dataex.current_pak);
                                /*
                                 * The more frontends that are handled at the
                                 * same time, the more time we give the TPM to
                                 * process the request.
                                 */
                                mod_timer(&pak->processing_timer,
                                          jiffies + (num_frontends * 10 * HZ));
                                dataex.copied_so_far = 0;
                        }
                }
        }
        write_unlock_irqrestore(&dataex.pak_lock, flags);

        DPRINTK("Returning result from read to app: %d\n", ret_size);

        return ret_size;
}

/*
 * Write operation - only works after a previous read operation!
 */
static ssize_t
vtpm_op_write(struct file *file, const char __user *data, size_t size,
              loff_t *offset)
{
        struct packet *pak;
        int rc = 0;
        unsigned int off = 4;
        unsigned long flags;
        u32 instance_no = 0;
        u32 len_no = 0;

        /*
         * Minimum required packet size is:
         * 4 bytes for instance number
         * 2 bytes for tag
         * 4 bytes for paramSize
         * 4 bytes for the ordinal
         * sum: 14 bytes
         */
        if (size < off + 10) {
                return -EFAULT;
        }

        if (copy_from_user(&instance_no,
                           (void __user *)&data[0],
                           4)) {
                return -EFAULT;
        }

        if (copy_from_user(&len_no,
                           (void __user *)&data[off + 2],
                           4) ||
            (off + ntohl(len_no) != size)) {
                return -EFAULT;
        }

        write_lock_irqsave(&dataex.pak_lock, flags);
        pak = packet_find_instance(&dataex.current_pak, ntohl(instance_no));

        if (pak == NULL) {
                write_unlock_irqrestore(&dataex.pak_lock, flags);
                printk(KERN_ALERT "No associated packet!\n");
                return -EFAULT;
        } else {
                del_singleshot_timer_sync(&pak->processing_timer);
                list_del(&pak->next);
        }

        write_unlock_irqrestore(&dataex.pak_lock, flags);

        /*
         * The first 'off' bytes must be the instance number.
         * I will just pull that from the packet.
         */
        size -= off;
        data = &data[off];

        rc = packet_write(pak, data, size, 1);

        if (rc > 0) {
                /* I neglected the first 4 bytes */
                rc += off;
        }
        packet_free(pak);
        return rc;
}

static int
vtpm_op_release(struct inode *inode, struct file *file)
{
        unsigned long flags;
        vtpm_release_packets(NULL, 1);
        write_lock_irqsave(&dataex.pak_lock, flags);
        dataex.has_opener = 0;
        write_unlock_irqrestore(&dataex.pak_lock, flags);
        return 0;
}
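
/*
 * Poll support is a stub: returning 0 reports no events, so readers
 * rely on the blocking vtpm_op_read() path above.
 */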
static unsigned int
vtpm_op_poll(struct file *file, struct poll_table_struct *pst)
{
        return 0;
}

static struct file_operations vtpm_ops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .open = vtpm_op_open,
        .read = vtpm_op_read,
        .write = vtpm_op_write,
        .release = vtpm_op_release,
        .poll = vtpm_op_poll,
};

static struct miscdevice ibmvtpms_miscdevice = {
        .minor = 225,
        .name = "vtpm",
        .fops = &vtpm_ops,
};
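
/*
 * A minimal userspace sketch (not part of this driver) of the framing
 * vtpm_op_read()/vtpm_op_write() expect: each read yields the 4-byte
 * instance number in network byte order followed by the TPM request,
 * and the response must be written back behind the same 4-byte prefix.
 * The node name /dev/vtpm is an assumption based on the miscdevice
 * name above; handle_tpm_request() is a hypothetical helper that runs
 * the command against the real or emulated TPM.
 *
 *      uint8_t req[4 + 4096], rsp[4 + 4096];
 *      ssize_t n = read(fd, req, sizeof(req));   // instance no. + request
 *      if (n >= 4 + 10) {                        // minimum packet size
 *              size_t rsp_len = handle_tpm_request(&req[4], n - 4, &rsp[4]);
 *              memcpy(rsp, req, 4);              // echo the instance number
 *              write(fd, rsp, 4 + rsp_len);
 *      }
 */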

/***************************************************************
 Virtual TPM functions and data structures
***************************************************************/

static u8 create_cmd[] = {
        1, 193,         /* 0: TPM_TAG_RQU_COMMAND */
        0, 0, 0, 19,    /* 2: length */
        0, 0, 0, 0x1,   /* 6: VTPM_ORD_OPEN */
        0,              /* 10: VTPM type */
        0, 0, 0, 0,     /* 11: domain id */
        0, 0, 0, 0      /* 15: instance id */
};

static u8 destroy_cmd[] = {
        1, 193,         /* 0: TPM_TAG_RQU_COMMAND */
        0, 0, 0, 14,    /* 2: length */
        0, 0, 0, 0x2,   /* 6: VTPM_ORD_CLOSE */
        0, 0, 0, 0      /* 10: instance id */
};

int tpmif_vtpm_open(tpmif_t *tpmif, domid_t domid, u32 instance)
{
        int rc = 0;
        struct packet *pak = packet_alloc(tpmif, sizeof(create_cmd),
                                          create_cmd[0],
                                          PACKET_FLAG_DISCARD_RESPONSE |
                                          PACKET_FLAG_SEND_CONTROLMESSAGE);
        if (pak) {
                u8 buf[sizeof(create_cmd)];
                u32 domid_no = htonl((u32)domid);
                u32 instance_no = htonl(instance);
                memcpy(buf, create_cmd, sizeof(create_cmd));

                memcpy(&buf[11], &domid_no, sizeof(u32));
                memcpy(&buf[15], &instance_no, sizeof(u32));

                /* copy the buffer into the packet */
                rc = packet_set(pak, buf, sizeof(buf));

                if (rc == 0) {
                        pak->tpm_instance = 0;
                        rc = vtpm_queue_packet(pak);
                }
                if (rc < 0) {
                        /* could not be queued or built */
                        packet_free(pak);
                }
        } else {
                rc = -ENOMEM;
        }
        return rc;
}

int tpmif_vtpm_close(u32 instid)
{
        int rc = 0;
        struct packet *pak;

        /* was create_cmd; this packet carries the destroy command */
        pak = packet_alloc(NULL,
                           sizeof(destroy_cmd),
                           destroy_cmd[0],
                           PACKET_FLAG_DISCARD_RESPONSE |
                           PACKET_FLAG_SEND_CONTROLMESSAGE);
        if (pak) {
                u8 buf[sizeof(destroy_cmd)];
                u32 instid_no = htonl(instid);
                memcpy(buf, destroy_cmd, sizeof(destroy_cmd));
                memcpy(&buf[10], &instid_no, sizeof(u32));

                /* copy the buffer into the packet */
                rc = packet_set(pak, buf, sizeof(buf));

                if (rc == 0) {
                        pak->tpm_instance = 0;
                        rc = vtpm_queue_packet(pak);
                }
                if (rc < 0) {
                        /* could not be queued or built */
                        packet_free(pak);
                }
        } else {
                rc = -ENOMEM;
        }
        return rc;
}

/***************************************************************
 Utility functions
***************************************************************/

static int
tpm_send_fail_message(struct packet *pak, u8 req_tag)
{
        int rc;
        static const unsigned char tpm_error_message_fail[] = {
                0x00, 0x00,
                0x00, 0x00, 0x00, 0x0a,
                0x00, 0x00, 0x00, 0x09  /* TPM_FAIL */
        };
        unsigned char buffer[sizeof(tpm_error_message_fail)];

        memcpy(buffer, tpm_error_message_fail, sizeof(tpm_error_message_fail));
        /*
         * Insert the right response tag depending on the given tag
         * All response tags are '+3' to the request tag.
         */
        buffer[1] = req_tag + 3;

        /*
         * Write the data to shared memory and notify the front-end
         */
        rc = packet_write(pak, buffer, sizeof(buffer), 0);

        return rc;
}

static void
_vtpm_release_packets(struct list_head *head, tpmif_t *tpmif,
                      int send_msgs)
{
        struct packet *pak;
        struct list_head *pos, *tmp;

        list_for_each_safe(pos, tmp, head) {
                pak = list_entry(pos, struct packet, next);
                if (tpmif == NULL || pak->tpmif == tpmif) {
                        int can_send = 0;
                        del_singleshot_timer_sync(&pak->processing_timer);
                        list_del(&pak->next);

                        if (pak->tpmif && pak->tpmif->status == CONNECTED) {
                                can_send = 1;
                        }

                        if (send_msgs && can_send) {
                                tpm_send_fail_message(pak, pak->req_tag);
                        }
                        packet_free(pak);
                }
        }
}

int
vtpm_release_packets(tpmif_t *tpmif, int send_msgs)
{
        unsigned long flags;

        write_lock_irqsave(&dataex.pak_lock, flags);

        _vtpm_release_packets(&dataex.pending_pak, tpmif, send_msgs);
        _vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);

        write_unlock_irqrestore(&dataex.pak_lock, flags);
        return 0;
}
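
/*
 * Queue a packet for delivery to the userspace reader and arm its
 * processing timer. Fails when nobody has the device open.
 */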
static int vtpm_queue_packet(struct packet *pak)
{
        int rc = 0;
        if (dataex.has_opener) {
                unsigned long flags;
                write_lock_irqsave(&dataex.pak_lock, flags);
                list_add_tail(&pak->next, &dataex.pending_pak);
                /* give the TPM some time to pick up the request */
                mod_timer(&pak->processing_timer, jiffies + (10 * HZ));
                write_unlock_irqrestore(&dataex.pak_lock, flags);

                wake_up_interruptible(&dataex.wait_queue);
        } else {
                rc = -EFAULT;
        }
        return rc;
}
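
/*
 * Called from the tx tasklet when a front-end has posted a request:
 * wrap the shared-memory data in a struct packet, sanity-check the
 * header and queue the packet for userspace.
 */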
static int vtpm_receive(tpmif_t *tpmif, u32 size)
{
        int rc = 0;
        unsigned char buffer[10];
        __be32 *native_size;

        /* the request tag is filled in once the header has been read */
        struct packet *pak = packet_alloc(tpmif, size, 0, 0);
        if (NULL == pak) {
                return -ENOMEM;
        }
        /*
         * Read 10 bytes from the received buffer to test its
         * content for validity.
         */
        if (sizeof(buffer) != packet_read(pak,
                                          sizeof(buffer), buffer,
                                          sizeof(buffer), 0)) {
                goto failexit;
        }
        pak->req_tag = buffer[4 + 1];   /* low byte of the request tag */
        /*
         * Reset the packet read pointer so we can read all its
         * contents again.
         */
        packet_reset(pak);

        native_size = (__force __be32 *)(&buffer[4 + 2]);
        /*
         * Verify that the size of the packet is correct
         * as indicated and that there's actually someone reading packets.
         * The minimum size of the packet is '10' for tag, size indicator
         * and ordinal.
         */
        if (size < 10 ||
            be32_to_cpu(*native_size) != size ||
            0 == dataex.has_opener) {
                rc = -EINVAL;
                goto failexit;
        } else {
                if ((rc = vtpm_queue_packet(pak)) < 0) {
                        goto failexit;
                }
        }
        return 0;

failexit:
        if (pak) {
                tpm_send_fail_message(pak, buffer[4 + 1]);
                packet_free(pak);
        }
        return rc;
}

/*
 * Timeout function that gets invoked when a packet has not been processed
 * during the timeout period.
 * The packet must be on a list when this function is invoked. This
 * also means that once it's taken off a list, the timer must be
 * destroyed as well.
 */
static void processing_timeout(unsigned long ptr)
{
        struct packet *pak = (struct packet *)ptr;
        unsigned long flags;
        write_lock_irqsave(&dataex.pak_lock, flags);
        /*
         * Check whether the packet is still on one of the lists.
         */
        if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
            pak == packet_find_packet(&dataex.current_pak, pak)) {
                list_del(&pak->next);
                tpm_send_fail_message(pak, pak->req_tag);
                packet_free(pak);
        }

        write_unlock_irqrestore(&dataex.pak_lock, flags);
}
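
/***************************************************************
 Front-end scheduling
***************************************************************/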

static void tpm_tx_action(unsigned long unused);
static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);

#define MAX_PENDING_REQS TPMIF_TX_RING_SIZE

static struct list_head tpm_schedule_list;
static spinlock_t tpm_schedule_list_lock;

static inline void
maybe_schedule_tx_action(void)
{
        smp_mb();
        tasklet_schedule(&tpm_tx_tasklet);
}
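
/* list.next == NULL marks a tpmif that is not on the schedule list */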
static inline int
__on_tpm_schedule_list(tpmif_t *tpmif)
{
        return tpmif->list.next != NULL;
}

static void
remove_from_tpm_schedule_list(tpmif_t *tpmif)
{
        spin_lock_irq(&tpm_schedule_list_lock);
        if (likely(__on_tpm_schedule_list(tpmif))) {
                list_del(&tpmif->list);
                tpmif->list.next = NULL;
                tpmif_put(tpmif);
        }
        spin_unlock_irq(&tpm_schedule_list_lock);
}

static void
add_to_tpm_schedule_list_tail(tpmif_t *tpmif)
{
        if (__on_tpm_schedule_list(tpmif))
                return;

        spin_lock_irq(&tpm_schedule_list_lock);
        if (!__on_tpm_schedule_list(tpmif) && tpmif->active) {
                list_add_tail(&tpmif->list, &tpm_schedule_list);
                tpmif_get(tpmif);
        }
        spin_unlock_irq(&tpm_schedule_list_lock);
}

void
tpmif_schedule_work(tpmif_t *tpmif)
{
        add_to_tpm_schedule_list_tail(tpmif);
        maybe_schedule_tx_action();
}

void
tpmif_deschedule_work(tpmif_t *tpmif)
{
        remove_from_tpm_schedule_list(tpmif);
}


static void
tpm_tx_action(unsigned long unused)
{
        struct list_head *ent;
        tpmif_t *tpmif;
        tpmif_tx_request_t *tx;

        DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);

        while (!list_empty(&tpm_schedule_list)) {
                /* Get a tpmif from the list with work to do. */
                ent = tpm_schedule_list.next;
                tpmif = list_entry(ent, tpmif_t, list);
                tpmif_get(tpmif);
                remove_from_tpm_schedule_list(tpmif);
                /*
                 * Ensure that we see the request when we read from it.
                 */
                mb();

                tx = &tpmif->tx->ring[0].req;

                /* pass it up */
                vtpm_receive(tpmif, tx->size);

                tpmif_put(tpmif);
        }
}

irqreturn_t
tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
        tpmif_t *tpmif = dev_id;
        add_to_tpm_schedule_list_tail(tpmif);
        maybe_schedule_tx_action();
        return IRQ_HANDLED;
}

static int __init
tpmback_init(void)
{
        int rc;

        if (!(xen_start_info->flags & SIF_TPM_BE_DOMAIN) &&
            !(xen_start_info->flags & SIF_INITDOMAIN)) {
                printk(KERN_ALERT "Neither TPM-BE Domain nor INIT domain!\n");
                return 0;
        }

        if ((rc = misc_register(&ibmvtpms_miscdevice)) != 0) {
                printk(KERN_ALERT "Could not register misc device for TPM BE.\n");
                return rc;
        }

        INIT_LIST_HEAD(&dataex.pending_pak);
        INIT_LIST_HEAD(&dataex.current_pak);
        dataex.has_opener = 0;
        rwlock_init(&dataex.pak_lock);
        init_waitqueue_head(&dataex.wait_queue);

        spin_lock_init(&tpm_schedule_list_lock);
        INIT_LIST_HEAD(&tpm_schedule_list);

        tpmif_interface_init();
        tpmif_xenbus_init();

        printk(KERN_ALERT "Successfully initialized TPM backend driver.\n");

        return 0;
}

__initcall(tpmback_init);