
view linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c @ 8121:58d46463413e

GNTTABOP_map_grant_ref returns error status and handle as
separate fields. Update callers for new interface. Also
use int16_t as standard error code type on all public
interfaces.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Nov 30 17:24:27 2005 +0100 (2005-11-30)
parents fcb7e5616102
children fd9b2c1bb577
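
The sketch below illustrates the updated caller pattern; it is not part of the changeset, and map_one_grant, vaddr, gref and domid are placeholder names. The point of the interface change is that the error status and the grant handle now come back in separate fields of the map operation, and the status field carries an int16_t error code where zero means success:

/* Illustrative sketch only (not from the changeset): map one grant,
 * check the separate status field, then record the handle for unmap. */
static int16_t map_one_grant(unsigned long vaddr, grant_ref_t gref,
                             domid_t domid, grant_handle_t *handle)
{
	struct gnttab_map_grant_ref op;

	op.host_addr = vaddr;		/* local address to map the frame at */
	op.flags     = GNTMAP_host_map;
	op.ref       = gref;		/* grant reference from the frontend */
	op.dom       = domid;		/* domain that granted the page */

	/* the hypercall itself only fails on malformed arguments */
	if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)))
		BUG();

	if (op.status)			/* int16_t error code, 0 == success */
		return op.status;	/* op.handle is not valid on failure */

	*handle = op.handle;		/* keep the handle for the later unmap */
	return 0;
}

The callers updated below (_packet_write() and packet_read_shmem()) follow this pattern: check map_op.status first, and only then use map_op.handle for the matching GNTTABOP_unmap_grant_ref.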
line source
1 /******************************************************************************
2 * drivers/xen/tpmback/tpmback.c
3 *
4 * Copyright (c) 2005, IBM Corporation
5 *
6 * Author: Stefan Berger, stefanb@us.ibm.com
7 * Grant table support: Mahadevan Gomathisankaran
8 *
9 * This code has been derived from drivers/xen/netback/netback.c
10 * Copyright (c) 2002-2004, K A Fraser
11 *
12 */
14 #include "common.h"
15 #include <asm-xen/evtchn.h>
17 #include <linux/types.h>
18 #include <linux/list.h>
19 #include <linux/miscdevice.h>
20 #include <linux/poll.h>
21 #include <asm/uaccess.h>
22 #include <asm-xen/xenbus.h>
23 #include <asm-xen/xen-public/grant_table.h>
26 /* local data structures */
27 struct data_exchange {
28 struct list_head pending_pak;
29 struct list_head current_pak;
30 unsigned int copied_so_far;
31 u8 has_opener;
32 rwlock_t pak_lock; // protects all of the previous fields
33 wait_queue_head_t wait_queue;
34 };
36 struct packet {
37 struct list_head next;
38 unsigned int data_len;
39 u8 *data_buffer;
40 tpmif_t *tpmif;
41 u32 tpm_instance;
42 u8 req_tag;
43 u32 last_read;
44 u8 flags;
45 struct timer_list processing_timer;
46 };
48 enum {
49 PACKET_FLAG_DISCARD_RESPONSE = 1,
50 PACKET_FLAG_CHECK_RESPONSESTATUS = 2,
51 };
53 static struct data_exchange dataex;
55 /* local function prototypes */
56 static int vtpm_queue_packet(struct packet *pak);
57 static int _packet_write(struct packet *pak,
58 const char *data, size_t size,
59 int userbuffer);
60 static void processing_timeout(unsigned long ptr);
61 static int packet_read_shmem(struct packet *pak,
62 tpmif_t *tpmif,
63 u32 offset,
64 char *buffer,
65 int isuserbuffer,
66 u32 left);
69 #define MAX_PENDING_REQS TPMIF_TX_RING_SIZE
71 #define MIN(x,y) ((x) < (y) ? (x) : (y))
74 /***************************************************************
75 Buffer copying
76 ***************************************************************/
77 static inline int
78 copy_from_buffer(void *to,
79 const void *from,
80 unsigned long size,
81 int userbuffer)
82 {
83 if (userbuffer) {
84 if (copy_from_user(to, from, size))
85 return -EFAULT;
86 } else {
87 memcpy(to, from, size);
88 }
89 return 0;
90 }
92 /***************************************************************
93 Packet-related functions
94 ***************************************************************/
96 static struct packet *
97 packet_find_instance(struct list_head *head, u32 tpm_instance)
98 {
99 struct packet *pak;
100 struct list_head *p;
101 /*
102 * traverse the list of packets and return the first
103 * one with the given instance number
104 */
105 list_for_each(p, head) {
106 pak = list_entry(p, struct packet, next);
107 if (pak->tpm_instance == tpm_instance) {
108 return pak;
109 }
110 }
111 return NULL;
112 }
114 static struct packet *
115 packet_find_packet(struct list_head *head, void *packet)
116 {
117 struct packet *pak;
118 struct list_head *p;
119 /*
120 * traverse the list of packets and return the
121 * given packet if it is still on the list
122 */
123 list_for_each(p, head) {
124 pak = list_entry(p, struct packet, next);
125 if (pak == packet) {
126 return pak;
127 }
128 }
129 return NULL;
130 }
132 static struct packet *
133 packet_alloc(tpmif_t *tpmif, u32 size, u8 req_tag, u8 flags)
134 {
135 struct packet *pak = NULL;
136 pak = kmalloc(sizeof(struct packet),
137 GFP_KERNEL);
138 if (NULL != pak) {
139 memset(pak, 0x0, sizeof(*pak));
140 if (tpmif) {
141 pak->tpmif = tpmif;
142 pak->tpm_instance = tpmif->tpm_instance;
143 }
144 pak->data_len = size;
145 pak->req_tag = req_tag;
146 pak->last_read = 0;
147 pak->flags = flags;
149 /*
150 * cannot do tpmif_get(tpmif); bad things happen
151 * on the last tpmif_put()
152 */
153 init_timer(&pak->processing_timer);
154 pak->processing_timer.function = processing_timeout;
155 pak->processing_timer.data = (unsigned long)pak;
156 }
157 return pak;
158 }
160 static void inline
161 packet_reset(struct packet *pak)
162 {
163 pak->last_read = 0;
164 }
166 static void inline
167 packet_free(struct packet *pak)
168 {
169 del_singleshot_timer_sync(&pak->processing_timer);
170 kfree(pak->data_buffer);
171 /*
172 * cannot do tpmif_put(pak->tpmif); bad things happen
173 * on the last tpmif_put()
174 */
175 kfree(pak);
176 }
178 static int
179 packet_set(struct packet *pak,
180 const unsigned char *buffer, u32 size)
181 {
182 int rc = 0;
183 unsigned char *buf = kmalloc(size, GFP_KERNEL);
184 if (NULL != buf) {
185 pak->data_buffer = buf;
186 memcpy(buf, buffer, size);
187 pak->data_len = size;
188 } else {
189 rc = -ENOMEM;
190 }
191 return rc;
192 }
195 /*
196 * Write data to the shared memory and send it to the FE.
197 */
198 static int
199 packet_write(struct packet *pak,
200 const char *data, size_t size,
201 int userbuffer)
202 {
203 int rc = 0;
205 DPRINTK("Supposed to send %d bytes to front-end!\n",
206 size);
208 if (0 != (pak->flags & PACKET_FLAG_CHECK_RESPONSESTATUS)) {
209 #ifdef CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS
210 u32 res;
211 if (copy_from_buffer(&res,
212 &data[2+4],
213 sizeof(res),
214 userbuffer)) {
215 return -EFAULT;
216 }
218 if (res != 0) {
219 /*
220 * Close down this device. The FE should be
221 * notified about the closure.
222 */
223 if (!pak->tpmif) {
224 return -EFAULT;
225 }
226 pak->tpmif->status = DISCONNECTING;
227 }
228 #endif
229 }
231 if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) {
232 /* Don't send a response to this packet. Just acknowledge it. */
233 rc = size;
234 } else {
235 rc = _packet_write(pak, data, size, userbuffer);
236 }
238 return rc;
239 }
242 static int
243 _packet_write(struct packet *pak,
244 const char *data, size_t size,
245 int userbuffer)
246 {
247 /*
248 * Write into the shared memory pages directly
249 * and send it to the front end.
250 */
251 tpmif_t *tpmif = pak->tpmif;
252 grant_handle_t handle;
253 int rc = 0;
254 unsigned int i = 0;
255 unsigned int offset = 0;
257 if (tpmif == NULL) {
258 return -EFAULT;
259 }
261 if (tpmif->status == DISCONNECTED) {
262 return size;
263 }
265 while (offset < size && i < TPMIF_TX_RING_SIZE) {
266 unsigned int tocopy;
267 struct gnttab_map_grant_ref map_op;
268 struct gnttab_unmap_grant_ref unmap_op;
269 tpmif_tx_request_t *tx;
271 tx = &tpmif->tx->ring[i].req;
273 if (0 == tx->addr) {
274 DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);
275 return 0;
276 }
278 map_op.host_addr = MMAP_VADDR(tpmif, i);
279 map_op.flags = GNTMAP_host_map;
280 map_op.ref = tx->ref;
281 map_op.dom = tpmif->domid;
283 if(unlikely(
284 HYPERVISOR_grant_table_op(
285 GNTTABOP_map_grant_ref,
286 &map_op,
287 1))) {
288 BUG();
289 }
291 handle = map_op.handle;
293 if (map_op.status) {
294 DPRINTK(" Grant table operation failure !\n");
295 return 0;
296 }
297 set_phys_to_machine(__pa(MMAP_VADDR(tpmif,i)) >> PAGE_SHIFT,
298 FOREIGN_FRAME(map_op.dev_bus_addr >> PAGE_SHIFT));
300 tocopy = MIN(size - offset, PAGE_SIZE);
302 if (copy_from_buffer((void *)(MMAP_VADDR(tpmif,i)|
303 (tx->addr & ~PAGE_MASK)),
304 &data[offset],
305 tocopy,
306 userbuffer)) {
307 tpmif_put(tpmif);
308 return -EFAULT;
309 }
310 tx->size = tocopy;
312 unmap_op.host_addr = MMAP_VADDR(tpmif, i);
313 unmap_op.handle = handle;
314 unmap_op.dev_bus_addr = 0;
316 if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
317 &unmap_op,
318 1))) {
319 BUG();
320 }
322 offset += tocopy;
323 i++;
324 }
326 rc = offset;
327 DPRINTK("Notifying frontend via irq %d\n",
328 tpmif->irq);
329 notify_remote_via_irq(tpmif->irq);
331 return rc;
332 }
334 /*
335 * Read data from the shared memory and copy it directly into the
336 * provided buffer. Advance the last_read indicator which tells
337 * how many bytes have already been read.
338 */
339 static int
340 packet_read(struct packet *pak, size_t numbytes,
341 char *buffer, size_t buffersize,
342 int userbuffer)
343 {
344 tpmif_t *tpmif = pak->tpmif;
345 /*
346 * I am supposed to read 'numbytes' of data from the
347 * buffer.
348 * The first 4 bytes that are read are the instance number in
349 * network byte order, after that comes the data from the
350 * shared memory buffer.
351 */
352 u32 to_copy;
353 u32 offset = 0;
354 u32 room_left = buffersize;
355 /*
356 * Ensure that we see the request when we copy it.
357 */
358 mb();
360 if (pak->last_read < 4) {
361 /*
362 * copy the instance number into the buffer
363 */
364 u32 instance_no = htonl(pak->tpm_instance);
365 u32 last_read = pak->last_read;
366 to_copy = MIN(4 - last_read, numbytes);
368 if (userbuffer) {
369 if (copy_to_user(&buffer[0],
370 &(((u8 *)&instance_no)[last_read]),
371 to_copy)) {
372 return -EFAULT;
373 }
374 } else {
375 memcpy(&buffer[0],
376 &(((u8 *)&instance_no)[last_read]),
377 to_copy);
378 }
380 pak->last_read += to_copy;
381 offset += to_copy;
382 room_left -= to_copy;
383 }
385 /*
386 * If the packet has a data buffer appended, read from it...
387 */
389 if (room_left > 0) {
390 if (pak->data_buffer) {
391 u32 to_copy = MIN(pak->data_len - offset, room_left);
392 u32 last_read = pak->last_read - 4;
393 if (userbuffer) {
394 if (copy_to_user(&buffer[offset],
395 &pak->data_buffer[last_read],
396 to_copy)) {
397 return -EFAULT;
398 }
399 } else {
400 memcpy(&buffer[offset],
401 &pak->data_buffer[last_read],
402 to_copy);
403 }
404 pak->last_read += to_copy;
405 offset += to_copy;
406 } else {
407 offset = packet_read_shmem(pak,
408 tpmif,
409 offset,
410 buffer,
411 userbuffer,
412 room_left);
413 }
414 }
415 return offset;
416 }
419 static int
420 packet_read_shmem(struct packet *pak,
421 tpmif_t *tpmif,
422 u32 offset,
423 char *buffer,
424 int isuserbuffer,
425 u32 room_left) {
426 u32 last_read = pak->last_read - 4;
427 u32 i = (last_read / PAGE_SIZE);
428 u32 pg_offset = last_read & (PAGE_SIZE - 1);
429 u32 to_copy;
430 grant_handle_t handle;
432 tpmif_tx_request_t *tx;
433 tx = &tpmif->tx->ring[0].req;
434 /*
435 * Start copying data at the page with index 'index'
436 * and within that page at offset 'offset'.
437 * Copy a maximum of 'room_left' bytes.
438 */
439 to_copy = MIN(PAGE_SIZE - pg_offset, room_left);
440 while (to_copy > 0) {
441 void *src;
442 struct gnttab_map_grant_ref map_op;
443 struct gnttab_unmap_grant_ref unmap_op;
445 tx = &tpmif->tx->ring[i].req;
447 map_op.host_addr = MMAP_VADDR(tpmif, i);
448 map_op.flags = GNTMAP_host_map;
449 map_op.ref = tx->ref;
450 map_op.dom = tpmif->domid;
452 if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
453 &map_op,
454 1))) {
455 BUG();
456 }
458 if (map_op.status) {
459 DPRINTK(" Grant table operation failure !\n");
460 return -EFAULT;
461 }
463 handle = map_op.handle;
465 if (to_copy > tx->size) {
466 /*
467 * This is the case when the user wants to read more
468 * than what we have. So we just give him what we
469 * have.
470 */
471 to_copy = MIN(tx->size, to_copy);
472 }
474 DPRINTK("Copying from mapped memory at %08lx\n",
475 (unsigned long)(MMAP_VADDR(tpmif,i) |
476 (tx->addr & ~PAGE_MASK)));
478 src = (void *)(MMAP_VADDR(tpmif,i) | ((tx->addr & ~PAGE_MASK) + pg_offset));
479 if (isuserbuffer) {
480 if (copy_to_user(&buffer[offset],
481 src,
482 to_copy)) {
483 return -EFAULT;
484 }
485 } else {
486 memcpy(&buffer[offset],
487 src,
488 to_copy);
489 }
492 DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
493 tpmif->domid, buffer[offset], buffer[offset+1],buffer[offset+2],buffer[offset+3]);
495 unmap_op.host_addr = MMAP_VADDR(tpmif, i);
496 unmap_op.handle = handle;
497 unmap_op.dev_bus_addr = 0;
499 if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
500 &unmap_op,
501 1))) {
502 BUG();
503 }
505 offset += to_copy;
506 pg_offset = 0;
507 last_read += to_copy;
508 room_left -= to_copy;
510 to_copy = MIN(PAGE_SIZE, room_left);
511 i++;
512 } /* while (to_copy > 0) */
513 /*
514 * Adjust the last_read pointer
515 */
516 pak->last_read = last_read + 4;
517 return offset;
518 }
521 /* ============================================================
522 * The file layer for reading data from this device
523 * ============================================================
524 */
525 static int
526 vtpm_op_open(struct inode *inode, struct file *f)
527 {
528 int rc = 0;
529 unsigned long flags;
531 write_lock_irqsave(&dataex.pak_lock, flags);
532 if (dataex.has_opener == 0) {
533 dataex.has_opener = 1;
534 } else {
535 rc = -EPERM;
536 }
537 write_unlock_irqrestore(&dataex.pak_lock, flags);
538 return rc;
539 }
541 static ssize_t
542 vtpm_op_read(struct file *file,
543 char __user * data, size_t size, loff_t * offset)
544 {
545 int ret_size = -ENODATA;
546 struct packet *pak = NULL;
547 unsigned long flags;
549 write_lock_irqsave(&dataex.pak_lock, flags);
551 if (list_empty(&dataex.pending_pak)) {
552 write_unlock_irqrestore(&dataex.pak_lock, flags);
553 wait_event_interruptible(dataex.wait_queue,
554 !list_empty(&dataex.pending_pak));
555 write_lock_irqsave(&dataex.pak_lock, flags);
556 }
558 if (!list_empty(&dataex.pending_pak)) {
559 unsigned int left;
560 pak = list_entry(dataex.pending_pak.next, struct packet, next);
562 left = pak->data_len - dataex.copied_so_far;
564 DPRINTK("size given by app: %d, available: %d\n", size, left);
566 ret_size = MIN(size,left);
568 ret_size = packet_read(pak, ret_size, data, size, 1);
569 if (ret_size < 0) {
570 ret_size = -EFAULT;
571 } else {
572 DPRINTK("Copied %d bytes to user buffer\n", ret_size);
574 dataex.copied_so_far += ret_size;
575 if (dataex.copied_so_far >= pak->data_len + 4) {
576 DPRINTK("All data from this packet given to app.\n");
577 /* All data given to app */
579 del_singleshot_timer_sync(&pak->processing_timer);
580 list_del(&pak->next);
581 list_add_tail(&pak->next, &dataex.current_pak);
582 /*
583 * The more frontends that are handled at the same time,
584 * the more time we give the TPM to process the request.
585 */
586 mod_timer(&pak->processing_timer,
587 jiffies + (num_frontends * 60 * HZ));
588 dataex.copied_so_far = 0;
589 }
590 }
591 }
592 write_unlock_irqrestore(&dataex.pak_lock, flags);
594 DPRINTK("Returning result from read to app: %d\n", ret_size);
596 return ret_size;
597 }
599 /*
600 * Write operation - only works after a previous read operation!
601 */
602 static ssize_t
603 vtpm_op_write(struct file *file, const char __user * data, size_t size,
604 loff_t * offset)
605 {
606 struct packet *pak;
607 int rc = 0;
608 unsigned int off = 4;
609 unsigned long flags;
610 u32 instance_no = 0;
611 u32 len_no = 0;
613 /*
614 * Minimum required packet size is:
615 * 4 bytes for instance number
616 * 2 bytes for tag
617 * 4 bytes for paramSize
618 * 4 bytes for the ordinal
619 * sum: 14 bytes
620 */
621 if ( size < off + 10 ) {
622 return -EFAULT;
623 }
625 if (copy_from_user(&instance_no,
626 (void __user *)&data[0],
627 4)) {
628 return -EFAULT;
629 }
631 if (copy_from_user(&len_no,
632 (void __user *)&data[off+2],
633 4) ||
634 (off + ntohl(len_no) != size)) {
635 return -EFAULT;
636 }
638 write_lock_irqsave(&dataex.pak_lock, flags);
639 pak = packet_find_instance(&dataex.current_pak, ntohl(instance_no));
641 if (pak == NULL) {
642 write_unlock_irqrestore(&dataex.pak_lock, flags);
643 printk(KERN_ALERT "No associated packet!\n");
644 return -EFAULT;
645 } else {
646 del_singleshot_timer_sync(&pak->processing_timer);
647 list_del(&pak->next);
648 }
650 write_unlock_irqrestore(&dataex.pak_lock, flags);
652 /*
653 * The first 'off' bytes must be the instance number.
654 * I will just pull that from the packet.
655 */
656 size -= off;
657 data = &data[off];
659 rc = packet_write(pak, data, size, 1);
661 if (rc > 0) {
662 /* account for the first 4 instance-number bytes stripped off above */
663 rc += off;
664 }
665 packet_free(pak);
666 return rc;
667 }
669 static int
670 vtpm_op_release(struct inode *inode, struct file *file)
671 {
672 unsigned long flags;
673 vtpm_release_packets(NULL, 1);
674 write_lock_irqsave(&dataex.pak_lock, flags);
675 dataex.has_opener = 0;
676 write_unlock_irqrestore(&dataex.pak_lock, flags);
677 return 0;
678 }
680 static unsigned int
681 vtpm_op_poll(struct file *file, struct poll_table_struct *pts)
682 {
683 unsigned int flags = POLLOUT | POLLWRNORM;
684 poll_wait(file, &dataex.wait_queue, pts);
685 if (!list_empty(&dataex.pending_pak)) {
686 flags |= POLLIN | POLLRDNORM;
687 }
688 return flags;
689 }
691 static struct file_operations vtpm_ops = {
692 .owner = THIS_MODULE,
693 .llseek = no_llseek,
694 .open = vtpm_op_open,
695 .read = vtpm_op_read,
696 .write = vtpm_op_write,
697 .release = vtpm_op_release,
698 .poll = vtpm_op_poll,
699 };
701 static struct miscdevice ibmvtpms_miscdevice = {
702 .minor = 225,
703 .name = "vtpm",
704 .fops = &vtpm_ops,
705 };
708 /***************************************************************
709 Virtual TPM functions and data structures
710 ***************************************************************/
712 static u8 create_cmd[] = {
713 1,193, /* 0: TPM_TAG_RQU_COMMAND */
714 0,0,0,19, /* 2: length */
715 0,0,0,0x1, /* 6: VTPM_ORD_OPEN */
716 0, /* 10: VTPM type */
717 0,0,0,0, /* 11: domain id */
718 0,0,0,0 /* 15: instance id */
719 };
721 static u8 destroy_cmd[] = {
722 1,193, /* 0: TPM_TAG_RQU_COMMAND */
723 0,0,0,14, /* 2: length */
724 0,0,0,0x2, /* 6: VTPM_ORD_CLOSE */
725 0,0,0,0 /* 10: instance id */
726 };
728 int tpmif_vtpm_open(tpmif_t *tpmif, domid_t domid, u32 instance)
729 {
730 int rc = 0;
731 struct packet *pak;
733 pak = packet_alloc(tpmif,
734 sizeof(create_cmd),
735 create_cmd[0],
736 PACKET_FLAG_DISCARD_RESPONSE|
737 PACKET_FLAG_CHECK_RESPONSESTATUS);
738 if (pak) {
739 u8 buf[sizeof(create_cmd)];
740 u32 domid_no = htonl((u32)domid);
741 u32 instance_no = htonl(instance);
742 memcpy(buf, create_cmd, sizeof(create_cmd));
744 memcpy(&buf[11], &domid_no, sizeof(u32));
745 memcpy(&buf[15], &instance_no, sizeof(u32));
747 /* copy the buffer into the packet */
748 rc = packet_set(pak, buf, sizeof(buf));
750 if (rc == 0) {
751 pak->tpm_instance = 0;
752 rc = vtpm_queue_packet(pak);
753 }
754 if (rc < 0) {
755 /* could not be queued or built */
756 packet_free(pak);
757 }
758 } else {
759 rc = -ENOMEM;
760 }
761 return rc;
762 }
764 int tpmif_vtpm_close(u32 instid)
765 {
766 int rc = 0;
767 struct packet *pak;
769 pak = packet_alloc(NULL,
770 sizeof(create_cmd),
771 create_cmd[0],
772 PACKET_FLAG_DISCARD_RESPONSE);
773 if (pak) {
774 u8 buf[sizeof(destroy_cmd)];
775 u32 instid_no = htonl(instid);
776 memcpy(buf, destroy_cmd, sizeof(destroy_cmd));
777 memcpy(&buf[10], &instid_no, sizeof(u32));
779 /* copy the buffer into the packet */
780 rc = packet_set(pak, buf, sizeof(buf));
782 if (rc == 0) {
783 pak->tpm_instance = 0;
784 rc = vtpm_queue_packet(pak);
785 }
786 if (rc < 0) {
787 /* could not be queued or built */
788 packet_free(pak);
789 }
790 } else {
791 rc = -ENOMEM;
792 }
793 return rc;
794 }
797 /***************************************************************
798 Utility functions
799 ***************************************************************/
801 static int
802 tpm_send_fail_message(struct packet *pak, u8 req_tag)
803 {
804 int rc;
805 static const unsigned char tpm_error_message_fail[] = {
806 0x00, 0x00,
807 0x00, 0x00, 0x00, 0x0a,
808 0x00, 0x00, 0x00, 0x09 /* TPM_FAIL */
809 };
810 unsigned char buffer[sizeof(tpm_error_message_fail)];
812 memcpy(buffer, tpm_error_message_fail, sizeof(tpm_error_message_fail));
813 /*
814 * Insert the right response tag depending on the given tag
815 * All response tags are '+3' to the request tag.
816 */
817 buffer[1] = req_tag + 3;
819 /*
820 * Write the data to shared memory and notify the front-end
821 */
822 rc = packet_write(pak, buffer, sizeof(buffer), 0);
824 return rc;
825 }
828 static void
829 _vtpm_release_packets(struct list_head *head, tpmif_t *tpmif,
830 int send_msgs)
831 {
832 struct packet *pak;
833 struct list_head *pos, *tmp;
835 list_for_each_safe(pos, tmp, head) {
836 pak = list_entry(pos, struct packet, next);
837 if (tpmif == NULL || pak->tpmif == tpmif) {
838 int can_send = 0;
839 del_singleshot_timer_sync(&pak->processing_timer);
840 list_del(&pak->next);
842 if (pak->tpmif && pak->tpmif->status == CONNECTED) {
843 can_send = 1;
844 }
846 if (send_msgs && can_send) {
847 tpm_send_fail_message(pak, pak->req_tag);
848 }
849 packet_free(pak);
850 }
851 }
852 }
855 int
856 vtpm_release_packets(tpmif_t *tpmif, int send_msgs)
857 {
858 unsigned long flags;
860 write_lock_irqsave(&dataex.pak_lock, flags);
862 _vtpm_release_packets(&dataex.pending_pak, tpmif, send_msgs);
863 _vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);
865 write_unlock_irqrestore(&dataex.pak_lock,
866 flags);
867 return 0;
868 }
871 static int vtpm_queue_packet(struct packet *pak)
872 {
873 int rc = 0;
874 if (dataex.has_opener) {
875 unsigned long flags;
876 write_lock_irqsave(&dataex.pak_lock, flags);
877 list_add_tail(&pak->next, &dataex.pending_pak);
878 /* give the TPM some time to pick up the request */
879 mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
880 write_unlock_irqrestore(&dataex.pak_lock,
881 flags);
883 wake_up_interruptible(&dataex.wait_queue);
884 } else {
885 rc = -EFAULT;
886 }
887 return rc;
888 }
891 static int vtpm_receive(tpmif_t *tpmif, u32 size)
892 {
893 int rc = 0;
894 unsigned char buffer[10];
895 __be32 *native_size;
897 struct packet *pak = packet_alloc(tpmif, size, buffer[4], 0);
898 if (NULL == pak) {
899 return -ENOMEM;
900 }
901 /*
902 * Read 10 bytes from the received buffer to test its
903 * content for validity.
904 */
905 if (sizeof(buffer) != packet_read(pak,
906 sizeof(buffer), buffer,
907 sizeof(buffer), 0)) {
908 goto failexit;
909 }
910 /*
911 * Reset the packet read pointer so we can read all its
912 * contents again.
913 */
914 packet_reset(pak);
916 native_size = (__force __be32 *)(&buffer[4+2]);
917 /*
918 * Verify that the size of the packet is correct
919 * as indicated and that there's actually someone reading packets.
920 * The minimum size of the packet is '10' for tag, size indicator
921 * and ordinal.
922 */
923 if (size < 10 ||
924 be32_to_cpu(*native_size) != size ||
925 0 == dataex.has_opener ||
926 tpmif->status != CONNECTED) {
927 rc = -EINVAL;
928 goto failexit;
929 } else {
930 if ((rc = vtpm_queue_packet(pak)) < 0) {
931 goto failexit;
932 }
933 }
934 return 0;
936 failexit:
937 if (pak) {
938 tpm_send_fail_message(pak, buffer[4+1]);
939 packet_free(pak);
940 }
941 return rc;
942 }
945 /*
946 * Timeout function that gets invoked when a packet has not been processed
947 * during the timeout period.
948 * The packet must be on a list when this function is invoked. This
949 * also means that once it's taken off a list, the timer must be
950 * destroyed as well.
951 */
952 static void processing_timeout(unsigned long ptr)
953 {
954 struct packet *pak = (struct packet *)ptr;
955 unsigned long flags;
956 write_lock_irqsave(&dataex.pak_lock, flags);
957 /*
958 * Check whether the packet is still on one
959 * of the lists before acting on it.
960 */
961 if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
962 pak == packet_find_packet(&dataex.current_pak, pak) ) {
963 list_del(&pak->next);
964 tpm_send_fail_message(pak, pak->req_tag);
965 packet_free(pak);
966 }
968 write_unlock_irqrestore(&dataex.pak_lock, flags);
969 }
973 static void tpm_tx_action(unsigned long unused);
974 static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);
976 #define MAX_PENDING_REQS TPMIF_TX_RING_SIZE
978 static struct list_head tpm_schedule_list;
979 static spinlock_t tpm_schedule_list_lock;
981 static inline void
982 maybe_schedule_tx_action(void)
983 {
984 smp_mb();
985 tasklet_schedule(&tpm_tx_tasklet);
986 }
988 static inline int
989 __on_tpm_schedule_list(tpmif_t * tpmif)
990 {
991 return tpmif->list.next != NULL;
992 }
994 static void
995 remove_from_tpm_schedule_list(tpmif_t * tpmif)
996 {
997 spin_lock_irq(&tpm_schedule_list_lock);
998 if (likely(__on_tpm_schedule_list(tpmif))) {
999 list_del(&tpmif->list);
1000 tpmif->list.next = NULL;
1001 tpmif_put(tpmif);
1002 }
1003 spin_unlock_irq(&tpm_schedule_list_lock);
1004 }
1006 static void
1007 add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
1008 {
1009 if (__on_tpm_schedule_list(tpmif))
1010 return;
1012 spin_lock_irq(&tpm_schedule_list_lock);
1013 if (!__on_tpm_schedule_list(tpmif) && tpmif->active) {
1014 list_add_tail(&tpmif->list, &tpm_schedule_list);
1015 tpmif_get(tpmif);
1016 }
1017 spin_unlock_irq(&tpm_schedule_list_lock);
1018 }
1020 void
1021 tpmif_schedule_work(tpmif_t * tpmif)
1022 {
1023 add_to_tpm_schedule_list_tail(tpmif);
1024 maybe_schedule_tx_action();
1025 }
1027 void
1028 tpmif_deschedule_work(tpmif_t * tpmif)
1029 {
1030 remove_from_tpm_schedule_list(tpmif);
1031 }
1034 static void
1035 tpm_tx_action(unsigned long unused)
1036 {
1037 struct list_head *ent;
1038 tpmif_t *tpmif;
1039 tpmif_tx_request_t *tx;
1041 DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);
1043 while (!list_empty(&tpm_schedule_list)) {
1044 /* Get a tpmif from the list with work to do. */
1045 ent = tpm_schedule_list.next;
1046 tpmif = list_entry(ent, tpmif_t, list);
1047 tpmif_get(tpmif);
1048 remove_from_tpm_schedule_list(tpmif);
1049 /*
1050 * Ensure that we see the request when we read from it.
1051 */
1052 mb();
1054 tx = &tpmif->tx->ring[0].req;
1056 /* pass it up */
1057 vtpm_receive(tpmif, tx->size);
1059 tpmif_put(tpmif);
1060 }
1061 }
1063 irqreturn_t
1064 tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
1065 {
1066 tpmif_t *tpmif = dev_id;
1067 add_to_tpm_schedule_list_tail(tpmif);
1068 maybe_schedule_tx_action();
1069 return IRQ_HANDLED;
1070 }
1072 static int __init
1073 tpmback_init(void)
1074 {
1075 int rc;
1077 if ((rc = misc_register(&ibmvtpms_miscdevice)) != 0) {
1078 printk(KERN_ALERT "Could not register misc device for TPM BE.\n");
1079 return rc;
1080 }
1082 INIT_LIST_HEAD(&dataex.pending_pak);
1083 INIT_LIST_HEAD(&dataex.current_pak);
1084 dataex.has_opener = 0;
1085 rwlock_init(&dataex.pak_lock);
1086 init_waitqueue_head(&dataex.wait_queue);
1088 spin_lock_init(&tpm_schedule_list_lock);
1089 INIT_LIST_HEAD(&tpm_schedule_list);
1091 tpmif_interface_init();
1092 tpmif_xenbus_init();
1094 printk(KERN_ALERT "Successfully initialized TPM backend driver.\n");
1096 return 0;
1097 }
1099 __initcall(tpmback_init);
1101 /*
1102 * Local variables:
1103 * c-file-style: "linux"
1104 * indent-tabs-mode: t
1105 * c-indent-level: 8
1106 * c-basic-offset: 8
1107 * tab-width: 8
1108 * End:
1109 */