ia64/xen-unstable

view linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c @ 8786:e9d78d8bd568

Clean up the TPM stack a bit.

Signed-off-by: Stefan Berger <stefanb@us.ibm.com>
author cl349@firebug.cl.cam.ac.uk
date Tue Feb 07 20:38:44 2006 +0000 (2006-02-07)
parents 514f2585d102
children 201d48272a57

/*
 * Copyright (C) 2004 IBM Corporation
 *
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Dave Safford <safford@watson.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 * Stefan Berger <stefanb@us.ibm.com>
 *
 * Maintained by: <tpmdd_devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module) for XEN.
 * Specifications at www.trustedcomputinggroup.org
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 */

#include <asm/uaccess.h>
#include <linux/list.h>
#include <xen/tpmfe.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include "tpm.h"

/* read status bits */
enum {
        STATUS_BUSY       = 0x01,
        STATUS_DATA_AVAIL = 0x02,
        STATUS_READY      = 0x04
};

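/* fully parenthesized so MIN() expands safely inside larger expressions */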
#define MIN(x,y)  (((x) < (y)) ? (x) : (y))

struct transmission {
        struct list_head next;
        unsigned char *request;
        unsigned int request_len;
        unsigned char *rcv_buffer;
        unsigned int buffersize;
        unsigned int flags;
};

enum {
        TRANSMISSION_FLAG_WAS_QUEUED = 0x1
};

struct data_exchange {
        struct transmission *current_request;
        spinlock_t req_list_lock;
        wait_queue_head_t req_wait_queue;

        struct list_head queued_requests;

        struct transmission *current_response;
        spinlock_t resp_list_lock;
        wait_queue_head_t resp_wait_queue;   /* processes waiting for responses */

        struct transmission *req_cancelled; /* if a cancellation was encountered */

        unsigned int fe_status;
        unsigned int flags;
};

enum {
        DATAEX_FLAG_QUEUED_ONLY = 0x1
};

static struct data_exchange dataex;

static unsigned long disconnect_time;

static struct tpmfe_device tpmfe;

/* local function prototypes */
static void __exit cleanup_xen(void);


/* =============================================================
 * Some utility functions
 * =============================================================
 */
static inline struct transmission *
transmission_alloc(void)
{
        return kzalloc(sizeof(struct transmission), GFP_KERNEL);
}

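/*
 * Replace the transmission's request buffer with a copy of 'buffer'.
 * Returns the new buffer, or NULL if the allocation failed.
 */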
static inline unsigned char *
transmission_set_buffer(struct transmission *t,
                        unsigned char *buffer, unsigned int len)
{
        kfree(t->request);
        t->request = kmalloc(len, GFP_KERNEL);
        if (t->request) {
                memcpy(t->request, buffer, len);
                t->request_len = len;
        }
        return t->request;
}

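/*
 * Free a transmission together with both of its buffers;
 * kfree() tolerates NULL, so partially set-up transmissions are fine.
 */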
static inline void
transmission_free(struct transmission *t)
{
        kfree(t->request);
        kfree(t->rcv_buffer);
        kfree(t);
}

/* =============================================================
 * Interface with the TPM shared memory driver for XEN
 * =============================================================
 */

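/*
 * Callback invoked by the front-end when the back-end delivers a
 * response. 'ptr' identifies the request transmission the response
 * belongs to; the response is stored and readers sleeping in
 * tpm_xen_recv() are woken up.
 */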
static int tpm_recv(const u8 *buffer, size_t count, const void *ptr)
{
        int ret_size = 0;
        struct transmission *t;

        /*
         * The list with requests must contain one request
         * only and the element there must be the one that
         * was passed to me from the front-end.
         */
        if (dataex.current_request != ptr) {
                printk("WARNING: The request pointer is different from the "
                       "pointer the shared memory driver returned to me. "
                       "%p != %p\n",
                       dataex.current_request, ptr);
        }

        /*
         * If the request has been cancelled, just quit here
         */
        if (dataex.req_cancelled == (struct transmission *)ptr) {
                if (dataex.current_request == dataex.req_cancelled) {
                        dataex.current_request = NULL;
                }
                transmission_free(dataex.req_cancelled);
                dataex.req_cancelled = NULL;
                return 0;
        }

        if (NULL != (t = dataex.current_request)) {
                transmission_free(t);
                dataex.current_request = NULL;
        }

        t = transmission_alloc();
        if (t) {
                unsigned long flags;
                t->rcv_buffer = kmalloc(count, GFP_KERNEL);
                if (!t->rcv_buffer) {
                        transmission_free(t);
                        return -ENOMEM;
                }
                t->buffersize = count;
                memcpy(t->rcv_buffer, buffer, count);
                ret_size = count;

                spin_lock_irqsave(&dataex.resp_list_lock, flags);
                dataex.current_response = t;
                spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
                wake_up_interruptible(&dataex.resp_wait_queue);
        }
        return ret_size;
}

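/*
 * Callback invoked by the front-end whenever the connection status
 * to the back-end changes; remember when the connection was lost so
 * that requests can be queued for a grace period.
 */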
static void tpm_fe_status(unsigned int flags)
{
        dataex.fe_status = flags;
        if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
                disconnect_time = jiffies;
        }
}

/* =============================================================
 * Interface with the generic TPM driver
 * =============================================================
 */

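/*
 * Hand a received response to the generic TPM driver. If the last
 * command could only be queued, a zeroed dummy response keeps the
 * caller (IMA) happy; otherwise wait briefly for the real response.
 */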
static int tpm_xen_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&dataex.resp_list_lock, flags);
        /*
         * Check if the previous operation only queued the command.
         * In this case there won't be a response, so I just
         * return from here and reset that flag. In any other
         * case I should receive a response from the back-end.
         */
        if ((dataex.flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
                dataex.flags &= ~DATAEX_FLAG_QUEUED_ONLY;
                spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
                /*
                 * A little hack here: the first few measurements
                 * are queued since there's no way to talk to the
                 * TPM yet (due to slowness of the control channel).
                 * So we just make IMA happy by giving it 30 NULL
                 * bytes back where the most important part is
                 * that the result code is '0'.
                 */
                count = MIN(count, 30);
                memset(buf, 0x0, count);
                return count;
        }
        /*
         * Check whether something is in the response list and if
         * there's nothing in the list wait for something to appear.
         */
        if (NULL == dataex.current_response) {
                spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
                interruptible_sleep_on_timeout(&dataex.resp_wait_queue,
                                               1000);
                spin_lock_irqsave(&dataex.resp_list_lock, flags);
        }

        if (NULL != dataex.current_response) {
                struct transmission *t = dataex.current_response;
                dataex.current_response = NULL;
                rc = MIN(count, t->buffersize);
                memcpy(buf, t->rcv_buffer, rc);
                transmission_free(t);
        }

        spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
        return rc;
}

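/*
 * Pass a command from the generic TPM driver to the back-end via the
 * shared memory front-end. While the back-end is unreachable, commands
 * are queued; once it is connected again, queued commands are flushed
 * before the current one is sent.
 */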
static int tpm_xen_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
        /*
         * We simply pass the packet onto the XEN shared
         * memory driver.
         */
        unsigned long flags;
        int rc;
        struct transmission *t = transmission_alloc();

        spin_lock_irqsave(&dataex.req_list_lock, flags);
        /*
         * If there's a current request, it must be the
         * previous request that has timed out.
         */
        if (dataex.current_request != NULL) {
                printk("WARNING: Sending although there is a request outstanding.\n"
                       "         Previous request must have timed out.\n");
                transmission_free(dataex.current_request);
                dataex.current_request = NULL;
        }

        if (t != NULL) {
                unsigned int error = 0;
                /*
                 * Queue the packet if the driver below is not
                 * ready yet, or if there is any packet already
                 * in the queue.
                 * If the driver below is ready, unqueue all
                 * packets first before sending our current
                 * packet.
                 * For each unqueued packet, except for the
                 * last (= current) packet, call the function
                 * tpm_xen_recv to wait for the response to come
                 * back.
                 */
                if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
                        if (time_after(jiffies, disconnect_time + HZ * 10)) {
                                rc = -ENOENT;
                        } else {
                                /*
                                 * copy the request into the buffer
                                 */
                                if (transmission_set_buffer(t, buf, count)
                                    == NULL) {
                                        transmission_free(t);
                                        rc = -ENOMEM;
                                        goto exit;
                                }
                                dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
                                list_add_tail(&t->next, &dataex.queued_requests);
                                rc = 0;
                        }
                } else {
                        /*
                         * Check whether there are any packets in the queue
                         */
                        while (!list_empty(&dataex.queued_requests)) {
                                /*
                                 * Need to dequeue them.
                                 * Read the result into a dummy buffer.
                                 */
                                unsigned char buffer[1];
                                struct transmission *qt = (struct transmission *)
                                        dataex.queued_requests.next;
                                list_del(&qt->next);
                                dataex.current_request = qt;
                                spin_unlock_irqrestore(&dataex.req_list_lock,
                                                       flags);

                                rc = tpm_fe_send(tpmfe.tpm_private,
                                                 qt->request,
                                                 qt->request_len,
                                                 qt);

                                if (rc < 0) {
                                        spin_lock_irqsave(&dataex.req_list_lock, flags);
                                        if ((qt = dataex.current_request) != NULL) {
                                                /*
                                                 * requeue it at the beginning
                                                 * of the list
                                                 */
                                                list_add(&qt->next,
                                                         &dataex.queued_requests);
                                        }
                                        dataex.current_request = NULL;
                                        error = 1;
                                        break;
                                }
                                /*
                                 * After this point qt is not valid anymore!
                                 * It is freed when the front-end is delivering
                                 * the data by calling tpm_recv.
                                 */

                                /*
                                 * Try to receive the response now into the
                                 * provided dummy buffer (I don't really care
                                 * about this response since there is no
                                 * receiver anymore for this response).
                                 */
                                rc = tpm_xen_recv(chip, buffer, sizeof(buffer));

                                spin_lock_irqsave(&dataex.req_list_lock, flags);
                        }

                        if (error == 0) {
                                /*
                                 * Finally, send the current request.
                                 */
                                dataex.current_request = t;
                                /*
                                 * Call the shared memory driver.
                                 * Pass to it the buffer with the request, the
                                 * number of bytes in the request and
                                 * a void * pointer (here: transmission structure).
                                 */
                                rc = tpm_fe_send(tpmfe.tpm_private,
                                                 buf, count, t);
                                /*
                                 * The generic TPM driver will call
                                 * the function to receive the response.
                                 */
                                if (rc < 0) {
                                        dataex.current_request = NULL;
                                        goto queue_it;
                                }
                        } else {
                        queue_it:
                                /*
                                 * An error occurred. Don't even try
                                 * to send the current request. Just
                                 * queue it.
                                 */
                                if (transmission_set_buffer(t, buf, count) == NULL) {
                                        transmission_free(t);
                                        rc = -ENOMEM;
                                        goto exit;
                                }
                                dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
                                list_add_tail(&t->next,
                                              &dataex.queued_requests);
                                rc = 0;
                        }
                }
        } else {
                rc = -ENOMEM;
        }

exit:
        spin_unlock_irqrestore(&dataex.req_list_lock, flags);
        return rc;
}

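/*
 * Mark the outstanding request as cancelled; it is cleaned up when
 * its (now unwanted) response arrives in tpm_recv().
 */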
static void tpm_xen_cancel(struct tpm_chip *chip)
{
        unsigned long flags;

        spin_lock_irqsave(&dataex.resp_list_lock, flags);

        dataex.req_cancelled = dataex.current_request;

        spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
}

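/*
 * Status check used by the generic TPM layer to decide when a
 * command has completed and a response can be read.
 */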
static u8 tpm_xen_status(struct tpm_chip *chip)
{
        unsigned long flags;
        u8 rc = 0;

        spin_lock_irqsave(&dataex.resp_list_lock, flags);
        /*
         * Data are available if:
         *  - there's a current response
         *  - the last packet was queued only (this is fake, but necessary to
         *    get the generic TPM layer to call the receive function.)
         */
        if (NULL != dataex.current_response ||
            0 != (dataex.flags & DATAEX_FLAG_QUEUED_ONLY)) {
                rc = STATUS_DATA_AVAIL;
        }
        spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
        return rc;
}

static struct file_operations tpm_xen_ops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .open = tpm_open,
        .read = tpm_read,
        .write = tpm_write,
        .release = tpm_release,
};

static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);

static struct attribute *xen_attrs[] = {
        &dev_attr_pubek.attr,
        &dev_attr_pcrs.attr,
        &dev_attr_caps.attr,
        &dev_attr_cancel.attr,
        NULL,
};

static struct attribute_group xen_attr_grp = { .attrs = xen_attrs };

static struct tpm_vendor_specific tpm_xen = {
        .recv = tpm_xen_recv,
        .send = tpm_xen_send,
        .cancel = tpm_xen_cancel,
        .status = tpm_xen_status,
        .req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
        .req_complete_val = STATUS_DATA_AVAIL,
        .req_canceled = STATUS_READY,
        .base = 0,
        .attr_group = &xen_attr_grp,
        .miscdev.fops = &tpm_xen_ops,
        .buffersize = 64 * 1024,
};

static struct device tpm_device = {
        .bus_id = "vtpm",
};

static struct tpmfe_device tpmfe = {
        .receive = tpm_recv,
        .status = tpm_fe_status,
};

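/*
 * Module initialization: register with the low-level front-end driver,
 * register the device with the system and with the generic TPM layer,
 * then initialize the shared request/response bookkeeping.
 */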
static int __init init_xen(void)
{
        int rc;

        /*
         * Register device with the low-level front-end
         * driver
         */
        if ((rc = tpm_fe_register_receiver(&tpmfe)) < 0) {
                return rc;
        }

        /*
         * Register our device with the system.
         */
        if ((rc = device_register(&tpm_device)) < 0) {
                tpm_fe_unregister_receiver();
                return rc;
        }

        tpm_xen.buffersize = tpmfe.max_tx_size;

        if ((rc = tpm_register_hardware(&tpm_device, &tpm_xen)) < 0) {
                device_unregister(&tpm_device);
                tpm_fe_unregister_receiver();
                return rc;
        }

        dataex.current_request = NULL;
        spin_lock_init(&dataex.req_list_lock);
        init_waitqueue_head(&dataex.req_wait_queue);
        INIT_LIST_HEAD(&dataex.queued_requests);

        dataex.current_response = NULL;
        spin_lock_init(&dataex.resp_list_lock);
        init_waitqueue_head(&dataex.resp_wait_queue);

        disconnect_time = jiffies;

        return 0;
}

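/*
 * Module teardown: unregister in the reverse order of init_xen().
 */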
static void __exit cleanup_xen(void)
{
        tpm_remove_hardware(&tpm_device);
        device_unregister(&tpm_device);
        tpm_fe_unregister_receiver();
}

module_init(init_xen);
module_exit(cleanup_xen);

MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
MODULE_DESCRIPTION("TPM Driver for XEN (shared memory)");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");