
linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c @ 8745:514f2585d102

Converge the TPM drivers in the Xen repository
with those coming from the 2.6.15 kernel. Some files can now be
taken from 2.6.15 directly and can therefore be removed.

Signed-off-by: Stefan Berger <stefanb@us.ibm.com>
author cl349@firebug.cl.cam.ac.uk
date Fri Feb 03 09:31:28 2006 +0000 (2006-02-03)
parents fd9b2c1bb577
children e9d78d8bd568
/*
 * Copyright (C) 2004 IBM Corporation
 *
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Dave Safford <safford@watson.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 * Stefan Berger <stefanb@us.ibm.com>
 *
 * Maintained by: <tpmdd_devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module) for XEN.
 * Specifications at www.trustedcomputinggroup.org
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 */
#include <asm/uaccess.h>
#include <linux/list.h>
#include <xen/tpmfe.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include "tpm.h"
/* read status bits */
enum {
	STATUS_BUSY = 0x01,
	STATUS_DATA_AVAIL = 0x02,
	STATUS_READY = 0x04
};
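/*
 * These mimic hardware status bits: tpm_xen_status() reports
 * STATUS_DATA_AVAIL once a response (real or faked) is pending, and the
 * req_complete_mask/req_complete_val pair in tpm_xen below tells the
 * generic TPM layer to treat that as "request complete".
 */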
/* fully parenthesized so the macro expands safely inside larger expressions */
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
struct transmission {
	struct list_head next;
	unsigned char *request;
	unsigned int request_len;
	unsigned char *rcv_buffer;
	unsigned int buffersize;
	struct tpm_chip *chip;
	unsigned int flags;
};
enum {
	TRANSMISSION_FLAG_WAS_QUEUED = 0x1
};
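/*
 * State shared between the generic TPM driver (tpm_xen_send/tpm_xen_recv)
 * and the callbacks invoked by the Xen front-end (tpm_recv, tpm_fe_status).
 * The send path manipulates current_request and queued_requests under
 * req_list_lock; current_response is handed over under resp_list_lock.
 */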
struct data_exchange {
	struct transmission *current_request;
	spinlock_t req_list_lock;
	wait_queue_head_t req_wait_queue;

	struct list_head queued_requests;

	struct transmission *current_response;
	spinlock_t resp_list_lock;
	wait_queue_head_t resp_wait_queue;	/* processes waiting for responses */

	struct transmission *req_cancelled;	/* set if a cancellation was encountered */

	unsigned int fe_status;
	unsigned int flags;
};
enum {
	DATAEX_FLAG_QUEUED_ONLY = 0x1
};
static struct data_exchange dataex;

/* time (in jiffies) of the last disconnect from the back-end */
static unsigned long disconnect_time;

/* local function prototypes */
static void __exit cleanup_xen(void);
/* =============================================================
 * Some utility functions
 * =============================================================
 */
static inline struct transmission *
transmission_alloc(void)
{
	return kzalloc(sizeof(struct transmission), GFP_KERNEL);
}
static inline unsigned char *
transmission_set_buffer(struct transmission *t,
                        unsigned char *buffer, unsigned int len)
{
	kfree(t->request);
	t->request = kmalloc(len, GFP_KERNEL);
	if (t->request) {
		memcpy(t->request, buffer, len);
		t->request_len = len;
	}
	return t->request;
}
static inline void
transmission_free(struct transmission *t)
{
	kfree(t->request);
	kfree(t->rcv_buffer);
	kfree(t);
}
/* =============================================================
 * Interface with the TPM shared memory driver for XEN
 * =============================================================
 */
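/*
 * Called by the front-end when the back-end delivers a response: the data
 * is copied into a fresh transmission, published as current_response, and
 * any reader sleeping in tpm_xen_recv() is woken up. 'ptr' identifies the
 * request this response belongs to.
 */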
static int tpm_recv(const u8 *buffer, size_t count, const void *ptr)
{
	int ret_size = 0;
	struct transmission *t, *temp;

	/*
	 * The request list must contain exactly one request,
	 * and that element must be the one the front-end
	 * passed back to us.
	 */
	if (dataex.current_request != ptr) {
		printk(KERN_WARNING "The request pointer is different from the "
		       "pointer the shared memory driver returned. %p != %p\n",
		       dataex.current_request, ptr);
	}

	/*
	 * If the request has been cancelled, just quit here
	 */
	if (dataex.req_cancelled == (struct transmission *)ptr) {
		if (dataex.current_request == dataex.req_cancelled) {
			dataex.current_request = NULL;
		}
		transmission_free(dataex.req_cancelled);
		dataex.req_cancelled = NULL;
		return 0;
	}

	if (NULL != (temp = dataex.current_request)) {
		transmission_free(temp);
		dataex.current_request = NULL;
	}

	t = transmission_alloc();
	if (NULL != t) {
		unsigned long flags;
		t->rcv_buffer = kmalloc(count, GFP_KERNEL);
		if (NULL == t->rcv_buffer) {
			transmission_free(t);
			return -ENOMEM;
		}
		t->buffersize = count;
		memcpy(t->rcv_buffer, buffer, count);
		ret_size = count;

		spin_lock_irqsave(&dataex.resp_list_lock, flags);
		dataex.current_response = t;
		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
		wake_up_interruptible(&dataex.resp_wait_queue);
	}
	return ret_size;
}
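/*
 * Called by the front-end whenever the connection status changes; on a
 * disconnect, remember the time so that tpm_xen_send() can stop queueing
 * requests after a grace period.
 */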
static void tpm_fe_status(unsigned int flags)
{
	dataex.fe_status = flags;
	if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
		disconnect_time = jiffies;
	}
}
/* =============================================================
 * Interface with the generic TPM driver
 * =============================================================
 */
static int tpm_xen_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&dataex.resp_list_lock, flags);
	/*
	 * Check whether the previous operation only queued the command.
	 * In that case there won't be a response, so reset the flag and
	 * return immediately. In any other case a response is expected
	 * from the back-end.
	 */
	if ((dataex.flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
		dataex.flags &= ~DATAEX_FLAG_QUEUED_ONLY;
		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
		/*
		 * A little hack here: the first few measurements are only
		 * queued since there is no way to talk to the TPM yet
		 * (due to slowness of the control channel). So keep IMA
		 * happy by returning 30 NULL bytes; what matters is that
		 * the result code is '0'.
		 */
		count = MIN(count, 30);
		memset(buf, 0x0, count);
		return count;
	}
	/*
	 * Check whether something is in the response list; if the
	 * list is empty, wait for something to appear.
	 */
	if (NULL == dataex.current_response) {
		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
		interruptible_sleep_on_timeout(&dataex.resp_wait_queue,
		                               1000);
		spin_lock_irqsave(&dataex.resp_list_lock, flags);
	}

	if (NULL != dataex.current_response) {
		struct transmission *t = dataex.current_response;
		dataex.current_response = NULL;
		rc = MIN(count, t->buffersize);
		memcpy(buf, t->rcv_buffer, rc);
		transmission_free(t);
	}

	spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
	return rc;
}
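/*
 * Hand a TPM request to the shared memory driver. Three cases:
 * - the back-end is not connected: the request is only queued;
 * - the back-end is connected: queued requests are drained first (their
 *   responses are discarded), then the current request is sent;
 * - the send fails: the request is queued for a later retry.
 */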
static int tpm_xen_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	/*
	 * We simply pass the packet on to the XEN shared
	 * memory driver.
	 */
	unsigned long flags;
	int rc;
	struct transmission *t = transmission_alloc();

	spin_lock_irqsave(&dataex.req_list_lock, flags);
	/*
	 * If there is a current request, it must be the
	 * previous request, which has timed out.
	 */
	if (dataex.current_request != NULL) {
		printk(KERN_WARNING "Sending although there is a request outstanding.\n"
		       "         Previous request must have timed out.\n");
		transmission_free(dataex.current_request);
		dataex.current_request = NULL;
	}

	if (t != NULL) {
		unsigned int error = 0;
		t->rcv_buffer = NULL;
		t->buffersize = 0;
		t->chip = chip;

		/*
		 * Queue the packet if the driver below is not yet ready
		 * or if there is already a packet in the queue.
		 * If the driver below is ready, dequeue all pending
		 * packets first, before sending the current packet.
		 * For each dequeued packet except the last (= current)
		 * one, call tpm_xen_recv to wait for the response.
		 */
		if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
			if (time_after(jiffies, disconnect_time + HZ * 10)) {
				rc = -ENOENT;
			} else {
				/*
				 * copy the request into the buffer
				 */
				if (transmission_set_buffer(t, buf, count)
				    == NULL) {
					transmission_free(t);
					rc = -ENOMEM;
					goto exit;
				}
				dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
				list_add_tail(&t->next, &dataex.queued_requests);
				rc = 0;
			}
		} else {
			/*
			 * Check whether there are any packets in the queue
			 */
			while (!list_empty(&dataex.queued_requests)) {
				/*
				 * Need to dequeue them.
				 * Read the result into a dummy buffer.
				 * ('next' is the first member of struct
				 * transmission, so the list_head pointer
				 * doubles as the transmission pointer.)
				 */
				unsigned char buffer[1];
				struct transmission *qt = (struct transmission *)
				                          dataex.queued_requests.next;
				list_del(&qt->next);
				dataex.current_request = qt;
				spin_unlock_irqrestore(&dataex.req_list_lock, flags);

				rc = tpm_fe_send(qt->request,
				                 qt->request_len,
				                 qt);
				if (rc < 0) {
					spin_lock_irqsave(&dataex.req_list_lock, flags);
					if ((qt = dataex.current_request) != NULL) {
						/*
						 * requeue it at the beginning
						 * of the list
						 */
						list_add(&qt->next,
						         &dataex.queued_requests);
					}
					dataex.current_request = NULL;
					error = 1;
					break;
				}
				/*
				 * After this point qt is no longer valid!
				 * It is freed when the front-end delivers
				 * the data by calling tpm_recv.
				 */

				/*
				 * Try to receive the response now into the
				 * provided dummy buffer (the response itself
				 * is discarded since there is no receiver
				 * for it anymore).
				 */
				rc = tpm_xen_recv(chip, buffer, sizeof(buffer));

				spin_lock_irqsave(&dataex.req_list_lock, flags);
			}

			if (error == 0) {
				/*
				 * Finally, send the current request.
				 */
				dataex.current_request = t;
				/*
				 * Call the shared memory driver.
				 * Pass it the buffer with the request, the
				 * number of bytes in the request and
				 * a void * pointer (here: the transmission
				 * structure).
				 */
				rc = tpm_fe_send(buf, count, t);
				/*
				 * The generic TPM driver will call
				 * the function to receive the response.
				 */
				if (rc < 0) {
					dataex.current_request = NULL;
					goto queue_it;
				}
			} else {
queue_it:
				/*
				 * An error occurred. Don't even try
				 * to send the current request. Just
				 * queue it.
				 */
				if (transmission_set_buffer(t, buf, count) == NULL) {
					transmission_free(t);
					rc = -ENOMEM;
					goto exit;
				}
				dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
				list_add_tail(&t->next, &dataex.queued_requests);
				rc = 0;
			}
		}
	} else {
		rc = -ENOMEM;
	}

exit:
	spin_unlock_irqrestore(&dataex.req_list_lock, flags);
	return rc;
}
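/*
 * Cancellation only records the outstanding request; the transmission
 * itself is freed once the front-end delivers its (now ignored) response
 * via tpm_recv().
 */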
static void tpm_xen_cancel(struct tpm_chip *chip)
{
	unsigned long flags;
	spin_lock_irqsave(&dataex.resp_list_lock, flags);

	dataex.req_cancelled = dataex.current_request;

	spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
}
static u8 tpm_xen_status(struct tpm_chip *chip)
{
	unsigned long flags;
	u8 rc = 0;
	spin_lock_irqsave(&dataex.resp_list_lock, flags);
	/*
	 * Data are available if:
	 *  - there is a current response
	 *  - the last packet was queued only (this is fake, but necessary
	 *    to get the generic TPM layer to call the receive function)
	 */
	if (NULL != dataex.current_response ||
	    0 != (dataex.flags & DATAEX_FLAG_QUEUED_ONLY)) {
		rc = STATUS_DATA_AVAIL;
	}
	spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
	return rc;
}
static struct file_operations tpm_xen_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};
static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static struct attribute *xen_attrs[] = {
	&dev_attr_pubek.attr,
	&dev_attr_pcrs.attr,
	&dev_attr_caps.attr,
	&dev_attr_cancel.attr,
	NULL,
};
static struct attribute_group xen_attr_grp = { .attrs = xen_attrs };
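/*
 * Glue for the generic TPM driver. Note that .buffersize is only a
 * default; init_xen() overwrites it with the front-end's max_tx_size.
 */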
static struct tpm_vendor_specific tpm_xen = {
	.recv = tpm_xen_recv,
	.send = tpm_xen_send,
	.cancel = tpm_xen_cancel,
	.status = tpm_xen_status,
	.req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
	.req_complete_val = STATUS_DATA_AVAIL,
	.req_canceled = STATUS_READY,
	.base = 0,
	.attr_group = &xen_attr_grp,
	.miscdev.fops = &tpm_xen_ops,
	.buffersize = 64 * 1024,
};
static struct device tpm_device = {
	.bus_id = "vtpm",
};

static struct tpmfe_device tpmfe = {
	.receive = tpm_recv,
	.status = tpm_fe_status,
};
static int __init init_xen(void)
{
	int rc;

	/*
	 * Register device with the low-level front-end
	 * driver
	 */
	if ((rc = tpm_fe_register_receiver(&tpmfe)) < 0) {
		return rc;
	}

	/*
	 * Register our device with the system.
	 */
	if ((rc = device_register(&tpm_device)) < 0) {
		tpm_fe_unregister_receiver();
		return rc;
	}

	tpm_xen.buffersize = tpmfe.max_tx_size;

	if ((rc = tpm_register_hardware(&tpm_device, &tpm_xen)) < 0) {
		device_unregister(&tpm_device);
		tpm_fe_unregister_receiver();
		return rc;
	}

	dataex.current_request = NULL;
	spin_lock_init(&dataex.req_list_lock);
	init_waitqueue_head(&dataex.req_wait_queue);
	INIT_LIST_HEAD(&dataex.queued_requests);

	dataex.current_response = NULL;
	spin_lock_init(&dataex.resp_list_lock);
	init_waitqueue_head(&dataex.resp_wait_queue);

	disconnect_time = jiffies;

	return 0;
}
static void __exit cleanup_xen(void)
{
	tpm_remove_hardware(&tpm_device);
	device_unregister(&tpm_device);
	tpm_fe_unregister_receiver();
}
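/*
 * fs_initcall() rather than module_init(), apparently so the driver is
 * ready early in boot: IMA starts measuring before ordinary module
 * initcalls would run (hence the queued-only hack in tpm_xen_recv()).
 */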
fs_initcall(init_xen);
module_exit(cleanup_xen);

MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
MODULE_DESCRIPTION("TPM Driver for XEN (shared memory)");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");