ia64/xen-unstable

view linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c @ 9296:f85bb99187bf

Update interface documentation to include sched_op_new hypercall
and clean up the style a bit. Also clean up the sched_op_new
description in the sched.h public header.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Mar 15 19:19:22 2006 +0100 (2006-03-15)
parents 2401f414d1ad
children 6719dae17b6a
line source
1 /*
2 * Copyright (C) 2004 IBM Corporation
3 *
4 * Authors:
5 * Leendert van Doorn <leendert@watson.ibm.com>
6 * Dave Safford <safford@watson.ibm.com>
7 * Reiner Sailer <sailer@watson.ibm.com>
8 * Kylene Hall <kjhall@us.ibm.com>
9 * Stefan Berger <stefanb@us.ibm.com>
10 *
11 * Maintained by: <tpmdd_devel@lists.sourceforge.net>
12 *
13 * Device driver for TCG/TCPA TPM (trusted platform module) for XEN.
14 * Specifications at www.trustedcomputinggroup.org
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation, version 2 of the
19 * License.
20 *
21 */
23 #include <asm/uaccess.h>
24 #include <linux/list.h>
25 #include <xen/tpmfe.h>
26 #include <linux/device.h>
27 #include <linux/interrupt.h>
28 #include <linux/platform_device.h>
29 #include "tpm.h"
/* read status bits reported to the generic TPM layer via tpm_xen_status();
 * bitmask values matched against req_complete_mask/req_complete_val below */
enum {
	STATUS_BUSY = 0x01,
	STATUS_DATA_AVAIL = 0x02,
	STATUS_READY = 0x04
};
38 #define MIN(x,y) ((x) < (y)) ? (x) : (y)
/*
 * One TPM command in flight, plus its response buffer once the
 * back-end has delivered data via tpm_recv().
 */
struct transmission {
	struct list_head next;		/* link in dataex.queued_requests */
	unsigned char *request;		/* kmalloc'd copy of the command bytes */
	unsigned int request_len;	/* number of valid bytes in request */
	unsigned char *rcv_buffer;	/* kmalloc'd response data */
	unsigned int buffersize;	/* number of valid bytes in rcv_buffer */
	unsigned int flags;		/* TRANSMISSION_FLAG_* */
};
/* transmission.flags bits */
enum {
	TRANSMISSION_FLAG_WAS_QUEUED = 0x1	/* request was queued, not sent directly */
};
/*
 * Exchange state between the generic TPM driver entry points
 * (tpm_xen_send/recv/cancel/status) and the XEN shared-memory
 * front-end callbacks (tpm_recv/tpm_fe_status).
 */
struct data_exchange {
	struct transmission *current_request;	/* request handed to the front-end */
	spinlock_t req_list_lock;		/* protects the request side */
	wait_queue_head_t req_wait_queue;

	struct list_head queued_requests;	/* requests held while disconnected */

	struct transmission *current_response;	/* response posted by tpm_recv() */
	spinlock_t resp_list_lock;		/* protects the response side */
	wait_queue_head_t resp_wait_queue;	/* processes waiting for responses */

	struct transmission *req_cancelled;	/* set if a cancellation was encountered */

	unsigned int fe_status;			/* last TPMFE_STATUS_* flags seen */
	unsigned int flags;			/* DATAEX_FLAG_* */
};
/* data_exchange.flags bits */
enum {
	DATAEX_FLAG_QUEUED_ONLY = 0x1	/* last command only queued; fake its response */
};
/* Singleton exchange state shared by all callbacks above and below. */
static struct data_exchange dataex;

/* jiffies timestamp of the most recent front-end disconnect. */
static unsigned long disconnect_time;

/* Front-end device descriptor; its callbacks are filled in below. */
static struct tpmfe_device tpmfe;
80 /* local function prototypes */
81 static void __exit cleanup_xen(void);
84 /* =============================================================
85 * Some utility functions
86 * =============================================================
87 */
88 static inline struct transmission *
89 transmission_alloc(void)
90 {
91 return kzalloc(sizeof(struct transmission), GFP_KERNEL);
92 }
94 static inline unsigned char *
95 transmission_set_buffer(struct transmission *t,
96 unsigned char *buffer, unsigned int len)
97 {
98 kfree(t->request);
99 t->request = kmalloc(len, GFP_KERNEL);
100 if (t->request) {
101 memcpy(t->request,
102 buffer,
103 len);
104 t->request_len = len;
105 }
106 return t->request;
107 }
109 static inline void
110 transmission_free(struct transmission *t)
111 {
112 kfree(t->request);
113 kfree(t->rcv_buffer);
114 kfree(t);
115 }
117 /* =============================================================
118 * Interface with the TPM shared memory driver for XEN
119 * =============================================================
120 */
/*
 * Receive callback invoked by the XEN shared-memory front-end when
 * response data arrives from the back-end.
 *
 * @buffer: the response bytes, @count: their number, @ptr: the opaque
 * pointer previously handed to tpm_fe_send() (our transmission).
 *
 * Returns the number of bytes consumed, 0 for a cancelled request,
 * or -ENOMEM on allocation failure.
 */
static int tpm_recv(const u8 *buffer, size_t count, const void *ptr)
{
	int ret_size = 0;
	struct transmission *t;

	/*
	 * The list with requests must contain one request
	 * only and the element there must be the one that
	 * was passed to me from the front-end.
	 */
	if (dataex.current_request != ptr) {
		printk("WARNING: The request pointer is different than the "
		       "pointer the shared memory driver returned to me. "
		       "%p != %p\n",
		       dataex.current_request, ptr);
	}

	/*
	 * If the request has been cancelled, just quit here
	 */
	if (dataex.req_cancelled == (struct transmission *)ptr) {
		if (dataex.current_request == dataex.req_cancelled) {
			dataex.current_request = NULL;
		}
		transmission_free(dataex.req_cancelled);
		dataex.req_cancelled = NULL;
		return 0;
	}

	/* The request this response answers is finished — drop it. */
	if (NULL != (t = dataex.current_request)) {
		transmission_free(t);
		dataex.current_request = NULL;
	}

	t = transmission_alloc();
	if (t) {
		unsigned long flags;
		t->rcv_buffer = kmalloc(count, GFP_KERNEL);
		if (! t->rcv_buffer) {
			transmission_free(t);
			return -ENOMEM;
		}
		t->buffersize = count;
		memcpy(t->rcv_buffer, buffer, count);
		ret_size = count;

		/* Publish the response and wake any waiter in tpm_xen_recv(). */
		spin_lock_irqsave(&dataex.resp_list_lock ,flags);
		dataex.current_response = t;
		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
		wake_up_interruptible(&dataex.resp_wait_queue);
	}
	return ret_size;
}
176 static void tpm_fe_status(unsigned int flags)
177 {
178 dataex.fe_status = flags;
179 if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
180 disconnect_time = jiffies;
181 }
182 }
184 /* =============================================================
185 * Interface with the generic TPM driver
186 * =============================================================
187 */
/*
 * Receive entry point for the generic TPM driver: copy the pending
 * response — or a fake all-zero one for queued-only commands — into
 * @buf (at most @count bytes).
 *
 * Returns the number of bytes placed in @buf; 0 if no response
 * arrived before the wait timed out.
 */
static int tpm_xen_recv(struct tpm_chip *chip, u8 * buf, size_t count)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&dataex.resp_list_lock, flags);
	/*
	 * Check if the previous operation only queued the command
	 * In this case there won't be a response, so I just
	 * return from here and reset that flag. In any other
	 * case I should receive a response from the back-end.
	 */
	if ((dataex.flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
		dataex.flags &= ~DATAEX_FLAG_QUEUED_ONLY;
		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
		/*
		 * a little hack here. The first few measurements
		 * are queued since there's no way to talk to the
		 * TPM yet (due to slowness of the control channel)
		 * So we just make IMA happy by giving it 30 NULL
		 * bytes back where the most important part is
		 * that the result code is '0'.
		 */

		count = MIN(count, 30);
		memset(buf, 0x0, count);
		return count;
	}
	/*
	 * Check whether something is in the responselist and if
	 * there's nothing in the list wait for something to appear.
	 */

	if (NULL == dataex.current_response) {
		/* Drop the lock while sleeping; tpm_recv() posts the
		 * response and wakes us (timeout: 1000 jiffies). */
		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
		interruptible_sleep_on_timeout(&dataex.resp_wait_queue,
		                               1000);
		spin_lock_irqsave(&dataex.resp_list_lock ,flags);
	}

	if (NULL != dataex.current_response) {
		struct transmission *t = dataex.current_response;
		dataex.current_response = NULL;
		rc = MIN(count, t->buffersize);
		memcpy(buf, t->rcv_buffer, rc);
		transmission_free(t);
	}

	spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
	return rc;
}
/*
 * Send entry point for the generic TPM driver.  Hands the @count-byte
 * packet in @buf to the XEN shared-memory front-end, or queues it
 * while the back-end connection is not (yet) available.
 *
 * Returns 0 on success or when queued, -ENOMEM on allocation failure,
 * -ENOENT when disconnected for more than 10 seconds, or a negative
 * error from the front-end.
 */
static int tpm_xen_send(struct tpm_chip *chip, u8 * buf, size_t count)
{
	/*
	 * We simply pass the packet onto the XEN shared
	 * memory driver.
	 */
	unsigned long flags;
	int rc;
	struct transmission *t = transmission_alloc();

	spin_lock_irqsave(&dataex.req_list_lock, flags);
	/*
	 * If there's a current request, it must be the
	 * previous request that has timed out.
	 */
	if (dataex.current_request != NULL) {
		printk("WARNING: Sending although there is a request outstanding.\n"
		       " Previous request must have timed out.\n");
		transmission_free(dataex.current_request);
		dataex.current_request = NULL;
	}

	if (t != NULL) {
		unsigned int error = 0;
		/*
		 * Queue the packet if the driver below is not
		 * ready, yet, or there is any packet already
		 * in the queue.
		 * If the driver below is ready, unqueue all
		 * packets first before sending our current
		 * packet.
		 * For each unqueued packet, except for the
		 * last (=current) packet, call the function
		 * tpm_xen_recv to wait for the response to come
		 * back.
		 */
		if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
			/* Disconnected: give up after 10 seconds, else queue. */
			if (time_after(jiffies, disconnect_time + HZ * 10)) {
				rc = -ENOENT;
			} else {
				/*
				 * copy the request into the buffer
				 */
				if (transmission_set_buffer(t, buf, count)
				    == NULL) {
					transmission_free(t);
					rc = -ENOMEM;
					goto exit;
				}
				dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
				list_add_tail(&t->next, &dataex.queued_requests);
				rc = 0;
			}
		} else {
			/*
			 * Check whether there are any packets in the queue
			 */
			while (!list_empty(&dataex.queued_requests)) {
				/*
				 * Need to dequeue them.
				 * Read the result into a dummy buffer.
				 */
				unsigned char buffer[1];
				struct transmission *qt = (struct transmission *) dataex.queued_requests.next;
				list_del(&qt->next);
				dataex.current_request = qt;
				/* Lock dropped across the front-end call and the
				 * receive below; reacquired at loop bottom. */
				spin_unlock_irqrestore(&dataex.req_list_lock,
				                       flags);

				rc = tpm_fe_send(tpmfe.tpm_private,
				                 qt->request,
				                 qt->request_len,
				                 qt);

				if (rc < 0) {
					spin_lock_irqsave(&dataex.req_list_lock, flags);
					if ((qt = dataex.current_request) != NULL) {
						/*
						 * requeue it at the beginning
						 * of the list
						 */
						list_add(&qt->next,
						         &dataex.queued_requests);
					}
					dataex.current_request = NULL;
					error = 1;
					break;
				}
				/*
				 * After this point qt is not valid anymore!
				 * It is freed when the front-end is delivering the data
				 * by calling tpm_recv
				 */

				/*
				 * Try to receive the response now into the provided dummy
				 * buffer (I don't really care about this response since
				 * there is no receiver anymore for this response)
				 */
				rc = tpm_xen_recv(chip, buffer, sizeof(buffer));

				spin_lock_irqsave(&dataex.req_list_lock, flags);
			}

			if (error == 0) {
				/*
				 * Finally, send the current request.
				 */
				dataex.current_request = t;
				/*
				 * Call the shared memory driver
				 * Pass to it the buffer with the request, the
				 * amount of bytes in the request and
				 * a void * pointer (here: transmission structure)
				 */
				rc = tpm_fe_send(tpmfe.tpm_private,
				                 buf, count, t);
				/*
				 * The generic TPM driver will call
				 * the function to receive the response.
				 */
				if (rc < 0) {
					dataex.current_request = NULL;
					goto queue_it;
				}
			} else {
queue_it:
				if (transmission_set_buffer(t, buf, count) == NULL) {
					transmission_free(t);
					rc = -ENOMEM;
					goto exit;
				}
				/*
				 * An error occurred. Don't even try
				 * to send the current request. Just
				 * queue it.
				 */
				dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
				list_add_tail(&t->next,
				              &dataex.queued_requests);
				rc = 0;
			}
		}
	} else {
		rc = -ENOMEM;
	}

exit:
	spin_unlock_irqrestore(&dataex.req_list_lock, flags);
	return rc;
}
/*
 * Cancel entry point for the generic TPM driver: mark the currently
 * outstanding request as cancelled so tpm_recv() discards its
 * response instead of publishing it.
 *
 * NOTE(review): this takes resp_list_lock, while current_request is
 * written under req_list_lock elsewhere — confirm which lock is meant
 * to protect the request/cancel handshake.
 */
static void tpm_xen_cancel(struct tpm_chip *chip)
{
	unsigned long flags;
	spin_lock_irqsave(&dataex.resp_list_lock,flags);

	dataex.req_cancelled = dataex.current_request;

	spin_unlock_irqrestore(&dataex.resp_list_lock,flags);
}
402 static u8 tpm_xen_status(struct tpm_chip *chip)
403 {
404 unsigned long flags;
405 u8 rc = 0;
406 spin_lock_irqsave(&dataex.resp_list_lock, flags);
407 /*
408 * Data are available if:
409 * - there's a current response
410 * - the last packet was queued only (this is fake, but necessary to
411 * get the generic TPM layer to call the receive function.)
412 */
413 if (NULL != dataex.current_response ||
414 0 != (dataex.flags & DATAEX_FLAG_QUEUED_ONLY)) {
415 rc = STATUS_DATA_AVAIL;
416 }
417 spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
418 return rc;
419 }
/* Character-device operations; all handlers come from the generic
 * TPM driver ("tpm.h"). */
static struct file_operations tpm_xen_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};
/* sysfs attributes, backed by the generic TPM show/store helpers. */
static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);

static struct attribute* xen_attrs[] = {
	&dev_attr_pubek.attr,
	&dev_attr_pcrs.attr,
	&dev_attr_caps.attr,
	&dev_attr_cancel.attr,
	NULL,
};

static struct attribute_group xen_attr_grp = { .attrs = xen_attrs };
/*
 * Vendor-specific glue handed to tpm_register_hardware(): wires the
 * generic TPM driver to the XEN transport functions above.
 * buffersize is overwritten in init_xen() with the front-end's
 * negotiated max_tx_size.
 */
static struct tpm_vendor_specific tpm_xen = {
	.recv = tpm_xen_recv,
	.send = tpm_xen_send,
	.cancel = tpm_xen_cancel,
	.status = tpm_xen_status,
	.req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
	.req_complete_val = STATUS_DATA_AVAIL,
	.req_canceled = STATUS_READY,
	.base = 0,
	.attr_group = &xen_attr_grp,
	.miscdev.fops = &tpm_xen_ops,
	.buffersize = 64 * 1024,
};
/* Platform device representing the virtual TPM. */
static struct platform_device *pdev;

/* Callbacks registered with the shared-memory front-end driver. */
static struct tpmfe_device tpmfe = {
	.receive = tpm_recv,
	.status = tpm_fe_status,
};
467 static int __init init_xen(void)
468 {
469 int rc;
471 if ((xen_start_info->flags & SIF_INITDOMAIN)) {
472 return -EPERM;
473 }
474 /*
475 * Register device with the low lever front-end
476 * driver
477 */
478 if ((rc = tpm_fe_register_receiver(&tpmfe)) < 0) {
479 goto err_exit;
480 }
482 /*
483 * Register our device with the system.
484 */
485 pdev = platform_device_register_simple("tpm_vtpm", -1, NULL, 0);
486 if (IS_ERR(pdev)) {
487 rc = PTR_ERR(pdev);
488 goto err_unreg_fe;
489 }
491 tpm_xen.buffersize = tpmfe.max_tx_size;
493 if ((rc = tpm_register_hardware(&pdev->dev, &tpm_xen)) < 0) {
494 goto err_unreg_pdev;
495 }
497 dataex.current_request = NULL;
498 spin_lock_init(&dataex.req_list_lock);
499 init_waitqueue_head(&dataex.req_wait_queue);
500 INIT_LIST_HEAD(&dataex.queued_requests);
502 dataex.current_response = NULL;
503 spin_lock_init(&dataex.resp_list_lock);
504 init_waitqueue_head(&dataex.resp_wait_queue);
506 disconnect_time = jiffies;
508 return 0;
511 err_unreg_pdev:
512 platform_device_unregister(pdev);
513 err_unreg_fe:
514 tpm_fe_unregister_receiver();
516 err_exit:
517 return rc;
518 }
/*
 * Module exit: tear down in reverse order of init_xen().
 *
 * NOTE(review): if dev_get_drvdata() returns NULL, the platform
 * device and front-end receiver are left registered — confirm that
 * cannot happen once init_xen() has succeeded.
 */
static void __exit cleanup_xen(void)
{
	struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
	if (chip) {
		tpm_remove_hardware(chip->dev);
		platform_device_unregister(pdev);
		tpm_fe_unregister_receiver();
	}
}
530 module_init(init_xen);
531 module_exit(cleanup_xen);
533 MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
534 MODULE_DESCRIPTION("TPM Driver for XEN (shared memory)");
535 MODULE_VERSION("1.0");
536 MODULE_LICENSE("GPL");