ia64/xen-unstable

view linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c @ 7189:b7d11c9b6be4

Merged.

Signed-off-by: Ewan Mellor <ewan@xensource.com>
author emellor@ewan
date Tue Oct 04 11:30:51 2005 +0100 (2005-10-04)
parents 06d84bf87159
children
line source
1 /*
2 * Copyright (C) 2004 IBM Corporation
3 *
4 * Authors:
5 * Leendert van Doorn <leendert@watson.ibm.com>
6 * Dave Safford <safford@watson.ibm.com>
7 * Reiner Sailer <sailer@watson.ibm.com>
8 * Kylene Hall <kjhall@us.ibm.com>
9 * Stefan Berger <stefanb@us.ibm.com>
10 *
11 * Maintained by: <tpmdd_devel@lists.sourceforge.net>
12 *
13 * Device driver for TCG/TCPA TPM (trusted platform module) for XEN.
14 * Specifications at www.trustedcomputinggroup.org
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation, version 2 of the
19 * License.
20 *
21 */
23 #include <asm/uaccess.h>
24 #include <linux/list.h>
25 #include <linux/tpmfe.h>
26 #include <linux/device.h>
27 #include <linux/interrupt.h>
28 #include "tpm_nopci.h"
/* read status bits reported to the generic TPM layer via tpm_xen_status()
 * and the req_complete_*/req_canceled fields of struct tpm_vendor_specific */
enum {
	STATUS_BUSY = 0x01,        /* used only in req_complete_mask below */
	STATUS_DATA_AVAIL = 0x02,  /* returned when a response is pending or
	                            * the last command was queued-only */
	STATUS_READY = 0x04        /* value matched by req_canceled */
};
/*
 * Return the smaller of x and y.
 * The whole expansion is parenthesized so the macro composes safely inside
 * larger expressions: the original lacked the outer parentheses, so e.g.
 * "a + MIN(b, c)" parsed as "(a + ((b) < (c))) ? (b) : (c)".
 * Note: each argument may be evaluated twice — do not pass expressions
 * with side effects.
 */
#define MIN(x,y) (((x) < (y)) ? (x) : (y))
/*
 * One request/response exchange with the Xen TPM back-end.
 * Allocated per command by transmission_alloc(); released together with
 * both data buffers by transmission_free().
 */
struct transmission {
	struct list_head next;      /* link in dataex.queued_requests */
	unsigned char *request;     /* kmalloc'd copy of the command bytes */
	unsigned int request_len;   /* number of valid bytes in 'request' */
	unsigned char *rcv_buffer;  /* kmalloc'd copy of the response bytes */
	unsigned int buffersize;    /* number of valid bytes in 'rcv_buffer' */
	struct tpm_chip *chip;      /* chip the request was issued for */
	unsigned int flags;         /* TRANSMISSION_FLAG_* bits */
};
/* transmission.flags bits.
 * NOTE(review): TRANSMISSION_FLAG_WAS_QUEUED is declared but never set or
 * tested anywhere in this file — confirm whether external code uses it. */
enum {
	TRANSMISSION_FLAG_WAS_QUEUED = 0x1
};
/*
 * Global state shared between the generic-TPM-layer callbacks
 * (tpm_xen_send/recv/cancel/status) and the front-end callbacks
 * (tpm_recv, tpm_fe_status).
 */
struct data_exchange {
	struct transmission *current_request;  /* request in flight, if any;
	                                        * written under req_list_lock */
	spinlock_t req_list_lock;              /* protects the request side */
	wait_queue_head_t req_wait_queue;      /* initialized but not waited on
	                                        * in this file */

	struct list_head queued_requests;      /* requests deferred while the
	                                        * back-end is unreachable */

	struct transmission *current_response; /* response awaiting pickup;
	                                        * protected by resp_list_lock */
	spinlock_t resp_list_lock;             /* protects the response side */
	wait_queue_head_t resp_wait_queue;     /* processes waiting for responses */

	struct transmission *req_cancelled;    /* set if a cancellation was
	                                        * encountered */

	unsigned int fe_status;                /* last TPMFE_STATUS_* bits seen */
	unsigned int flags;                    /* DATAEX_FLAG_* bits */
};
/* data_exchange.flags bits: set when the last command was only queued
 * (no real response will arrive); tpm_xen_recv() then fakes a reply. */
enum {
	DATAEX_FLAG_QUEUED_ONLY = 0x1
};
/* Single global exchange state — this driver handles one vTPM instance. */
static struct data_exchange dataex;

/* jiffies timestamp of the last front-end disconnect; tpm_xen_send() stops
 * queuing and returns -ENOENT 10 s after this (see tpm_fe_status). */
static unsigned long disconnect_time;

/* local function prototypes */
static void __exit cleanup_xen(void);
82 /* =============================================================
83 * Some utility functions
84 * =============================================================
85 */
86 static inline struct transmission *
87 transmission_alloc(void)
88 {
89 struct transmission *t = kmalloc(sizeof(*t), GFP_KERNEL);
90 if (t) {
91 memset(t, 0x0, sizeof(*t));
92 }
93 return t;
94 }
96 static inline unsigned char *
97 transmission_set_buffer(struct transmission *t,
98 unsigned char *buffer, unsigned int len)
99 {
100 if (NULL != t->request) {
101 kfree(t->request);
102 }
103 t->request = kmalloc(len, GFP_KERNEL);
104 if (t->request) {
105 memcpy(t->request,
106 buffer,
107 len);
108 t->request_len = len;
109 }
110 return t->request;
111 }
113 static inline void
114 transmission_free(struct transmission *t)
115 {
116 if (t->request) {
117 kfree(t->request);
118 }
119 if (t->rcv_buffer) {
120 kfree(t->rcv_buffer);
121 }
122 kfree(t);
123 }
/* =============================================================
 * Interface with the TPM shared memory driver for XEN
 * =============================================================
 */
/*
 * Delivery callback invoked by the shared-memory front-end when response
 * data arrives.  'ptr' is the cookie passed to tpm_fe_send(), i.e. the
 * transmission that carried the request.
 *
 * Behavior:
 *  - a cancelled request is freed and 0 is returned (response dropped);
 *  - otherwise the old current_request is freed, the response bytes are
 *    copied into a new transmission published as dataex.current_response,
 *    and any reader sleeping in tpm_xen_recv() is woken.
 * Returns the number of bytes consumed, 0 for a cancelled request, or
 * -ENOMEM if the response buffer could not be allocated.
 *
 * NOTE(review): dataex.current_request and dataex.req_cancelled are read
 * and written here without req_list_lock — confirm the front-end
 * serializes this callback against tpm_xen_send()/tpm_xen_cancel().
 */
static int tpm_recv(const u8 *buffer, size_t count, const void *ptr)
{
	int ret_size = 0;
	struct transmission *t, *temp;

	/*
	 * The list with requests must contain one request
	 * only and the element there must be the one that
	 * was passed to me from the front-end.
	 */
	if (dataex.current_request != ptr) {
		printk("WARNING: The request pointer is different than the pointer "
		       "the shared memory driver returned to me. %p != %p\n",
		       dataex.current_request, ptr);
	}

	/*
	 * If the request has been cancelled, just quit here
	 */
	if (dataex.req_cancelled == (struct transmission *)ptr) {
		if (dataex.current_request == dataex.req_cancelled) {
			dataex.current_request = NULL;
		}
		transmission_free(dataex.req_cancelled);
		dataex.req_cancelled = NULL;
		return 0;
	}

	/* The in-flight request is complete now; release it. */
	if (NULL != (temp = dataex.current_request)) {
		transmission_free(temp);
		dataex.current_request = NULL;
	}

	t = transmission_alloc();
	if (NULL != t) {
		unsigned long flags;
		t->rcv_buffer = kmalloc(count, GFP_KERNEL);
		if (NULL == t->rcv_buffer) {
			transmission_free(t);
			return -ENOMEM;
		}
		t->buffersize = count;
		memcpy(t->rcv_buffer, buffer, count);
		ret_size = count;

		/* Publish the response and wake the reader. */
		spin_lock_irqsave(&dataex.resp_list_lock ,flags);
		dataex.current_response = t;
		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
		wake_up_interruptible(&dataex.resp_wait_queue);
	}
	return ret_size;
}
183 static void tpm_fe_status(unsigned int flags)
184 {
185 dataex.fe_status = flags;
186 if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
187 disconnect_time = jiffies;
188 }
189 }
191 /* =============================================================
192 * Interface with the generic TPM driver
193 * =============================================================
194 */
195 static int tpm_xen_recv(struct tpm_chip *chip, u8 * buf, size_t count)
196 {
197 unsigned long flags;
198 int rc = 0;
200 spin_lock_irqsave(&dataex.resp_list_lock, flags);
201 /*
202 * Check if the previous operation only queued the command
203 * In this case there won't be a response, so I just
204 * return from here and reset that flag. In any other
205 * case I should receive a response from the back-end.
206 */
207 if ((dataex.flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
208 dataex.flags &= ~DATAEX_FLAG_QUEUED_ONLY;
209 spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
210 /*
211 * a little hack here. The first few measurements
212 * are queued since there's no way to talk to the
213 * TPM yet (due to slowness of the control channel)
214 * So we just make IMA happy by giving it 30 NULL
215 * bytes back where the most important part is
216 * that the result code is '0'.
217 */
219 count = MIN(count, 30);
220 memset(buf, 0x0, count);
221 return count;
222 }
223 /*
224 * Check whether something is in the responselist and if
225 * there's nothing in the list wait for something to appear.
226 */
228 if (NULL == dataex.current_response) {
229 spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
230 interruptible_sleep_on_timeout(&dataex.resp_wait_queue,
231 1000);
232 spin_lock_irqsave(&dataex.resp_list_lock ,flags);
233 }
235 if (NULL != dataex.current_response) {
236 struct transmission *t = dataex.current_response;
237 dataex.current_response = NULL;
238 rc = MIN(count, t->buffersize);
239 memcpy(buf, t->rcv_buffer, rc);
240 transmission_free(t);
241 }
243 spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
244 return rc;
245 }
247 static int tpm_xen_send(struct tpm_chip *chip, u8 * buf, size_t count)
248 {
249 /*
250 * We simply pass the packet onto the XEN shared
251 * memory driver.
252 */
253 unsigned long flags;
254 int rc;
255 struct transmission *t = transmission_alloc();
257 spin_lock_irqsave(&dataex.req_list_lock, flags);
258 /*
259 * If there's a current request, it must be the
260 * previous request that has timed out.
261 */
262 if (dataex.current_request != NULL) {
263 printk("WARNING: Sending although there is a request outstanding.\n"
264 " Previous request must have timed out.\n");
265 transmission_free(dataex.current_request);
266 dataex.current_request = NULL;
267 }
269 if (t != NULL) {
270 unsigned int error = 0;
271 t->rcv_buffer = NULL;
272 t->buffersize = 0;
273 t->chip = chip;
275 /*
276 * Queue the packet if the driver below is not
277 * ready, yet, or there is any packet already
278 * in the queue.
279 * If the driver below is ready, unqueue all
280 * packets first before sending our current
281 * packet.
282 * For each unqueued packet, except for the
283 * last (=current) packet, call the function
284 * tpm_xen_recv to wait for the response to come
285 * back.
286 */
287 if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
288 if (time_after(jiffies, disconnect_time + HZ * 10)) {
289 rc = -ENOENT;
290 } else {
291 /*
292 * copy the request into the buffer
293 */
294 if (transmission_set_buffer(t, buf, count)
295 == NULL) {
296 transmission_free(t);
297 rc = -ENOMEM;
298 goto exit;
299 }
300 dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
301 list_add_tail(&t->next, &dataex.queued_requests);
302 rc = 0;
303 }
304 } else {
305 /*
306 * Check whether there are any packets in the queue
307 */
308 while (!list_empty(&dataex.queued_requests)) {
309 /*
310 * Need to dequeue them.
311 * Read the result into a dummy buffer.
312 */
313 unsigned char buffer[1];
314 struct transmission *qt = (struct transmission *) dataex.queued_requests.next;
315 list_del(&qt->next);
316 dataex.current_request = qt;
317 spin_unlock_irqrestore(&dataex.req_list_lock, flags);
319 rc = tpm_fe_send(qt->request,
320 qt->request_len,
321 qt);
323 if (rc < 0) {
324 spin_lock_irqsave(&dataex.req_list_lock, flags);
325 if ((qt = dataex.current_request) != NULL) {
326 /*
327 * requeue it at the beginning
328 * of the list
329 */
330 list_add(&qt->next,
331 &dataex.queued_requests);
332 }
333 dataex.current_request = NULL;
334 error = 1;
335 break;
336 }
337 /*
338 * After this point qt is not valid anymore!
339 * It is freed when the front-end is delivering the data
340 * by calling tpm_recv
341 */
343 /*
344 * Try to receive the response now into the provided dummy
345 * buffer (I don't really care about this response since
346 * there is no receiver anymore for this response)
347 */
348 rc = tpm_xen_recv(chip, buffer, sizeof(buffer));
350 spin_lock_irqsave(&dataex.req_list_lock, flags);
351 }
353 if (error == 0) {
354 /*
355 * Finally, send the current request.
356 */
357 dataex.current_request = t;
358 /*
359 * Call the shared memory driver
360 * Pass to it the buffer with the request, the
361 * amount of bytes in the request and
362 * a void * pointer (here: transmission structure)
363 */
364 rc = tpm_fe_send(buf, count, t);
365 /*
366 * The generic TPM driver will call
367 * the function to receive the response.
368 */
369 if (rc < 0) {
370 dataex.current_request = NULL;
371 goto queue_it;
372 }
373 } else {
374 queue_it:
375 if (transmission_set_buffer(t, buf, count) == NULL) {
376 transmission_free(t);
377 rc = -ENOMEM;
378 goto exit;
379 }
380 /*
381 * An error occurred. Don't event try
382 * to send the current request. Just
383 * queue it.
384 */
385 dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
386 list_add_tail(&t->next, &dataex.queued_requests);
387 rc = 0;
388 }
389 }
390 } else {
391 rc = -ENOMEM;
392 }
394 exit:
395 spin_unlock_irqrestore(&dataex.req_list_lock, flags);
396 return rc;
397 }
/*
 * Cancel callback for the generic TPM layer: mark the in-flight request
 * as cancelled so tpm_recv() frees it and drops its response.
 *
 * NOTE(review): this takes resp_list_lock, but dataex.current_request is
 * written under req_list_lock in tpm_xen_send() (and with no lock at all
 * in tpm_recv()) — confirm the intended locking discipline.
 */
static void tpm_xen_cancel(struct tpm_chip *chip)
{
	unsigned long flags;
	spin_lock_irqsave(&dataex.resp_list_lock,flags);

	dataex.req_cancelled = dataex.current_request;

	spin_unlock_irqrestore(&dataex.resp_list_lock,flags);
}
409 static u8 tpm_xen_status(struct tpm_chip *chip)
410 {
411 unsigned long flags;
412 u8 rc = 0;
413 spin_lock_irqsave(&dataex.resp_list_lock, flags);
414 /*
415 * Data are available if:
416 * - there's a current response
417 * - the last packet was queued only (this is fake, but necessary to
418 * get the generic TPM layer to call the receive function.)
419 */
420 if (NULL != dataex.current_response ||
421 0 != (dataex.flags & DATAEX_FLAG_QUEUED_ONLY)) {
422 rc = STATUS_DATA_AVAIL;
423 }
424 spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
425 return rc;
426 }
/* Character-device operations for the misc device registered by the
 * generic (nopci) TPM layer; read/write/open/release are the generic
 * layer's handlers, which call back into the vendor ops below. */
static struct file_operations tpm_xen_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};
/* Vendor hooks handed to tpm_register_hardware_nopci(): the generic
 * layer drives send/recv/cancel/status and interprets the status byte
 * using the masks below (complete when only DATA_AVAIL is set,
 * canceled when READY is reported). */
static struct tpm_vendor_specific tpm_xen = {
	.recv = tpm_xen_recv,
	.send = tpm_xen_send,
	.cancel = tpm_xen_cancel,
	.status = tpm_xen_status,
	.req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
	.req_complete_val  = STATUS_DATA_AVAIL,
	.req_canceled = STATUS_READY,
	.base = 0,	/* no I/O port base — this is a virtual device */
	.attr = TPM_DEVICE_ATTRS,
	.miscdev.fops = &tpm_xen_ops,
};
/* Bare device object registered with the driver core; appears as "vtpm". */
static struct device tpm_device = {
	.bus_id = "vtpm",
};
/* Callbacks registered with the low-level shared-memory front-end:
 * tpm_recv() delivers response data, tpm_fe_status() connection state. */
static struct tpmfe_device tpmfe = {
	.receive = tpm_recv,
	.status  = tpm_fe_status,
};
460 static int __init init_xen(void)
461 {
462 int rc;
464 /*
465 * Register device with the low lever front-end
466 * driver
467 */
468 if ((rc = tpm_fe_register_receiver(&tpmfe)) < 0) {
469 return rc;
470 }
472 /*
473 * Register our device with the system.
474 */
475 if ((rc = device_register(&tpm_device)) < 0) {
476 tpm_fe_unregister_receiver();
477 return rc;
478 }
480 if ((rc = tpm_register_hardware_nopci(&tpm_device, &tpm_xen)) < 0) {
481 device_unregister(&tpm_device);
482 tpm_fe_unregister_receiver();
483 return rc;
484 }
486 dataex.current_request = NULL;
487 spin_lock_init(&dataex.req_list_lock);
488 init_waitqueue_head(&dataex.req_wait_queue);
489 INIT_LIST_HEAD(&dataex.queued_requests);
491 dataex.current_response = NULL;
492 spin_lock_init(&dataex.resp_list_lock);
493 init_waitqueue_head(&dataex.resp_wait_queue);
495 disconnect_time = jiffies;
497 return 0;
498 }
/*
 * Module teardown: unregister from the generic TPM layer, the driver
 * core, and the front-end — the reverse of the init_xen() ordering.
 */
static void __exit cleanup_xen(void)
{
	tpm_remove_hardware(&tpm_device);
	device_unregister(&tpm_device);
	tpm_fe_unregister_receiver();
}
/* fs_initcall runs before ordinary module/device initcalls — presumably so
 * the vTPM is ready for early measurement users; TODO confirm intent. */
fs_initcall(init_xen);
module_exit(cleanup_xen);

MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
MODULE_DESCRIPTION("TPM Driver for XEN (shared memory)");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");