ia64/xen-unstable

changeset 9931:6719dae17b6a

This patch
- moves the TPM frontend completely into the char/tpm directory where it
is a plug-in to the generic TPM driver
- removes a now obsolete include file
- adapts part of the documentation
- fixes some locking problems where copy_to/from_user was called with
IRQs blocked

Signed-off-by: Stefan Berger <stefanb@us.ibm.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu May 04 11:22:19 2006 +0100 (2006-05-04)
parents 62c8e97d56cf
children 1a84eec74331
files docs/misc/vtpm.txt linux-2.6-xen-sparse/drivers/char/tpm/Kconfig linux-2.6-xen-sparse/drivers/char/tpm/Makefile linux-2.6-xen-sparse/drivers/char/tpm/tpm_vtpm.c linux-2.6-xen-sparse/drivers/char/tpm/tpm_vtpm.h linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c linux-2.6-xen-sparse/drivers/xen/Kconfig linux-2.6-xen-sparse/drivers/xen/Makefile
line diff
     1.1 --- a/docs/misc/vtpm.txt	Thu May 04 11:19:27 2006 +0100
     1.2 +++ b/docs/misc/vtpm.txt	Thu May 04 11:22:19 2006 +0100
     1.3 @@ -21,11 +21,11 @@ Compile the XEN tree as usual after the 
     1.4  linux-2.6.??-xen/.config file:
     1.5  
     1.6  CONFIG_XEN_TPMDEV_BACKEND=y
     1.7 -CONFIG_XEN_TPMDEV_GRANT=y
     1.8  
     1.9 -CONFIG_TCG_TPM=m
    1.10 +CONFIG_TCG_TPM=y
    1.11  CONFIG_TCG_NSC=m
    1.12  CONFIG_TCG_ATMEL=m
    1.13 +CONFIG_TCG_XEN=y
    1.14  
    1.15  You must also enable the virtual TPM to be built:
    1.16  
    1.17 @@ -33,6 +33,12 @@ In Config.mk in the Xen root directory s
    1.18  
    1.19  VTPM_TOOLS ?= y
    1.20  
    1.21 +and in
    1.22 +
    1.23 +tools/vtpm/Rules.mk set the line
    1.24 +
    1.25 +BUILD_EMULATOR = y
    1.26 +
    1.27  Now build the Xen sources from Xen's root directory:
    1.28  
    1.29  make install
     2.1 --- a/linux-2.6-xen-sparse/drivers/char/tpm/Kconfig	Thu May 04 11:19:27 2006 +0100
     2.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/Kconfig	Thu May 04 11:22:19 2006 +0100
     2.3 @@ -51,7 +51,7 @@ config TCG_INFINEON
     2.4  
     2.5  config TCG_XEN
     2.6  	tristate "XEN TPM Interface"
     2.7 -	depends on TCG_TPM && XEN && XEN_TPMDEV_FRONTEND
     2.8 +	depends on TCG_TPM && XEN
     2.9  	---help---
    2.10  	  If you want to make TPM support available to a Xen
    2.11  	  user domain, say Yes and it will
    2.12 @@ -60,4 +60,3 @@ config TCG_XEN
    2.13            tpm_xen.
    2.14  
    2.15  endmenu
    2.16 -
     3.1 --- a/linux-2.6-xen-sparse/drivers/char/tpm/Makefile	Thu May 04 11:19:27 2006 +0100
     3.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/Makefile	Thu May 04 11:22:19 2006 +0100
     3.3 @@ -8,4 +8,4 @@ endif
     3.4  obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
     3.5  obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
     3.6  obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
     3.7 -obj-$(CONFIG_TCG_XEN) += tpm_xen.o
     3.8 +obj-$(CONFIG_TCG_XEN) += tpm_xen.o tpm_vtpm.o
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm_vtpm.c	Thu May 04 11:22:19 2006 +0100
     4.3 @@ -0,0 +1,546 @@
     4.4 +/*
     4.5 + * Copyright (C) 2006 IBM Corporation
     4.6 + *
     4.7 + * Authors:
     4.8 + * Stefan Berger <stefanb@us.ibm.com>
     4.9 + *
    4.10 + * Generic device driver part for device drivers in a virtualized
    4.11 + * environment.
    4.12 + *
    4.13 + * This program is free software; you can redistribute it and/or
    4.14 + * modify it under the terms of the GNU General Public License as
    4.15 + * published by the Free Software Foundation, version 2 of the
    4.16 + * License.
    4.17 + *
    4.18 + */
    4.19 +
    4.20 +#include <asm/uaccess.h>
    4.21 +#include <linux/list.h>
    4.22 +#include <linux/device.h>
    4.23 +#include <linux/interrupt.h>
    4.24 +#include <linux/platform_device.h>
    4.25 +#include "tpm.h"
    4.26 +#include "tpm_vtpm.h"
    4.27 +
    4.28 +/* read status bits */
    4.29 +enum {
    4.30 +	STATUS_BUSY = 0x01,
    4.31 +	STATUS_DATA_AVAIL = 0x02,
    4.32 +	STATUS_READY = 0x04
    4.33 +};
    4.34 +
    4.35 +#define MIN(x,y)  ((x) < (y)) ? (x) : (y)
    4.36 +
    4.37 +struct transmission {
    4.38 +	struct list_head next;
    4.39 +
    4.40 +	unsigned char *request;
    4.41 +	size_t  request_len;
    4.42 +	size_t  request_buflen;
    4.43 +
    4.44 +	unsigned char *response;
    4.45 +	size_t  response_len;
    4.46 +	size_t  response_buflen;
    4.47 +
    4.48 +	unsigned int flags;
    4.49 +};
    4.50 +
    4.51 +enum {
    4.52 +	TRANSMISSION_FLAG_WAS_QUEUED = 0x1
    4.53 +};
    4.54 +
    4.55 +struct vtpm_state {
    4.56 +	struct transmission *current_request;
    4.57 +	spinlock_t           req_list_lock;
    4.58 +	wait_queue_head_t    req_wait_queue;
    4.59 +
    4.60 +	struct list_head     queued_requests;
    4.61 +
    4.62 +	struct transmission *current_response;
    4.63 +	spinlock_t           resp_list_lock;
    4.64 +	wait_queue_head_t    resp_wait_queue;     // processes waiting for responses
    4.65 +
     4.66 +	struct transmission *req_cancelled;       // if a cancellation was encountered
    4.67 +
    4.68 +	u8                   vd_status;
    4.69 +	u8                   flags;
    4.70 +
    4.71 +	unsigned long        disconnect_time;
    4.72 +
    4.73 +	struct tpm_virtual_device *tpmvd;
    4.74 +};
    4.75 +
    4.76 +enum {
    4.77 +	DATAEX_FLAG_QUEUED_ONLY = 0x1
    4.78 +};
    4.79 +
    4.80 +
    4.81 +/* local variables */
    4.82 +static struct vtpm_state *vtpms;
    4.83 +
    4.84 +/* local function prototypes */
    4.85 +static int _vtpm_send_queued(struct tpm_chip *chip);
    4.86 +
    4.87 +
    4.88 +/* =============================================================
    4.89 + * Some utility functions
    4.90 + * =============================================================
    4.91 + */
    4.92 +static void vtpm_state_init(struct vtpm_state *vtpms)
    4.93 +{
    4.94 +	vtpms->current_request = NULL;
    4.95 +	spin_lock_init(&vtpms->req_list_lock);
    4.96 +	init_waitqueue_head(&vtpms->req_wait_queue);
    4.97 +	INIT_LIST_HEAD(&vtpms->queued_requests);
    4.98 +
    4.99 +	vtpms->current_response = NULL;
   4.100 +	spin_lock_init(&vtpms->resp_list_lock);
   4.101 +	init_waitqueue_head(&vtpms->resp_wait_queue);
   4.102 +
   4.103 +	vtpms->disconnect_time = jiffies;
   4.104 +}
   4.105 +
   4.106 +
   4.107 +static inline struct transmission *transmission_alloc(void)
   4.108 +{
   4.109 +	return kzalloc(sizeof(struct transmission), GFP_ATOMIC);
   4.110 +}
   4.111 +
   4.112 +static unsigned char *
   4.113 +transmission_set_req_buffer(struct transmission *t,
   4.114 +                            unsigned char *buffer, size_t len)
   4.115 +{
   4.116 +	if (t->request_buflen < len) {
   4.117 +		kfree(t->request);
   4.118 +		t->request = kmalloc(len, GFP_KERNEL);
   4.119 +		if (!t->request) {
   4.120 +			t->request_buflen = 0;
   4.121 +			return NULL;
   4.122 +		}
   4.123 +		t->request_buflen = len;
   4.124 +	}
   4.125 +
   4.126 +	memcpy(t->request, buffer, len);
   4.127 +	t->request_len = len;
   4.128 +
   4.129 +	return t->request;
   4.130 +}
   4.131 +
   4.132 +static unsigned char *
   4.133 +transmission_set_res_buffer(struct transmission *t,
   4.134 +                            const unsigned char *buffer, size_t len)
   4.135 +{
   4.136 +	if (t->response_buflen < len) {
   4.137 +		kfree(t->response);
   4.138 +		t->response = kmalloc(len, GFP_ATOMIC);
   4.139 +		if (!t->response) {
   4.140 +			t->response_buflen = 0;
   4.141 +			return NULL;
   4.142 +		}
   4.143 +		t->response_buflen = len;
   4.144 +	}
   4.145 +
   4.146 +	memcpy(t->response, buffer, len);
   4.147 +	t->response_len = len;
   4.148 +
   4.149 +	return t->response;
   4.150 +}
   4.151 +
   4.152 +static inline void transmission_free(struct transmission *t)
   4.153 +{
   4.154 +	kfree(t->request);
   4.155 +	kfree(t->response);
   4.156 +	kfree(t);
   4.157 +}
   4.158 +
   4.159 +/* =============================================================
   4.160 + * Interface with the lower layer driver
   4.161 + * =============================================================
   4.162 + */
   4.163 +/*
   4.164 + * Lower layer uses this function to make a response available.
   4.165 + */
   4.166 +int vtpm_vd_recv(const unsigned char *buffer, size_t count, const void *ptr)
   4.167 +{
   4.168 +	unsigned long flags;
   4.169 +	int ret_size = 0;
   4.170 +	struct transmission *t;
   4.171 +
   4.172 +	/*
   4.173 +	 * The list with requests must contain one request
   4.174 +	 * only and the element there must be the one that
   4.175 +	 * was passed to me from the front-end.
   4.176 +	 */
   4.177 +	spin_lock_irqsave(&vtpms->resp_list_lock, flags);
   4.178 +	if (vtpms->current_request != ptr) {
   4.179 +		printk("WARNING: The request pointer is different than the "
   4.180 +		       "pointer the shared memory driver returned to me. "
   4.181 +		       "%p != %p\n",
   4.182 +		       vtpms->current_request, ptr);
   4.183 +	}
   4.184 +
   4.185 +	/*
   4.186 +	 * If the request has been cancelled, just quit here
   4.187 +	 */
   4.188 +	if (vtpms->req_cancelled == (struct transmission *)ptr) {
   4.189 +		if (vtpms->current_request == vtpms->req_cancelled) {
   4.190 +			vtpms->current_request = NULL;
   4.191 +		}
   4.192 +		transmission_free(vtpms->req_cancelled);
   4.193 +		vtpms->req_cancelled = NULL;
   4.194 +		spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
   4.195 +		return 0;
   4.196 +	}
   4.197 +
   4.198 +	if (NULL != (t = vtpms->current_request)) {
   4.199 +		transmission_free(t);
   4.200 +		vtpms->current_request = NULL;
   4.201 +	}
   4.202 +
   4.203 +	t = transmission_alloc();
   4.204 +	if (t) {
   4.205 +		if (!transmission_set_res_buffer(t, buffer, count)) {
   4.206 +			transmission_free(t);
   4.207 +			spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
   4.208 +			return -ENOMEM;
   4.209 +		}
   4.210 +		ret_size = count;
   4.211 +		vtpms->current_response = t;
   4.212 +		wake_up_interruptible(&vtpms->resp_wait_queue);
   4.213 +	}
   4.214 +	spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
   4.215 +
   4.216 +	return ret_size;
   4.217 +}
   4.218 +
   4.219 +
   4.220 +/*
   4.221 + * Lower layer indicates its status (connected/disconnected)
   4.222 + */
   4.223 +void vtpm_vd_status(u8 vd_status)
   4.224 +{
   4.225 +	vtpms->vd_status = vd_status;
   4.226 +	if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
   4.227 +		vtpms->disconnect_time = jiffies;
   4.228 +	}
   4.229 +}
   4.230 +
   4.231 +/* =============================================================
   4.232 + * Interface with the generic TPM driver
   4.233 + * =============================================================
   4.234 + */
   4.235 +static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
   4.236 +{
   4.237 +	int rc = 0;
   4.238 +	unsigned long flags;
   4.239 +
   4.240 +	/*
   4.241 +	 * Check if the previous operation only queued the command
   4.242 +	 * In this case there won't be a response, so I just
   4.243 +	 * return from here and reset that flag. In any other
   4.244 +	 * case I should receive a response from the back-end.
   4.245 +	 */
   4.246 +	spin_lock_irqsave(&vtpms->resp_list_lock, flags);
   4.247 +	if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
   4.248 +		vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY;
   4.249 +		spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
   4.250 +		/*
   4.251 +		 * The first few commands (measurements) must be
   4.252 +		 * queued since it might not be possible to talk to the
   4.253 +		 * TPM, yet.
   4.254 +		 * Return a response of up to 30 '0's.
   4.255 +		 */
   4.256 +
   4.257 +		count = MIN(count, 30);
   4.258 +		memset(buf, 0x0, count);
   4.259 +		return count;
   4.260 +	}
   4.261 +	/*
   4.262 +	 * Check whether something is in the responselist and if
   4.263 +	 * there's nothing in the list wait for something to appear.
   4.264 +	 */
   4.265 +
   4.266 +	if (!vtpms->current_response) {
   4.267 +		spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
   4.268 +		interruptible_sleep_on_timeout(&vtpms->resp_wait_queue,
   4.269 +		                               1000);
   4.270 +		spin_lock_irqsave(&vtpms->resp_list_lock ,flags);
   4.271 +	}
   4.272 +
   4.273 +	if (vtpms->current_response) {
   4.274 +		struct transmission *t = vtpms->current_response;
   4.275 +		vtpms->current_response = NULL;
   4.276 +		rc = MIN(count, t->response_len);
   4.277 +		memcpy(buf, t->response, rc);
   4.278 +		transmission_free(t);
   4.279 +	}
   4.280 +
   4.281 +	spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
   4.282 +	return rc;
   4.283 +}
   4.284 +
   4.285 +static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
   4.286 +{
   4.287 +	int rc = 0;
   4.288 +	unsigned long flags;
   4.289 +	struct transmission *t = transmission_alloc();
   4.290 +
   4.291 +	if (!t)
   4.292 +		return -ENOMEM;
   4.293 +	/*
   4.294 +	 * If there's a current request, it must be the
   4.295 +	 * previous request that has timed out.
   4.296 +	 */
   4.297 +	spin_lock_irqsave(&vtpms->req_list_lock, flags);
   4.298 +	if (vtpms->current_request != NULL) {
   4.299 +		printk("WARNING: Sending although there is a request outstanding.\n"
   4.300 +		       "         Previous request must have timed out.\n");
   4.301 +		transmission_free(vtpms->current_request);
   4.302 +		vtpms->current_request = NULL;
   4.303 +	}
   4.304 +	spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
   4.305 +
   4.306 +	/*
   4.307 +	 * Queue the packet if the driver below is not
   4.308 +	 * ready, yet, or there is any packet already
   4.309 +	 * in the queue.
   4.310 +	 * If the driver below is ready, unqueue all
   4.311 +	 * packets first before sending our current
   4.312 +	 * packet.
   4.313 +	 * For each unqueued packet, except for the
   4.314 +	 * last (=current) packet, call the function
   4.315 +	 * tpm_xen_recv to wait for the response to come
   4.316 +	 * back.
   4.317 +	 */
   4.318 +	if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
   4.319 +		if (time_after(jiffies,
   4.320 +		               vtpms->disconnect_time + HZ * 10)) {
   4.321 +			rc = -ENOENT;
   4.322 +		} else {
   4.323 +			goto queue_it;
   4.324 +		}
   4.325 +	} else {
   4.326 +		/*
   4.327 +		 * Send all queued packets.
   4.328 +		 */
   4.329 +		if (_vtpm_send_queued(chip) == 0) {
   4.330 +
   4.331 +			vtpms->current_request = t;
   4.332 +
   4.333 +			rc = vtpm_vd_send(chip,
   4.334 +			                  vtpms->tpmvd->tpm_private,
   4.335 +			                  buf,
   4.336 +			                  count,
   4.337 +			                  t);
   4.338 +			/*
   4.339 +			 * The generic TPM driver will call
   4.340 +			 * the function to receive the response.
   4.341 +			 */
   4.342 +			if (rc < 0) {
   4.343 +				vtpms->current_request = NULL;
   4.344 +				goto queue_it;
   4.345 +			}
   4.346 +		} else {
   4.347 +queue_it:
   4.348 +			if (!transmission_set_req_buffer(t, buf, count)) {
   4.349 +				transmission_free(t);
   4.350 +				rc = -ENOMEM;
   4.351 +				goto exit;
   4.352 +			}
   4.353 +			/*
    4.354 +			 * An error occurred. Don't even try
   4.355 +			 * to send the current request. Just
   4.356 +			 * queue it.
   4.357 +			 */
   4.358 +			spin_lock_irqsave(&vtpms->req_list_lock, flags);
   4.359 +			vtpms->flags |= DATAEX_FLAG_QUEUED_ONLY;
   4.360 +			list_add_tail(&t->next, &vtpms->queued_requests);
   4.361 +			spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
   4.362 +		}
   4.363 +	}
   4.364 +
   4.365 +exit:
   4.366 +	return rc;
   4.367 +}
   4.368 +
   4.369 +
   4.370 +/*
   4.371 + * Send all queued requests.
   4.372 + */
   4.373 +static int _vtpm_send_queued(struct tpm_chip *chip)
   4.374 +{
   4.375 +	int rc;
   4.376 +	int error = 0;
   4.377 +	long flags;
   4.378 +	unsigned char buffer[1];
   4.379 +
   4.380 +	spin_lock_irqsave(&vtpms->req_list_lock, flags);
   4.381 +
   4.382 +	while (!list_empty(&vtpms->queued_requests)) {
   4.383 +		/*
   4.384 +		 * Need to dequeue them.
   4.385 +		 * Read the result into a dummy buffer.
   4.386 +		 */
   4.387 +		struct transmission *qt = (struct transmission *)
   4.388 +		                          vtpms->queued_requests.next;
   4.389 +		list_del(&qt->next);
   4.390 +		vtpms->current_request = qt;
   4.391 +		spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
   4.392 +
   4.393 +		rc = vtpm_vd_send(chip,
   4.394 +		                  vtpms->tpmvd->tpm_private,
   4.395 +		                  qt->request,
   4.396 +		                  qt->request_len,
   4.397 +		                  qt);
   4.398 +
   4.399 +		if (rc < 0) {
   4.400 +			spin_lock_irqsave(&vtpms->req_list_lock, flags);
   4.401 +			if ((qt = vtpms->current_request) != NULL) {
   4.402 +				/*
   4.403 +				 * requeue it at the beginning
   4.404 +				 * of the list
   4.405 +				 */
   4.406 +				list_add(&qt->next,
   4.407 +				         &vtpms->queued_requests);
   4.408 +			}
   4.409 +			vtpms->current_request = NULL;
   4.410 +			error = 1;
   4.411 +			break;
   4.412 +		}
   4.413 +		/*
   4.414 +		 * After this point qt is not valid anymore!
   4.415 +		 * It is freed when the front-end is delivering
   4.416 +		 * the data by calling tpm_recv
   4.417 +		 */
   4.418 +		/*
   4.419 +		 * Receive response into provided dummy buffer
   4.420 +		 */
   4.421 +		rc = vtpm_recv(chip, buffer, sizeof(buffer));
   4.422 +		spin_lock_irqsave(&vtpms->req_list_lock, flags);
   4.423 +	}
   4.424 +
   4.425 +	spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
   4.426 +
   4.427 +	return error;
   4.428 +}
   4.429 +
   4.430 +static void vtpm_cancel(struct tpm_chip *chip)
   4.431 +{
   4.432 +	unsigned long flags;
   4.433 +	spin_lock_irqsave(&vtpms->resp_list_lock,flags);
   4.434 +
   4.435 +	vtpms->req_cancelled = vtpms->current_request;
   4.436 +
   4.437 +	spin_unlock_irqrestore(&vtpms->resp_list_lock,flags);
   4.438 +}
   4.439 +
   4.440 +static u8 vtpm_status(struct tpm_chip *chip)
   4.441 +{
   4.442 +	u8 rc = 0;
   4.443 +	unsigned long flags;
   4.444 +
   4.445 +	spin_lock_irqsave(&vtpms->resp_list_lock, flags);
   4.446 +	/*
   4.447 +	 * Data are available if:
   4.448 +	 *  - there's a current response
   4.449 +	 *  - the last packet was queued only (this is fake, but necessary to
   4.450 +	 *      get the generic TPM layer to call the receive function.)
   4.451 +	 */
   4.452 +	if (vtpms->current_response ||
   4.453 +	    0 != (vtpms->flags & DATAEX_FLAG_QUEUED_ONLY)) {
   4.454 +		rc = STATUS_DATA_AVAIL;
   4.455 +	}
   4.456 +	spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
   4.457 +	return rc;
   4.458 +}
   4.459 +
   4.460 +static struct file_operations vtpm_ops = {
   4.461 +	.owner = THIS_MODULE,
   4.462 +	.llseek = no_llseek,
   4.463 +	.open = tpm_open,
   4.464 +	.read = tpm_read,
   4.465 +	.write = tpm_write,
   4.466 +	.release = tpm_release,
   4.467 +};
   4.468 +
   4.469 +static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
   4.470 +static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
   4.471 +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
   4.472 +static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
   4.473 +
   4.474 +static struct attribute *vtpm_attrs[] = {
   4.475 +	&dev_attr_pubek.attr,
   4.476 +	&dev_attr_pcrs.attr,
   4.477 +	&dev_attr_caps.attr,
   4.478 +	&dev_attr_cancel.attr,
   4.479 +	NULL,
   4.480 +};
   4.481 +
   4.482 +static struct attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs };
   4.483 +
   4.484 +static struct tpm_vendor_specific tpm_vtpm = {
   4.485 +	.recv = vtpm_recv,
   4.486 +	.send = vtpm_send,
   4.487 +	.cancel = vtpm_cancel,
   4.488 +	.status = vtpm_status,
   4.489 +	.req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
   4.490 +	.req_complete_val  = STATUS_DATA_AVAIL,
   4.491 +	.req_canceled = STATUS_READY,
   4.492 +	.base = 0,
   4.493 +	.attr_group = &vtpm_attr_grp,
   4.494 +	.miscdev = {
   4.495 +		.fops = &vtpm_ops,
   4.496 +	},
   4.497 +};
   4.498 +
   4.499 +static struct platform_device *pdev;
   4.500 +
   4.501 +int __init init_vtpm(struct tpm_virtual_device *tvd)
   4.502 +{
   4.503 +	int rc;
   4.504 +
   4.505 +	/* vtpms is global - only allow one user */
   4.506 +	if (vtpms)
   4.507 +		return -EBUSY;
   4.508 +
   4.509 +	vtpms = kzalloc(sizeof(struct vtpm_state), GFP_KERNEL);
   4.510 +	if (!vtpms)
   4.511 +		return -ENOMEM;
   4.512 +
   4.513 +	vtpm_state_init(vtpms);
   4.514 +	vtpms->tpmvd = tvd;
   4.515 +
   4.516 +	pdev = platform_device_register_simple("tpm_vtpm", -1, NULL, 0);
   4.517 +	if (IS_ERR(pdev)) {
   4.518 +		rc = PTR_ERR(pdev);
   4.519 +		goto err_free_mem;
   4.520 +	}
   4.521 +
   4.522 +	if (tvd)
   4.523 +		tpm_vtpm.buffersize = tvd->max_tx_size;
   4.524 +
   4.525 +	if ((rc = tpm_register_hardware(&pdev->dev, &tpm_vtpm)) < 0) {
   4.526 +		goto err_unreg_pdev;
   4.527 +	}
   4.528 +
   4.529 +	return 0;
   4.530 +
   4.531 +err_unreg_pdev:
   4.532 +	platform_device_unregister(pdev);
   4.533 +err_free_mem:
   4.534 +	kfree(vtpms);
   4.535 +	vtpms = NULL;
   4.536 +
   4.537 +	return rc;
   4.538 +}
   4.539 +
   4.540 +void __exit cleanup_vtpm(void)
   4.541 +{
   4.542 +	struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
   4.543 +	if (chip) {
   4.544 +		tpm_remove_hardware(chip->dev);
   4.545 +		platform_device_unregister(pdev);
   4.546 +	}
   4.547 +	kfree(vtpms);
   4.548 +	vtpms = NULL;
   4.549 +}
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm_vtpm.h	Thu May 04 11:22:19 2006 +0100
     5.3 @@ -0,0 +1,38 @@
     5.4 +#ifndef TPM_VTPM_H
     5.5 +#define TPM_VTPM_H
     5.6 +
     5.7 +struct tpm_chip;
     5.8 +struct tpm_private;
     5.9 +
    5.10 +struct tpm_virtual_device {
    5.11 +	/*
    5.12 +	 * This field indicates the maximum size the driver can
    5.13 +	 * transfer in one chunk. It is filled in by the front-end
    5.14 +	 * driver and should be propagated to the generic tpm driver
    5.15 +	 * for allocation of buffers.
    5.16 +	 */
    5.17 +	unsigned int max_tx_size;
    5.18 +	/*
    5.19 +	 * The following is a private structure of the underlying
    5.20 +	 * driver. It is passed as parameter in the send function.
    5.21 +	 */
    5.22 +	struct tpm_private *tpm_private;
    5.23 +};
    5.24 +
    5.25 +enum vdev_status {
    5.26 +	TPM_VD_STATUS_DISCONNECTED = 0x0,
    5.27 +	TPM_VD_STATUS_CONNECTED = 0x1
    5.28 +};
    5.29 +
    5.30 +/* this function is called from tpm_vtpm.c */
    5.31 +int vtpm_vd_send(struct tpm_chip *tc,
    5.32 +                 struct tpm_private * tp,
    5.33 +                 const u8 * buf, size_t count, void *ptr);
    5.34 +
    5.35 +/* these functions are offered by tpm_vtpm.c */
    5.36 +int __init init_vtpm(struct tpm_virtual_device *);
    5.37 +void __exit cleanup_vtpm(void);
    5.38 +int vtpm_vd_recv(const unsigned char *buffer, size_t count, const void *ptr);
    5.39 +void vtpm_vd_status(u8 status);
    5.40 +
    5.41 +#endif
     6.1 --- a/linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c	Thu May 04 11:19:27 2006 +0100
     6.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c	Thu May 04 11:22:19 2006 +0100
     6.3 @@ -1,536 +1,767 @@
     6.4  /*
     6.5 - * Copyright (C) 2004 IBM Corporation
     6.6 + * Copyright (c) 2005, IBM Corporation
     6.7   *
     6.8 - * Authors:
     6.9 - * Leendert van Doorn <leendert@watson.ibm.com>
    6.10 - * Dave Safford <safford@watson.ibm.com>
    6.11 - * Reiner Sailer <sailer@watson.ibm.com>
    6.12 - * Kylene Hall <kjhall@us.ibm.com>
    6.13 - * Stefan Berger <stefanb@us.ibm.com>
    6.14 + * Author: Stefan Berger, stefanb@us.ibm.com
    6.15 + * Grant table support: Mahadevan Gomathisankaran
    6.16   *
    6.17 - * Maintained by: <tpmdd_devel@lists.sourceforge.net>
    6.18 + * This code has been derived from drivers/xen/netfront/netfront.c
    6.19   *
    6.20 - * Device driver for TCG/TCPA TPM (trusted platform module) for XEN.
    6.21 - * Specifications at www.trustedcomputinggroup.org
    6.22 + * Copyright (c) 2002-2004, K A Fraser
    6.23   *
    6.24   * This program is free software; you can redistribute it and/or
    6.25 - * modify it under the terms of the GNU General Public License as
    6.26 - * published by the Free Software Foundation, version 2 of the
    6.27 - * License.
    6.28 + * modify it under the terms of the GNU General Public License version 2
    6.29 + * as published by the Free Software Foundation; or, when distributed
    6.30 + * separately from the Linux kernel or incorporated into other
    6.31 + * software packages, subject to the following license:
    6.32   *
    6.33 + * Permission is hereby granted, free of charge, to any person obtaining a copy
    6.34 + * of this source file (the "Software"), to deal in the Software without
    6.35 + * restriction, including without limitation the rights to use, copy, modify,
    6.36 + * merge, publish, distribute, sublicense, and/or sell copies of the Software,
    6.37 + * and to permit persons to whom the Software is furnished to do so, subject to
    6.38 + * the following conditions:
    6.39 + *
    6.40 + * The above copyright notice and this permission notice shall be included in
    6.41 + * all copies or substantial portions of the Software.
    6.42 + *
    6.43 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    6.44 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    6.45 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    6.46 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    6.47 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    6.48 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
    6.49 + * IN THE SOFTWARE.
    6.50   */
    6.51  
    6.52 -#include <asm/uaccess.h>
    6.53 -#include <linux/list.h>
    6.54 -#include <xen/tpmfe.h>
    6.55 -#include <linux/device.h>
    6.56 +#include <linux/errno.h>
    6.57  #include <linux/interrupt.h>
    6.58 -#include <linux/platform_device.h>
    6.59 -#include "tpm.h"
    6.60 +#include <linux/mutex.h>
    6.61 +#include <asm/uaccess.h>
    6.62 +#include <xen/evtchn.h>
    6.63 +#include <xen/interface/grant_table.h>
    6.64 +#include <xen/interface/io/tpmif.h>
    6.65 +#include <xen/xenbus.h>
    6.66 +#include "tpm_vtpm.h"
    6.67 +
    6.68 +#undef DEBUG
    6.69  
    6.70 -/* read status bits */
    6.71 -enum {
    6.72 -	STATUS_BUSY = 0x01,
    6.73 -	STATUS_DATA_AVAIL = 0x02,
    6.74 -	STATUS_READY = 0x04
    6.75 +/* local structures */
    6.76 +struct tpm_private {
    6.77 +	tpmif_tx_interface_t *tx;
    6.78 +	atomic_t refcnt;
    6.79 +	unsigned int evtchn;
    6.80 +	unsigned int irq;
    6.81 +	u8 is_connected;
    6.82 +	u8 is_suspended;
    6.83 +
    6.84 +	spinlock_t tx_lock;
    6.85 +
    6.86 +	struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
    6.87 +
    6.88 +	atomic_t tx_busy;
    6.89 +	void *tx_remember;
    6.90 +	domid_t backend_id;
    6.91 +	wait_queue_head_t wait_q;
    6.92 +
    6.93 +	struct xenbus_device *dev;
    6.94 +	int ring_ref;
    6.95  };
    6.96  
    6.97 -#define MIN(x,y)  ((x) < (y)) ? (x) : (y)
    6.98 -
    6.99 -struct transmission {
   6.100 -	struct list_head next;
   6.101 -	unsigned char *request;
   6.102 -	unsigned int request_len;
   6.103 -	unsigned char *rcv_buffer;
   6.104 -	unsigned int  buffersize;
   6.105 -	unsigned int flags;
   6.106 -};
   6.107 -
   6.108 -enum {
   6.109 -	TRANSMISSION_FLAG_WAS_QUEUED = 0x1
   6.110 +struct tx_buffer {
   6.111 +	unsigned int size;	// available space in data
   6.112 +	unsigned int len;	// used space in data
   6.113 +	unsigned char *data;	// pointer to a page
   6.114  };
   6.115  
   6.116 -struct data_exchange {
   6.117 -	struct transmission *current_request;
   6.118 -	spinlock_t           req_list_lock;
   6.119 -	wait_queue_head_t    req_wait_queue;
   6.120 -
   6.121 -	struct list_head     queued_requests;
   6.122 -
   6.123 -	struct transmission *current_response;
   6.124 -	spinlock_t           resp_list_lock;
   6.125 -	wait_queue_head_t    resp_wait_queue;     // processes waiting for responses
   6.126 -
   6.127 -	struct transmission *req_cancelled;       // if a cancellation was encounterd
   6.128 -
   6.129 -	unsigned int         fe_status;
   6.130 -	unsigned int         flags;
   6.131 -};
   6.132 -
   6.133 -enum {
   6.134 -	DATAEX_FLAG_QUEUED_ONLY = 0x1
   6.135 -};
   6.136 -
   6.137 -static struct data_exchange dataex;
   6.138 -
   6.139 -static unsigned long disconnect_time;
   6.140 -
   6.141 -static struct tpmfe_device tpmfe;
   6.142 -
   6.143 -/* local function prototypes */
   6.144 -static void __exit cleanup_xen(void);
   6.145 -
   6.146  
   6.147 -/* =============================================================
   6.148 - * Some utility functions
   6.149 - * =============================================================
   6.150 - */
   6.151 -static inline struct transmission *
   6.152 -transmission_alloc(void)
   6.153 -{
   6.154 -	return kzalloc(sizeof(struct transmission), GFP_KERNEL);
   6.155 -}
   6.156 +/* locally visible variables */
   6.157 +static grant_ref_t gref_head;
   6.158 +static struct tpm_private *my_priv;
   6.159  
   6.160 -static inline unsigned char *
   6.161 -transmission_set_buffer(struct transmission *t,
   6.162 -                        unsigned char *buffer, unsigned int len)
   6.163 +/* local function prototypes */
   6.164 +static irqreturn_t tpmif_int(int irq,
   6.165 +                             void *tpm_priv,
   6.166 +                             struct pt_regs *ptregs);
   6.167 +static void tpmif_rx_action(unsigned long unused);
   6.168 +static int tpmif_connect(struct xenbus_device *dev,
   6.169 +                         struct tpm_private *tp,
   6.170 +                         domid_t domid);
   6.171 +static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
   6.172 +static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
   6.173 +static void tpmif_free_tx_buffers(struct tpm_private *tp);
   6.174 +static void tpmif_set_connected_state(struct tpm_private *tp,
   6.175 +                                      u8 newstate);
   6.176 +static int tpm_xmit(struct tpm_private *tp,
   6.177 +                    const u8 * buf, size_t count, int userbuffer,
   6.178 +                    void *remember);
   6.179 +static void destroy_tpmring(struct tpm_private *tp);
   6.180 +
   6.181 +#define DPRINTK(fmt, args...) \
   6.182 +    pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
   6.183 +#define IPRINTK(fmt, args...) \
   6.184 +    printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
   6.185 +#define WPRINTK(fmt, args...) \
   6.186 +    printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
   6.187 +
   6.188 +#define GRANT_INVALID_REF	0
   6.189 +
   6.190 +
   6.191 +static inline int
   6.192 +tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len,
   6.193 +               int isuserbuffer)
   6.194  {
   6.195 -	kfree(t->request);
   6.196 -	t->request = kmalloc(len, GFP_KERNEL);
   6.197 -	if (t->request) {
   6.198 -		memcpy(t->request,
   6.199 -		       buffer,
   6.200 -		       len);
   6.201 -		t->request_len = len;
   6.202 +	int copied = len;
   6.203 +
   6.204 +	if (len > txb->size) {
   6.205 +		copied = txb->size;
   6.206  	}
   6.207 -	return t->request;
   6.208 -}
   6.209 -
   6.210 -static inline void
   6.211 -transmission_free(struct transmission *t)
   6.212 -{
   6.213 -	kfree(t->request);
   6.214 -	kfree(t->rcv_buffer);
   6.215 -	kfree(t);
   6.216 +	if (isuserbuffer) {
   6.217 +		if (copy_from_user(txb->data, src, copied))
   6.218 +			return -EFAULT;
   6.219 +	} else {
   6.220 +		memcpy(txb->data, src, copied);
   6.221 +	}
   6.222 +	txb->len = len;
   6.223 +	return copied;
   6.224  }
   6.225  
   6.226 -/* =============================================================
   6.227 - * Interface with the TPM shared memory driver for XEN
   6.228 - * =============================================================
   6.229 - */
   6.230 -static int tpm_recv(const u8 *buffer, size_t count, const void *ptr)
   6.231 +static inline struct tx_buffer *tx_buffer_alloc(void)
   6.232  {
   6.233 -	int ret_size = 0;
   6.234 -	struct transmission *t;
   6.235 +	struct tx_buffer *txb = kzalloc(sizeof (struct tx_buffer),
   6.236 +					GFP_KERNEL);
   6.237  
   6.238 -	/*
   6.239 -	 * The list with requests must contain one request
   6.240 -	 * only and the element there must be the one that
   6.241 -	 * was passed to me from the front-end.
   6.242 -	 */
   6.243 -	if (dataex.current_request != ptr) {
   6.244 -		printk("WARNING: The request pointer is different than the "
   6.245 -		       "pointer the shared memory driver returned to me. "
   6.246 -		       "%p != %p\n",
   6.247 -		       dataex.current_request, ptr);
   6.248 -	}
   6.249 -
   6.250 -	/*
   6.251 -	 * If the request has been cancelled, just quit here
   6.252 -	 */
   6.253 -	if (dataex.req_cancelled == (struct transmission *)ptr) {
   6.254 -		if (dataex.current_request == dataex.req_cancelled) {
   6.255 -			dataex.current_request = NULL;
   6.256 +	if (txb) {
   6.257 +		txb->len = 0;
   6.258 +		txb->size = PAGE_SIZE;
   6.259 +		txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
   6.260 +		if (txb->data == NULL) {
   6.261 +			kfree(txb);
   6.262 +			txb = NULL;
   6.263  		}
   6.264 -		transmission_free(dataex.req_cancelled);
   6.265 -		dataex.req_cancelled = NULL;
   6.266 -		return 0;
   6.267  	}
   6.268 -
   6.269 -	if (NULL != (t = dataex.current_request)) {
   6.270 -		transmission_free(t);
   6.271 -		dataex.current_request = NULL;
   6.272 -	}
   6.273 -
   6.274 -	t = transmission_alloc();
   6.275 -	if (t) {
   6.276 -		unsigned long flags;
   6.277 -		t->rcv_buffer = kmalloc(count, GFP_KERNEL);
   6.278 -		if (! t->rcv_buffer) {
   6.279 -			transmission_free(t);
   6.280 -			return -ENOMEM;
   6.281 -		}
   6.282 -		t->buffersize = count;
   6.283 -		memcpy(t->rcv_buffer, buffer, count);
   6.284 -		ret_size = count;
   6.285 -
   6.286 -		spin_lock_irqsave(&dataex.resp_list_lock ,flags);
   6.287 -		dataex.current_response = t;
   6.288 -		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
   6.289 -		wake_up_interruptible(&dataex.resp_wait_queue);
   6.290 -	}
   6.291 -	return ret_size;
   6.292 +	return txb;
   6.293  }
   6.294  
   6.295  
   6.296 -static void tpm_fe_status(unsigned int flags)
   6.297 +static inline void tx_buffer_free(struct tx_buffer *txb)
   6.298  {
   6.299 -	dataex.fe_status = flags;
   6.300 -	if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
   6.301 -		disconnect_time = jiffies;
   6.302 +	if (txb) {
   6.303 +		free_page((long)txb->data);
   6.304 +		kfree(txb);
   6.305 +	}
   6.306 +}
   6.307 +
   6.308 +/**************************************************************
   6.309 + Utility functions for the tpm_private structure
   6.310 +**************************************************************/
   6.311 +static inline void tpm_private_init(struct tpm_private *tp)
   6.312 +{
   6.313 +	spin_lock_init(&tp->tx_lock);
   6.314 +	init_waitqueue_head(&tp->wait_q);
   6.315 +	atomic_set(&tp->refcnt, 1);
   6.316 +}
   6.317 +
   6.318 +static inline void tpm_private_put(void)
   6.319 +{
   6.320 +	if ( atomic_dec_and_test(&my_priv->refcnt)) {
   6.321 +		tpmif_free_tx_buffers(my_priv);
   6.322 +		kfree(my_priv);
   6.323 +		my_priv = NULL;
   6.324  	}
   6.325  }
   6.326  
   6.327 -/* =============================================================
   6.328 - * Interface with the generic TPM driver
   6.329 - * =============================================================
   6.330 - */
   6.331 -static int tpm_xen_recv(struct tpm_chip *chip, u8 * buf, size_t count)
   6.332 -{
   6.333 -	unsigned long flags;
   6.334 -	int rc = 0;
   6.335 -
   6.336 -	spin_lock_irqsave(&dataex.resp_list_lock, flags);
   6.337 -	/*
   6.338 -	 * Check if the previous operation only queued the command
   6.339 -	 * In this case there won't be a response, so I just
   6.340 -	 * return from here and reset that flag. In any other
   6.341 -	 * case I should receive a response from the back-end.
   6.342 -	 */
   6.343 -	if ((dataex.flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
   6.344 -		dataex.flags &= ~DATAEX_FLAG_QUEUED_ONLY;
   6.345 -		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
   6.346 -		/*
   6.347 -		 * a little hack here. The first few measurements
   6.348 -		 * are queued since there's no way to talk to the
   6.349 -		 * TPM yet (due to slowness of the control channel)
   6.350 -		 * So we just make IMA happy by giving it 30 NULL
   6.351 -		 * bytes back where the most important part is
   6.352 -		 * that the result code is '0'.
   6.353 -		 */
   6.354 -
   6.355 -		count = MIN(count, 30);
   6.356 -		memset(buf, 0x0, count);
   6.357 -		return count;
   6.358 -	}
   6.359 -	/*
   6.360 -	 * Check whether something is in the responselist and if
   6.361 -	 * there's nothing in the list wait for something to appear.
   6.362 -	 */
   6.363 -
   6.364 -	if (NULL == dataex.current_response) {
   6.365 -		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
   6.366 -		interruptible_sleep_on_timeout(&dataex.resp_wait_queue,
   6.367 -		                               1000);
   6.368 -		spin_lock_irqsave(&dataex.resp_list_lock ,flags);
   6.369 -	}
   6.370 -
   6.371 -	if (NULL != dataex.current_response) {
   6.372 -		struct transmission *t = dataex.current_response;
   6.373 -		dataex.current_response = NULL;
   6.374 -		rc = MIN(count, t->buffersize);
   6.375 -		memcpy(buf, t->rcv_buffer, rc);
   6.376 -		transmission_free(t);
   6.377 -	}
   6.378 -
   6.379 -	spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
   6.380 -	return rc;
   6.381 -}
   6.382 -
   6.383 -static int tpm_xen_send(struct tpm_chip *chip, u8 * buf, size_t count)
   6.384 +static struct tpm_private *tpm_private_get(void)
   6.385  {
   6.386 -	/*
   6.387 -	 * We simply pass the packet onto the XEN shared
   6.388 -	 * memory driver.
   6.389 -	 */
   6.390 -	unsigned long flags;
   6.391 -	int rc;
   6.392 -	struct transmission *t = transmission_alloc();
   6.393 -
   6.394 -	spin_lock_irqsave(&dataex.req_list_lock, flags);
   6.395 -	/*
   6.396 -	 * If there's a current request, it must be the
   6.397 -	 * previous request that has timed out.
   6.398 -	 */
   6.399 -	if (dataex.current_request != NULL) {
   6.400 -		printk("WARNING: Sending although there is a request outstanding.\n"
   6.401 -		       "         Previous request must have timed out.\n");
   6.402 -		transmission_free(dataex.current_request);
   6.403 -		dataex.current_request = NULL;
   6.404 -	}
   6.405 -
   6.406 -	if (t != NULL) {
   6.407 -		unsigned int error = 0;
   6.408 -		/*
   6.409 -		 * Queue the packet if the driver below is not
   6.410 -		 * ready, yet, or there is any packet already
   6.411 -		 * in the queue.
   6.412 -		 * If the driver below is ready, unqueue all
   6.413 -		 * packets first before sending our current
   6.414 -		 * packet.
   6.415 -		 * For each unqueued packet, except for the
   6.416 -		 * last (=current) packet, call the function
   6.417 -		 * tpm_xen_recv to wait for the response to come
   6.418 -		 * back.
   6.419 -		 */
   6.420 -		if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
   6.421 -			if (time_after(jiffies, disconnect_time + HZ * 10)) {
   6.422 -				rc = -ENOENT;
   6.423 -			} else {
   6.424 -				/*
   6.425 -				 * copy the request into the buffer
   6.426 -				 */
   6.427 -				if (transmission_set_buffer(t, buf, count)
   6.428 -				    == NULL) {
   6.429 -					transmission_free(t);
   6.430 -					rc = -ENOMEM;
   6.431 -					goto exit;
   6.432 -				}
   6.433 -				dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
   6.434 -				list_add_tail(&t->next, &dataex.queued_requests);
   6.435 -				rc = 0;
   6.436 -			}
   6.437 -		} else {
   6.438 -			/*
   6.439 -			 * Check whether there are any packets in the queue
   6.440 -			 */
   6.441 -			while (!list_empty(&dataex.queued_requests)) {
   6.442 -				/*
   6.443 -				 * Need to dequeue them.
   6.444 -				 * Read the result into a dummy buffer.
   6.445 -				 */
   6.446 -				unsigned char buffer[1];
   6.447 -				struct transmission *qt = (struct transmission *) dataex.queued_requests.next;
   6.448 -				list_del(&qt->next);
   6.449 -				dataex.current_request = qt;
   6.450 -				spin_unlock_irqrestore(&dataex.req_list_lock,
   6.451 -				                       flags);
   6.452 -
   6.453 -				rc = tpm_fe_send(tpmfe.tpm_private,
   6.454 -				                 qt->request,
   6.455 -				                 qt->request_len,
   6.456 -				                 qt);
   6.457 -
   6.458 -				if (rc < 0) {
   6.459 -					spin_lock_irqsave(&dataex.req_list_lock, flags);
   6.460 -					if ((qt = dataex.current_request) != NULL) {
   6.461 -						/*
   6.462 -						 * requeue it at the beginning
   6.463 -						 * of the list
   6.464 -						 */
   6.465 -						list_add(&qt->next,
   6.466 -						         &dataex.queued_requests);
   6.467 -					}
   6.468 -					dataex.current_request = NULL;
   6.469 -					error = 1;
   6.470 -					break;
   6.471 -				}
   6.472 -				/*
   6.473 -				 * After this point qt is not valid anymore!
   6.474 -				 * It is freed when the front-end is delivering the data
   6.475 -				 * by calling tpm_recv
   6.476 -				 */
   6.477 -
   6.478 -				/*
   6.479 -				 * Try to receive the response now into the provided dummy
   6.480 -				 * buffer (I don't really care about this response since
   6.481 -				 * there is no receiver anymore for this response)
   6.482 -				 */
   6.483 -				rc = tpm_xen_recv(chip, buffer, sizeof(buffer));
   6.484 -
   6.485 -				spin_lock_irqsave(&dataex.req_list_lock, flags);
   6.486 -			}
   6.487 -
   6.488 -			if (error == 0) {
   6.489 -				/*
   6.490 -				 * Finally, send the current request.
   6.491 -				 */
   6.492 -				dataex.current_request = t;
   6.493 -				/*
   6.494 -				 * Call the shared memory driver
   6.495 -				 * Pass to it the buffer with the request, the
   6.496 -				 * amount of bytes in the request and
   6.497 -				 * a void * pointer (here: transmission structure)
   6.498 -				 */
   6.499 -				rc = tpm_fe_send(tpmfe.tpm_private,
   6.500 -				                 buf, count, t);
   6.501 -				/*
   6.502 -				 * The generic TPM driver will call
   6.503 -				 * the function to receive the response.
   6.504 -				 */
   6.505 -				if (rc < 0) {
   6.506 -					dataex.current_request = NULL;
   6.507 -					goto queue_it;
   6.508 -				}
   6.509 -			} else {
   6.510 -queue_it:
   6.511 -				if (transmission_set_buffer(t, buf, count) == NULL) {
   6.512 -					transmission_free(t);
   6.513 -					rc = -ENOMEM;
   6.514 -					goto exit;
   6.515 -				}
   6.516 -				/*
   6.517 -				 * An error occurred. Don't event try
   6.518 -				 * to send the current request. Just
   6.519 -				 * queue it.
   6.520 -				 */
   6.521 -				dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
   6.522 -				list_add_tail(&t->next,
   6.523 -				              &dataex.queued_requests);
   6.524 -				rc = 0;
   6.525 +	int err;
   6.526 +	if (!my_priv) {
   6.527 +		my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
   6.528 +		if (my_priv) {
   6.529 +			tpm_private_init(my_priv);
   6.530 +			err = tpmif_allocate_tx_buffers(my_priv);
   6.531 +			if (err < 0) {
   6.532 +				tpm_private_put();
   6.533  			}
   6.534  		}
   6.535  	} else {
   6.536 -		rc = -ENOMEM;
   6.537 +		atomic_inc(&my_priv->refcnt);
   6.538  	}
   6.539 +	return my_priv;
   6.540 +}
   6.541 +
   6.542 +/**************************************************************
   6.543 +
   6.544 + The interface to let the tpm plugin register its callback
   6.545 + function and send data to another partition using this module
   6.546 +
   6.547 +**************************************************************/
   6.548  
   6.549 -exit:
   6.550 -	spin_unlock_irqrestore(&dataex.req_list_lock, flags);
   6.551 -	return rc;
   6.552 +static DEFINE_MUTEX(suspend_lock);
   6.553 +/*
   6.554 + * Send data via this module by calling this function
   6.555 + */
   6.556 +int vtpm_vd_send(struct tpm_chip *chip,
   6.557 +                 struct tpm_private *tp,
   6.558 +                 const u8 * buf, size_t count, void *ptr)
   6.559 +{
   6.560 +	int sent;
   6.561 +
   6.562 +	mutex_lock(&suspend_lock);
   6.563 +	sent = tpm_xmit(tp, buf, count, 0, ptr);
   6.564 +	mutex_unlock(&suspend_lock);
   6.565 +
   6.566 +	return sent;
   6.567  }
   6.568  
   6.569 -static void tpm_xen_cancel(struct tpm_chip *chip)
   6.570 +/**************************************************************
   6.571 + XENBUS support code
   6.572 +**************************************************************/
   6.573 +
   6.574 +static int setup_tpmring(struct xenbus_device *dev,
   6.575 +                         struct tpm_private *tp)
   6.576  {
   6.577 -	unsigned long flags;
   6.578 -	spin_lock_irqsave(&dataex.resp_list_lock,flags);
   6.579 +	tpmif_tx_interface_t *sring;
   6.580 +	int err;
   6.581 +
   6.582 +	tp->ring_ref = GRANT_INVALID_REF;
   6.583 +
   6.584 +	sring = (void *)__get_free_page(GFP_KERNEL);
   6.585 +	if (!sring) {
   6.586 +		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
   6.587 +		return -ENOMEM;
   6.588 +	}
   6.589 +	tp->tx = sring;
   6.590 +
   6.591 +	err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
   6.592 +	if (err < 0) {
   6.593 +		free_page((unsigned long)sring);
   6.594 +		tp->tx = NULL;
   6.595 +		xenbus_dev_fatal(dev, err, "allocating grant reference");
   6.596 +		goto fail;
   6.597 +	}
   6.598 +	tp->ring_ref = err;
   6.599  
   6.600 -	dataex.req_cancelled = dataex.current_request;
   6.601 +	err = tpmif_connect(dev, tp, dev->otherend_id);
   6.602 +	if (err)
   6.603 +		goto fail;
   6.604 +
   6.605 +	return 0;
   6.606 +fail:
   6.607 +	destroy_tpmring(tp);
   6.608 +	return err;
   6.609 +}
   6.610 +
   6.611  
   6.612 -	spin_unlock_irqrestore(&dataex.resp_list_lock,flags);
   6.613 +static void destroy_tpmring(struct tpm_private *tp)
   6.614 +{
   6.615 +	tpmif_set_connected_state(tp, 0);
   6.616 +
   6.617 +	if (tp->ring_ref != GRANT_INVALID_REF) {
   6.618 +		gnttab_end_foreign_access(tp->ring_ref, 0,
   6.619 +					  (unsigned long)tp->tx);
   6.620 +		tp->ring_ref = GRANT_INVALID_REF;
   6.621 +		tp->tx = NULL;
   6.622 +	}
   6.623 +
   6.624 +	if (tp->irq)
   6.625 +		unbind_from_irqhandler(tp->irq, tp);
   6.626 +
   6.627 +	tp->evtchn = tp->irq = 0;
   6.628  }
   6.629  
   6.630 -static u8 tpm_xen_status(struct tpm_chip *chip)
   6.631 +
   6.632 +static int talk_to_backend(struct xenbus_device *dev,
   6.633 +                           struct tpm_private *tp)
   6.634 +{
   6.635 +	const char *message = NULL;
   6.636 +	int err;
   6.637 +	xenbus_transaction_t xbt;
   6.638 +
   6.639 +	err = setup_tpmring(dev, tp);
   6.640 +	if (err) {
   6.641 +		xenbus_dev_fatal(dev, err, "setting up ring");
   6.642 +		goto out;
   6.643 +	}
   6.644 +
   6.645 +again:
   6.646 +	err = xenbus_transaction_start(&xbt);
   6.647 +	if (err) {
   6.648 +		xenbus_dev_fatal(dev, err, "starting transaction");
   6.649 +		goto destroy_tpmring;
   6.650 +	}
   6.651 +
   6.652 +	err = xenbus_printf(xbt, dev->nodename,
   6.653 +	                    "ring-ref","%u", tp->ring_ref);
   6.654 +	if (err) {
   6.655 +		message = "writing ring-ref";
   6.656 +		goto abort_transaction;
   6.657 +	}
   6.658 +
   6.659 +	err = xenbus_printf(xbt, dev->nodename,
   6.660 +			    "event-channel", "%u", tp->evtchn);
   6.661 +	if (err) {
   6.662 +		message = "writing event-channel";
   6.663 +		goto abort_transaction;
   6.664 +	}
   6.665 +
   6.666 +	err = xenbus_transaction_end(xbt, 0);
   6.667 +	if (err == -EAGAIN)
   6.668 +		goto again;
   6.669 +	if (err) {
   6.670 +		xenbus_dev_fatal(dev, err, "completing transaction");
   6.671 +		goto destroy_tpmring;
   6.672 +	}
   6.673 +
   6.674 +	xenbus_switch_state(dev, XenbusStateConnected);
   6.675 +
   6.676 +	return 0;
   6.677 +
   6.678 +abort_transaction:
   6.679 +	xenbus_transaction_end(xbt, 1);
   6.680 +	if (message)
   6.681 +		xenbus_dev_error(dev, err, "%s", message);
   6.682 +destroy_tpmring:
   6.683 +	destroy_tpmring(tp);
   6.684 +out:
   6.685 +	return err;
   6.686 +}
   6.687 +
   6.688 +/**
   6.689 + * Callback received when the backend's state changes.
   6.690 + */
   6.691 +static void backend_changed(struct xenbus_device *dev,
   6.692 +			    XenbusState backend_state)
   6.693  {
   6.694 -	unsigned long flags;
   6.695 -	u8 rc = 0;
   6.696 -	spin_lock_irqsave(&dataex.resp_list_lock, flags);
   6.697 -	/*
   6.698 -	 * Data are available if:
   6.699 -	 *  - there's a current response
   6.700 -	 *  - the last packet was queued only (this is fake, but necessary to
   6.701 -	 *      get the generic TPM layer to call the receive function.)
   6.702 -	 */
   6.703 -	if (NULL != dataex.current_response ||
   6.704 -	    0 != (dataex.flags & DATAEX_FLAG_QUEUED_ONLY)) {
   6.705 -		rc = STATUS_DATA_AVAIL;
   6.706 +	struct tpm_private *tp = dev->data;
   6.707 +	DPRINTK("\n");
   6.708 +
   6.709 +	switch (backend_state) {
   6.710 +	case XenbusStateInitialising:
   6.711 +	case XenbusStateInitWait:
   6.712 +	case XenbusStateInitialised:
   6.713 +	case XenbusStateUnknown:
   6.714 +		break;
   6.715 +
   6.716 +	case XenbusStateConnected:
   6.717 +		tpmif_set_connected_state(tp, 1);
   6.718 +		break;
   6.719 +
   6.720 +	case XenbusStateClosing:
   6.721 +		tpmif_set_connected_state(tp, 0);
   6.722 +		break;
   6.723 +
   6.724 +	case XenbusStateClosed:
   6.725 +		if (tp->is_suspended == 0) {
   6.726 +			device_unregister(&dev->dev);
   6.727 +		}
   6.728 +		xenbus_switch_state(dev, XenbusStateClosed);
   6.729 +		break;
   6.730 +	}
   6.731 +}
   6.732 +
   6.733 +
   6.734 +static int tpmfront_probe(struct xenbus_device *dev,
   6.735 +                          const struct xenbus_device_id *id)
   6.736 +{
   6.737 +	int err;
   6.738 +	int handle;
   6.739 +	struct tpm_private *tp = tpm_private_get();
   6.740 +
   6.741 +	if (!tp)
   6.742 +		return -ENOMEM;
   6.743 +
   6.744 +	err = xenbus_scanf(XBT_NULL, dev->nodename,
   6.745 +	                   "handle", "%i", &handle);
   6.746 +	if (XENBUS_EXIST_ERR(err))
   6.747 +		return err;
   6.748 +
   6.749 +	if (err < 0) {
   6.750 +		xenbus_dev_fatal(dev,err,"reading virtual-device");
   6.751 +		return err;
   6.752  	}
   6.753 -	spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
   6.754 -	return rc;
   6.755 +
   6.756 +	tp->dev = dev;
   6.757 +	dev->data = tp;
   6.758 +
   6.759 +	err = talk_to_backend(dev, tp);
   6.760 +	if (err) {
   6.761 +		tpm_private_put();
   6.762 +		dev->data = NULL;
   6.763 +		return err;
   6.764 +	}
   6.765 +	return 0;
   6.766 +}
   6.767 +
   6.768 +
   6.769 +static int tpmfront_remove(struct xenbus_device *dev)
   6.770 +{
   6.771 +	struct tpm_private *tp = (struct tpm_private *)dev->data;
   6.772 +	destroy_tpmring(tp);
   6.773 +	return 0;
   6.774 +}
   6.775 +
   6.776 +static int tpmfront_suspend(struct xenbus_device *dev)
   6.777 +{
   6.778 +	struct tpm_private *tp = (struct tpm_private *)dev->data;
   6.779 +	u32 ctr;
   6.780 +
   6.781 +	/* lock, so no app can send */
   6.782 +	mutex_lock(&suspend_lock);
   6.783 +	tp->is_suspended = 1;
   6.784 +
   6.785 +	for (ctr = 0; atomic_read(&tp->tx_busy) && ctr <= 25; ctr++) {
   6.786 +		if ((ctr % 10) == 0)
   6.787 +			printk("TPM-FE [INFO]: Waiting for outstanding request.\n");
   6.788 +		/*
   6.789 +		 * Wait for a request to be responded to.
   6.790 +		 */
   6.791 +		interruptible_sleep_on_timeout(&tp->wait_q, 100);
   6.792 +	}
   6.793 +	xenbus_switch_state(dev, XenbusStateClosed);
   6.794 +
   6.795 +	if (atomic_read(&tp->tx_busy)) {
   6.796 +		/*
   6.797 +		 * A temporary work-around.
   6.798 +		 */
   6.799 +		printk("TPM-FE [WARNING]: Resetting busy flag.\n");
   6.800 +		atomic_set(&tp->tx_busy, 0);
   6.801 +	}
   6.802 +
   6.803 +	return 0;
   6.804 +}
   6.805 +
   6.806 +static int tpmfront_resume(struct xenbus_device *dev)
   6.807 +{
   6.808 +	struct tpm_private *tp = (struct tpm_private *)dev->data;
   6.809 +	destroy_tpmring(tp);
   6.810 +	return talk_to_backend(dev, tp);
   6.811  }
   6.812  
   6.813 -static struct file_operations tpm_xen_ops = {
   6.814 -	.owner = THIS_MODULE,
   6.815 -	.llseek = no_llseek,
   6.816 -	.open = tpm_open,
   6.817 -	.read = tpm_read,
   6.818 -	.write = tpm_write,
   6.819 -	.release = tpm_release,
   6.820 +static int tpmif_connect(struct xenbus_device *dev,
   6.821 +                         struct tpm_private *tp,
   6.822 +                         domid_t domid)
   6.823 +{
   6.824 +	int err;
   6.825 +
   6.826 +	tp->backend_id = domid;
   6.827 +
   6.828 +	err = xenbus_alloc_evtchn(dev, &tp->evtchn);
   6.829 +	if (err)
   6.830 +		return err;
   6.831 +
   6.832 +	err = bind_evtchn_to_irqhandler(tp->evtchn,
   6.833 +					tpmif_int, SA_SAMPLE_RANDOM, "tpmif",
   6.834 +					tp);
   6.835 +	if (err <= 0) {
   6.836 +		WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
   6.837 +		return err;
   6.838 +	}
   6.839 +
   6.840 +	tp->irq = err;
   6.841 +	return 0;
   6.842 +}
   6.843 +
   6.844 +static struct xenbus_device_id tpmfront_ids[] = {
   6.845 +	{ "vtpm" },
   6.846 +	{ "" }
   6.847  };
   6.848  
   6.849 -static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
   6.850 -static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
   6.851 -static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
   6.852 -static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
   6.853 -
   6.854 -static struct attribute* xen_attrs[] = {
   6.855 -	&dev_attr_pubek.attr,
   6.856 -	&dev_attr_pcrs.attr,
   6.857 -	&dev_attr_caps.attr,
   6.858 -	&dev_attr_cancel.attr,
   6.859 -	NULL,
   6.860 +static struct xenbus_driver tpmfront = {
   6.861 +	.name = "vtpm",
   6.862 +	.owner = THIS_MODULE,
   6.863 +	.ids = tpmfront_ids,
   6.864 +	.probe = tpmfront_probe,
   6.865 +	.remove =  tpmfront_remove,
   6.866 +	.resume = tpmfront_resume,
   6.867 +	.otherend_changed = backend_changed,
   6.868 +	.suspend = tpmfront_suspend,
   6.869  };
   6.870  
   6.871 -static struct attribute_group xen_attr_grp = { .attrs = xen_attrs };
   6.872 +static void __init init_tpm_xenbus(void)
   6.873 +{
   6.874 +	xenbus_register_frontend(&tpmfront);
   6.875 +}
   6.876 +
   6.877 +static void __exit exit_tpm_xenbus(void)
   6.878 +{
   6.879 +	xenbus_unregister_driver(&tpmfront);
   6.880 +}
   6.881 +
   6.882 +static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
   6.883 +{
   6.884 +	unsigned int i;
   6.885 +
   6.886 +	for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
   6.887 +		tp->tx_buffers[i] = tx_buffer_alloc();
   6.888 +		if (!tp->tx_buffers[i]) {
   6.889 +			tpmif_free_tx_buffers(tp);
   6.890 +			return -ENOMEM;
   6.891 +		}
   6.892 +	}
   6.893 +	return 0;
   6.894 +}
   6.895 +
   6.896 +static void tpmif_free_tx_buffers(struct tpm_private *tp)
   6.897 +{
   6.898 +	unsigned int i;
   6.899 +
   6.900 +	for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
   6.901 +		tx_buffer_free(tp->tx_buffers[i]);
   6.902 +	}
   6.903 +}
   6.904 +
   6.905 +static void tpmif_rx_action(unsigned long priv)
   6.906 +{
   6.907 +	struct tpm_private *tp = (struct tpm_private *)priv;
   6.908 +
   6.909 +	int i = 0;
   6.910 +	unsigned int received;
   6.911 +	unsigned int offset = 0;
   6.912 +	u8 *buffer;
   6.913 +	tpmif_tx_request_t *tx;
   6.914 +	tx = &tp->tx->ring[i].req;
   6.915 +
   6.916 +	atomic_set(&tp->tx_busy, 0);
   6.917 +	wake_up_interruptible(&tp->wait_q);
   6.918 +
   6.919 +	received = tx->size;
   6.920 +
   6.921 +	buffer = kmalloc(received, GFP_ATOMIC);
   6.922 +	if (NULL == buffer) {
   6.923 +		goto exit;
   6.924 +	}
   6.925 +
   6.926 +	for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
   6.927 +		struct tx_buffer *txb = tp->tx_buffers[i];
   6.928 +		tpmif_tx_request_t *tx;
   6.929 +		unsigned int tocopy;
   6.930 +
   6.931 +		tx = &tp->tx->ring[i].req;
   6.932 +		tocopy = tx->size;
   6.933 +		if (tocopy > PAGE_SIZE) {
   6.934 +			tocopy = PAGE_SIZE;
   6.935 +		}
   6.936 +
   6.937 +		memcpy(&buffer[offset], txb->data, tocopy);
   6.938 +
   6.939 +		gnttab_release_grant_reference(&gref_head, tx->ref);
   6.940 +
   6.941 +		offset += tocopy;
   6.942 +	}
   6.943 +
   6.944 +	vtpm_vd_recv(buffer, received, tp->tx_remember);
   6.945 +	kfree(buffer);
   6.946 +
   6.947 +exit:
   6.948 +
   6.949 +	return;
   6.950 +}
   6.951 +
   6.952 +
   6.953 +static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
   6.954 +{
   6.955 +	struct tpm_private *tp = tpm_priv;
   6.956 +	unsigned long flags;
   6.957 +
   6.958 +	spin_lock_irqsave(&tp->tx_lock, flags);
   6.959 +	tpmif_rx_tasklet.data = (unsigned long)tp;
   6.960 +	tasklet_schedule(&tpmif_rx_tasklet);
   6.961 +	spin_unlock_irqrestore(&tp->tx_lock, flags);
   6.962 +
   6.963 +	return IRQ_HANDLED;
   6.964 +}
   6.965 +
   6.966 +
   6.967 +static int tpm_xmit(struct tpm_private *tp,
   6.968 +                    const u8 * buf, size_t count, int isuserbuffer,
   6.969 +                    void *remember)
   6.970 +{
   6.971 +	tpmif_tx_request_t *tx;
   6.972 +	TPMIF_RING_IDX i;
   6.973 +	unsigned int offset = 0;
   6.974 +
   6.975 +	spin_lock_irq(&tp->tx_lock);
   6.976 +
   6.977 +	if (unlikely(atomic_read(&tp->tx_busy))) {
   6.978 +		printk("tpm_xmit: There's an outstanding request/response "
   6.979 +		       "on the way!\n");
   6.980 +		spin_unlock_irq(&tp->tx_lock);
   6.981 +		return -EBUSY;
   6.982 +	}
   6.983  
   6.984 -static struct tpm_vendor_specific tpm_xen = {
   6.985 -	.recv = tpm_xen_recv,
   6.986 -	.send = tpm_xen_send,
   6.987 -	.cancel = tpm_xen_cancel,
   6.988 -	.status = tpm_xen_status,
   6.989 -	.req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
   6.990 -	.req_complete_val  = STATUS_DATA_AVAIL,
   6.991 -	.req_canceled = STATUS_READY,
   6.992 -	.base = 0,
   6.993 -	.attr_group = &xen_attr_grp,
   6.994 -	.miscdev.fops = &tpm_xen_ops,
   6.995 -	.buffersize = 64 * 1024,
   6.996 +	if (tp->is_connected != 1) {
   6.997 +		spin_unlock_irq(&tp->tx_lock);
   6.998 +		return -EIO;
   6.999 +	}
  6.1000 +
  6.1001 +	for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
  6.1002 +		struct tx_buffer *txb = tp->tx_buffers[i];
  6.1003 +		int copied;
  6.1004 +
  6.1005 +		if (NULL == txb) {
   6.1006 +			DPRINTK("txb (i=%d) is NULL. buffers initialized?\n"
  6.1007 +				"Not transmitting anything!\n", i);
  6.1008 +			spin_unlock_irq(&tp->tx_lock);
  6.1009 +			return -EFAULT;
  6.1010 +		}
  6.1011 +		copied = tx_buffer_copy(txb, &buf[offset], count,
  6.1012 +		                        isuserbuffer);
  6.1013 +		if (copied < 0) {
  6.1014 +			/* An error occurred */
  6.1015 +			spin_unlock_irq(&tp->tx_lock);
  6.1016 +			return copied;
  6.1017 +		}
  6.1018 +		count -= copied;
  6.1019 +		offset += copied;
  6.1020 +
  6.1021 +		tx = &tp->tx->ring[i].req;
  6.1022 +
  6.1023 +		tx->addr = virt_to_machine(txb->data);
  6.1024 +		tx->size = txb->len;
  6.1025 +
  6.1026 +		DPRINTK("First 4 characters sent by TPM-FE are 0x%02x 0x%02x 0x%02x 0x%02x\n",
  6.1027 +		        txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
  6.1028 +
  6.1029 +		/* get the granttable reference for this page */
  6.1030 +		tx->ref = gnttab_claim_grant_reference(&gref_head);
  6.1031 +
  6.1032 +		if (-ENOSPC == tx->ref) {
  6.1033 +			spin_unlock_irq(&tp->tx_lock);
  6.1034 +			DPRINTK(" Grant table claim reference failed in func:%s line:%d file:%s\n", __FUNCTION__, __LINE__, __FILE__);
  6.1035 +			return -ENOSPC;
  6.1036 +		}
  6.1037 +		gnttab_grant_foreign_access_ref( tx->ref,
  6.1038 +		                                 tp->backend_id,
  6.1039 +		                                 (tx->addr >> PAGE_SHIFT),
  6.1040 +		                                 0 /*RW*/);
  6.1041 +		wmb();
  6.1042 +	}
  6.1043 +
  6.1044 +	atomic_set(&tp->tx_busy, 1);
  6.1045 +	tp->tx_remember = remember;
  6.1046 +	mb();
  6.1047 +
  6.1048 +	DPRINTK("Notifying backend via event channel %d\n",
  6.1049 +	        tp->evtchn);
  6.1050 +
  6.1051 +	notify_remote_via_irq(tp->irq);
  6.1052 +
  6.1053 +	spin_unlock_irq(&tp->tx_lock);
  6.1054 +	return offset;
  6.1055 +}
  6.1056 +
  6.1057 +
  6.1058 +static void tpmif_notify_upperlayer(struct tpm_private *tp)
  6.1059 +{
  6.1060 +	/*
  6.1061 +	 * Notify upper layer about the state of the connection
  6.1062 +	 * to the BE.
  6.1063 +	 */
  6.1064 +	if (tp->is_connected) {
  6.1065 +		vtpm_vd_status(TPM_VD_STATUS_CONNECTED);
  6.1066 +	} else {
  6.1067 +		vtpm_vd_status(TPM_VD_STATUS_DISCONNECTED);
  6.1068 +	}
  6.1069 +}
  6.1070 +
  6.1071 +
  6.1072 +static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
  6.1073 +{
  6.1074 +	/*
  6.1075 +	 * Don't notify upper layer if we are in suspend mode and
   6.1076 +	 * should disconnect - assumption is that we will resume.
   6.1077 +	 * The mutex keeps apps from sending.
  6.1078 +	 */
  6.1079 +	if (is_connected == 0 && tp->is_suspended == 1) {
  6.1080 +		return;
  6.1081 +	}
  6.1082 +
  6.1083 +	/*
  6.1084 +	 * Unlock the mutex if we are connected again
  6.1085 +	 * after being suspended - now resuming.
  6.1086 +	 * This also removes the suspend state.
  6.1087 +	 */
  6.1088 +	if (is_connected == 1 && tp->is_suspended == 1) {
  6.1089 +		tp->is_suspended = 0;
  6.1090 +		/* unlock, so apps can resume sending */
  6.1091 +		mutex_unlock(&suspend_lock);
  6.1092 +	}
  6.1093 +
  6.1094 +	if (is_connected != tp->is_connected) {
  6.1095 +		tp->is_connected = is_connected;
  6.1096 +		tpmif_notify_upperlayer(tp);
  6.1097 +	}
  6.1098 +}
  6.1099 +
  6.1100 +
  6.1101 +
  6.1102 +/* =================================================================
  6.1103 + * Initialization function.
  6.1104 + * =================================================================
  6.1105 + */
  6.1106 +
  6.1107 +struct tpm_virtual_device tvd = {
  6.1108 +	.max_tx_size = PAGE_SIZE * TPMIF_TX_RING_SIZE,
  6.1109  };
  6.1110  
  6.1111 -static struct platform_device *pdev;
  6.1112 -
  6.1113 -static struct tpmfe_device tpmfe = {
  6.1114 -	.receive = tpm_recv,
  6.1115 -	.status  = tpm_fe_status,
  6.1116 -};
  6.1117 -
  6.1118 -
  6.1119 -static int __init init_xen(void)
  6.1120 +static int __init tpmif_init(void)
  6.1121  {
  6.1122  	int rc;
  6.1123 +	struct tpm_private *tp;
  6.1124  
  6.1125  	if ((xen_start_info->flags & SIF_INITDOMAIN)) {
  6.1126  		return -EPERM;
  6.1127  	}
  6.1128 -	/*
  6.1129 -	 * Register device with the low lever front-end
  6.1130 -	 * driver
  6.1131 -	 */
  6.1132 -	if ((rc = tpm_fe_register_receiver(&tpmfe)) < 0) {
  6.1133 -		goto err_exit;
  6.1134 -	}
  6.1135  
  6.1136 -	/*
  6.1137 -	 * Register our device with the system.
  6.1138 -	 */
  6.1139 -	pdev = platform_device_register_simple("tpm_vtpm", -1, NULL, 0);
  6.1140 -	if (IS_ERR(pdev)) {
  6.1141 -		rc = PTR_ERR(pdev);
  6.1142 -		goto err_unreg_fe;
  6.1143 +	tp = tpm_private_get();
  6.1144 +	if (!tp) {
  6.1145 +		rc = -ENOMEM;
  6.1146 +		goto failexit;
  6.1147  	}
  6.1148  
  6.1149 -	tpm_xen.buffersize = tpmfe.max_tx_size;
  6.1150 +	tvd.tpm_private = tp;
  6.1151 +	rc = init_vtpm(&tvd);
  6.1152 +	if (rc)
  6.1153 +		goto init_vtpm_failed;
  6.1154  
  6.1155 -	if ((rc = tpm_register_hardware(&pdev->dev, &tpm_xen)) < 0) {
  6.1156 -		goto err_unreg_pdev;
  6.1157 +	IPRINTK("Initialising the vTPM driver.\n");
  6.1158 +	if ( gnttab_alloc_grant_references ( TPMIF_TX_RING_SIZE,
  6.1159 +	                                     &gref_head ) < 0) {
  6.1160 +		rc = -EFAULT;
  6.1161 +		goto gnttab_alloc_failed;
  6.1162  	}
  6.1163  
  6.1164 -	dataex.current_request = NULL;
  6.1165 -	spin_lock_init(&dataex.req_list_lock);
  6.1166 -	init_waitqueue_head(&dataex.req_wait_queue);
  6.1167 -	INIT_LIST_HEAD(&dataex.queued_requests);
  6.1168 -
  6.1169 -	dataex.current_response = NULL;
  6.1170 -	spin_lock_init(&dataex.resp_list_lock);
  6.1171 -	init_waitqueue_head(&dataex.resp_wait_queue);
  6.1172 -
  6.1173 -	disconnect_time = jiffies;
  6.1174 -
  6.1175 +	init_tpm_xenbus();
  6.1176  	return 0;
  6.1177  
  6.1178 +gnttab_alloc_failed:
  6.1179 +	cleanup_vtpm();
  6.1180 +init_vtpm_failed:
  6.1181 +	tpm_private_put();
  6.1182 +failexit:
  6.1183  
  6.1184 -err_unreg_pdev:
  6.1185 -	platform_device_unregister(pdev);
  6.1186 -err_unreg_fe:
  6.1187 -	tpm_fe_unregister_receiver();
  6.1188 -
  6.1189 -err_exit:
  6.1190  	return rc;
  6.1191  }
  6.1192  
  6.1193 -static void __exit cleanup_xen(void)
  6.1194 +
  6.1195 +static void __exit tpmif_exit(void)
  6.1196  {
  6.1197 -	struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
  6.1198 -	if (chip) {
  6.1199 -		tpm_remove_hardware(chip->dev);
  6.1200 -		platform_device_unregister(pdev);
  6.1201 -		tpm_fe_unregister_receiver();
  6.1202 -	}
  6.1203 +	cleanup_vtpm();
  6.1204 +	tpm_private_put();
  6.1205 +	exit_tpm_xenbus();
  6.1206 +	gnttab_free_grant_references(gref_head);
  6.1207  }
  6.1208  
  6.1209 -module_init(init_xen);
  6.1210 -module_exit(cleanup_xen);
  6.1211 +module_init(tpmif_init);
  6.1212 +module_exit(tpmif_exit);
  6.1213 +
  6.1214 +MODULE_LICENSE("Dual BSD/GPL");
  6.1215  
  6.1216 -MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
  6.1217 -MODULE_DESCRIPTION("TPM Driver for XEN (shared memory)");
  6.1218 -MODULE_VERSION("1.0");
  6.1219 -MODULE_LICENSE("GPL");
  6.1220 +/*
  6.1221 + * Local variables:
  6.1222 + *  c-file-style: "linux"
  6.1223 + *  indent-tabs-mode: t
  6.1224 + *  c-indent-level: 8
  6.1225 + *  c-basic-offset: 8
  6.1226 + *  tab-width: 8
  6.1227 + * End:
  6.1228 + */
     7.1 --- a/linux-2.6-xen-sparse/drivers/xen/Kconfig	Thu May 04 11:19:27 2006 +0100
     7.2 +++ b/linux-2.6-xen-sparse/drivers/xen/Kconfig	Thu May 04 11:22:19 2006 +0100
     7.3 @@ -173,14 +173,6 @@ config XEN_BLKDEV_TAP
     7.4  	  to a character device, allowing device prototyping in application
     7.5  	  space.  Odds are that you want to say N here.
     7.6  
     7.7 -config XEN_TPMDEV_FRONTEND
     7.8 -	tristate "TPM-device frontend driver"
     7.9 -	default n
    7.10 -	select TCG_TPM
    7.11 -	select TCG_XEN
    7.12 -	help
    7.13 -	  The TPM-device frontend driver.
    7.14 -
    7.15  config XEN_SCRUB_PAGES
    7.16  	bool "Scrub memory before freeing it to Xen"
    7.17  	default y
     8.1 --- a/linux-2.6-xen-sparse/drivers/xen/Makefile	Thu May 04 11:19:27 2006 +0100
     8.2 +++ b/linux-2.6-xen-sparse/drivers/xen/Makefile	Thu May 04 11:22:19 2006 +0100
     8.3 @@ -16,7 +16,6 @@ obj-$(CONFIG_XEN_TPMDEV_BACKEND)	+= tpmb
     8.4  obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	+= blkfront/
     8.5  obj-$(CONFIG_XEN_NETDEV_FRONTEND)	+= netfront/
     8.6  obj-$(CONFIG_XEN_BLKDEV_TAP)    	+= blktap/
     8.7 -obj-$(CONFIG_XEN_TPMDEV_FRONTEND)	+= tpmfront/
     8.8  obj-$(CONFIG_XEN_PCIDEV_BACKEND)	+= pciback/
     8.9  obj-$(CONFIG_XEN_PCIDEV_FRONTEND)	+= pcifront/
    8.10  
     9.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/Makefile	Thu May 04 11:19:27 2006 +0100
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,2 +0,0 @@
     9.4 -
     9.5 -obj-$(CONFIG_XEN_TPMDEV_FRONTEND)	+= tpmfront.o
    10.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Thu May 04 11:19:27 2006 +0100
    10.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.3 @@ -1,767 +0,0 @@
    10.4 -/*
    10.5 - * Copyright (c) 2005, IBM Corporation
    10.6 - *
    10.7 - * Author: Stefan Berger, stefanb@us.ibm.com
    10.8 - * Grant table support: Mahadevan Gomathisankaran
    10.9 - *
   10.10 - * This code has been derived from drivers/xen/netfront/netfront.c
   10.11 - *
   10.12 - * Copyright (c) 2002-2004, K A Fraser
   10.13 - *
   10.14 - * This program is free software; you can redistribute it and/or
   10.15 - * modify it under the terms of the GNU General Public License version 2
   10.16 - * as published by the Free Software Foundation; or, when distributed
   10.17 - * separately from the Linux kernel or incorporated into other
   10.18 - * software packages, subject to the following license:
   10.19 - * 
   10.20 - * Permission is hereby granted, free of charge, to any person obtaining a copy
   10.21 - * of this source file (the "Software"), to deal in the Software without
   10.22 - * restriction, including without limitation the rights to use, copy, modify,
   10.23 - * merge, publish, distribute, sublicense, and/or sell copies of the Software,
   10.24 - * and to permit persons to whom the Software is furnished to do so, subject to
   10.25 - * the following conditions:
   10.26 - *
   10.27 - * The above copyright notice and this permission notice shall be included in
   10.28 - * all copies or substantial portions of the Software.
   10.29 - *
   10.30 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   10.31 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   10.32 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   10.33 - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   10.34 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   10.35 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
   10.36 - * IN THE SOFTWARE.
   10.37 - */
   10.38 -
   10.39 -#include <linux/config.h>
   10.40 -#include <linux/module.h>
   10.41 -#include <linux/version.h>
   10.42 -#include <linux/kernel.h>
   10.43 -#include <linux/slab.h>
   10.44 -#include <linux/errno.h>
   10.45 -#include <linux/interrupt.h>
   10.46 -#include <linux/init.h>
   10.47 -#include <xen/tpmfe.h>
   10.48 -#include <linux/err.h>
   10.49 -#include <linux/mutex.h>
   10.50 -#include <asm/io.h>
   10.51 -#include <xen/evtchn.h>
   10.52 -#include <xen/interface/grant_table.h>
   10.53 -#include <xen/interface/io/tpmif.h>
   10.54 -#include <asm/uaccess.h>
   10.55 -#include <xen/xenbus.h>
   10.56 -#include <xen/interface/grant_table.h>
   10.57 -
   10.58 -#include "tpmfront.h"
   10.59 -
   10.60 -#undef DEBUG
   10.61 -
   10.62 -/* locally visible variables */
   10.63 -static grant_ref_t gref_head;
   10.64 -static struct tpm_private *my_priv;
   10.65 -
   10.66 -/* local function prototypes */
   10.67 -static irqreturn_t tpmif_int(int irq,
   10.68 -                             void *tpm_priv,
   10.69 -                             struct pt_regs *ptregs);
   10.70 -static void tpmif_rx_action(unsigned long unused);
   10.71 -static int tpmif_connect(struct xenbus_device *dev,
   10.72 -                         struct tpm_private *tp,
   10.73 -                         domid_t domid);
   10.74 -static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
   10.75 -static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
   10.76 -static void tpmif_free_tx_buffers(struct tpm_private *tp);
   10.77 -static void tpmif_set_connected_state(struct tpm_private *tp,
   10.78 -                                      u8 newstate);
   10.79 -static int tpm_xmit(struct tpm_private *tp,
   10.80 -                    const u8 * buf, size_t count, int userbuffer,
   10.81 -                    void *remember);
   10.82 -static void destroy_tpmring(struct tpm_private *tp);
   10.83 -
   10.84 -#define DPRINTK(fmt, args...) \
   10.85 -    pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
   10.86 -#define IPRINTK(fmt, args...) \
   10.87 -    printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
   10.88 -#define WPRINTK(fmt, args...) \
   10.89 -    printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
   10.90 -
   10.91 -#define GRANT_INVALID_REF	0
   10.92 -
   10.93 -
   10.94 -static inline int
   10.95 -tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len,
   10.96 -               int isuserbuffer)
   10.97 -{
   10.98 -	int copied = len;
   10.99 -
  10.100 -	if (len > txb->size) {
  10.101 -		copied = txb->size;
  10.102 -	}
  10.103 -	if (isuserbuffer) {
  10.104 -		if (copy_from_user(txb->data, src, copied))
  10.105 -			return -EFAULT;
  10.106 -	} else {
  10.107 -		memcpy(txb->data, src, copied);
  10.108 -	}
  10.109 -	txb->len = len;
  10.110 -	return copied;
  10.111 -}
  10.112 -
  10.113 -static inline struct tx_buffer *tx_buffer_alloc(void)
  10.114 -{
  10.115 -	struct tx_buffer *txb = kzalloc(sizeof (struct tx_buffer),
  10.116 -					GFP_KERNEL);
  10.117 -
  10.118 -	if (txb) {
  10.119 -		txb->len = 0;
  10.120 -		txb->size = PAGE_SIZE;
  10.121 -		txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
  10.122 -		if (txb->data == NULL) {
  10.123 -			kfree(txb);
  10.124 -			txb = NULL;
  10.125 -		}
  10.126 -	}
  10.127 -	return txb;
  10.128 -}
  10.129 -
  10.130 -
  10.131 -static inline void tx_buffer_free(struct tx_buffer *txb)
  10.132 -{
  10.133 -	if (txb) {
  10.134 -		free_page((long)txb->data);
  10.135 -		kfree(txb);
  10.136 -	}
  10.137 -}
  10.138 -
  10.139 -/**************************************************************
  10.140 - Utility function for the tpm_private structure
  10.141 -**************************************************************/
  10.142 -static inline void tpm_private_init(struct tpm_private *tp)
  10.143 -{
  10.144 -	spin_lock_init(&tp->tx_lock);
  10.145 -	init_waitqueue_head(&tp->wait_q);
  10.146 -}
  10.147 -
  10.148 -static inline void tpm_private_free(void)
  10.149 -{
  10.150 -	tpmif_free_tx_buffers(my_priv);
  10.151 -	kfree(my_priv);
  10.152 -	my_priv = NULL;
  10.153 -}
  10.154 -
  10.155 -static struct tpm_private *tpm_private_get(void)
  10.156 -{
  10.157 -	int err;
  10.158 -	if (!my_priv) {
  10.159 -		my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
  10.160 -		if (my_priv) {
  10.161 -			tpm_private_init(my_priv);
  10.162 -			err = tpmif_allocate_tx_buffers(my_priv);
  10.163 -			if (err < 0) {
  10.164 -				tpm_private_free();
  10.165 -			}
  10.166 -		}
  10.167 -	}
  10.168 -	return my_priv;
  10.169 -}
  10.170 -
  10.171 -/**************************************************************
  10.172 -
  10.173 - The interface to let the tpm plugin register its callback
  10.174 - function and send data to another partition using this module
  10.175 -
  10.176 -**************************************************************/
  10.177 -
  10.178 -static DEFINE_MUTEX(upperlayer_lock);
  10.179 -static DEFINE_MUTEX(suspend_lock);
  10.180 -static struct tpmfe_device *upperlayer_tpmfe;
  10.181 -
  10.182 -/*
  10.183 - * Send data via this module by calling this function
  10.184 - */
  10.185 -int tpm_fe_send(struct tpm_private *tp, const u8 * buf, size_t count, void *ptr)
  10.186 -{
  10.187 -	int sent;
  10.188 -
  10.189 -	mutex_lock(&suspend_lock);
  10.190 -	sent = tpm_xmit(tp, buf, count, 0, ptr);
  10.191 -	mutex_unlock(&suspend_lock);
  10.192 -
  10.193 -	return sent;
  10.194 -}
  10.195 -EXPORT_SYMBOL(tpm_fe_send);
  10.196 -
  10.197 -/*
  10.198 - * Register a callback for receiving data from this module
  10.199 - */
  10.200 -int tpm_fe_register_receiver(struct tpmfe_device *tpmfe_dev)
  10.201 -{
  10.202 -	int rc = 0;
  10.203 -
  10.204 -	mutex_lock(&upperlayer_lock);
  10.205 -	if (NULL == upperlayer_tpmfe) {
  10.206 -		upperlayer_tpmfe = tpmfe_dev;
  10.207 -		tpmfe_dev->max_tx_size = TPMIF_TX_RING_SIZE * PAGE_SIZE;
  10.208 -		tpmfe_dev->tpm_private = tpm_private_get();
  10.209 -		if (!tpmfe_dev->tpm_private) {
  10.210 -			rc = -ENOMEM;
  10.211 -		}
  10.212 -	} else {
  10.213 -		rc = -EBUSY;
  10.214 -	}
  10.215 -	mutex_unlock(&upperlayer_lock);
  10.216 -	return rc;
  10.217 -}
  10.218 -EXPORT_SYMBOL(tpm_fe_register_receiver);
  10.219 -
  10.220 -/*
  10.221 - * Unregister the callback for receiving data from this module
  10.222 - */
  10.223 -void tpm_fe_unregister_receiver(void)
  10.224 -{
  10.225 -	mutex_lock(&upperlayer_lock);
  10.226 -	upperlayer_tpmfe = NULL;
  10.227 -	mutex_unlock(&upperlayer_lock);
  10.228 -}
  10.229 -EXPORT_SYMBOL(tpm_fe_unregister_receiver);
  10.230 -
  10.231 -/*
  10.232 - * Call this function to send data to the upper layer's
  10.233 - * registered receiver function.
  10.234 - */
  10.235 -static int tpm_fe_send_upperlayer(const u8 * buf, size_t count,
  10.236 -                                  const void *ptr)
  10.237 -{
  10.238 -	int rc = 0;
  10.239 -
  10.240 -	mutex_lock(&upperlayer_lock);
  10.241 -
  10.242 -	if (upperlayer_tpmfe && upperlayer_tpmfe->receive)
  10.243 -		rc = upperlayer_tpmfe->receive(buf, count, ptr);
  10.244 -
  10.245 -	mutex_unlock(&upperlayer_lock);
  10.246 -	return rc;
  10.247 -}
  10.248 -
  10.249 -/**************************************************************
  10.250 - XENBUS support code
  10.251 -**************************************************************/
  10.252 -
  10.253 -static int setup_tpmring(struct xenbus_device *dev,
  10.254 -                         struct tpm_private *tp)
  10.255 -{
  10.256 -	tpmif_tx_interface_t *sring;
  10.257 -	int err;
  10.258 -
  10.259 -	tp->ring_ref = GRANT_INVALID_REF;
  10.260 -
  10.261 -	sring = (void *)__get_free_page(GFP_KERNEL);
  10.262 -	if (!sring) {
  10.263 -		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
  10.264 -		return -ENOMEM;
  10.265 -	}
  10.266 -	tp->tx = sring;
  10.267 -
  10.268 -	err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
  10.269 -	if (err < 0) {
  10.270 -		free_page((unsigned long)sring);
  10.271 -		tp->tx = NULL;
  10.272 -		xenbus_dev_fatal(dev, err, "allocating grant reference");
  10.273 -		goto fail;
  10.274 -	}
  10.275 -	tp->ring_ref = err;
  10.276 -
  10.277 -	err = tpmif_connect(dev, tp, dev->otherend_id);
  10.278 -	if (err)
  10.279 -		goto fail;
  10.280 -
  10.281 -	return 0;
  10.282 -fail:
  10.283 -	destroy_tpmring(tp);
  10.284 -	return err;
  10.285 -}
  10.286 -
  10.287 -
  10.288 -static void destroy_tpmring(struct tpm_private *tp)
  10.289 -{
  10.290 -	tpmif_set_connected_state(tp, 0);
  10.291 -
  10.292 -	if (tp->ring_ref != GRANT_INVALID_REF) {
  10.293 -		gnttab_end_foreign_access(tp->ring_ref, 0,
  10.294 -					  (unsigned long)tp->tx);
  10.295 -		tp->ring_ref = GRANT_INVALID_REF;
  10.296 -		tp->tx = NULL;
  10.297 -	}
  10.298 -
  10.299 -	if (tp->irq)
  10.300 -		unbind_from_irqhandler(tp->irq, tp);
  10.301 -
  10.302 -	tp->evtchn = tp->irq = 0;
  10.303 -}
  10.304 -
  10.305 -
  10.306 -static int talk_to_backend(struct xenbus_device *dev,
  10.307 -                           struct tpm_private *tp)
  10.308 -{
  10.309 -	const char *message = NULL;
  10.310 -	int err;
  10.311 -	xenbus_transaction_t xbt;
  10.312 -
  10.313 -	err = setup_tpmring(dev, tp);
  10.314 -	if (err) {
  10.315 -		xenbus_dev_fatal(dev, err, "setting up ring");
  10.316 -		goto out;
  10.317 -	}
  10.318 -
  10.319 -again:
  10.320 -	err = xenbus_transaction_start(&xbt);
  10.321 -	if (err) {
  10.322 -		xenbus_dev_fatal(dev, err, "starting transaction");
  10.323 -		goto destroy_tpmring;
  10.324 -	}
  10.325 -
  10.326 -	err = xenbus_printf(xbt, dev->nodename,
  10.327 -	                    "ring-ref","%u", tp->ring_ref);
  10.328 -	if (err) {
  10.329 -		message = "writing ring-ref";
  10.330 -		goto abort_transaction;
  10.331 -	}
  10.332 -
  10.333 -	err = xenbus_printf(xbt, dev->nodename,
  10.334 -			    "event-channel", "%u", tp->evtchn);
  10.335 -	if (err) {
  10.336 -		message = "writing event-channel";
  10.337 -		goto abort_transaction;
  10.338 -	}
  10.339 -
  10.340 -	err = xenbus_transaction_end(xbt, 0);
  10.341 -	if (err == -EAGAIN)
  10.342 -		goto again;
  10.343 -	if (err) {
  10.344 -		xenbus_dev_fatal(dev, err, "completing transaction");
  10.345 -		goto destroy_tpmring;
  10.346 -	}
  10.347 -
  10.348 -	xenbus_switch_state(dev, XenbusStateConnected);
  10.349 -
  10.350 -	return 0;
  10.351 -
  10.352 -abort_transaction:
  10.353 -	xenbus_transaction_end(xbt, 1);
  10.354 -	if (message)
  10.355 -		xenbus_dev_error(dev, err, "%s", message);
  10.356 -destroy_tpmring:
  10.357 -	destroy_tpmring(tp);
  10.358 -out:
  10.359 -	return err;
  10.360 -}
  10.361 -
  10.362 -/**
  10.363 - * Callback received when the backend's state changes.
  10.364 - */
  10.365 -static void backend_changed(struct xenbus_device *dev,
  10.366 -			    XenbusState backend_state)
  10.367 -{
  10.368 -	struct tpm_private *tp = dev->data;
  10.369 -	DPRINTK("\n");
  10.370 -
  10.371 -	switch (backend_state) {
  10.372 -	case XenbusStateInitialising:
  10.373 -	case XenbusStateInitWait:
  10.374 -	case XenbusStateInitialised:
  10.375 -	case XenbusStateUnknown:
  10.376 -		break;
  10.377 -
  10.378 -	case XenbusStateConnected:
  10.379 -		tpmif_set_connected_state(tp, 1);
  10.380 -		break;
  10.381 -
  10.382 -	case XenbusStateClosing:
  10.383 -		tpmif_set_connected_state(tp, 0);
  10.384 -		break;
  10.385 -
  10.386 -	case XenbusStateClosed:
  10.387 -		if (tp->is_suspended == 0) {
  10.388 -			device_unregister(&dev->dev);
  10.389 -		}
  10.390 -		xenbus_switch_state(dev, XenbusStateClosed);
  10.391 -		break;
  10.392 -	}
  10.393 -}
  10.394 -
  10.395 -
  10.396 -static int tpmfront_probe(struct xenbus_device *dev,
  10.397 -                          const struct xenbus_device_id *id)
  10.398 -{
  10.399 -	int err;
  10.400 -	int handle;
  10.401 -	struct tpm_private *tp = tpm_private_get();
  10.402 -
  10.403 -	if (!tp)
  10.404 -		return -ENOMEM;
  10.405 -
  10.406 -	err = xenbus_scanf(XBT_NULL, dev->nodename,
  10.407 -	                   "handle", "%i", &handle);
  10.408 -	if (XENBUS_EXIST_ERR(err))
  10.409 -		return err;
  10.410 -
  10.411 -	if (err < 0) {
  10.412 -		xenbus_dev_fatal(dev,err,"reading virtual-device");
  10.413 -		return err;
  10.414 -	}
  10.415 -
  10.416 -	tp->dev = dev;
  10.417 -	dev->data = tp;
  10.418 -
  10.419 -	err = talk_to_backend(dev, tp);
  10.420 -	if (err) {
  10.421 -		tpm_private_free();
  10.422 -		dev->data = NULL;
  10.423 -		return err;
  10.424 -	}
  10.425 -	return 0;
  10.426 -}
  10.427 -
  10.428 -
  10.429 -static int tpmfront_remove(struct xenbus_device *dev)
  10.430 -{
  10.431 -	struct tpm_private *tp = (struct tpm_private *)dev->data;
  10.432 -	destroy_tpmring(tp);
  10.433 -	return 0;
  10.434 -}
  10.435 -
  10.436 -static int tpmfront_suspend(struct xenbus_device *dev)
  10.437 -{
  10.438 -	struct tpm_private *tp = (struct tpm_private *)dev->data;
  10.439 -	u32 ctr;
  10.440 -
  10.441 -	/* lock, so no app can send */
  10.442 -	mutex_lock(&suspend_lock);
  10.443 -	xenbus_switch_state(dev, XenbusStateClosed);
  10.444 -	tp->is_suspended = 1;
  10.445 -
  10.446 -	for (ctr = 0; atomic_read(&tp->tx_busy) && ctr <= 25; ctr++) {
  10.447 -		if ((ctr % 10) == 0)
  10.448 -			printk("TPM-FE [INFO]: Waiting for outstanding request.\n");
  10.449 -		/*
  10.450 -		 * Wait for a request to be responded to.
  10.451 -		 */
  10.452 -		interruptible_sleep_on_timeout(&tp->wait_q, 100);
  10.453 -	}
  10.454 -
  10.455 -	if (atomic_read(&tp->tx_busy)) {
  10.456 -		/*
  10.457 -		 * A temporary work-around.
  10.458 -		 */
  10.459 -		printk("TPM-FE [WARNING]: Resetting busy flag.");
  10.460 -		atomic_set(&tp->tx_busy, 0);
  10.461 -	}
  10.462 -
  10.463 -	return 0;
  10.464 -}
  10.465 -
  10.466 -static int tpmfront_resume(struct xenbus_device *dev)
  10.467 -{
  10.468 -	struct tpm_private *tp = (struct tpm_private *)dev->data;
  10.469 -	destroy_tpmring(tp);
  10.470 -	return talk_to_backend(dev, tp);
  10.471 -}
  10.472 -
  10.473 -static int tpmif_connect(struct xenbus_device *dev,
  10.474 -                         struct tpm_private *tp,
  10.475 -                         domid_t domid)
  10.476 -{
  10.477 -	int err;
  10.478 -
  10.479 -	tp->backend_id = domid;
  10.480 -
  10.481 -	err = xenbus_alloc_evtchn(dev, &tp->evtchn);
  10.482 -	if (err)
  10.483 -		return err;
  10.484 -
  10.485 -	err = bind_evtchn_to_irqhandler(tp->evtchn,
  10.486 -					tpmif_int, SA_SAMPLE_RANDOM, "tpmif",
  10.487 -					tp);
  10.488 -	if (err <= 0) {
  10.489 -		WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
  10.490 -		return err;
  10.491 -	}
  10.492 -
  10.493 -	tp->irq = err;
  10.494 -	return 0;
  10.495 -}
  10.496 -
  10.497 -static struct xenbus_device_id tpmfront_ids[] = {
  10.498 -	{ "vtpm" },
  10.499 -	{ "" }
  10.500 -};
  10.501 -
  10.502 -static struct xenbus_driver tpmfront = {
  10.503 -	.name = "vtpm",
  10.504 -	.owner = THIS_MODULE,
  10.505 -	.ids = tpmfront_ids,
  10.506 -	.probe = tpmfront_probe,
  10.507 -	.remove =  tpmfront_remove,
  10.508 -	.resume = tpmfront_resume,
  10.509 -	.otherend_changed = backend_changed,
  10.510 -	.suspend = tpmfront_suspend,
  10.511 -};
  10.512 -
  10.513 -static void __init init_tpm_xenbus(void)
  10.514 -{
  10.515 -	xenbus_register_frontend(&tpmfront);
  10.516 -}
  10.517 -
  10.518 -static void __exit exit_tpm_xenbus(void)
  10.519 -{
  10.520 -	xenbus_unregister_driver(&tpmfront);
  10.521 -}
  10.522 -
  10.523 -static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
  10.524 -{
  10.525 -	unsigned int i;
  10.526 -
  10.527 -	for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
  10.528 -		tp->tx_buffers[i] = tx_buffer_alloc();
  10.529 -		if (!tp->tx_buffers[i]) {
  10.530 -			tpmif_free_tx_buffers(tp);
  10.531 -			return -ENOMEM;
  10.532 -		}
  10.533 -	}
  10.534 -	return 0;
  10.535 -}
  10.536 -
  10.537 -static void tpmif_free_tx_buffers(struct tpm_private *tp)
  10.538 -{
  10.539 -	unsigned int i;
  10.540 -
  10.541 -	for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
  10.542 -		tx_buffer_free(tp->tx_buffers[i]);
  10.543 -	}
  10.544 -}
  10.545 -
  10.546 -static void tpmif_rx_action(unsigned long priv)
  10.547 -{
  10.548 -	struct tpm_private *tp = (struct tpm_private *)priv;
  10.549 -
  10.550 -	int i = 0;
  10.551 -	unsigned int received;
  10.552 -	unsigned int offset = 0;
  10.553 -	u8 *buffer;
  10.554 -	tpmif_tx_request_t *tx;
  10.555 -	tx = &tp->tx->ring[i].req;
  10.556 -
  10.557 -	received = tx->size;
  10.558 -
  10.559 -	buffer = kmalloc(received, GFP_KERNEL);
  10.560 -	if (NULL == buffer) {
  10.561 -		goto exit;
  10.562 -	}
  10.563 -
  10.564 -	for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
  10.565 -		struct tx_buffer *txb = tp->tx_buffers[i];
  10.566 -		tpmif_tx_request_t *tx;
  10.567 -		unsigned int tocopy;
  10.568 -
  10.569 -		tx = &tp->tx->ring[i].req;
  10.570 -		tocopy = tx->size;
  10.571 -		if (tocopy > PAGE_SIZE) {
  10.572 -			tocopy = PAGE_SIZE;
  10.573 -		}
  10.574 -
  10.575 -		memcpy(&buffer[offset], txb->data, tocopy);
  10.576 -
  10.577 -		gnttab_release_grant_reference(&gref_head, tx->ref);
  10.578 -
  10.579 -		offset += tocopy;
  10.580 -	}
  10.581 -
  10.582 -	tpm_fe_send_upperlayer(buffer, received, tp->tx_remember);
  10.583 -	kfree(buffer);
  10.584 -
  10.585 -exit:
  10.586 -	atomic_set(&tp->tx_busy, 0);
  10.587 -	wake_up_interruptible(&tp->wait_q);
  10.588 -}
  10.589 -
  10.590 -
  10.591 -static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
  10.592 -{
  10.593 -	struct tpm_private *tp = tpm_priv;
  10.594 -	unsigned long flags;
  10.595 -
  10.596 -	spin_lock_irqsave(&tp->tx_lock, flags);
  10.597 -	tpmif_rx_tasklet.data = (unsigned long)tp;
  10.598 -	tasklet_schedule(&tpmif_rx_tasklet);
  10.599 -	spin_unlock_irqrestore(&tp->tx_lock, flags);
  10.600 -
  10.601 -	return IRQ_HANDLED;
  10.602 -}
  10.603 -
  10.604 -
  10.605 -static int tpm_xmit(struct tpm_private *tp,
  10.606 -                    const u8 * buf, size_t count, int isuserbuffer,
  10.607 -                    void *remember)
  10.608 -{
  10.609 -	tpmif_tx_request_t *tx;
  10.610 -	TPMIF_RING_IDX i;
  10.611 -	unsigned int offset = 0;
  10.612 -
  10.613 -	spin_lock_irq(&tp->tx_lock);
  10.614 -
  10.615 -	if (unlikely(atomic_read(&tp->tx_busy))) {
  10.616 -		printk("tpm_xmit: There's an outstanding request/response "
  10.617 -		       "on the way!\n");
  10.618 -		spin_unlock_irq(&tp->tx_lock);
  10.619 -		return -EBUSY;
  10.620 -	}
  10.621 -
  10.622 -	if (tp->is_connected != 1) {
  10.623 -		spin_unlock_irq(&tp->tx_lock);
  10.624 -		return -EIO;
  10.625 -	}
  10.626 -
  10.627 -	for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
  10.628 -		struct tx_buffer *txb = tp->tx_buffers[i];
  10.629 -		int copied;
  10.630 -
  10.631 -		if (NULL == txb) {
  10.632 -			DPRINTK("txb (i=%d) is NULL. buffers initilized?\n"
  10.633 -				"Not transmitting anything!\n", i);
  10.634 -			spin_unlock_irq(&tp->tx_lock);
  10.635 -			return -EFAULT;
  10.636 -		}
  10.637 -		copied = tx_buffer_copy(txb, &buf[offset], count,
  10.638 -		                        isuserbuffer);
  10.639 -		if (copied < 0) {
  10.640 -			/* An error occurred */
  10.641 -			spin_unlock_irq(&tp->tx_lock);
  10.642 -			return copied;
  10.643 -		}
  10.644 -		count -= copied;
  10.645 -		offset += copied;
  10.646 -
  10.647 -		tx = &tp->tx->ring[i].req;
  10.648 -
  10.649 -		tx->addr = virt_to_machine(txb->data);
  10.650 -		tx->size = txb->len;
  10.651 -
  10.652 -		DPRINTK("First 4 characters sent by TPM-FE are 0x%02x 0x%02x 0x%02x 0x%02x\n",
  10.653 -		        txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
  10.654 -
  10.655 -		/* get the granttable reference for this page */
  10.656 -		tx->ref = gnttab_claim_grant_reference(&gref_head);
  10.657 -
  10.658 -		if (-ENOSPC == tx->ref) {
  10.659 -			spin_unlock_irq(&tp->tx_lock);
  10.660 -			DPRINTK(" Grant table claim reference failed in func:%s line:%d file:%s\n", __FUNCTION__, __LINE__, __FILE__);
  10.661 -			return -ENOSPC;
  10.662 -		}
  10.663 -		gnttab_grant_foreign_access_ref( tx->ref,
  10.664 -		                                 tp->backend_id,
  10.665 -		                                 (tx->addr >> PAGE_SHIFT),
  10.666 -		                                 0 /*RW*/);
  10.667 -		wmb();
  10.668 -	}
  10.669 -
  10.670 -	atomic_set(&tp->tx_busy, 1);
  10.671 -	tp->tx_remember = remember;
  10.672 -	mb();
  10.673 -
  10.674 -	DPRINTK("Notifying backend via event channel %d\n",
  10.675 -	        tp->evtchn);
  10.676 -
  10.677 -	notify_remote_via_irq(tp->irq);
  10.678 -
  10.679 -	spin_unlock_irq(&tp->tx_lock);
  10.680 -	return offset;
  10.681 -}
  10.682 -
  10.683 -
  10.684 -static void tpmif_notify_upperlayer(struct tpm_private *tp)
  10.685 -{
  10.686 -	/*
  10.687 -	 * Notify upper layer about the state of the connection
  10.688 -	 * to the BE.
  10.689 -	 */
  10.690 -	mutex_lock(&upperlayer_lock);
  10.691 -
  10.692 -	if (upperlayer_tpmfe != NULL) {
  10.693 -		if (tp->is_connected) {
  10.694 -			upperlayer_tpmfe->status(TPMFE_STATUS_CONNECTED);
  10.695 -		} else {
  10.696 -			upperlayer_tpmfe->status(0);
  10.697 -		}
  10.698 -	}
  10.699 -	mutex_unlock(&upperlayer_lock);
  10.700 -}
  10.701 -
  10.702 -
  10.703 -static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
  10.704 -{
  10.705 -	/*
  10.706 -	 * Don't notify upper layer if we are in suspend mode and
  10.707 -	 * should disconnect - assumption is that we will resume
  10.708 -	 * The mutex keeps apps from sending.
  10.709 -	 */
  10.710 -	if (is_connected == 0 && tp->is_suspended == 1) {
  10.711 -		return;
  10.712 -	}
  10.713 -
  10.714 -	/*
  10.715 -	 * Unlock the mutex if we are connected again
  10.716 -	 * after being suspended - now resuming.
  10.717 -	 * This also removes the suspend state.
  10.718 -	 */
  10.719 -	if (is_connected == 1 && tp->is_suspended == 1) {
  10.720 -		tp->is_suspended = 0;
  10.721 -		/* unlock, so apps can resume sending */
  10.722 -		mutex_unlock(&suspend_lock);
  10.723 -	}
  10.724 -
  10.725 -	if (is_connected != tp->is_connected) {
  10.726 -		tp->is_connected = is_connected;
  10.727 -		tpmif_notify_upperlayer(tp);
  10.728 -	}
  10.729 -}
  10.730 -
  10.731 -
  10.732 -/* =================================================================
  10.733 - * Initialization function.
  10.734 - * =================================================================
  10.735 - */
  10.736 -
  10.737 -static int __init tpmif_init(void)
  10.738 -{
  10.739 -	IPRINTK("Initialising the vTPM driver.\n");
  10.740 -	if ( gnttab_alloc_grant_references ( TPMIF_TX_RING_SIZE,
  10.741 -	                                     &gref_head ) < 0) {
  10.742 -		return -EFAULT;
  10.743 -	}
  10.744 -
  10.745 -	init_tpm_xenbus();
  10.746 -
  10.747 -	return 0;
  10.748 -}
  10.749 -
  10.750 -module_init(tpmif_init);
  10.751 -
  10.752 -static void __exit tpmif_exit(void)
  10.753 -{
  10.754 -	exit_tpm_xenbus();
  10.755 -	gnttab_free_grant_references(gref_head);
  10.756 -}
  10.757 -
  10.758 -module_exit(tpmif_exit);
  10.759 -
  10.760 -MODULE_LICENSE("Dual BSD/GPL");
  10.761 -
  10.762 -/*
  10.763 - * Local variables:
  10.764 - *  c-file-style: "linux"
  10.765 - *  indent-tabs-mode: t
  10.766 - *  c-indent-level: 8
  10.767 - *  c-basic-offset: 8
  10.768 - *  tab-width: 8
  10.769 - * End:
  10.770 - */
    11.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h	Thu May 04 11:19:27 2006 +0100
    11.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.3 @@ -1,40 +0,0 @@
    11.4 -#ifndef TPM_FRONT_H
    11.5 -#define TPM_FRONT_H
    11.6 -
    11.7 -struct tpm_private {
    11.8 -	tpmif_tx_interface_t *tx;
    11.9 -	unsigned int evtchn;
   11.10 -	unsigned int irq;
   11.11 -	u8 is_connected;
   11.12 -	u8 is_suspended;
   11.13 -
   11.14 -	spinlock_t tx_lock;
   11.15 -
   11.16 -	struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
   11.17 -
   11.18 -	atomic_t tx_busy;
   11.19 -	void *tx_remember;
   11.20 -	domid_t backend_id;
   11.21 -	wait_queue_head_t wait_q;
   11.22 -
   11.23 -	struct xenbus_device *dev;
   11.24 -	int ring_ref;
   11.25 -};
   11.26 -
   11.27 -struct tx_buffer {
   11.28 -	unsigned int size;	// available space in data
   11.29 -	unsigned int len;	// used space in data
   11.30 -	unsigned char *data;	// pointer to a page
   11.31 -};
   11.32 -
   11.33 -#endif
   11.34 -
   11.35 -/*
   11.36 - * Local variables:
   11.37 - *  c-file-style: "linux"
   11.38 - *  indent-tabs-mode: t
   11.39 - *  c-indent-level: 8
   11.40 - *  c-basic-offset: 8
   11.41 - *  tab-width: 8
   11.42 - * End:
   11.43 - */
    12.1 --- a/linux-2.6-xen-sparse/include/xen/tpmfe.h	Thu May 04 11:19:27 2006 +0100
    12.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.3 @@ -1,40 +0,0 @@
    12.4 -#ifndef TPM_FE_H
    12.5 -#define TPM_FE_H
    12.6 -
    12.7 -struct tpm_private;
    12.8 -
    12.9 -struct tpmfe_device {
   12.10 -	/*
   12.11 -	 * Let upper layer receive data from front-end
   12.12 -	 */
   12.13 -	int (*receive)(const u8 *buffer, size_t count, const void *ptr);
   12.14 -	/*
   12.15 -	 * Indicate the status of the front-end to the upper
   12.16 -	 * layer.
   12.17 -	 */
   12.18 -	void (*status)(unsigned int flags);
   12.19 -
   12.20 -	/*
   12.21 -	 * This field indicates the maximum size the driver can
   12.22 -	 * transfer in one chunk. It is filled out by the front-end
   12.23 -	 * driver and should be propagated to the generic tpm driver
   12.24 -	 * for allocation of buffers.
   12.25 -	 */
   12.26 -	unsigned int max_tx_size;
   12.27 -	/*
   12.28 -	 * The following is a private structure of the underlying
   12.29 -	 * driver. It's expected as first parameter in the send function.
   12.30 -	 */
   12.31 -	struct tpm_private *tpm_private;
   12.32 -};
   12.33 -
   12.34 -enum {
   12.35 -	TPMFE_STATUS_DISCONNECTED = 0x0,
   12.36 -	TPMFE_STATUS_CONNECTED = 0x1
   12.37 -};
   12.38 -
   12.39 -int tpm_fe_send(struct tpm_private * tp, const u8 * buf, size_t count, void *ptr);
   12.40 -int tpm_fe_register_receiver(struct tpmfe_device *);
   12.41 -void tpm_fe_unregister_receiver(void);
   12.42 -
   12.43 -#endif