ia64/xen-unstable

changeset 6574:ff536c11c178

TPM front-end and back-end implementation, and configuration updates.

Signed-off-by: Stefan Berger <stefanb@us.ibm.com>
Signed-off-by: Kylene Hall <kjhall@us.ibm.com>
Signed-off-by: Mahadevan Gomathisankaran <gmdev@iastate.edu>
Signed-off-by: Steven Hand <steven@xensource.com>
author shand@ubuntu.eng.hq.xensource.com
date Tue Aug 30 11:48:08 2005 -0800 (2005-08-30)
parents 9ba52ccadc06
children 0db6e392c380
files MSG linux-2.6-xen-sparse/arch/xen/Kconfig linux-2.6-xen-sparse/arch/xen/Kconfig.drivers linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32 linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32 linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32 linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64 linux-2.6-xen-sparse/drivers/char/tpm/Kconfig.domU linux-2.6-xen-sparse/drivers/char/tpm/Makefile linux-2.6-xen-sparse/drivers/char/tpm/tpm.c linux-2.6-xen-sparse/drivers/char/tpm/tpm.h linux-2.6-xen-sparse/drivers/char/tpm/tpm_atmel.c linux-2.6-xen-sparse/drivers/char/tpm/tpm_nopci.c linux-2.6-xen-sparse/drivers/char/tpm/tpm_nopci.h linux-2.6-xen-sparse/drivers/char/tpm/tpm_nsc.c linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c linux-2.6-xen-sparse/drivers/xen/Makefile linux-2.6-xen-sparse/drivers/xen/tpmback/Makefile linux-2.6-xen-sparse/drivers/xen/tpmback/common.h linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c linux-2.6-xen-sparse/drivers/xen/tpmfront/Makefile linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h linux-2.6-xen-sparse/include/linux/tpmfe.h xen/include/public/io/tpmif.h xen/include/public/xen.h
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/MSG	Tue Aug 30 11:48:08 2005 -0800
     1.3 @@ -0,0 +1,7 @@
     1.4 +TPM FE/BE code using xenbus (some control message stuff still there; to 
     1.5 +be removed soon). 
     1.6 +
     1.7 +Signed-off-by: Stefan Berger <stefanb@us.ibm.com>
     1.8 +Signed-off-by: Kylene Hall <kjhall@us.ibm.com>
     1.9 +Signed-off-by: Mahadevan Gomathisankaran <gmdev@iastate.edu>
    1.10 +Signed-off-by: Steven Hand <steven@xensource.com>
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/Kconfig	Tue Aug 30 11:39:25 2005 -0800
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/Kconfig	Tue Aug 30 11:48:08 2005 -0800
     2.3 @@ -70,6 +70,27 @@ config XEN_NETDEV_BACKEND
     2.4  	  network devices to other guests via a high-performance shared-memory
     2.5  	  interface.
     2.6  
     2.7 +config XEN_TPMDEV_FRONTEND
     2.8 +        bool "TPM-device frontend driver"
     2.9 +        default y
    2.10 +        help
    2.11 +          The TPM-device frontend driver.
    2.12 +
    2.13 +config XEN_TPMDEV_BACKEND
    2.14 +        bool "TPM-device backend driver"
    2.15 +        default n
    2.16 +        help
     2.17 +          The TPM-device backend driver.
    2.18 +
    2.19 +config XEN_TPMDEV_CLOSE_IF_VTPM_FAILS
    2.20 +        bool "TPM backend closes upon vTPM failure"
    2.21 +        depends on XEN_TPMDEV_BACKEND
    2.22 +        default n
    2.23 +        help
     2.24 +          If the vTPM in userspace indicates a failure, the TPM backend closes
     2.25 +          the channel to the corresponding domain.
    2.26 +          Say Y if you want this feature.
    2.27 +
    2.28  config XEN_BLKDEV_FRONTEND
    2.29  	bool "Block-device frontend driver"
    2.30  	default y
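XEN_TPMDEV_CLOSE_IF_VTPM_FAILS is a plain compile-time switch, so the backend's reaction to a userspace vTPM failure is typically gated with an #ifdef on that symbol. A minimal sketch of what such a guard could look like (hypothetical: handle_vtpm_status and close_backend_channel are placeholder names, not code from this changeset):

    /* Sketch only: placeholder names, not part of this patch. */
    static void handle_vtpm_status(int domid, int vtpm_status)
    {
    	if (vtpm_status == 0)
    		return;
    #ifdef CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS
    	close_backend_channel(domid);	/* hypothetical helper */
    #else
    	printk(KERN_WARNING "vTPM for domain %d reported failure %d\n",
    	       domid, vtpm_status);
    #endif
    }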
     3.1 --- a/linux-2.6-xen-sparse/arch/xen/Kconfig.drivers	Tue Aug 30 11:39:25 2005 -0800
     3.2 +++ b/linux-2.6-xen-sparse/arch/xen/Kconfig.drivers	Tue Aug 30 11:48:08 2005 -0800
     3.3 @@ -49,6 +49,10 @@ source "drivers/infiniband/Kconfig"
     3.4  endif
     3.5  
     3.6  if !XEN_PHYSDEV_ACCESS
     3.7 +source "drivers/char/tpm/Kconfig.domU"
     3.8 +endif
     3.9 +
    3.10 +if !XEN_PHYSDEV_ACCESS
    3.11  
    3.12  menu "Character devices"
    3.13  
     4.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32	Tue Aug 30 11:39:25 2005 -0800
     4.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32	Tue Aug 30 11:48:08 2005 -0800
     4.3 @@ -15,6 +15,9 @@ CONFIG_XEN_PHYSDEV_ACCESS=y
     4.4  CONFIG_XEN_BLKDEV_BACKEND=y
     4.5  # CONFIG_XEN_BLKDEV_TAP_BE is not set
     4.6  CONFIG_XEN_NETDEV_BACKEND=y
     4.7 +# CONFIG_XEN_TPMDEV_FRONTEND is not set
     4.8 +CONFIG_XEN_TPMDEV_BACKEND=y
     4.9 +# CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS is not set
    4.10  CONFIG_XEN_BLKDEV_FRONTEND=y
    4.11  CONFIG_XEN_NETDEV_FRONTEND=y
    4.12  CONFIG_XEN_NETDEV_GRANT_TX=y
    4.13 @@ -852,7 +855,9 @@ CONFIG_DRM_SIS=m
    4.14  #
    4.15  # TPM devices
    4.16  #
    4.17 -# CONFIG_TCG_TPM is not set
    4.18 +CONFIG_TCG_TPM=m
    4.19 +CONFIG_TCG_NSC=m
    4.20 +CONFIG_TCG_ATMEL=m
    4.21  
    4.22  #
    4.23  # I2C support
     5.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64	Tue Aug 30 11:39:25 2005 -0800
     5.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64	Tue Aug 30 11:48:08 2005 -0800
     5.3 @@ -15,6 +15,9 @@ CONFIG_XEN_PHYSDEV_ACCESS=y
     5.4  CONFIG_XEN_BLKDEV_BACKEND=y
     5.5  # CONFIG_XEN_BLKDEV_TAP_BE is not set
     5.6  CONFIG_XEN_NETDEV_BACKEND=y
     5.7 +# CONFIG_XEN_TPMDEV_BACKEND is not set
     5.8 +# CONFIG_XEN_TPMDEV_FRONTEND is not set
     5.9 +# CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS is not set
    5.10  CONFIG_XEN_BLKDEV_FRONTEND=y
    5.11  CONFIG_XEN_NETDEV_FRONTEND=y
    5.12  CONFIG_XEN_NETDEV_GRANT_TX=y
    5.13 @@ -759,7 +762,9 @@ CONFIG_DRM_SIS=m
    5.14  #
    5.15  # TPM devices
    5.16  #
    5.17 -# CONFIG_TCG_TPM is not set
    5.18 +CONFIG_TCG_TPM=m
    5.19 +CONFIG_TCG_NSC=m
    5.20 +CONFIG_TCG_ATMEL=m
    5.21  
    5.22  #
    5.23  # I2C support
     6.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32	Tue Aug 30 11:39:25 2005 -0800
     6.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32	Tue Aug 30 11:48:08 2005 -0800
     6.3 @@ -12,6 +12,8 @@ CONFIG_NO_IDLE_HZ=y
     6.4  #
     6.5  # CONFIG_XEN_PRIVILEGED_GUEST is not set
     6.6  # CONFIG_XEN_PHYSDEV_ACCESS is not set
     6.7 +CONFIG_XEN_TPMDEV_FRONTEND=y
     6.8 +# CONFIG_XEN_TPMDEV_BACKEND is not set
     6.9  CONFIG_XEN_BLKDEV_FRONTEND=y
    6.10  CONFIG_XEN_NETDEV_FRONTEND=y
    6.11  CONFIG_XEN_NETDEV_GRANT_TX=y
    6.12 @@ -336,6 +338,8 @@ CONFIG_NETDEVICES=y
    6.13  CONFIG_UNIX98_PTYS=y
    6.14  CONFIG_LEGACY_PTYS=y
    6.15  CONFIG_LEGACY_PTY_COUNT=256
    6.16 +CONFIG_TCG_TPM=y
    6.17 +CONFIG_TCG_XEN=y
    6.18  
    6.19  #
    6.20  # Character devices
     7.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64	Tue Aug 30 11:39:25 2005 -0800
     7.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64	Tue Aug 30 11:48:08 2005 -0800
     7.3 @@ -12,6 +12,8 @@ CONFIG_NO_IDLE_HZ=y
     7.4  #
     7.5  # CONFIG_XEN_PRIVILEGED_GUEST is not set
     7.6  # CONFIG_XEN_PHYSDEV_ACCESS is not set
     7.7 +# CONFIG_XEN_TPMDEV_FRONTEND is not set
     7.8 +# CONFIG_XEN_TPMDEV_BACKEND is not set
     7.9  CONFIG_XEN_BLKDEV_FRONTEND=y
    7.10  CONFIG_XEN_NETDEV_FRONTEND=y
    7.11  CONFIG_XEN_NETDEV_GRANT_TX=y
    7.12 @@ -662,6 +664,8 @@ CONFIG_NETCONSOLE=m
    7.13  CONFIG_INPUT=m
    7.14  CONFIG_UNIX98_PTYS=y
    7.15  # CONFIG_LEGACY_PTYS is not set
    7.16 +CONFIG_TCG_TPM=y
    7.17 +CONFIG_TCG_XEN=y
    7.18  
    7.19  #
    7.20  # Character devices
     8.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32	Tue Aug 30 11:39:25 2005 -0800
     8.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32	Tue Aug 30 11:48:08 2005 -0800
     8.3 @@ -15,8 +15,11 @@ CONFIG_XEN_PHYSDEV_ACCESS=y
     8.4  CONFIG_XEN_BLKDEV_BACKEND=y
     8.5  # CONFIG_XEN_BLKDEV_TAP_BE is not set
     8.6  CONFIG_XEN_NETDEV_BACKEND=y
     8.7 +CONFIG_XEN_TPMDEV_BACKEND=y
     8.8 +# CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS is not set
     8.9  CONFIG_XEN_BLKDEV_FRONTEND=y
    8.10  CONFIG_XEN_NETDEV_FRONTEND=y
    8.11 +CONFIG_XEN_TPMDEV_FRONTEND=y
    8.12  CONFIG_XEN_NETDEV_GRANT_TX=y
    8.13  CONFIG_XEN_NETDEV_GRANT_RX=y
    8.14  # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
     9.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64	Tue Aug 30 11:39:25 2005 -0800
     9.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64	Tue Aug 30 11:48:08 2005 -0800
     9.3 @@ -15,8 +15,10 @@ CONFIG_XEN_PHYSDEV_ACCESS=y
     9.4  CONFIG_XEN_BLKDEV_BACKEND=y
     9.5  # CONFIG_XEN_BLKDEV_TAP_BE is not set
     9.6  CONFIG_XEN_NETDEV_BACKEND=y
      9.7 +# CONFIG_XEN_TPMDEV_BACKEND is not set
     9.8  CONFIG_XEN_BLKDEV_FRONTEND=y
     9.9  CONFIG_XEN_NETDEV_FRONTEND=y
    9.10 +# CONFIG_XEN_TPMDEV_FRONTEND is not set
    9.11  CONFIG_XEN_NETDEV_GRANT_TX=y
    9.12  CONFIG_XEN_NETDEV_GRANT_RX=y
    9.13  # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/Kconfig.domU	Tue Aug 30 11:48:08 2005 -0800
    10.3 @@ -0,0 +1,30 @@
    10.4 +#
    10.5 +# TPM device configuration
    10.6 +#
    10.7 +
    10.8 +menu "TPM devices"
    10.9 +
   10.10 +config TCG_TPM
   10.11 +	tristate "TPM Support for XEN"
   10.12 +	depends on ARCH_XEN && !XEN_PHYSDEV_ACCESS
   10.13 +	---help---
   10.14 +	  If you want to make TPM security available in your system,
   10.15 +	  say Yes and it will be accessible from within a user domain.  For
   10.16 +	  more information see <http://www.trustedcomputinggroup.org>.
   10.17 +	  An implementation of the Trusted Software Stack (TSS), the
   10.18 +	  userspace enablement piece of the specification, can be
   10.19 +	  obtained at: <http://sourceforge.net/projects/trousers>.  To
   10.20 +	  compile this driver as a module, choose M here; the module
   10.21 +	  will be called tpm. If unsure, say N.
   10.22 +
   10.23 +config TCG_XEN
   10.24 +	tristate "XEN TPM Interface"
   10.25 +	depends on TCG_TPM && ARCH_XEN
   10.26 +	---help---
   10.27 +	  If you want to make TPM support available to a Xen
   10.28 +	  user domain, say Yes and it will
   10.29 +          be accessible from within Linux. To compile this driver
   10.30 +          as a module, choose M here; the module will be called
   10.31 +          tpm_xen.
   10.32 +
   10.33 +endmenu
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/Makefile	Tue Aug 30 11:48:08 2005 -0800
    11.3 @@ -0,0 +1,12 @@
    11.4 +#
    11.5 +# Makefile for the kernel tpm device drivers.
    11.6 +#
    11.7 +ifeq ($(CONFIG_XEN_PHYSDEV_ACCESS),y)
    11.8 +obj-$(CONFIG_TCG_TPM) += tpm.o
    11.9 +obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
   11.10 +obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
   11.11 +obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
   11.12 +else
   11.13 +obj-$(CONFIG_TCG_TPM) += tpm_nopci.o
   11.14 +obj-$(CONFIG_TCG_XEN) += tpm_xen.o
   11.15 +endif
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm.c	Tue Aug 30 11:48:08 2005 -0800
    12.3 @@ -0,0 +1,627 @@
    12.4 +/*
    12.5 + * Copyright (C) 2004 IBM Corporation
    12.6 + *
    12.7 + * Authors:
    12.8 + * Leendert van Doorn <leendert@watson.ibm.com>
    12.9 + * Dave Safford <safford@watson.ibm.com>
   12.10 + * Reiner Sailer <sailer@watson.ibm.com>
   12.11 + * Kylene Hall <kjhall@us.ibm.com>
   12.12 + *
   12.13 + * Maintained by: <tpmdd_devel@lists.sourceforge.net>
   12.14 + *
   12.15 + * Device driver for TCG/TCPA TPM (trusted platform module).
   12.16 + * Specifications at www.trustedcomputinggroup.org
   12.17 + *
   12.18 + * This program is free software; you can redistribute it and/or
   12.19 + * modify it under the terms of the GNU General Public License as
   12.20 + * published by the Free Software Foundation, version 2 of the
   12.21 + * License.
   12.22 + *
   12.23 + * Note, the TPM chip is not interrupt driven (only polling)
   12.24 + * and can have very long timeouts (minutes!). Hence the unusual
   12.25 + * calls to schedule_timeout.
   12.26 + *
   12.27 + */
   12.28 +
   12.29 +#include <linux/sched.h>
   12.30 +#include <linux/poll.h>
   12.31 +#include <linux/spinlock.h>
   12.32 +#include "tpm.h"
   12.33 +
   12.34 +#define	TPM_MINOR			224	/* officially assigned */
   12.35 +
   12.36 +#define	TPM_BUFSIZE			2048
   12.37 +
   12.38 +static LIST_HEAD(tpm_chip_list);
   12.39 +static DEFINE_SPINLOCK(driver_lock);
   12.40 +static int dev_mask[32];
   12.41 +
   12.42 +static void user_reader_timeout(unsigned long ptr)
   12.43 +{
   12.44 +	struct tpm_chip *chip = (struct tpm_chip *) ptr;
   12.45 +
   12.46 +	down(&chip->buffer_mutex);
   12.47 +	atomic_set(&chip->data_pending, 0);
   12.48 +	memset(chip->data_buffer, 0, TPM_BUFSIZE);
   12.49 +	up(&chip->buffer_mutex);
   12.50 +}
   12.51 +
   12.52 +void tpm_time_expired(unsigned long ptr)
   12.53 +{
   12.54 +	int *exp = (int *) ptr;
   12.55 +	*exp = 1;
   12.56 +}
   12.57 +
   12.58 +EXPORT_SYMBOL_GPL(tpm_time_expired);
   12.59 +
   12.60 +/*
   12.61 + * Internal kernel interface to transmit TPM commands
   12.62 + */
   12.63 +static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
   12.64 +			    size_t bufsiz)
   12.65 +{
   12.66 +	ssize_t len;
   12.67 +	u32 count;
   12.68 +	__be32 *native_size;
   12.69 +
   12.70 +	native_size = (__force __be32 *) (buf + 2);
   12.71 +	count = be32_to_cpu(*native_size);
   12.72 +
   12.73 +	if (count == 0)
   12.74 +		return -ENODATA;
   12.75 +	if (count > bufsiz) {
   12.76 +		dev_err(&chip->pci_dev->dev,
   12.77 +			"invalid count value %x %zx \n", count, bufsiz);
   12.78 +		return -E2BIG;
   12.79 +	}
   12.80 +
   12.81 +	down(&chip->tpm_mutex);
   12.82 +
   12.83 +	if ((len = chip->vendor->send(chip, (u8 *) buf, count)) < 0) {
   12.84 +		dev_err(&chip->pci_dev->dev,
   12.85 +			"tpm_transmit: tpm_send: error %zd\n", len);
   12.86 +		return len;
   12.87 +	}
   12.88 +
   12.89 +	down(&chip->timer_manipulation_mutex);
   12.90 +	chip->time_expired = 0;
   12.91 +	init_timer(&chip->device_timer);
   12.92 +	chip->device_timer.function = tpm_time_expired;
   12.93 +	chip->device_timer.expires = jiffies + 2 * 60 * HZ;
   12.94 +	chip->device_timer.data = (unsigned long) &chip->time_expired;
   12.95 +	add_timer(&chip->device_timer);
   12.96 +	up(&chip->timer_manipulation_mutex);
   12.97 +
   12.98 +	do {
   12.99 +		u8 status = inb(chip->vendor->base + 1);
  12.100 +		if ((status & chip->vendor->req_complete_mask) ==
  12.101 +		    chip->vendor->req_complete_val) {
  12.102 +			down(&chip->timer_manipulation_mutex);
  12.103 +			del_singleshot_timer_sync(&chip->device_timer);
  12.104 +			up(&chip->timer_manipulation_mutex);
  12.105 +			goto out_recv;
  12.106 +		}
  12.107 +		set_current_state(TASK_UNINTERRUPTIBLE);
  12.108 +		schedule_timeout(TPM_TIMEOUT);
  12.109 +		rmb();
  12.110 +	} while (!chip->time_expired);
  12.111 +
  12.112 +
  12.113 +	chip->vendor->cancel(chip);
  12.114 +	dev_err(&chip->pci_dev->dev, "Time expired\n");
  12.115 +	up(&chip->tpm_mutex);
  12.116 +	return -EIO;
  12.117 +
  12.118 +out_recv:
  12.119 +	len = chip->vendor->recv(chip, (u8 *) buf, bufsiz);
  12.120 +	if (len < 0)
  12.121 +		dev_err(&chip->pci_dev->dev,
  12.122 +			"tpm_transmit: tpm_recv: error %zd\n", len);
  12.123 +	up(&chip->tpm_mutex);
  12.124 +	return len;
  12.125 +}
  12.126 +
  12.127 +#define TPM_DIGEST_SIZE 20
  12.128 +#define CAP_PCR_RESULT_SIZE 18
  12.129 +static u8 cap_pcr[] = {
  12.130 +	0, 193,			/* TPM_TAG_RQU_COMMAND */
  12.131 +	0, 0, 0, 22,		/* length */
  12.132 +	0, 0, 0, 101,		/* TPM_ORD_GetCapability */
  12.133 +	0, 0, 0, 5,
  12.134 +	0, 0, 0, 4,
  12.135 +	0, 0, 1, 1
  12.136 +};
  12.137 +
  12.138 +#define READ_PCR_RESULT_SIZE 30
  12.139 +static u8 pcrread[] = {
  12.140 +	0, 193,			/* TPM_TAG_RQU_COMMAND */
  12.141 +	0, 0, 0, 14,		/* length */
  12.142 +	0, 0, 0, 21,		/* TPM_ORD_PcrRead */
  12.143 +	0, 0, 0, 0		/* PCR index */
  12.144 +};
  12.145 +
  12.146 +static ssize_t show_pcrs(struct device *dev, char *buf)
  12.147 +{
  12.148 +	u8 data[READ_PCR_RESULT_SIZE];
  12.149 +	ssize_t len;
  12.150 +	int i, j, index, num_pcrs;
  12.151 +	char *str = buf;
  12.152 +
  12.153 +	struct tpm_chip *chip =
  12.154 +	    pci_get_drvdata(container_of(dev, struct pci_dev, dev));
  12.155 +	if (chip == NULL)
  12.156 +		return -ENODEV;
  12.157 +
  12.158 +	memcpy(data, cap_pcr, sizeof(cap_pcr));
  12.159 +	if ((len = tpm_transmit(chip, data, sizeof(data)))
  12.160 +	    < CAP_PCR_RESULT_SIZE)
  12.161 +		return len;
  12.162 +
  12.163 +	num_pcrs = be32_to_cpu(*((__force __be32 *) (data + 14)));
  12.164 +
  12.165 +	for (i = 0; i < num_pcrs; i++) {
  12.166 +		memcpy(data, pcrread, sizeof(pcrread));
  12.167 +		index = cpu_to_be32(i);
  12.168 +		memcpy(data + 10, &index, 4);
  12.169 +		if ((len = tpm_transmit(chip, data, sizeof(data)))
  12.170 +		    < READ_PCR_RESULT_SIZE)
  12.171 +			return len;
  12.172 +		str += sprintf(str, "PCR-%02d: ", i);
  12.173 +		for (j = 0; j < TPM_DIGEST_SIZE; j++)
  12.174 +			str += sprintf(str, "%02X ", *(data + 10 + j));
  12.175 +		str += sprintf(str, "\n");
  12.176 +	}
  12.177 +	return str - buf;
  12.178 +}
  12.179 +
  12.180 +static DEVICE_ATTR(pcrs, S_IRUGO, show_pcrs, NULL);
  12.181 +
  12.182 +#define  READ_PUBEK_RESULT_SIZE 314
  12.183 +static u8 readpubek[] = {
  12.184 +	0, 193,			/* TPM_TAG_RQU_COMMAND */
  12.185 +	0, 0, 0, 30,		/* length */
  12.186 +	0, 0, 0, 124,		/* TPM_ORD_ReadPubek */
  12.187 +};
  12.188 +
  12.189 +static ssize_t show_pubek(struct device *dev, char *buf)
  12.190 +{
  12.191 +	u8 data[READ_PUBEK_RESULT_SIZE];
  12.192 +	ssize_t len;
  12.193 +	__be32 *native_val;
  12.194 +	int i;
  12.195 +	char *str = buf;
  12.196 +
  12.197 +	struct tpm_chip *chip =
  12.198 +	    pci_get_drvdata(container_of(dev, struct pci_dev, dev));
  12.199 +	if (chip == NULL)
  12.200 +		return -ENODEV;
  12.201 +
  12.202 +	memcpy(data, readpubek, sizeof(readpubek));
  12.203 +	memset(data + sizeof(readpubek), 0, 20);	/* zero nonce */
  12.204 +
  12.205 +	if ((len = tpm_transmit(chip, data, sizeof(data))) <
  12.206 +	    READ_PUBEK_RESULT_SIZE)
  12.207 +		return len;
  12.208 +
  12.209 +	/*
  12.210 +	   ignore header 10 bytes
  12.211 +	   algorithm 32 bits (1 == RSA )
  12.212 +	   encscheme 16 bits
  12.213 +	   sigscheme 16 bits
  12.214 +	   parameters (RSA 12->bytes: keybit, #primes, expbit)
  12.215 +	   keylenbytes 32 bits
  12.216 +	   256 byte modulus
  12.217 +	   ignore checksum 20 bytes
  12.218 +	 */
  12.219 +
  12.220 +	native_val = (__force __be32 *) (data + 34);
  12.221 +
  12.222 +	str +=
  12.223 +	    sprintf(str,
  12.224 +		    "Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n"
  12.225 +		    "Sigscheme: %02X %02X\nParameters: %02X %02X %02X %02X"
  12.226 +		    " %02X %02X %02X %02X %02X %02X %02X %02X\n"
  12.227 +		    "Modulus length: %d\nModulus: \n",
  12.228 +		    data[10], data[11], data[12], data[13], data[14],
  12.229 +		    data[15], data[16], data[17], data[22], data[23],
  12.230 +		    data[24], data[25], data[26], data[27], data[28],
  12.231 +		    data[29], data[30], data[31], data[32], data[33],
  12.232 +		    be32_to_cpu(*native_val)
  12.233 +	    );
  12.234 +
  12.235 +	for (i = 0; i < 256; i++) {
  12.236 +		str += sprintf(str, "%02X ", data[i + 39]);
  12.237 +		if ((i + 1) % 16 == 0)
  12.238 +			str += sprintf(str, "\n");
  12.239 +	}
  12.240 +	return str - buf;
  12.241 +}
  12.242 +
  12.243 +static DEVICE_ATTR(pubek, S_IRUGO, show_pubek, NULL);
  12.244 +
  12.245 +#define CAP_VER_RESULT_SIZE 18
  12.246 +static u8 cap_version[] = {
  12.247 +	0, 193,			/* TPM_TAG_RQU_COMMAND */
  12.248 +	0, 0, 0, 18,		/* length */
  12.249 +	0, 0, 0, 101,		/* TPM_ORD_GetCapability */
  12.250 +	0, 0, 0, 6,
  12.251 +	0, 0, 0, 0
  12.252 +};
  12.253 +
  12.254 +#define CAP_MANUFACTURER_RESULT_SIZE 18
  12.255 +static u8 cap_manufacturer[] = {
  12.256 +	0, 193,			/* TPM_TAG_RQU_COMMAND */
  12.257 +	0, 0, 0, 22,		/* length */
  12.258 +	0, 0, 0, 101,		/* TPM_ORD_GetCapability */
  12.259 +	0, 0, 0, 5,
  12.260 +	0, 0, 0, 4,
  12.261 +	0, 0, 1, 3
  12.262 +};
  12.263 +
  12.264 +static ssize_t show_caps(struct device *dev, char *buf)
  12.265 +{
  12.266 +	u8 data[READ_PUBEK_RESULT_SIZE];
  12.267 +	ssize_t len;
  12.268 +	char *str = buf;
  12.269 +
  12.270 +	struct tpm_chip *chip =
  12.271 +	    pci_get_drvdata(container_of(dev, struct pci_dev, dev));
  12.272 +	if (chip == NULL)
  12.273 +		return -ENODEV;
  12.274 +
  12.275 +	memcpy(data, cap_manufacturer, sizeof(cap_manufacturer));
  12.276 +
  12.277 +	if ((len = tpm_transmit(chip, data, sizeof(data))) <
  12.278 +	    CAP_MANUFACTURER_RESULT_SIZE)
  12.279 +		return len;
  12.280 +
  12.281 +	str += sprintf(str, "Manufacturer: 0x%x\n",
   12.282 +		       be32_to_cpu(*((__force __be32 *) (data + 14))));
  12.283 +
  12.284 +	memcpy(data, cap_version, sizeof(cap_version));
  12.285 +
  12.286 +	if ((len = tpm_transmit(chip, data, sizeof(data))) <
  12.287 +	    CAP_VER_RESULT_SIZE)
  12.288 +		return len;
  12.289 +
  12.290 +	str +=
  12.291 +	    sprintf(str, "TCG version: %d.%d\nFirmware version: %d.%d\n",
  12.292 +		    (int) data[14], (int) data[15], (int) data[16],
  12.293 +		    (int) data[17]);
  12.294 +
  12.295 +	return str - buf;
  12.296 +}
  12.297 +
  12.298 +static DEVICE_ATTR(caps, S_IRUGO, show_caps, NULL);
  12.299 +
  12.300 +/*
  12.301 + * Device file system interface to the TPM
  12.302 + */
  12.303 +int tpm_open(struct inode *inode, struct file *file)
  12.304 +{
  12.305 +	int rc = 0, minor = iminor(inode);
  12.306 +	struct tpm_chip *chip = NULL, *pos;
  12.307 +
  12.308 +	spin_lock(&driver_lock);
  12.309 +
  12.310 +	list_for_each_entry(pos, &tpm_chip_list, list) {
  12.311 +		if (pos->vendor->miscdev.minor == minor) {
  12.312 +			chip = pos;
  12.313 +			break;
  12.314 +		}
  12.315 +	}
  12.316 +
  12.317 +	if (chip == NULL) {
  12.318 +		rc = -ENODEV;
  12.319 +		goto err_out;
  12.320 +	}
  12.321 +
  12.322 +	if (chip->num_opens) {
  12.323 +		dev_dbg(&chip->pci_dev->dev,
  12.324 +			"Another process owns this TPM\n");
  12.325 +		rc = -EBUSY;
  12.326 +		goto err_out;
  12.327 +	}
  12.328 +
  12.329 +	chip->num_opens++;
  12.330 +	pci_dev_get(chip->pci_dev);
  12.331 +
  12.332 +	spin_unlock(&driver_lock);
  12.333 +
  12.334 +	chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
  12.335 +	if (chip->data_buffer == NULL) {
  12.336 +		chip->num_opens--;
  12.337 +		pci_dev_put(chip->pci_dev);
  12.338 +		return -ENOMEM;
  12.339 +	}
  12.340 +
  12.341 +	atomic_set(&chip->data_pending, 0);
  12.342 +
  12.343 +	file->private_data = chip;
  12.344 +	return 0;
  12.345 +
  12.346 +err_out:
  12.347 +	spin_unlock(&driver_lock);
  12.348 +	return rc;
  12.349 +}
  12.350 +
  12.351 +EXPORT_SYMBOL_GPL(tpm_open);
  12.352 +
  12.353 +int tpm_release(struct inode *inode, struct file *file)
  12.354 +{
  12.355 +	struct tpm_chip *chip = file->private_data;
  12.356 +
  12.357 +	file->private_data = NULL;
  12.358 +
  12.359 +	spin_lock(&driver_lock);
  12.360 +	chip->num_opens--;
  12.361 +	spin_unlock(&driver_lock);
  12.362 +
  12.363 +	down(&chip->timer_manipulation_mutex);
  12.364 +	if (timer_pending(&chip->user_read_timer))
  12.365 +		del_singleshot_timer_sync(&chip->user_read_timer);
  12.366 +	else if (timer_pending(&chip->device_timer))
  12.367 +		del_singleshot_timer_sync(&chip->device_timer);
  12.368 +	up(&chip->timer_manipulation_mutex);
  12.369 +
  12.370 +	kfree(chip->data_buffer);
  12.371 +	atomic_set(&chip->data_pending, 0);
  12.372 +
  12.373 +	pci_dev_put(chip->pci_dev);
  12.374 +	return 0;
  12.375 +}
  12.376 +
  12.377 +EXPORT_SYMBOL_GPL(tpm_release);
  12.378 +
  12.379 +ssize_t tpm_write(struct file * file, const char __user * buf,
  12.380 +		  size_t size, loff_t * off)
  12.381 +{
  12.382 +	struct tpm_chip *chip = file->private_data;
  12.383 +	int in_size = size, out_size;
  12.384 +
  12.385 +	/* cannot perform a write until the read has cleared
  12.386 +	   either via tpm_read or a user_read_timer timeout */
  12.387 +	while (atomic_read(&chip->data_pending) != 0) {
  12.388 +		set_current_state(TASK_UNINTERRUPTIBLE);
  12.389 +		schedule_timeout(TPM_TIMEOUT);
  12.390 +	}
  12.391 +
  12.392 +	down(&chip->buffer_mutex);
  12.393 +
  12.394 +	if (in_size > TPM_BUFSIZE)
  12.395 +		in_size = TPM_BUFSIZE;
  12.396 +
  12.397 +	if (copy_from_user
  12.398 +	    (chip->data_buffer, (void __user *) buf, in_size)) {
  12.399 +		up(&chip->buffer_mutex);
  12.400 +		return -EFAULT;
  12.401 +	}
  12.402 +
  12.403 +	/* atomic tpm command send and result receive */
  12.404 +	out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
  12.405 +
  12.406 +	atomic_set(&chip->data_pending, out_size);
  12.407 +	atomic_set(&chip->data_position, 0);
  12.408 +	up(&chip->buffer_mutex);
  12.409 +
  12.410 +	/* Set a timeout by which the reader must come claim the result */
  12.411 +	down(&chip->timer_manipulation_mutex);
  12.412 +	init_timer(&chip->user_read_timer);
  12.413 +	chip->user_read_timer.function = user_reader_timeout;
  12.414 +	chip->user_read_timer.data = (unsigned long) chip;
  12.415 +	chip->user_read_timer.expires = jiffies + (60 * HZ);
  12.416 +	add_timer(&chip->user_read_timer);
  12.417 +	up(&chip->timer_manipulation_mutex);
  12.418 +
  12.419 +	return in_size;
  12.420 +}
  12.421 +
  12.422 +EXPORT_SYMBOL_GPL(tpm_write);
  12.423 +
  12.424 +ssize_t tpm_read(struct file * file, char __user * buf,
  12.425 +		 size_t size, loff_t * off)
  12.426 +{
  12.427 +	struct tpm_chip *chip = file->private_data;
  12.428 +	int ret_size = -ENODATA;
  12.429 +	int pos, pending = 0;
  12.430 +
  12.431 +	down(&chip->buffer_mutex);
  12.432 +	ret_size = atomic_read(&chip->data_pending);
  12.433 +	if ( ret_size > 0 ) {	/* Result available */
  12.434 +		if (size < ret_size)
  12.435 +			ret_size = size;
  12.436 +
  12.437 +		pos = atomic_read(&chip->data_position);
  12.438 +
  12.439 +		if (copy_to_user((void __user *) buf,
  12.440 +				 &chip->data_buffer[pos], ret_size)) {
  12.441 +			ret_size = -EFAULT;
  12.442 +		} else {
  12.443 +			pending = atomic_read(&chip->data_pending) - ret_size;
  12.444 +			if ( pending ) {
  12.445 +				atomic_set( &chip->data_pending, pending );
  12.446 +				atomic_set( &chip->data_position, pos+ret_size );
  12.447 +			}
  12.448 +		}
  12.449 +	}
  12.450 +	up(&chip->buffer_mutex);
  12.451 +
  12.452 +	if ( ret_size <= 0 || pending == 0 ) {
  12.453 +		atomic_set( &chip->data_pending, 0 );
  12.454 +		down(&chip->timer_manipulation_mutex);
  12.455 +		del_singleshot_timer_sync(&chip->user_read_timer);
  12.456 +		up(&chip->timer_manipulation_mutex);
  12.457 +	}
  12.458 +
  12.459 +	return ret_size;
  12.460 +}
  12.461 +
  12.462 +EXPORT_SYMBOL_GPL(tpm_read);
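Taken together, tpm_write() and tpm_read() give userspace a strict command/response protocol on the character device: one write queues a command, and the result must be read back before the 60-second user_read_timer discards it. A minimal userspace sketch (the /dev/tpm0 node name is assumed; the command bytes are the cap_pcr GetCapability blob shown earlier):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	/* TPM_GetCapability asking for the number of PCRs
    	 * (22-byte blob, same bytes as cap_pcr above) */
    	unsigned char cmd[] = {
    		0, 193, 0, 0, 0, 22, 0, 0, 0, 101,
    		0, 0, 0, 5, 0, 0, 0, 4, 0, 0, 1, 1
    	};
    	unsigned char resp[2048];
    	ssize_t n;
    	int fd = open("/dev/tpm0", O_RDWR);	/* node name assumed */

    	if (fd < 0)
    		return 1;
    	if (write(fd, cmd, sizeof(cmd)) != (ssize_t) sizeof(cmd))
    		return 1;
    	n = read(fd, resp, sizeof(resp));	/* read before the 60 s timeout */
    	printf("received %zd bytes\n", n);
    	close(fd);
    	return 0;
    }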
  12.463 +
  12.464 +void __devexit tpm_remove(struct pci_dev *pci_dev)
  12.465 +{
  12.466 +	struct tpm_chip *chip = pci_get_drvdata(pci_dev);
  12.467 +
  12.468 +	if (chip == NULL) {
  12.469 +		dev_err(&pci_dev->dev, "No device data found\n");
  12.470 +		return;
  12.471 +	}
  12.472 +
  12.473 +	spin_lock(&driver_lock);
  12.474 +
  12.475 +	list_del(&chip->list);
  12.476 +
  12.477 +	spin_unlock(&driver_lock);
  12.478 +
  12.479 +	pci_set_drvdata(pci_dev, NULL);
  12.480 +	misc_deregister(&chip->vendor->miscdev);
  12.481 +
  12.482 +	device_remove_file(&pci_dev->dev, &dev_attr_pubek);
  12.483 +	device_remove_file(&pci_dev->dev, &dev_attr_pcrs);
  12.484 +	device_remove_file(&pci_dev->dev, &dev_attr_caps);
  12.485 +
  12.486 +	pci_disable_device(pci_dev);
  12.487 +
   12.488 +	dev_mask[chip->dev_num / 32] &= ~(1 << (chip->dev_num % 32));
  12.489 +
  12.490 +	kfree(chip);
  12.491 +
  12.492 +	pci_dev_put(pci_dev);
  12.493 +}
  12.494 +
  12.495 +EXPORT_SYMBOL_GPL(tpm_remove);
  12.496 +
  12.497 +static u8 savestate[] = {
  12.498 +	0, 193,			/* TPM_TAG_RQU_COMMAND */
  12.499 +	0, 0, 0, 10,		/* blob length (in bytes) */
  12.500 +	0, 0, 0, 152		/* TPM_ORD_SaveState */
  12.501 +};
  12.502 +
  12.503 +/*
  12.504 + * We are about to suspend. Save the TPM state
  12.505 + * so that it can be restored.
  12.506 + */
  12.507 +int tpm_pm_suspend(struct pci_dev *pci_dev, pm_message_t pm_state)
  12.508 +{
  12.509 +	struct tpm_chip *chip = pci_get_drvdata(pci_dev);
  12.510 +	if (chip == NULL)
  12.511 +		return -ENODEV;
  12.512 +
  12.513 +	tpm_transmit(chip, savestate, sizeof(savestate));
  12.514 +	return 0;
  12.515 +}
  12.516 +
  12.517 +EXPORT_SYMBOL_GPL(tpm_pm_suspend);
  12.518 +
  12.519 +/*
   12.520 + * Resume from a power save. The BIOS already restored
  12.521 + * the TPM state.
  12.522 + */
  12.523 +int tpm_pm_resume(struct pci_dev *pci_dev)
  12.524 +{
  12.525 +	struct tpm_chip *chip = pci_get_drvdata(pci_dev);
  12.526 +
  12.527 +	if (chip == NULL)
  12.528 +		return -ENODEV;
  12.529 +
  12.530 +	return 0;
  12.531 +}
  12.532 +
  12.533 +EXPORT_SYMBOL_GPL(tpm_pm_resume);
  12.534 +
  12.535 +/*
  12.536 + * Called from tpm_<specific>.c probe function only for devices
  12.537 + * the driver has determined it should claim.  Prior to calling
   12.538 + * this function the specific probe function has called pci_enable_device.
   12.539 + * Upon errant exit from this function, the specific probe function should
   12.540 + * call pci_disable_device.
  12.541 + */
  12.542 +int tpm_register_hardware(struct pci_dev *pci_dev,
  12.543 +			  struct tpm_vendor_specific *entry)
  12.544 +{
  12.545 +	char devname[7];
  12.546 +	struct tpm_chip *chip;
  12.547 +	int i, j;
  12.548 +
  12.549 +	/* Driver specific per-device data */
  12.550 +	chip = kmalloc(sizeof(*chip), GFP_KERNEL);
  12.551 +	if (chip == NULL)
  12.552 +		return -ENOMEM;
  12.553 +
  12.554 +	memset(chip, 0, sizeof(struct tpm_chip));
  12.555 +
  12.556 +	init_MUTEX(&chip->buffer_mutex);
  12.557 +	init_MUTEX(&chip->tpm_mutex);
  12.558 +	init_MUTEX(&chip->timer_manipulation_mutex);
  12.559 +	INIT_LIST_HEAD(&chip->list);
  12.560 +
  12.561 +	chip->vendor = entry;
  12.562 +
  12.563 +	chip->dev_num = -1;
  12.564 +
  12.565 +	for (i = 0; i < 32; i++)
  12.566 +		for (j = 0; j < 8; j++)
  12.567 +			if ((dev_mask[i] & (1 << j)) == 0) {
  12.568 +				chip->dev_num = i * 32 + j;
  12.569 +				dev_mask[i] |= 1 << j;
  12.570 +				goto dev_num_search_complete;
  12.571 +			}
  12.572 +
  12.573 +dev_num_search_complete:
  12.574 +	if (chip->dev_num < 0) {
  12.575 +		dev_err(&pci_dev->dev,
  12.576 +			"No available tpm device numbers\n");
  12.577 +		kfree(chip);
  12.578 +		return -ENODEV;
  12.579 +	} else if (chip->dev_num == 0)
  12.580 +		chip->vendor->miscdev.minor = TPM_MINOR;
  12.581 +	else
  12.582 +		chip->vendor->miscdev.minor = MISC_DYNAMIC_MINOR;
  12.583 +
  12.584 +	snprintf(devname, sizeof(devname), "%s%d", "tpm", chip->dev_num);
  12.585 +	chip->vendor->miscdev.name = devname;
  12.586 +
  12.587 +	chip->vendor->miscdev.dev = &(pci_dev->dev);
  12.588 +	chip->pci_dev = pci_dev_get(pci_dev);
  12.589 +
  12.590 +	if (misc_register(&chip->vendor->miscdev)) {
  12.591 +		dev_err(&chip->pci_dev->dev,
  12.592 +			"unable to misc_register %s, minor %d\n",
  12.593 +			chip->vendor->miscdev.name,
  12.594 +			chip->vendor->miscdev.minor);
  12.595 +		pci_dev_put(pci_dev);
  12.596 +		kfree(chip);
   12.597 +		dev_mask[i] &= ~(1 << j);
  12.598 +		return -ENODEV;
  12.599 +	}
  12.600 +
  12.601 +	pci_set_drvdata(pci_dev, chip);
  12.602 +
  12.603 +	list_add(&chip->list, &tpm_chip_list);
  12.604 +
  12.605 +	device_create_file(&pci_dev->dev, &dev_attr_pubek);
  12.606 +	device_create_file(&pci_dev->dev, &dev_attr_pcrs);
  12.607 +	device_create_file(&pci_dev->dev, &dev_attr_caps);
  12.608 +
  12.609 +	return 0;
  12.610 +}
  12.611 +
  12.612 +EXPORT_SYMBOL_GPL(tpm_register_hardware);
  12.613 +
  12.614 +static int __init init_tpm(void)
  12.615 +{
  12.616 +	return 0;
  12.617 +}
  12.618 +
  12.619 +static void __exit cleanup_tpm(void)
  12.620 +{
  12.621 +
  12.622 +}
  12.623 +
  12.624 +module_init(init_tpm);
  12.625 +module_exit(cleanup_tpm);
  12.626 +
  12.627 +MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
  12.628 +MODULE_DESCRIPTION("TPM Driver");
  12.629 +MODULE_VERSION("2.0");
  12.630 +MODULE_LICENSE("GPL");
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm.h	Tue Aug 30 11:48:08 2005 -0800
    13.3 @@ -0,0 +1,92 @@
    13.4 +/*
    13.5 + * Copyright (C) 2004 IBM Corporation
    13.6 + *
    13.7 + * Authors:
    13.8 + * Leendert van Doorn <leendert@watson.ibm.com>
    13.9 + * Dave Safford <safford@watson.ibm.com>
   13.10 + * Reiner Sailer <sailer@watson.ibm.com>
   13.11 + * Kylene Hall <kjhall@us.ibm.com>
   13.12 + *
   13.13 + * Maintained by: <tpmdd_devel@lists.sourceforge.net>
   13.14 + *
   13.15 + * Device driver for TCG/TCPA TPM (trusted platform module).
   13.16 + * Specifications at www.trustedcomputinggroup.org
   13.17 + *
   13.18 + * This program is free software; you can redistribute it and/or
   13.19 + * modify it under the terms of the GNU General Public License as
   13.20 + * published by the Free Software Foundation, version 2 of the
   13.21 + * License.
   13.22 + *
   13.23 + */
   13.24 +#include <linux/module.h>
   13.25 +#include <linux/version.h>
   13.26 +#include <linux/pci.h>
   13.27 +#include <linux/delay.h>
   13.28 +#include <linux/fs.h>
   13.29 +#include <linux/miscdevice.h>
   13.30 +
   13.31 +#define TPM_TIMEOUT msecs_to_jiffies(5)
   13.32 +
   13.33 +/* TPM addresses */
   13.34 +#define	TPM_ADDR			0x4E
   13.35 +#define	TPM_DATA			0x4F
   13.36 +
   13.37 +struct tpm_chip;
   13.38 +
   13.39 +struct tpm_vendor_specific {
   13.40 +	u8 req_complete_mask;
   13.41 +	u8 req_complete_val;
   13.42 +	u16 base;		/* TPM base address */
   13.43 +
   13.44 +	int (*recv) (struct tpm_chip *, u8 *, size_t);
   13.45 +	int (*send) (struct tpm_chip *, u8 *, size_t);
   13.46 +	void (*cancel) (struct tpm_chip *);
   13.47 +	struct miscdevice miscdev;
   13.48 +};
   13.49 +
   13.50 +struct tpm_chip {
   13.51 +	struct pci_dev *pci_dev;	/* PCI device stuff */
   13.52 +
   13.53 +	int dev_num;		/* /dev/tpm# */
   13.54 +	int num_opens;		/* only one allowed */
   13.55 +	int time_expired;
   13.56 +
   13.57 +	/* Data passed to and from the tpm via the read/write calls */
   13.58 +	u8 *data_buffer;
   13.59 +	atomic_t data_pending;
   13.60 +	atomic_t data_position;
   13.61 +	struct semaphore buffer_mutex;
   13.62 +
   13.63 +	struct timer_list user_read_timer;	/* user needs to claim result */
   13.64 +	struct semaphore tpm_mutex;	/* tpm is processing */
   13.65 +	struct timer_list device_timer;	/* tpm is processing */
   13.66 +	struct semaphore timer_manipulation_mutex;
   13.67 +
   13.68 +	struct tpm_vendor_specific *vendor;
   13.69 +
   13.70 +	struct list_head list;
   13.71 +};
   13.72 +
   13.73 +static inline int tpm_read_index(int index)
   13.74 +{
   13.75 +	outb(index, TPM_ADDR);
   13.76 +	return inb(TPM_DATA) & 0xFF;
   13.77 +}
   13.78 +
   13.79 +static inline void tpm_write_index(int index, int value)
   13.80 +{
   13.81 +	outb(index, TPM_ADDR);
   13.82 +	outb(value & 0xFF, TPM_DATA);
   13.83 +}
   13.84 +
   13.85 +extern void tpm_time_expired(unsigned long);
   13.86 +extern int tpm_register_hardware(struct pci_dev *,
   13.87 +				 struct tpm_vendor_specific *);
   13.88 +extern int tpm_open(struct inode *, struct file *);
   13.89 +extern int tpm_release(struct inode *, struct file *);
   13.90 +extern ssize_t tpm_write(struct file *, const char __user *, size_t,
   13.91 +			 loff_t *);
   13.92 +extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *);
   13.93 +extern void __devexit tpm_remove(struct pci_dev *);
   13.94 +extern int tpm_pm_suspend(struct pci_dev *, pm_message_t);
   13.95 +extern int tpm_pm_resume(struct pci_dev *);
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm_atmel.c	Tue Aug 30 11:48:08 2005 -0800
    14.3 @@ -0,0 +1,220 @@
    14.4 +/*
    14.5 + * Copyright (C) 2004 IBM Corporation
    14.6 + *
    14.7 + * Authors:
    14.8 + * Leendert van Doorn <leendert@watson.ibm.com>
    14.9 + * Dave Safford <safford@watson.ibm.com>
   14.10 + * Reiner Sailer <sailer@watson.ibm.com>
   14.11 + * Kylene Hall <kjhall@us.ibm.com>
   14.12 + *
   14.13 + * Maintained by: <tpmdd_devel@lists.sourceforge.net>
   14.14 + *
   14.15 + * Device driver for TCG/TCPA TPM (trusted platform module).
   14.16 + * Specifications at www.trustedcomputinggroup.org
   14.17 + *
   14.18 + * This program is free software; you can redistribute it and/or
   14.19 + * modify it under the terms of the GNU General Public License as
   14.20 + * published by the Free Software Foundation, version 2 of the
   14.21 + * License.
   14.22 + *
   14.23 + */
   14.24 +
   14.25 +#include "tpm.h"
   14.26 +
   14.27 +/* Atmel definitions */
   14.28 +enum tpm_atmel_addr {
   14.29 +	TPM_ATMEL_BASE_ADDR_LO = 0x08,
   14.30 +	TPM_ATMEL_BASE_ADDR_HI = 0x09
   14.31 +};
   14.32 +
   14.33 +/* write status bits */
   14.34 +#define	ATML_STATUS_ABORT		0x01
   14.35 +#define	ATML_STATUS_LASTBYTE		0x04
   14.36 +
   14.37 +/* read status bits */
   14.38 +#define	ATML_STATUS_BUSY		0x01
   14.39 +#define	ATML_STATUS_DATA_AVAIL		0x02
   14.40 +#define	ATML_STATUS_REWRITE		0x04
   14.41 +
   14.42 +
   14.43 +static int tpm_atml_recv(struct tpm_chip *chip, u8 * buf, size_t count)
   14.44 +{
   14.45 +	u8 status, *hdr = buf;
   14.46 +	u32 size;
   14.47 +	int i;
   14.48 +	__be32 *native_size;
   14.49 +
   14.50 +	/* start reading header */
   14.51 +	if (count < 6)
   14.52 +		return -EIO;
   14.53 +
   14.54 +	for (i = 0; i < 6; i++) {
   14.55 +		status = inb(chip->vendor->base + 1);
   14.56 +		if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
   14.57 +			dev_err(&chip->pci_dev->dev,
   14.58 +				"error reading header\n");
   14.59 +			return -EIO;
   14.60 +		}
   14.61 +		*buf++ = inb(chip->vendor->base);
   14.62 +	}
   14.63 +
   14.64 +	/* size of the data received */
   14.65 +	native_size = (__force __be32 *) (hdr + 2);
   14.66 +	size = be32_to_cpu(*native_size);
   14.67 +
   14.68 +	if (count < size) {
   14.69 +		dev_err(&chip->pci_dev->dev,
   14.70 +			"Recv size(%d) less than available space\n", size);
   14.71 +		for (; i < size; i++) {	/* clear the waiting data anyway */
   14.72 +			status = inb(chip->vendor->base + 1);
   14.73 +			if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
   14.74 +				dev_err(&chip->pci_dev->dev,
   14.75 +					"error reading data\n");
   14.76 +				return -EIO;
   14.77 +			}
   14.78 +		}
   14.79 +		return -EIO;
   14.80 +	}
   14.81 +
   14.82 +	/* read all the data available */
   14.83 +	for (; i < size; i++) {
   14.84 +		status = inb(chip->vendor->base + 1);
   14.85 +		if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
   14.86 +			dev_err(&chip->pci_dev->dev,
   14.87 +				"error reading data\n");
   14.88 +			return -EIO;
   14.89 +		}
   14.90 +		*buf++ = inb(chip->vendor->base);
   14.91 +	}
   14.92 +
   14.93 +	/* make sure data available is gone */
   14.94 +	status = inb(chip->vendor->base + 1);
   14.95 +	if (status & ATML_STATUS_DATA_AVAIL) {
   14.96 +		dev_err(&chip->pci_dev->dev, "data available is stuck\n");
   14.97 +		return -EIO;
   14.98 +	}
   14.99 +
  14.100 +	return size;
  14.101 +}
  14.102 +
  14.103 +static int tpm_atml_send(struct tpm_chip *chip, u8 * buf, size_t count)
  14.104 +{
  14.105 +	int i;
  14.106 +
  14.107 +	dev_dbg(&chip->pci_dev->dev, "tpm_atml_send: ");
  14.108 +	for (i = 0; i < count; i++) {
  14.109 +		dev_dbg(&chip->pci_dev->dev, "0x%x(%d) ", buf[i], buf[i]);
  14.110 +		outb(buf[i], chip->vendor->base);
  14.111 +	}
  14.112 +
  14.113 +	return count;
  14.114 +}
  14.115 +
  14.116 +static void tpm_atml_cancel(struct tpm_chip *chip)
  14.117 +{
  14.118 +	outb(ATML_STATUS_ABORT, chip->vendor->base + 1);
  14.119 +}
  14.120 +
  14.121 +static struct file_operations atmel_ops = {
  14.122 +	.owner = THIS_MODULE,
  14.123 +	.llseek = no_llseek,
  14.124 +	.open = tpm_open,
  14.125 +	.read = tpm_read,
  14.126 +	.write = tpm_write,
  14.127 +	.release = tpm_release,
  14.128 +};
  14.129 +
  14.130 +static struct tpm_vendor_specific tpm_atmel = {
  14.131 +	.recv = tpm_atml_recv,
  14.132 +	.send = tpm_atml_send,
  14.133 +	.cancel = tpm_atml_cancel,
  14.134 +	.req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
  14.135 +	.req_complete_val = ATML_STATUS_DATA_AVAIL,
  14.136 +	.miscdev = { .fops = &atmel_ops, },
  14.137 +};
  14.138 +
  14.139 +static int __devinit tpm_atml_init(struct pci_dev *pci_dev,
  14.140 +				   const struct pci_device_id *pci_id)
  14.141 +{
  14.142 +	u8 version[4];
  14.143 +	int rc = 0;
  14.144 +	int lo, hi;
  14.145 +
  14.146 +	if (pci_enable_device(pci_dev))
  14.147 +		return -EIO;
  14.148 +
  14.149 +	lo = tpm_read_index( TPM_ATMEL_BASE_ADDR_LO );
  14.150 +	hi = tpm_read_index( TPM_ATMEL_BASE_ADDR_HI );
  14.151 +
  14.152 +	tpm_atmel.base = (hi<<8)|lo;
  14.153 +	dev_dbg( &pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base);
  14.154 +
  14.155 +	/* verify that it is an Atmel part */
  14.156 +	if (tpm_read_index(4) != 'A' || tpm_read_index(5) != 'T'
  14.157 +	    || tpm_read_index(6) != 'M' || tpm_read_index(7) != 'L') {
  14.158 +		rc = -ENODEV;
  14.159 +		goto out_err;
  14.160 +	}
  14.161 +
  14.162 +	/* query chip for its version number */
  14.163 +	if ((version[0] = tpm_read_index(0x00)) != 0xFF) {
  14.164 +		version[1] = tpm_read_index(0x01);
  14.165 +		version[2] = tpm_read_index(0x02);
  14.166 +		version[3] = tpm_read_index(0x03);
  14.167 +	} else {
  14.168 +		dev_info(&pci_dev->dev, "version query failed\n");
  14.169 +		rc = -ENODEV;
  14.170 +		goto out_err;
  14.171 +	}
  14.172 +
  14.173 +	if ((rc = tpm_register_hardware(pci_dev, &tpm_atmel)) < 0)
  14.174 +		goto out_err;
  14.175 +
  14.176 +	dev_info(&pci_dev->dev,
  14.177 +		 "Atmel TPM version %d.%d.%d.%d\n", version[0], version[1],
  14.178 +		 version[2], version[3]);
  14.179 +
  14.180 +	return 0;
  14.181 +out_err:
  14.182 +	pci_disable_device(pci_dev);
  14.183 +	return rc;
  14.184 +}
  14.185 +
  14.186 +static struct pci_device_id tpm_pci_tbl[] __devinitdata = {
  14.187 +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0)},
  14.188 +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12)},
  14.189 +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0)},
  14.190 +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12)},
  14.191 +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0)},
  14.192 +	{PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_LPC)},
  14.193 +	{0,}
  14.194 +};
  14.195 +
  14.196 +MODULE_DEVICE_TABLE(pci, tpm_pci_tbl);
  14.197 +
  14.198 +static struct pci_driver atmel_pci_driver = {
  14.199 +	.name = "tpm_atmel",
  14.200 +	.id_table = tpm_pci_tbl,
  14.201 +	.probe = tpm_atml_init,
  14.202 +	.remove = __devexit_p(tpm_remove),
  14.203 +	.suspend = tpm_pm_suspend,
  14.204 +	.resume = tpm_pm_resume,
  14.205 +};
  14.206 +
  14.207 +static int __init init_atmel(void)
  14.208 +{
  14.209 +	return pci_register_driver(&atmel_pci_driver);
  14.210 +}
  14.211 +
  14.212 +static void __exit cleanup_atmel(void)
  14.213 +{
  14.214 +	pci_unregister_driver(&atmel_pci_driver);
  14.215 +}
  14.216 +
  14.217 +module_init(init_atmel);
  14.218 +module_exit(cleanup_atmel);
  14.219 +
  14.220 +MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
  14.221 +MODULE_DESCRIPTION("TPM Driver");
  14.222 +MODULE_VERSION("2.0");
  14.223 +MODULE_LICENSE("GPL");
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm_nopci.c	Tue Aug 30 11:48:08 2005 -0800
    15.3 @@ -0,0 +1,741 @@
    15.4 +/*
    15.5 + * Copyright (C) 2004 IBM Corporation
    15.6 + *
    15.7 + * Authors:
    15.8 + * Leendert van Doorn <leendert@watson.ibm.com>
    15.9 + * Dave Safford <safford@watson.ibm.com>
   15.10 + * Reiner Sailer <sailer@watson.ibm.com>
   15.11 + * Kylene Hall <kjhall@us.ibm.com>
   15.12 + *
   15.13 + * Maintained by: <tpmdd_devel@lists.sourceforge.net>
   15.14 + *
   15.15 + * Device driver for TCG/TCPA TPM (trusted platform module).
   15.16 + * Specifications at www.trustedcomputinggroup.org
   15.17 + *
   15.18 + * This program is free software; you can redistribute it and/or
   15.19 + * modify it under the terms of the GNU General Public License as
   15.20 + * published by the Free Software Foundation, version 2 of the
   15.21 + * License.
   15.22 + *
   15.23 + * Note, the TPM chip is not interrupt driven (only polling)
   15.24 + * and can have very long timeouts (minutes!). Hence the unusual
   15.25 + * calls to schedule_timeout.
   15.26 + *
   15.27 + */
   15.28 +
   15.29 +#include <linux/sched.h>
   15.30 +#include <linux/poll.h>
   15.31 +#include <linux/spinlock.h>
   15.32 +#include "tpm_nopci.h"
   15.33 +
   15.34 +enum {
   15.35 +	TPM_MINOR = 224,	/* officially assigned */
   15.36 +	TPM_BUFSIZE = 2048,
   15.37 +	TPM_NUM_DEVICES = 256,
   15.38 +	TPM_NUM_MASK_ENTRIES = TPM_NUM_DEVICES / (8 * sizeof(int))
   15.39 +};
   15.40 +
   15.41 +  /* PCI configuration addresses */
   15.42 +enum {
   15.43 +	PCI_GEN_PMCON_1 = 0xA0,
   15.44 +	PCI_GEN1_DEC = 0xE4,
   15.45 +	PCI_LPC_EN = 0xE6,
   15.46 +	PCI_GEN2_DEC = 0xEC
   15.47 +};
   15.48 +
   15.49 +enum {
   15.50 +	TPM_LOCK_REG = 0x0D,
   15.51 +	TPM_INTERUPT_REG = 0x0A,
   15.52 +	TPM_BASE_ADDR_LO = 0x08,
   15.53 +	TPM_BASE_ADDR_HI = 0x09,
   15.54 +	TPM_UNLOCK_VALUE = 0x55,
   15.55 +	TPM_LOCK_VALUE = 0xAA,
   15.56 +	TPM_DISABLE_INTERUPT_VALUE = 0x00
   15.57 +};
   15.58 +
   15.59 +static LIST_HEAD(tpm_chip_list);
   15.60 +static spinlock_t driver_lock = SPIN_LOCK_UNLOCKED;
   15.61 +static int dev_mask[32];
   15.62 +
   15.63 +static void user_reader_timeout(unsigned long ptr)
   15.64 +{
   15.65 +	struct tpm_chip *chip = (struct tpm_chip *) ptr;
   15.66 +
   15.67 +	down(&chip->buffer_mutex);
   15.68 +	atomic_set(&chip->data_pending, 0);
   15.69 +	memset(chip->data_buffer, 0, TPM_BUFSIZE);
   15.70 +	up(&chip->buffer_mutex);
   15.71 +}
   15.72 +
   15.73 +void tpm_time_expired(unsigned long ptr)
   15.74 +{
   15.75 +	int *exp = (int *) ptr;
   15.76 +	*exp = 1;
   15.77 +}
   15.78 +
   15.79 +EXPORT_SYMBOL_GPL(tpm_time_expired);
   15.80 +
   15.81 +
   15.82 +/*
   15.83 + * This function should be used by other kernel subsystems attempting to use the tpm through the tpm_transmit interface.
   15.84 + * A call to this function will return the chip structure corresponding to the TPM you are looking for that can then be sent with your command to tpm_transmit.
   15.85 + * Passing 0 as the argument corresponds to /dev/tpm0 and thus the first and probably primary TPM on the system.  Passing 1 corresponds to /dev/tpm1 and the next TPM discovered.  If a TPM with the given chip_num does not exist NULL will be returned.
   15.86 + */
   15.87 +struct tpm_chip* tpm_chip_lookup(int chip_num)
   15.88 +{
   15.89 +
   15.90 +	struct tpm_chip *pos;
   15.91 +	list_for_each_entry(pos, &tpm_chip_list, list)
   15.92 +		if (pos->dev_num == chip_num ||
   15.93 +		    chip_num == TPM_ANY_NUM)
   15.94 +			return pos;
   15.95 +
   15.96 +	return NULL;
   15.97 +
   15.98 +}
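As the comment above says, this lookup plus tpm_transmit() is the intended path for other kernel code that wants to talk to a TPM directly. A minimal in-kernel sketch (the command buffer here reuses cap_version, defined further down in this file; a real caller would build its own blob):

    /* Sketch: send GetCapability(version) to the first TPM found. */
    static ssize_t query_tpm_version(void)
    {
    	u8 buf[CAP_VER_RESULT_SIZE];
    	struct tpm_chip *chip = tpm_chip_lookup(TPM_ANY_NUM);

    	if (chip == NULL)
    		return -ENODEV;
    	memcpy(buf, cap_version, sizeof(cap_version));
    	/* the reply overwrites buf; return value is its length or an error */
    	return tpm_transmit(chip, buf, sizeof(buf));
    }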
   15.99 +
  15.100 +/*
  15.101 + * Internal kernel interface to transmit TPM commands
  15.102 + */
  15.103 +ssize_t tpm_transmit(struct tpm_chip * chip, const char *buf,
  15.104 +		     size_t bufsiz)
  15.105 +{
  15.106 +	ssize_t rc;
  15.107 +	u32 count;
  15.108 +	unsigned long stop;
  15.109 +
  15.110 +	count = be32_to_cpu(*((__be32 *) (buf + 2)));
  15.111 +
  15.112 +	if (count == 0)
  15.113 +		return -ENODATA;
  15.114 +	if (count > bufsiz) {
  15.115 +		dev_err(chip->dev,
  15.116 +			"invalid count value %x %x \n", count, bufsiz);
  15.117 +		return -E2BIG;
  15.118 +	}
  15.119 +
  15.120 +	dev_dbg(chip->dev, "TPM Ordinal: %d\n",
  15.121 +		be32_to_cpu(*((__be32 *) (buf + 6))));
  15.122 +	dev_dbg(chip->dev, "Chip Status: %x\n",
  15.123 +		inb(chip->vendor->base + 1));
  15.124 +
  15.125 +	down(&chip->tpm_mutex);
  15.126 +
  15.127 +	if ((rc = chip->vendor->send(chip, (u8 *) buf, count)) < 0) {
  15.128 +		dev_err(chip->dev,
  15.129 +			"tpm_transmit: tpm_send: error %d\n", rc);
  15.130 +		goto out;
  15.131 +	}
  15.132 +
  15.133 +	stop = jiffies + 2 * 60 * HZ;
  15.134 +	do {
  15.135 +		u8 status = chip->vendor->status(chip);
  15.136 +		if ((status & chip->vendor->req_complete_mask) ==
  15.137 +		    chip->vendor->req_complete_val) {
  15.138 +			goto out_recv;
  15.139 +		}
  15.140 +
  15.141 +		if ((status == chip->vendor->req_canceled)) {
  15.142 +			dev_err(chip->dev, "Operation Canceled\n");
  15.143 +			rc = -ECANCELED;
  15.144 +			goto out;
  15.145 +		}
  15.146 +
  15.147 +		msleep(TPM_TIMEOUT);	/* CHECK */
  15.148 +		rmb();
  15.149 +	}
  15.150 +	while (time_before(jiffies, stop));
  15.151 +
  15.152 +
  15.153 +	chip->vendor->cancel(chip);
  15.154 +	dev_err(chip->dev, "Operation Timed out\n");
  15.155 +	rc = -ETIME;
  15.156 +	goto out;
  15.157 +
  15.158 +out_recv:
  15.159 +	rc = chip->vendor->recv(chip, (u8 *) buf, bufsiz);
  15.160 +	if (rc < 0)
  15.161 +		dev_err(chip->dev,
  15.162 +			"tpm_transmit: tpm_recv: error %d\n", rc);
  15.163 +	atomic_set(&chip->data_position, 0);
  15.164 +
  15.165 +out:
  15.166 +	up(&chip->tpm_mutex);
  15.167 +	return rc;
  15.168 +}
  15.169 +
  15.170 +EXPORT_SYMBOL_GPL(tpm_transmit);
  15.171 +
  15.172 +#define TPM_DIGEST_SIZE 20
  15.173 +#define CAP_PCR_RESULT_SIZE 18
  15.174 +static const u8 cap_pcr[] = {
  15.175 +	0, 193,			/* TPM_TAG_RQU_COMMAND */
  15.176 +	0, 0, 0, 22,		/* length */
  15.177 +	0, 0, 0, 101,		/* TPM_ORD_GetCapability */
  15.178 +	0, 0, 0, 5,
  15.179 +	0, 0, 0, 4,
  15.180 +	0, 0, 1, 1
  15.181 +};
  15.182 +
  15.183 +#define READ_PCR_RESULT_SIZE 30
  15.184 +static const u8 pcrread[] = {
  15.185 +	0, 193,			/* TPM_TAG_RQU_COMMAND */
  15.186 +	0, 0, 0, 14,		/* length */
  15.187 +	0, 0, 0, 21,		/* TPM_ORD_PcrRead */
  15.188 +	0, 0, 0, 0		/* PCR index */
  15.189 +};
  15.190 +
  15.191 +ssize_t tpm_show_pcrs(struct device *dev, char *buf)
  15.192 +{
  15.193 +	u8 data[READ_PCR_RESULT_SIZE];
  15.194 +	ssize_t len;
  15.195 +	int i, j, num_pcrs;
  15.196 +	__be32 index;
  15.197 +	char *str = buf;
  15.198 +
  15.199 +	struct tpm_chip *chip = dev_get_drvdata(dev);
  15.200 +	if (chip == NULL)
  15.201 +		return -ENODEV;
  15.202 +
  15.203 +	memcpy(data, cap_pcr, sizeof(cap_pcr));
  15.204 +	if ((len = tpm_transmit(chip, data, sizeof(data)))
  15.205 +	    < CAP_PCR_RESULT_SIZE)
  15.206 +		return len;
  15.207 +
  15.208 +	num_pcrs = be32_to_cpu(*((__be32 *) (data + 14)));
  15.209 +
  15.210 +	for (i = 0; i < num_pcrs; i++) {
  15.211 +		memcpy(data, pcrread, sizeof(pcrread));
  15.212 +		index = cpu_to_be32(i);
  15.213 +		memcpy(data + 10, &index, 4);
  15.214 +		if ((len = tpm_transmit(chip, data, sizeof(data)))
  15.215 +		    < READ_PCR_RESULT_SIZE)
  15.216 +			return len;
  15.217 +		str += sprintf(str, "PCR-%02d: ", i);
  15.218 +		for (j = 0; j < TPM_DIGEST_SIZE; j++)
  15.219 +			str += sprintf(str, "%02X ", *(data + 10 + j));
  15.220 +		str += sprintf(str, "\n");
  15.221 +	}
  15.222 +	return str - buf;
  15.223 +}
  15.224 +
  15.225 +EXPORT_SYMBOL_GPL(tpm_show_pcrs);
  15.226 +
  15.227 +/*
  15.228 + * Return 0 on success.  On error pass along error code.
  15.229 + * chip_id Upper 2 bytes equal ANY, HW_ONLY or SW_ONLY
   15.230 + * Lower 2 bytes equal tpm idx # or ANY
  15.231 + * res_buf must fit a TPM_PCR (20 bytes) or NULL if you don't care
  15.232 + */
  15.233 +int tpm_pcr_read( u32 chip_id, int pcr_idx, u8* res_buf, int res_buf_size )
  15.234 +{
  15.235 +	u8 data[READ_PCR_RESULT_SIZE];
  15.236 +	int rc;
  15.237 +	__be32 index;
  15.238 +	int chip_num = chip_id & TPM_CHIP_NUM_MASK;
  15.239 +	struct tpm_chip* chip;
  15.240 +
  15.241 +	if ( res_buf && res_buf_size < TPM_DIGEST_SIZE )
  15.242 +		return -ENOSPC;
  15.243 +	if ( (chip = tpm_chip_lookup( chip_num /*,
  15.244 +				       chip_id >> TPM_CHIP_TYPE_SHIFT*/ ) ) == NULL ) {
  15.245 +		printk("chip %d not found.\n",chip_num);
  15.246 +		return -ENODEV;
  15.247 +	}
  15.248 +	memcpy(data, pcrread, sizeof(pcrread));
  15.249 +	index = cpu_to_be32(pcr_idx);
  15.250 +	memcpy(data + 10, &index, 4);
  15.251 +	if ((rc = tpm_transmit(chip, data, sizeof(data))) > 0 )
  15.252 +		rc = be32_to_cpu(*((u32*)(data+6)));
  15.253 +
  15.254 +	if ( rc == 0 && res_buf )
  15.255 +		memcpy(res_buf, data+10, TPM_DIGEST_SIZE);
  15.256 +	return rc;
  15.257 +}
  15.258 +EXPORT_SYMBOL_GPL(tpm_pcr_read);
  15.259 +
  15.260 +#define EXTEND_PCR_SIZE 34
  15.261 +static const u8 pcrextend[] = {
  15.262 +	0, 193,		 		 		 /* TPM_TAG_RQU_COMMAND */
  15.263 +	0, 0, 0, 34,		 		 /* length */
  15.264 +	0, 0, 0, 20,		 		 /* TPM_ORD_Extend */
  15.265 +	0, 0, 0, 0		 		 /* PCR index */
  15.266 +};
  15.267 +
  15.268 +/*
  15.269 + * Return 0 on success.  On error pass along error code.
  15.270 + * chip_id Upper 2 bytes equal ANY, HW_ONLY or SW_ONLY
  15.271 + * Lower 2 bytes equal tpm idx # or ANY
  15.272 + */
  15.273 +int tpm_pcr_extend(u32 chip_id, int pcr_idx, const u8* hash)
  15.274 +{
  15.275 +	u8 data[EXTEND_PCR_SIZE];
  15.276 +	int rc;
  15.277 +	__be32 index;
  15.278 +	int chip_num = chip_id & TPM_CHIP_NUM_MASK;
  15.279 +	struct tpm_chip* chip;
  15.280 +
  15.281 +	if ( (chip = tpm_chip_lookup( chip_num /*,
  15.282 +				      chip_id >> TPM_CHIP_TYPE_SHIFT */)) == NULL )
  15.283 +		return -ENODEV;
  15.284 +
  15.285 +	memcpy(data, pcrextend, sizeof(pcrextend));
  15.286 +	index = cpu_to_be32(pcr_idx);
  15.287 +	memcpy(data + 10, &index, 4);
  15.288 +	memcpy( data + 14, hash, TPM_DIGEST_SIZE );
  15.289 +	if ((rc = tpm_transmit(chip, data, sizeof(data))) > 0 )
  15.290 +		rc = be32_to_cpu(*((u32*)(data+6)));
  15.291 +	return rc;
  15.292 +}
  15.293 +EXPORT_SYMBOL_GPL(tpm_pcr_extend);
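tpm_pcr_read() and tpm_pcr_extend() are the exported entry points that let other kernel subsystems read and extend PCRs without going through /dev/tpm. A minimal sketch of their use (TPM_ANY_NUM picks whichever TPM is found first; the all-zero digest is only a placeholder measurement):

    static int demo_pcr_usage(void)
    {
    	u8 pcr_value[TPM_DIGEST_SIZE];
    	u8 digest[TPM_DIGEST_SIZE] = { 0 };	/* placeholder SHA-1 digest */
    	int rc;

    	/* read PCR 0; rc is the TPM result code, 0 on success */
    	rc = tpm_pcr_read(TPM_ANY_NUM, 0, pcr_value, sizeof(pcr_value));
    	if (rc)
    		return rc;

    	/* extend PCR 10 with the 20-byte digest */
    	return tpm_pcr_extend(TPM_ANY_NUM, 10, digest);
    }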
  15.294 +
  15.295 +
  15.296 +
  15.297 +#define  READ_PUBEK_RESULT_SIZE 314
  15.298 +static const u8 readpubek[] = {
  15.299 +	0, 193,			/* TPM_TAG_RQU_COMMAND */
  15.300 +	0, 0, 0, 30,		/* length */
  15.301 +	0, 0, 0, 124,		/* TPM_ORD_ReadPubek */
  15.302 +};
  15.303 +
  15.304 +ssize_t tpm_show_pubek(struct device *dev, char *buf)
  15.305 +{
  15.306 +	u8 *data;
  15.307 +	ssize_t len;
  15.308 +	int i, rc;
  15.309 +	char *str = buf;
  15.310 +
  15.311 +	struct tpm_chip *chip = dev_get_drvdata(dev);
  15.312 +	if (chip == NULL)
  15.313 +		return -ENODEV;
  15.314 +
  15.315 +	data = kmalloc(READ_PUBEK_RESULT_SIZE, GFP_KERNEL);
  15.316 +	if (!data)
  15.317 +		return -ENOMEM;
  15.318 +
  15.319 +	memcpy(data, readpubek, sizeof(readpubek));
  15.320 +	memset(data + sizeof(readpubek), 0, 20);	/* zero nonce */
  15.321 +
  15.322 +	if ((len = tpm_transmit(chip, data, READ_PUBEK_RESULT_SIZE)) <
  15.323 +	    READ_PUBEK_RESULT_SIZE) {
  15.324 +		rc = len;
  15.325 +		goto out;
  15.326 +	}
  15.327 +
  15.328 +	/*
  15.329 +	   ignore header 10 bytes
  15.330 +	   algorithm 32 bits (1 == RSA )
  15.331 +	   encscheme 16 bits
  15.332 +	   sigscheme 16 bits
  15.333 +	   parameters (RSA 12->bytes: keybit, #primes, expbit)
  15.334 +	   keylenbytes 32 bits
  15.335 +	   256 byte modulus
  15.336 +	   ignore checksum 20 bytes
  15.337 +	 */
  15.338 +
  15.339 +	str +=
  15.340 +	    sprintf(str,
  15.341 +		    "Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n"
  15.342 +		    "Sigscheme: %02X %02X\nParameters: %02X %02X %02X %02X"
  15.343 +		    " %02X %02X %02X %02X %02X %02X %02X %02X\n"
  15.344 +		    "Modulus length: %d\nModulus: \n",
  15.345 +		    data[10], data[11], data[12], data[13], data[14],
  15.346 +		    data[15], data[16], data[17], data[22], data[23],
  15.347 +		    data[24], data[25], data[26], data[27], data[28],
  15.348 +		    data[29], data[30], data[31], data[32], data[33],
  15.349 +		    be32_to_cpu(*((__be32 *) (data + 32))));
  15.350 +
  15.351 +	for (i = 0; i < 256; i++) {
  15.352 +		str += sprintf(str, "%02X ", data[i + 39]);
  15.353 +		if ((i + 1) % 16 == 0)
  15.354 +			str += sprintf(str, "\n");
  15.355 +	}
  15.356 +	rc = str - buf;
  15.357 +out:
  15.358 +	kfree(data);
  15.359 +	return rc;
  15.360 +}
  15.361 +
  15.362 +EXPORT_SYMBOL_GPL(tpm_show_pubek);
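
[Editorial sketch, not part of the changeset: the offsets parsed above correspond to this
on-the-wire layout of the ReadPubek response. The struct name is illustrative; nothing in
the patch defines it.]

    /* Illustrative layout of the 314-byte ReadPubek response parsed above. */
    struct example_readpubek_resp {
            u8 header[10];          /*   0: tag, paramSize, return code        */
            u8 algorithm[4];        /*  10: 1 == RSA                           */
            u8 encscheme[2];        /*  14                                     */
            u8 sigscheme[2];        /*  16                                     */
            u8 parm_size[4];        /*  18: size of the parameter block (12)   */
            u8 parameters[12];      /*  22: keybits, #primes, exponent size    */
            u8 keylen[4];           /*  34: modulus length in bytes (256)      */
            u8 modulus[256];        /*  38                                     */
            u8 checksum[20];        /* 294: ignored                            */
    };                              /* total 314 == READ_PUBEK_RESULT_SIZE     */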
  15.363 +
  15.364 +#define CAP_VER_RESULT_SIZE 18
  15.365 +static const u8 cap_version[] = {
  15.366 +	0, 193,			/* TPM_TAG_RQU_COMMAND */
  15.367 +	0, 0, 0, 18,		/* length */
  15.368 +	0, 0, 0, 101,		/* TPM_ORD_GetCapability */
  15.369 +	0, 0, 0, 6,
  15.370 +	0, 0, 0, 0
  15.371 +};
  15.372 +
  15.373 +#define CAP_MANUFACTURER_RESULT_SIZE 18
  15.374 +static const u8 cap_manufacturer[] = {
  15.375 +	0, 193,			/* TPM_TAG_RQU_COMMAND */
  15.376 +	0, 0, 0, 22,		/* length */
  15.377 +	0, 0, 0, 101,		/* TPM_ORD_GetCapability */
  15.378 +	0, 0, 0, 5,
  15.379 +	0, 0, 0, 4,
  15.380 +	0, 0, 1, 3
  15.381 +};
  15.382 +
  15.383 +ssize_t tpm_show_caps(struct device *dev, char *buf)
  15.384 +{
  15.385 +	u8 data[sizeof(cap_manufacturer)];
  15.386 +	ssize_t len;
  15.387 +	char *str = buf;
  15.388 +
  15.389 +	struct tpm_chip *chip = dev_get_drvdata(dev);
  15.390 +	if (chip == NULL)
  15.391 +		return -ENODEV;
  15.392 +
  15.393 +	memcpy(data, cap_manufacturer, sizeof(cap_manufacturer));
  15.394 +
  15.395 +	if ((len = tpm_transmit(chip, data, sizeof(data))) <
  15.396 +	    CAP_MANUFACTURER_RESULT_SIZE)
  15.397 +		return len;
  15.398 +
  15.399 +	str += sprintf(str, "Manufacturer: 0x%x\n",
  15.400 +		       be32_to_cpu(*((__be32 *)(data + 14))));
  15.401 +
  15.402 +	memcpy(data, cap_version, sizeof(cap_version));
  15.403 +
  15.404 +	if ((len = tpm_transmit(chip, data, sizeof(data))) <
  15.405 +	    CAP_VER_RESULT_SIZE)
  15.406 +		return len;
  15.407 +
  15.408 +	str +=
  15.409 +	    sprintf(str, "TCG version: %d.%d\nFirmware version: %d.%d\n",
  15.410 +		    (int) data[14], (int) data[15], (int) data[16],
  15.411 +		    (int) data[17]);
  15.412 +
  15.413 +	return str - buf;
  15.414 +}
  15.415 +
  15.416 +EXPORT_SYMBOL_GPL(tpm_show_caps);
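
[Editorial sketch, not part of the changeset: both GetCapability replies are read at offset
14 above because the response is laid out as follows. The struct name is illustrative.]

    /* Illustrative layout of the 18-byte GetCapability responses parsed above. */
    struct example_getcap_resp {
            u8 tag[2];              /*  0: response tag                        */
            u8 length[4];           /*  2: total response length               */
            u8 retcode[4];          /*  6: TPM return code                     */
            u8 cap_size[4];         /* 10: size of the returned value (4)      */
            u8 cap_value[4];        /* 14: manufacturer id / version bytes     */
    };                              /* total 18 == CAP_*_RESULT_SIZE           */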
  15.417 +
  15.418 +ssize_t tpm_store_cancel(struct device * dev, const char *buf,
  15.419 +			 size_t count)
  15.420 +{
  15.421 +	struct tpm_chip *chip = dev_get_drvdata(dev);
  15.422 +	if (chip == NULL)
  15.423 +		return 0;
  15.424 +
  15.425 +	chip->vendor->cancel(chip);
  15.426 +	return count;
  15.427 +}
  15.428 +
  15.429 +EXPORT_SYMBOL_GPL(tpm_store_cancel);
  15.430 +
  15.431 +/*
  15.432 + * Device file system interface to the TPM
  15.433 + */
  15.434 +int tpm_open(struct inode *inode, struct file *file)
  15.435 +{
  15.436 +	int rc = 0, minor = iminor(inode);
  15.437 +	struct tpm_chip *chip = NULL, *pos;
  15.438 +
  15.439 +	spin_lock(&driver_lock);
  15.440 +
  15.441 +	list_for_each_entry(pos, &tpm_chip_list, list) {
  15.442 +		if (pos->vendor->miscdev.minor == minor) {
  15.443 +			chip = pos;
  15.444 +			break;
  15.445 +		}
  15.446 +	}
  15.447 +
  15.448 +	if (chip == NULL) {
  15.449 +		rc = -ENODEV;
  15.450 +		goto err_out;
  15.451 +	}
  15.452 +
  15.453 +	if (chip->num_opens) {
  15.454 +		dev_dbg(chip->dev, "Another process owns this TPM\n");
  15.455 +		rc = -EBUSY;
  15.456 +		goto err_out;
  15.457 +	}
  15.458 +
  15.459 +	chip->num_opens++;
  15.460 +	get_device(chip->dev);
  15.461 +
  15.462 +	spin_unlock(&driver_lock);
  15.463 +
  15.464 +	chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
  15.465 +	if (chip->data_buffer == NULL) {
  15.466 +		chip->num_opens--;
  15.467 +		put_device(chip->dev);
  15.468 +		return -ENOMEM;
  15.469 +	}
  15.470 +
  15.471 +	atomic_set(&chip->data_pending, 0);
  15.472 +
  15.473 +	file->private_data = chip;
  15.474 +	return 0;
  15.475 +
  15.476 +err_out:
  15.477 +	spin_unlock(&driver_lock);
  15.478 +	return rc;
  15.479 +}
  15.480 +
  15.481 +EXPORT_SYMBOL_GPL(tpm_open);
  15.482 +
  15.483 +int tpm_release(struct inode *inode, struct file *file)
  15.484 +{
  15.485 +	struct tpm_chip *chip = file->private_data;
  15.486 +
  15.487 +	spin_lock(&driver_lock);
  15.488 +	file->private_data = NULL;
  15.489 +	chip->num_opens--;
  15.490 +	del_singleshot_timer_sync(&chip->user_read_timer);
  15.491 +	atomic_set(&chip->data_pending, 0);
  15.492 +	put_device(chip->dev);
  15.493 +	kfree(chip->data_buffer);
  15.494 +	spin_unlock(&driver_lock);
  15.495 +	return 0;
  15.496 +}
  15.497 +
  15.498 +EXPORT_SYMBOL_GPL(tpm_release);
  15.499 +
  15.500 +ssize_t tpm_write(struct file * file, const char __user * buf,
  15.501 +		  size_t size, loff_t * off)
  15.502 +{
  15.503 +	struct tpm_chip *chip = file->private_data;
  15.504 +	int in_size = size, out_size;
  15.505 +
  15.506 +	/* cannot perform a write until the read has cleared
  15.507 +	   either via tpm_read or a user_read_timer timeout */
  15.508 +	while (atomic_read(&chip->data_pending) != 0)
  15.509 +		msleep(TPM_TIMEOUT);
  15.510 +
  15.511 +	down(&chip->buffer_mutex);
  15.512 +
  15.513 +	if (in_size > TPM_BUFSIZE)
  15.514 +		in_size = TPM_BUFSIZE;
  15.515 +
  15.516 +	if (copy_from_user
  15.517 +	    (chip->data_buffer, (void __user *) buf, in_size)) {
  15.518 +		up(&chip->buffer_mutex);
  15.519 +		return -EFAULT;
  15.520 +	}
  15.521 +
  15.522 +	/* atomic tpm command send and result receive */
  15.523 +	out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
  15.524 +
  15.525 +	atomic_set(&chip->data_pending, out_size);
  15.526 +	up(&chip->buffer_mutex);
  15.527 +
  15.528 +	/* Set a timeout by which the reader must claim the result */
  15.529 +	mod_timer(&chip->user_read_timer, jiffies + (60 * HZ));
  15.530 +
  15.531 +	return in_size;
  15.532 +}
  15.533 +
  15.534 +EXPORT_SYMBOL_GPL(tpm_write);
  15.535 +
  15.536 +ssize_t tpm_read(struct file * file, char __user * buf,
  15.537 +		 size_t size, loff_t * off)
  15.538 +{
  15.539 +	struct tpm_chip *chip = file->private_data;
  15.540 +	int ret_size;
  15.541 +
  15.542 +	del_singleshot_timer_sync(&chip->user_read_timer);
  15.543 +	ret_size = atomic_read(&chip->data_pending);
  15.544 +
  15.545 +	if (ret_size > 0) {	/* relay data */
  15.546 +		int position = atomic_read(&chip->data_position);
  15.547 +
  15.548 +		if (size < ret_size)
  15.549 +			ret_size = size;
  15.550 +
  15.551 +		down(&chip->buffer_mutex);
  15.552 +
  15.553 +		if (copy_to_user((void __user *) buf,
  15.554 +				 &chip->data_buffer[position],
  15.555 +				 ret_size)) {
  15.556 +			ret_size = -EFAULT;
  15.557 +		} else {
  15.558 +		 	int pending = atomic_read(&chip->data_pending) - ret_size;
  15.559 +			atomic_set(&chip->data_pending,
  15.560 +			           pending);
  15.561 +			atomic_set(&chip->data_position,
  15.562 +			           position + ret_size);
  15.563 +		}
  15.564 +		up(&chip->buffer_mutex);
  15.565 +	}
  15.566 +
  15.567 +	return ret_size;
  15.568 +}
  15.569 +
  15.570 +EXPORT_SYMBOL_GPL(tpm_read);
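
[Editorial sketch, not part of the changeset: tpm_write() and tpm_read() give userspace a
one-command-at-a-time protocol on the character device: write one complete TPM command blob,
then read the result on the same file descriptor before the 60-second claim timer fires. The
device node name depends on how /dev is populated and is assumed to be /dev/tpm0 here; the
command blob is the same GetCapability/version request used by tpm_show_caps().]

    /* Illustrative userspace sketch only. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* TPM_GetCapability, TPM version subcap (same bytes as cap_version) */
            unsigned char cmd[] = { 0, 193, 0, 0, 0, 18, 0, 0, 0, 101,
                                    0, 0, 0, 6, 0, 0, 0, 0 };
            unsigned char resp[4096];
            ssize_t n;
            int fd = open("/dev/tpm0", O_RDWR);

            if (fd < 0)
                    return 1;
            if (write(fd, cmd, sizeof(cmd)) == (ssize_t) sizeof(cmd)) {
                    /* must read before the 60 s user_read_timer clears the result */
                    n = read(fd, resp, sizeof(resp));
                    if (n >= 10)
                            printf("%zd bytes, return code %02x%02x%02x%02x\n",
                                   n, resp[6], resp[7], resp[8], resp[9]);
            }
            close(fd);
            return 0;
    }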
  15.571 +
  15.572 +void tpm_remove_hardware(struct device *dev)
  15.573 +{
  15.574 +	struct tpm_chip *chip = dev_get_drvdata(dev);
  15.575 +	int i;
  15.576 +
  15.577 +	if (chip == NULL) {
  15.578 +		dev_err(dev, "No device data found\n");
  15.579 +		return;
  15.580 +	}
  15.581 +
  15.582 +	spin_lock(&driver_lock);
  15.583 +
  15.584 +	list_del(&chip->list);
  15.585 +
  15.586 +	spin_unlock(&driver_lock);
  15.587 +
  15.588 +	dev_set_drvdata(dev, NULL);
  15.589 +	misc_deregister(&chip->vendor->miscdev);
  15.590 +
  15.591 +	for (i = 0; i < TPM_NUM_ATTR; i++)
  15.592 +		device_remove_file(dev, &chip->vendor->attr[i]);
  15.593 +
  15.594 +	dev_mask[chip->dev_num / TPM_NUM_MASK_ENTRIES] &=
  15.595 +	    ~(1 << (chip->dev_num % TPM_NUM_MASK_ENTRIES));
  15.596 +
  15.597 +	kfree(chip);
  15.598 +
  15.599 +	put_device(dev);
  15.600 +}
  15.601 +
  15.602 +EXPORT_SYMBOL_GPL(tpm_remove_hardware);
  15.603 +
  15.604 +static const u8 savestate[] = {
  15.605 +	0, 193,			/* TPM_TAG_RQU_COMMAND */
  15.606 +	0, 0, 0, 10,		/* blob length (in bytes) */
  15.607 +	0, 0, 0, 152		/* TPM_ORD_SaveState */
  15.608 +};
  15.609 +
  15.610 +/*
  15.611 + * We are about to suspend. Save the TPM state
  15.612 + * so that it can be restored.
  15.613 + */
  15.614 +int tpm_pm_suspend(struct pci_dev *pci_dev, u32 pm_state)
  15.615 +{
  15.616 +	struct tpm_chip *chip = pci_get_drvdata(pci_dev);
  15.617 +	if (chip == NULL)
  15.618 +		return -ENODEV;
  15.619 +
  15.620 +	tpm_transmit(chip, savestate, sizeof(savestate));
  15.621 +	return 0;
  15.622 +}
  15.623 +
  15.624 +EXPORT_SYMBOL_GPL(tpm_pm_suspend);
  15.625 +
  15.626 +/*
  15.627 + * Resume from suspend. The BIOS already restored
  15.628 + * the TPM state.
  15.629 + */
  15.630 +int tpm_pm_resume(struct pci_dev *pci_dev)
  15.631 +{
  15.632 +	struct tpm_chip *chip = pci_get_drvdata(pci_dev);
  15.633 +
  15.634 +	if (chip == NULL)
  15.635 +		return -ENODEV;
  15.636 +
  15.637 +	return 0;
  15.638 +}
  15.639 +
  15.640 +EXPORT_SYMBOL_GPL(tpm_pm_resume);
  15.641 +
  15.642 +/*
  15.643 + * Called from the tpm_<specific>.c probe function only for devices
  15.644 + * the driver has determined it should claim.  Prior to calling this
  15.645 + * function, the specific probe function has called pci_enable_device;
  15.646 + * upon errant exit from this function, the specific probe function
  15.647 + * should call pci_disable_device.
  15.648 + */
  15.649 +int tpm_register_hardware_nopci(struct device *dev,
  15.650 +			        struct tpm_vendor_specific *entry)
  15.651 +{
  15.652 +	char devname[7];
  15.653 +	struct tpm_chip *chip;
  15.654 +	int i, j;
  15.655 +
  15.656 +	/* Driver specific per-device data */
  15.657 +	chip = kmalloc(sizeof(*chip), GFP_KERNEL);
  15.658 +	if (chip == NULL)
  15.659 +		return -ENOMEM;
  15.660 +
  15.661 +	memset(chip, 0, sizeof(struct tpm_chip));
  15.662 +
  15.663 +	init_MUTEX(&chip->buffer_mutex);
  15.664 +	init_MUTEX(&chip->tpm_mutex);
  15.665 +	INIT_LIST_HEAD(&chip->list);
  15.666 +
  15.667 +	init_timer(&chip->user_read_timer);
  15.668 +	chip->user_read_timer.function = user_reader_timeout;
  15.669 +	chip->user_read_timer.data = (unsigned long) chip;
  15.670 +
  15.671 +	chip->vendor = entry;
  15.672 +
  15.673 +	chip->dev_num = -1;
  15.674 +
  15.675 +	for (i = 0; i < TPM_NUM_MASK_ENTRIES; i++)
  15.676 +		for (j = 0; j < 8 * sizeof(int); j++)
  15.677 +			if ((dev_mask[i] & (1 << j)) == 0) {
  15.678 +				chip->dev_num =
  15.679 +				    i * TPM_NUM_MASK_ENTRIES + j;
  15.680 +				dev_mask[i] |= 1 << j;
  15.681 +				goto dev_num_search_complete;
  15.682 +			}
  15.683 +
  15.684 +dev_num_search_complete:
  15.685 +	if (chip->dev_num < 0) {
  15.686 +		dev_err(dev, "No available tpm device numbers\n");
  15.687 +		kfree(chip);
  15.688 +		return -ENODEV;
  15.689 +	} else if (chip->dev_num == 0)
  15.690 +		chip->vendor->miscdev.minor = TPM_MINOR;
  15.691 +	else
  15.692 +		chip->vendor->miscdev.minor = MISC_DYNAMIC_MINOR;
  15.693 +
  15.694 +	snprintf(devname, sizeof(devname), "%s%d", "tpm", chip->dev_num);
  15.695 +	chip->vendor->miscdev.name = devname;
  15.696 +
  15.697 +	chip->vendor->miscdev.dev = dev;
  15.698 +	chip->dev = get_device(dev);
  15.699 +
  15.700 +
  15.701 +	if (misc_register(&chip->vendor->miscdev)) {
  15.702 +		dev_err(chip->dev,
  15.703 +			"unable to misc_register %s, minor %d\n",
  15.704 +			chip->vendor->miscdev.name,
  15.705 +			chip->vendor->miscdev.minor);
  15.706 +		put_device(dev);
  15.707 +		kfree(chip);
  15.708 +		dev_mask[i] &= ~(1 << j);
  15.709 +		return -ENODEV;
  15.710 +	}
  15.711 +
  15.712 +	spin_lock(&driver_lock);
  15.713 +
  15.714 +	dev_set_drvdata(dev, chip);
  15.715 +
  15.716 +	list_add(&chip->list, &tpm_chip_list);
  15.717 +
  15.718 +	spin_unlock(&driver_lock);
  15.719 +
  15.720 +	for (i = 0; i < TPM_NUM_ATTR; i++)
  15.721 +		device_create_file(dev, &chip->vendor->attr[i]);
  15.722 +
  15.723 +	return 0;
  15.724 +}
  15.725 +
  15.726 +EXPORT_SYMBOL_GPL(tpm_register_hardware_nopci);
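
[Editorial sketch, not part of the changeset: this function is the registration contract
a hardware-specific driver relies on. tpm_xen.c below uses this non-PCI variant; tpm_nsc.c
uses the PCI flavour, tpm_register_hardware(), from tpm.h. The ex_* names are made up and
the callback bodies are stubs.]

    /* Illustrative vendor-driver skeleton. */
    static int  ex_send(struct tpm_chip *chip, u8 *buf, size_t len) { return len; }
    static int  ex_recv(struct tpm_chip *chip, u8 *buf, size_t len) { return 0; }
    static void ex_cancel(struct tpm_chip *chip) { }
    static u8   ex_status(struct tpm_chip *chip) { return 0; }

    static struct file_operations ex_fops = {
            .owner   = THIS_MODULE,
            .llseek  = no_llseek,
            .open    = tpm_open,
            .read    = tpm_read,
            .write   = tpm_write,
            .release = tpm_release,
    };

    static struct tpm_vendor_specific ex_vendor = {
            .recv    = ex_recv,
            .send    = ex_send,
            .cancel  = ex_cancel,
            .status  = ex_status,
            .attr    = TPM_DEVICE_ATTRS,
            .miscdev = { .fops = &ex_fops, },
    };

    /* in the probe path, once `dev` exists:
     *      rc = tpm_register_hardware_nopci(dev, &ex_vendor);
     */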
  15.727 +
  15.728 +static int __init init_tpm(void)
  15.729 +{
  15.730 +	return 0;
  15.731 +}
  15.732 +
  15.733 +static void __exit cleanup_tpm(void)
  15.734 +{
  15.735 +
  15.736 +}
  15.737 +
  15.738 +module_init(init_tpm);
  15.739 +module_exit(cleanup_tpm);
  15.740 +
  15.741 +MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
  15.742 +MODULE_DESCRIPTION("TPM Driver");
  15.743 +MODULE_VERSION("2.0");
  15.744 +MODULE_LICENSE("GPL");
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm_nopci.h	Tue Aug 30 11:48:08 2005 -0800
    16.3 @@ -0,0 +1,127 @@
    16.4 +/*
    16.5 + * Copyright (C) 2004 IBM Corporation
    16.6 + *
    16.7 + * Authors:
    16.8 + * Leendert van Doorn <leendert@watson.ibm.com>
    16.9 + * Dave Safford <safford@watson.ibm.com>
   16.10 + * Reiner Sailer <sailer@watson.ibm.com>
   16.11 + * Kylene Hall <kjhall@us.ibm.com>
   16.12 + *
   16.13 + * Maintained by: <tpmdd_devel@lists.sourceforge.net>
   16.14 + *
   16.15 + * Device driver for TCG/TCPA TPM (trusted platform module).
   16.16 + * Specifications at www.trustedcomputinggroup.org
   16.17 + *
   16.18 + * This program is free software; you can redistribute it and/or
   16.19 + * modify it under the terms of the GNU General Public License as
   16.20 + * published by the Free Software Foundation, version 2 of the
   16.21 + * License.
   16.22 + *
   16.23 + */
   16.24 +#include <linux/module.h>
   16.25 +#include <linux/version.h>
   16.26 +#include <linux/pci.h>
   16.27 +#include <linux/delay.h>
   16.28 +#include <linux/miscdevice.h>
   16.29 +
   16.30 +enum {
   16.31 +	TPM_TIMEOUT = 5,	/* msecs */
   16.32 +	TPM_NUM_ATTR = 4
   16.33 +};
   16.34 +
   16.35 +/* TPM addresses */
   16.36 +enum {
   16.37 +	TPM_ADDR = 0x4E,
   16.38 +	TPM_DATA = 0x4F
   16.39 +};
   16.40 +
   16.41 +/*
   16.42 + * Chip num is this value or a valid tpm idx in lower two bytes of chip_id
   16.43 + */
   16.44 +enum tpm_chip_num {
   16.45 +	TPM_ANY_NUM = 0xFFFF,
   16.46 +};
   16.47 +
   16.48 +#define TPM_CHIP_NUM_MASK	0x0000ffff
   16.49 +
   16.50 +extern ssize_t tpm_show_pubek(struct device *, char *);
   16.51 +extern ssize_t tpm_show_pcrs(struct device *, char *);
   16.52 +extern ssize_t tpm_show_caps(struct device *, char *);
   16.53 +extern ssize_t tpm_store_cancel(struct device *, const char *, size_t);
   16.54 +
   16.55 +#define TPM_DEVICE_ATTRS { \
   16.56 +	__ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL), \
   16.57 +	__ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL), \
   16.58 +	__ATTR(caps, S_IRUGO, tpm_show_caps, NULL), \
   16.59 +	__ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel) }
   16.60 +
   16.61 +struct tpm_chip;
   16.62 +
   16.63 +struct tpm_vendor_specific {
   16.64 +	u8 req_complete_mask;
   16.65 +	u8 req_complete_val;
   16.66 +	u8 req_canceled;
   16.67 +	u16 base;		/* TPM base address */
   16.68 +
   16.69 +	int (*recv) (struct tpm_chip *, u8 *, size_t);
   16.70 +	int (*send) (struct tpm_chip *, u8 *, size_t);
   16.71 +	void (*cancel) (struct tpm_chip *);
   16.72 +	u8 (*status) (struct tpm_chip *);
   16.73 +	struct miscdevice miscdev;
   16.74 +	struct device_attribute attr[TPM_NUM_ATTR];
   16.75 +};
   16.76 +
   16.77 +struct tpm_chip {
   16.78 +	struct device *dev;	/* PCI device stuff */
   16.79 +
   16.80 +	int dev_num;		/* /dev/tpm# */
   16.81 +	int num_opens;		/* only one allowed */
   16.82 +	int time_expired;
   16.83 +
   16.84 +	/* Data passed to and from the tpm via the read/write calls */
   16.85 +	u8 *data_buffer;
   16.86 +	atomic_t data_pending;
   16.87 +	atomic_t data_position;
   16.88 +	struct semaphore buffer_mutex;
   16.89 +
   16.90 +	struct timer_list user_read_timer;	/* user needs to claim result */
   16.91 +	struct semaphore tpm_mutex;	/* tpm is processing */
   16.92 +
   16.93 +	struct tpm_vendor_specific *vendor;
   16.94 +
   16.95 +	struct list_head list;
   16.96 +};
   16.97 +
   16.98 +static inline int tpm_read_index(int index)
   16.99 +{
  16.100 +	outb(index, TPM_ADDR);
  16.101 +	return inb(TPM_DATA) & 0xFF;
  16.102 +}
  16.103 +
  16.104 +static inline void tpm_write_index(int index, int value)
  16.105 +{
  16.106 +	outb(index, TPM_ADDR);
  16.107 +	outb(value & 0xFF, TPM_DATA);
  16.108 +}
  16.109 +
  16.110 +extern void tpm_time_expired(unsigned long);
  16.111 +extern int tpm_lpc_bus_init(struct pci_dev *, u16);
  16.112 +
  16.113 +extern int tpm_register_hardware_nopci(struct device *,
  16.114 +				       struct tpm_vendor_specific *);
  16.115 +extern void tpm_remove_hardware(struct device *);
  16.116 +extern int tpm_open(struct inode *, struct file *);
  16.117 +extern int tpm_release(struct inode *, struct file *);
  16.118 +extern ssize_t tpm_write(struct file *, const char __user *, size_t,
  16.119 +			 loff_t *);
  16.120 +extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *);
  16.121 +extern int tpm_pcr_extend(u32 chip_id, int pcr_idx, const u8* hash);
  16.122 +extern int tpm_pcr_read( u32 chip_id, int pcr_idx, u8* res_buf, int res_buf_size );
  16.123 +
  16.124 +extern int tpm_pm_suspend(struct pci_dev *, u32);
  16.125 +extern int tpm_pm_resume(struct pci_dev *);
  16.126 +
  16.127 +/* internal kernel interface */
  16.128 +extern ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
  16.129 +			    size_t bufsiz);
  16.130 +extern struct tpm_chip *tpm_chip_lookup(int chip_num);
    17.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm_nsc.c	Tue Aug 30 11:48:08 2005 -0800
    17.3 @@ -0,0 +1,377 @@
    17.4 +/*
    17.5 + * Copyright (C) 2004 IBM Corporation
    17.6 + *
    17.7 + * Authors:
    17.8 + * Leendert van Doorn <leendert@watson.ibm.com>
    17.9 + * Dave Safford <safford@watson.ibm.com>
   17.10 + * Reiner Sailer <sailer@watson.ibm.com>
   17.11 + * Kylene Hall <kjhall@us.ibm.com>
   17.12 + *
   17.13 + * Maintained by: <tpmdd_devel@lists.sourceforge.net>
   17.14 + *
   17.15 + * Device driver for TCG/TCPA TPM (trusted platform module).
   17.16 + * Specifications at www.trustedcomputinggroup.org
   17.17 + *
   17.18 + * This program is free software; you can redistribute it and/or
   17.19 + * modify it under the terms of the GNU General Public License as
   17.20 + * published by the Free Software Foundation, version 2 of the
   17.21 + * License.
   17.22 + *
   17.23 + */
   17.24 +
   17.25 +#include "tpm.h"
   17.26 +
   17.27 +/* National definitions */
   17.28 +#define	TPM_NSC_BASE			0x360
   17.29 +#define	TPM_NSC_IRQ			0x07
   17.30 +#define	TPM_NSC_BASE0_HI		0x60
   17.31 +#define	TPM_NSC_BASE0_LO		0x61
   17.32 +#define	TPM_NSC_BASE1_HI		0x62
   17.33 +#define	TPM_NSC_BASE1_LO		0x63
   17.34 +
   17.35 +#define	NSC_LDN_INDEX			0x07
   17.36 +#define	NSC_SID_INDEX			0x20
   17.37 +#define	NSC_LDC_INDEX			0x30
   17.38 +#define	NSC_DIO_INDEX			0x60
   17.39 +#define	NSC_CIO_INDEX			0x62
   17.40 +#define	NSC_IRQ_INDEX			0x70
   17.41 +#define	NSC_ITS_INDEX			0x71
   17.42 +
   17.43 +#define	NSC_STATUS			0x01
   17.44 +#define	NSC_COMMAND			0x01
   17.45 +#define	NSC_DATA			0x00
   17.46 +
   17.47 +/* status bits */
   17.48 +#define	NSC_STATUS_OBF			0x01	/* output buffer full */
   17.49 +#define	NSC_STATUS_IBF			0x02	/* input buffer full */
   17.50 +#define	NSC_STATUS_F0			0x04	/* F0 */
   17.51 +#define	NSC_STATUS_A2			0x08	/* A2 */
   17.52 +#define	NSC_STATUS_RDY			0x10	/* ready to receive command */
   17.53 +#define	NSC_STATUS_IBR			0x20	/* ready to receive data */
   17.54 +
   17.55 +/* command bits */
   17.56 +#define	NSC_COMMAND_NORMAL		0x01	/* normal mode */
   17.57 +#define	NSC_COMMAND_EOC			0x03
   17.58 +#define	NSC_COMMAND_CANCEL		0x22
   17.59 +
   17.60 +/*
   17.61 + * Wait for a certain status to appear
   17.62 + */
   17.63 +static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data)
   17.64 +{
   17.65 +	int expired = 0;
   17.66 +	struct timer_list status_timer =
   17.67 +	    TIMER_INITIALIZER(tpm_time_expired, jiffies + 10 * HZ,
   17.68 +			      (unsigned long) &expired);
   17.69 +
   17.70 +	/* status immediately available check */
   17.71 +	*data = inb(chip->vendor->base + NSC_STATUS);
   17.72 +	if ((*data & mask) == val)
   17.73 +		return 0;
   17.74 +
   17.75 +	/* wait for status */
   17.76 +	add_timer(&status_timer);
   17.77 +	do {
   17.78 +		set_current_state(TASK_UNINTERRUPTIBLE);
   17.79 +		schedule_timeout(TPM_TIMEOUT);
   17.80 +		*data = inb(chip->vendor->base + 1);
   17.81 +		if ((*data & mask) == val) {
   17.82 +			del_singleshot_timer_sync(&status_timer);
   17.83 +			return 0;
   17.84 +		}
   17.85 +	}
   17.86 +	while (!expired);
   17.87 +
   17.88 +	return -EBUSY;
   17.89 +}
   17.90 +
   17.91 +static int nsc_wait_for_ready(struct tpm_chip *chip)
   17.92 +{
   17.93 +	int status;
   17.94 +	int expired = 0;
   17.95 +	struct timer_list status_timer =
   17.96 +	    TIMER_INITIALIZER(tpm_time_expired, jiffies + 100,
   17.97 +			      (unsigned long) &expired);
   17.98 +
   17.99 +	/* status immediately available check */
  17.100 +	status = inb(chip->vendor->base + NSC_STATUS);
  17.101 +	if (status & NSC_STATUS_OBF)
  17.102 +		status = inb(chip->vendor->base + NSC_DATA);
  17.103 +	if (status & NSC_STATUS_RDY)
  17.104 +		return 0;
  17.105 +
  17.106 +	/* wait for status */
  17.107 +	add_timer(&status_timer);
  17.108 +	do {
  17.109 +		set_current_state(TASK_UNINTERRUPTIBLE);
  17.110 +		schedule_timeout(TPM_TIMEOUT);
  17.111 +		status = inb(chip->vendor->base + NSC_STATUS);
  17.112 +		if (status & NSC_STATUS_OBF)
  17.113 +			status = inb(chip->vendor->base + NSC_DATA);
  17.114 +		if (status & NSC_STATUS_RDY) {
  17.115 +			del_singleshot_timer_sync(&status_timer);
  17.116 +			return 0;
  17.117 +		}
  17.118 +	}
  17.119 +	while (!expired);
  17.120 +
  17.121 +	dev_info(&chip->pci_dev->dev, "wait for ready failed\n");
  17.122 +	return -EBUSY;
  17.123 +}
  17.124 +
  17.125 +
  17.126 +static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
  17.127 +{
  17.128 +	u8 *buffer = buf;
  17.129 +	u8 data, *p;
  17.130 +	u32 size;
  17.131 +	__be32 *native_size;
  17.132 +
  17.133 +	if (count < 6)
  17.134 +		return -EIO;
  17.135 +
  17.136 +	if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) {
  17.137 +		dev_err(&chip->pci_dev->dev, "F0 timeout\n");
  17.138 +		return -EIO;
  17.139 +	}
  17.140 +	if ((data =
  17.141 +	     inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
  17.142 +		dev_err(&chip->pci_dev->dev, "not in normal mode (0x%x)\n",
  17.143 +			data);
  17.144 +		return -EIO;
  17.145 +	}
  17.146 +
  17.147 +	/* read the whole packet */
  17.148 +	for (p = buffer; p < &buffer[count]; p++) {
  17.149 +		if (wait_for_stat
  17.150 +		    (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) {
  17.151 +			dev_err(&chip->pci_dev->dev,
  17.152 +				"OBF timeout (while reading data)\n");
  17.153 +			return -EIO;
  17.154 +		}
  17.155 +		if (data & NSC_STATUS_F0)
  17.156 +			break;
  17.157 +		*p = inb(chip->vendor->base + NSC_DATA);
  17.158 +	}
  17.159 +
  17.160 +	if ((data & NSC_STATUS_F0) == 0) {
  17.161 +		dev_err(&chip->pci_dev->dev, "F0 not set\n");
  17.162 +		return -EIO;
  17.163 +	}
  17.164 +	if ((data = inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_EOC) {
  17.165 +		dev_err(&chip->pci_dev->dev,
  17.166 +			"expected end of command(0x%x)\n", data);
  17.167 +		return -EIO;
  17.168 +	}
  17.169 +
  17.170 +	native_size = (__force __be32 *) (buf + 2);
  17.171 +	size = be32_to_cpu(*native_size);
  17.172 +
  17.173 +	if (count < size)
  17.174 +		return -EIO;
  17.175 +
  17.176 +	return size;
  17.177 +}
  17.178 +
  17.179 +static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
  17.180 +{
  17.181 +	u8 data;
  17.182 +	int i;
  17.183 +
  17.184 +	/*
  17.185 +	 * If we hit the chip with back-to-back commands it locks up
  17.186 +	 * and never sets IBF. Hitting it with this "hammer" seems to
  17.187 +	 * fix it. Not sure why this is needed; we followed the flow
  17.188 +	 * chart in the manual to the letter.
  17.189 +	 */
  17.190 +	outb(NSC_COMMAND_CANCEL, chip->vendor->base + NSC_COMMAND);
  17.191 +
  17.192 +	if (nsc_wait_for_ready(chip) != 0)
  17.193 +		return -EIO;
  17.194 +
  17.195 +	if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
  17.196 +		dev_err(&chip->pci_dev->dev, "IBF timeout\n");
  17.197 +		return -EIO;
  17.198 +	}
  17.199 +
  17.200 +	outb(NSC_COMMAND_NORMAL, chip->vendor->base + NSC_COMMAND);
  17.201 +	if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
  17.202 +		dev_err(&chip->pci_dev->dev, "IBR timeout\n");
  17.203 +		return -EIO;
  17.204 +	}
  17.205 +
  17.206 +	for (i = 0; i < count; i++) {
  17.207 +		if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
  17.208 +			dev_err(&chip->pci_dev->dev,
  17.209 +				"IBF timeout (while writing data)\n");
  17.210 +			return -EIO;
  17.211 +		}
  17.212 +		outb(buf[i], chip->vendor->base + NSC_DATA);
  17.213 +	}
  17.214 +
  17.215 +	if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
  17.216 +		dev_err(&chip->pci_dev->dev, "IBF timeout\n");
  17.217 +		return -EIO;
  17.218 +	}
  17.219 +	outb(NSC_COMMAND_EOC, chip->vendor->base + NSC_COMMAND);
  17.220 +
  17.221 +	return count;
  17.222 +}
  17.223 +
  17.224 +static void tpm_nsc_cancel(struct tpm_chip *chip)
  17.225 +{
  17.226 +	outb(NSC_COMMAND_CANCEL, chip->vendor->base + NSC_COMMAND);
  17.227 +}
  17.228 +
  17.229 +static struct file_operations nsc_ops = {
  17.230 +	.owner = THIS_MODULE,
  17.231 +	.llseek = no_llseek,
  17.232 +	.open = tpm_open,
  17.233 +	.read = tpm_read,
  17.234 +	.write = tpm_write,
  17.235 +	.release = tpm_release,
  17.236 +};
  17.237 +
  17.238 +static struct tpm_vendor_specific tpm_nsc = {
  17.239 +	.recv = tpm_nsc_recv,
  17.240 +	.send = tpm_nsc_send,
  17.241 +	.cancel = tpm_nsc_cancel,
  17.242 +	.req_complete_mask = NSC_STATUS_OBF,
  17.243 +	.req_complete_val = NSC_STATUS_OBF,
  17.244 +	.miscdev = { .fops = &nsc_ops, },
  17.245 +
  17.246 +};
  17.247 +
  17.248 +static int __devinit tpm_nsc_init(struct pci_dev *pci_dev,
  17.249 +				  const struct pci_device_id *pci_id)
  17.250 +{
  17.251 +	int rc = 0;
  17.252 +	int lo, hi;
  17.253 +
  17.254 +	hi = tpm_read_index(TPM_NSC_BASE0_HI);
  17.255 +	lo = tpm_read_index(TPM_NSC_BASE0_LO);
  17.256 +
  17.257 +	tpm_nsc.base = (hi<<8) | lo;
  17.258 +
  17.259 +	if (pci_enable_device(pci_dev))
  17.260 +		return -EIO;
  17.261 +
  17.262 +	/* verify that it is a National part (SID) */
  17.263 +	if (tpm_read_index(NSC_SID_INDEX) != 0xEF) {
  17.264 +		rc = -ENODEV;
  17.265 +		goto out_err;
  17.266 +	}
  17.267 +
  17.268 +	dev_dbg(&pci_dev->dev, "NSC TPM detected\n");
  17.269 +	dev_dbg(&pci_dev->dev,
  17.270 +		"NSC LDN 0x%x, SID 0x%x, SRID 0x%x\n",
  17.271 +		tpm_read_index(0x07), tpm_read_index(0x20),
  17.272 +		tpm_read_index(0x27));
  17.273 +	dev_dbg(&pci_dev->dev,
  17.274 +		"NSC SIOCF1 0x%x SIOCF5 0x%x SIOCF6 0x%x SIOCF8 0x%x\n",
  17.275 +		tpm_read_index(0x21), tpm_read_index(0x25),
  17.276 +		tpm_read_index(0x26), tpm_read_index(0x28));
  17.277 +	dev_dbg(&pci_dev->dev, "NSC IO Base0 0x%x\n",
  17.278 +		(tpm_read_index(0x60) << 8) | tpm_read_index(0x61));
  17.279 +	dev_dbg(&pci_dev->dev, "NSC IO Base1 0x%x\n",
  17.280 +		(tpm_read_index(0x62) << 8) | tpm_read_index(0x63));
  17.281 +	dev_dbg(&pci_dev->dev, "NSC Interrupt number and wakeup 0x%x\n",
  17.282 +		tpm_read_index(0x70));
  17.283 +	dev_dbg(&pci_dev->dev, "NSC IRQ type select 0x%x\n",
  17.284 +		tpm_read_index(0x71));
  17.285 +	dev_dbg(&pci_dev->dev,
  17.286 +		"NSC DMA channel select0 0x%x, select1 0x%x\n",
  17.287 +		tpm_read_index(0x74), tpm_read_index(0x75));
  17.288 +	dev_dbg(&pci_dev->dev,
  17.289 +		"NSC Config "
  17.290 +		"0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
  17.291 +		tpm_read_index(0xF0), tpm_read_index(0xF1),
  17.292 +		tpm_read_index(0xF2), tpm_read_index(0xF3),
  17.293 +		tpm_read_index(0xF4), tpm_read_index(0xF5),
  17.294 +		tpm_read_index(0xF6), tpm_read_index(0xF7),
  17.295 +		tpm_read_index(0xF8), tpm_read_index(0xF9));
  17.296 +
  17.297 +	dev_info(&pci_dev->dev,
  17.298 +		 "NSC PC21100 TPM revision %d\n",
  17.299 +		 tpm_read_index(0x27) & 0x1F);
  17.300 +
  17.301 +	if (tpm_read_index(NSC_LDC_INDEX) == 0)
  17.302 +		dev_info(&pci_dev->dev, "NSC TPM not active\n");
  17.303 +
  17.304 +	/* select PM channel 1 */
  17.305 +	tpm_write_index(NSC_LDN_INDEX, 0x12);
  17.306 +	tpm_read_index(NSC_LDN_INDEX);
  17.307 +
  17.308 +	/* disable the DPM module */
  17.309 +	tpm_write_index(NSC_LDC_INDEX, 0);
  17.310 +	tpm_read_index(NSC_LDC_INDEX);
  17.311 +
  17.312 +	/* set the data register base addresses */
  17.313 +	tpm_write_index(NSC_DIO_INDEX, TPM_NSC_BASE >> 8);
  17.314 +	tpm_write_index(NSC_DIO_INDEX + 1, TPM_NSC_BASE);
  17.315 +	tpm_read_index(NSC_DIO_INDEX);
  17.316 +	tpm_read_index(NSC_DIO_INDEX + 1);
  17.317 +
  17.318 +	/* set the command register base addresses */
  17.319 +	tpm_write_index(NSC_CIO_INDEX, (TPM_NSC_BASE + 1) >> 8);
  17.320 +	tpm_write_index(NSC_CIO_INDEX + 1, (TPM_NSC_BASE + 1));
  17.321 +	tpm_read_index(NSC_DIO_INDEX);
  17.322 +	tpm_read_index(NSC_DIO_INDEX + 1);
  17.323 +
  17.324 +	/* set the interrupt number to be used for the host interface */
  17.325 +	tpm_write_index(NSC_IRQ_INDEX, TPM_NSC_IRQ);
  17.326 +	tpm_write_index(NSC_ITS_INDEX, 0x00);
  17.327 +	tpm_read_index(NSC_IRQ_INDEX);
  17.328 +
  17.329 +	/* enable the DPM module */
  17.330 +	tpm_write_index(NSC_LDC_INDEX, 0x01);
  17.331 +	tpm_read_index(NSC_LDC_INDEX);
  17.332 +
  17.333 +	if ((rc = tpm_register_hardware(pci_dev, &tpm_nsc)) < 0)
  17.334 +		goto out_err;
  17.335 +
  17.336 +	return 0;
  17.337 +
  17.338 +out_err:
  17.339 +	pci_disable_device(pci_dev);
  17.340 +	return rc;
  17.341 +}
  17.342 +
  17.343 +static struct pci_device_id tpm_pci_tbl[] __devinitdata = {
  17.344 +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0)},
  17.345 +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12)},
  17.346 +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0)},
  17.347 +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12)},
  17.348 +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0)},
  17.349 +	{PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_LPC)},
  17.350 +	{0,}
  17.351 +};
  17.352 +
  17.353 +MODULE_DEVICE_TABLE(pci, tpm_pci_tbl);
  17.354 +
  17.355 +static struct pci_driver nsc_pci_driver = {
  17.356 +	.name = "tpm_nsc",
  17.357 +	.id_table = tpm_pci_tbl,
  17.358 +	.probe = tpm_nsc_init,
  17.359 +	.remove = __devexit_p(tpm_remove),
  17.360 +	.suspend = tpm_pm_suspend,
  17.361 +	.resume = tpm_pm_resume,
  17.362 +};
  17.363 +
  17.364 +static int __init init_nsc(void)
  17.365 +{
  17.366 +	return pci_register_driver(&nsc_pci_driver);
  17.367 +}
  17.368 +
  17.369 +static void __exit cleanup_nsc(void)
  17.370 +{
  17.371 +	pci_unregister_driver(&nsc_pci_driver);
  17.372 +}
  17.373 +
  17.374 +module_init(init_nsc);
  17.375 +module_exit(cleanup_nsc);
  17.376 +
  17.377 +MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
  17.378 +MODULE_DESCRIPTION("TPM Driver");
  17.379 +MODULE_VERSION("2.0");
  17.380 +MODULE_LICENSE("GPL");
    18.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.2 +++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c	Tue Aug 30 11:48:08 2005 -0800
    18.3 @@ -0,0 +1,513 @@
    18.4 +/*
    18.5 + * Copyright (C) 2004 IBM Corporation
    18.6 + *
    18.7 + * Authors:
    18.8 + * Leendert van Doorn <leendert@watson.ibm.com>
    18.9 + * Dave Safford <safford@watson.ibm.com>
   18.10 + * Reiner Sailer <sailer@watson.ibm.com>
   18.11 + * Kylene Hall <kjhall@us.ibm.com>
   18.12 + * Stefan Berger <stefanb@us.ibm.com>
   18.13 + *
   18.14 + * Maintained by: <tpmdd_devel@lists.sourceforge.net>
   18.15 + *
   18.16 + * Device driver for TCG/TCPA TPM (trusted platform module) for XEN.
   18.17 + * Specifications at www.trustedcomputinggroup.org
   18.18 + *
   18.19 + * This program is free software; you can redistribute it and/or
   18.20 + * modify it under the terms of the GNU General Public License as
   18.21 + * published by the Free Software Foundation, version 2 of the
   18.22 + * License.
   18.23 + *
   18.24 + */
   18.25 +
   18.26 +#include <asm/uaccess.h>
   18.27 +#include <linux/list.h>
   18.28 +#include <linux/tpmfe.h>
   18.29 +#include <linux/device.h>
   18.30 +#include <linux/interrupt.h>
   18.31 +#include "tpm_nopci.h"
   18.32 +
   18.33 +/* read status bits */
   18.34 +enum {
   18.35 +	STATUS_BUSY = 0x01,
   18.36 +	STATUS_DATA_AVAIL = 0x02,
   18.37 +	STATUS_READY = 0x04
   18.38 +};
   18.39 +
   18.40 +#define MIN(x,y)  (((x) < (y)) ? (x) : (y))
   18.41 +
   18.42 +struct transmission {
   18.43 +	struct list_head next;
   18.44 +	unsigned char *request;
   18.45 +	unsigned int request_len;
   18.46 +	unsigned char *rcv_buffer;
   18.47 +	unsigned int  buffersize;
   18.48 +	struct tpm_chip     *chip;
   18.49 +	unsigned int flags;
   18.50 +};
   18.51 +
   18.52 +enum {
   18.53 +	TRANSMISSION_FLAG_WAS_QUEUED = 0x1
   18.54 +};
   18.55 +
   18.56 +struct data_exchange {
   18.57 +	struct transmission *current_request;
   18.58 +	spinlock_t           req_list_lock;
   18.59 +	wait_queue_head_t    req_wait_queue;
   18.60 +
   18.61 +	struct list_head     queued_requests;
   18.62 +
   18.63 +	struct transmission *current_response;
   18.64 +	spinlock_t           resp_list_lock;
   18.65 +	wait_queue_head_t    resp_wait_queue;     // processes waiting for responses
   18.66 +
   18.67 +	struct transmission *req_cancelled;       // if a cancellation was encountered
   18.68 +
   18.69 +	unsigned int         fe_status;
   18.70 +	unsigned int         flags;
   18.71 +};
   18.72 +
   18.73 +enum {
   18.74 +	DATAEX_FLAG_QUEUED_ONLY = 0x1
   18.75 +};
   18.76 +
   18.77 +static struct data_exchange dataex;
   18.78 +
   18.79 +static unsigned long disconnect_time;
   18.80 +
   18.81 +/* local function prototypes */
   18.82 +static void __exit cleanup_xen(void);
   18.83 +
   18.84 +
   18.85 +/* =============================================================
   18.86 + * Some utility functions
   18.87 + * =============================================================
   18.88 + */
   18.89 +static inline struct transmission *
   18.90 +transmission_alloc(void)
   18.91 +{
   18.92 +	struct transmission *t = kmalloc(sizeof(*t), GFP_KERNEL);
   18.93 +	if (t) {
   18.94 +		memset(t, 0x0, sizeof(*t));
   18.95 +	}
   18.96 +	return t;
   18.97 +}
   18.98 +
   18.99 +static inline unsigned char *
  18.100 +transmission_set_buffer(struct transmission *t,
  18.101 +                        unsigned char *buffer, unsigned int len)
  18.102 +{
  18.103 +	if (NULL != t->request) {
  18.104 +		kfree(t->request);
  18.105 +	}
  18.106 +	t->request = kmalloc(len, GFP_KERNEL);
  18.107 +	if (t->request) {
  18.108 +		memcpy(t->request,
  18.109 +		       buffer,
  18.110 +		       len);
  18.111 +		t->request_len = len;
  18.112 +	}
  18.113 +	return t->request;
  18.114 +}
  18.115 +
  18.116 +static inline void
  18.117 +transmission_free(struct transmission *t)
  18.118 +{
  18.119 +	if (t->request) {
  18.120 +		kfree(t->request);
  18.121 +	}
  18.122 +	if (t->rcv_buffer) {
  18.123 +		kfree(t->rcv_buffer);
  18.124 +	}
  18.125 +	kfree(t);
  18.126 +}
  18.127 +
  18.128 +/* =============================================================
  18.129 + * Interface with the TPM shared memory driver for XEN
  18.130 + * =============================================================
  18.131 + */
  18.132 +static int tpm_recv(const u8 *buffer, size_t count, const void *ptr)
  18.133 +{
  18.134 +	int ret_size = 0;
  18.135 +	struct transmission *t, *temp;
  18.136 +
  18.137 +	/*
  18.138 +	 * The list with requests must contain one request
  18.139 +	 * only and the element there must be the one that
  18.140 +	 * was passed to me from the front-end.
  18.141 +	 */
  18.142 +	if (dataex.current_request != ptr) {
  18.143 +		printk("WARNING: The request pointer is different from the pointer "
  18.144 +		       "the shared memory driver returned. %p != %p\n",
  18.145 +		       dataex.current_request, ptr);
  18.146 +	}
  18.147 +
  18.148 +	/*
  18.149 +	 * If the request has been cancelled, just quit here
  18.150 +	 */
  18.151 +	if (dataex.req_cancelled == (struct transmission *)ptr) {
  18.152 +		if (dataex.current_request == dataex.req_cancelled) {
  18.153 +			dataex.current_request = NULL;
  18.154 +		}
  18.155 +		transmission_free(dataex.req_cancelled);
  18.156 +		dataex.req_cancelled = NULL;
  18.157 +		return 0;
  18.158 +	}
  18.159 +
  18.160 +	if (NULL != (temp = dataex.current_request)) {
  18.161 +		transmission_free(temp);
  18.162 +		dataex.current_request = NULL;
  18.163 +	}
  18.164 +
  18.165 +	t = transmission_alloc();
  18.166 +	if (NULL != t) {
  18.167 +		unsigned long flags;
  18.168 +		t->rcv_buffer = kmalloc(count, GFP_KERNEL);
  18.169 +		if (NULL == t->rcv_buffer) {
  18.170 +			transmission_free(t);
  18.171 +			return -ENOMEM;
  18.172 +		}
  18.173 +		t->buffersize = count;
  18.174 +		memcpy(t->rcv_buffer, buffer, count);
  18.175 +		ret_size = count;
  18.176 +
  18.177 +		spin_lock_irqsave(&dataex.resp_list_lock ,flags);
  18.178 +		dataex.current_response = t;
  18.179 +		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
  18.180 +		wake_up_interruptible(&dataex.resp_wait_queue);
  18.181 +	}
  18.182 +	return ret_size;
  18.183 +}
  18.184 +
  18.185 +
  18.186 +static void tpm_fe_status(unsigned int flags)
  18.187 +{
  18.188 +	dataex.fe_status = flags;
  18.189 +	if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
  18.190 +		disconnect_time = jiffies;
  18.191 +	}
  18.192 +}
  18.193 +
  18.194 +/* =============================================================
  18.195 + * Interface with the generic TPM driver
  18.196 + * =============================================================
  18.197 + */
  18.198 +static int tpm_xen_recv(struct tpm_chip *chip, u8 * buf, size_t count)
  18.199 +{
  18.200 +	unsigned long flags;
  18.201 +	int rc = 0;
  18.202 +
  18.203 +	spin_lock_irqsave(&dataex.resp_list_lock, flags);
  18.204 +	/*
  18.205 +	 * Check if the previous operation only queued the command
  18.206 +	 * In this case there won't be a response, so I just
  18.207 +	 * return from here and reset that flag. In any other
  18.208 +	 * case I should receive a response from the back-end.
  18.209 +	 */
  18.210 +	if ((dataex.flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
  18.211 +		dataex.flags &= ~DATAEX_FLAG_QUEUED_ONLY;
  18.212 +		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
  18.213 +		/*
  18.214 +		 * A little hack here: the first few measurements
  18.215 +		 * are queued since there's no way to talk to the
  18.216 +		 * TPM yet (due to slowness of the control channel),
  18.217 +		 * so we just make IMA happy by giving it 30 zeroed
  18.218 +		 * bytes back, where the most important part is
  18.219 +		 * that the result code is '0'.
  18.220 +		 */
  18.221 +
  18.222 +		count = MIN(count, 30);
  18.223 +		memset(buf, 0x0, count);
  18.224 +		return count;
  18.225 +	}
  18.226 +	/*
  18.227 +	 * Check whether something is in the responselist and if
  18.228 +	 * there's nothing in the list wait for something to appear.
  18.229 +	 */
  18.230 +
  18.231 +	if (NULL == dataex.current_response) {
  18.232 +		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
  18.233 +		interruptible_sleep_on_timeout(&dataex.resp_wait_queue,
  18.234 +		                               1000);
  18.235 +		spin_lock_irqsave(&dataex.resp_list_lock ,flags);
  18.236 +	}
  18.237 +
  18.238 +	if (NULL != dataex.current_response) {
  18.239 +		struct transmission *t = dataex.current_response;
  18.240 +		dataex.current_response = NULL;
  18.241 +		rc = MIN(count, t->buffersize);
  18.242 +		memcpy(buf, t->rcv_buffer, rc);
  18.243 +		transmission_free(t);
  18.244 +	}
  18.245 +
  18.246 +	spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
  18.247 +	return rc;
  18.248 +}
  18.249 +
  18.250 +static int tpm_xen_send(struct tpm_chip *chip, u8 * buf, size_t count)
  18.251 +{
  18.252 +	/*
  18.253 +	 * We simply pass the packet onto the XEN shared
  18.254 +	 * memory driver.
  18.255 +	 */
  18.256 +	unsigned long flags;
  18.257 +	int rc;
  18.258 +	struct transmission *t = transmission_alloc();
  18.259 +
  18.260 +	spin_lock_irqsave(&dataex.req_list_lock, flags);
  18.261 +	/*
  18.262 +	 * If there's a current request, it must be the
  18.263 +	 * previous request that has timed out.
  18.264 +	 */
  18.265 +	if (dataex.current_request != NULL) {
  18.266 +		printk("WARNING: Sending although there is a request outstanding.\n"
  18.267 +		       "         Previous request must have timed out.\n");
  18.268 +		transmission_free(dataex.current_request);
  18.269 +		dataex.current_request = NULL;
  18.270 +	}
  18.271 +
  18.272 +	if (t != NULL) {
  18.273 +		unsigned int error = 0;
  18.274 +		t->rcv_buffer = NULL;
  18.275 +		t->buffersize = 0;
  18.276 +		t->chip = chip;
  18.277 +
  18.278 +		/*
  18.279 +		 * Queue the packet if the driver below is not
  18.280 +		 * ready, yet, or there is any packet already
  18.281 +		 * in the queue.
  18.282 +		 * If the driver below is ready, unqueue all
  18.283 +		 * packets first before sending our current
  18.284 +		 * packet.
  18.285 +		 * For each unqueued packet, except for the
  18.286 +		 * last (=current) packet, call the function
  18.287 +		 * tpm_xen_recv to wait for the response to come
  18.288 +		 * back.
  18.289 +		 */
  18.290 +		if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
  18.291 +			if (time_after(jiffies, disconnect_time + HZ * 10)) {
  18.292 +				rc = -ENOENT;
  18.293 +			} else {
  18.294 +				/*
  18.295 +				 * copy the request into the buffer
  18.296 +				 */
  18.297 +				if (transmission_set_buffer(t, buf, count)
  18.298 +				    == NULL) {
  18.299 +					transmission_free(t);
  18.300 +					rc = -ENOMEM;
  18.301 +					goto exit;
  18.302 +				}
  18.303 +				dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
  18.304 +				list_add_tail(&t->next, &dataex.queued_requests);
  18.305 +				rc = 0;
  18.306 +			}
  18.307 +		} else {
  18.308 +			/*
  18.309 +			 * Check whether there are any packets in the queue
  18.310 +			 */
  18.311 +			while (!list_empty(&dataex.queued_requests)) {
  18.312 +				/*
  18.313 +				 * Need to dequeue them.
  18.314 +				 * Read the result into a dummy buffer.
  18.315 +				 */
  18.316 +				unsigned char buffer[1];
  18.317 +				struct transmission *qt = (struct transmission *) dataex.queued_requests.next;
  18.318 +				list_del(&qt->next);
  18.319 +				dataex.current_request = qt;
  18.320 +				spin_unlock_irqrestore(&dataex.req_list_lock, flags);
  18.321 +
  18.322 +				rc = tpm_fe_send(qt->request,
  18.323 +				                 qt->request_len,
  18.324 +				                 qt);
  18.325 +
  18.326 +				if (rc < 0) {
  18.327 +					spin_lock_irqsave(&dataex.req_list_lock, flags);
  18.328 +					if ((qt = dataex.current_request) != NULL) {
  18.329 +						/*
  18.330 +						 * requeue it at the beginning
  18.331 +						 * of the list
  18.332 +						 */
  18.333 +						list_add(&qt->next,
  18.334 +						         &dataex.queued_requests);
  18.335 +					}
  18.336 +					dataex.current_request = NULL;
  18.337 +					error = 1;
  18.338 +					break;
  18.339 +				}
  18.340 +				/*
  18.341 +				 * After this point qt is not valid anymore!
  18.342 +				 * It is freed when the front-end is delivering the data
  18.343 +				 * by calling tpm_recv
  18.344 +				 */
  18.345 +
  18.346 +				/*
  18.347 +				 * Try to receive the response now into the provided dummy
  18.348 +				 * buffer (I don't really care about this response since
  18.349 +				 * there is no receiver anymore for this response)
  18.350 +				 */
  18.351 +				rc = tpm_xen_recv(chip, buffer, sizeof(buffer));
  18.352 +
  18.353 +				spin_lock_irqsave(&dataex.req_list_lock, flags);
  18.354 +			}
  18.355 +
  18.356 +			if (error == 0) {
  18.357 +				/*
  18.358 +				 * Finally, send the current request.
  18.359 +				 */
  18.360 +				dataex.current_request = t;
  18.361 +				/*
  18.362 +				 * Call the shared memory driver
  18.363 +				 * Pass to it the buffer with the request, the
  18.364 +				 * amount of bytes in the request and
  18.365 +				 * a void * pointer (here: transmission structure)
  18.366 +				 */
  18.367 +				rc = tpm_fe_send(buf, count, t);
  18.368 +				/*
  18.369 +				 * The generic TPM driver will call
  18.370 +				 * the function to receive the response.
  18.371 +				 */
  18.372 +				if (rc < 0) {
  18.373 +					dataex.current_request = NULL;
  18.374 +					goto queue_it;
  18.375 +				}
  18.376 +			} else {
  18.377 +queue_it:
  18.378 +				if (transmission_set_buffer(t, buf, count) == NULL) {
  18.379 +					transmission_free(t);
  18.380 +					rc = -ENOMEM;
  18.381 +					goto exit;
  18.382 +				}
  18.383 +				/*
  18.384 +				 * An error occurred. Don't even try
  18.385 +				 * to send the current request. Just
  18.386 +				 * queue it.
  18.387 +				 */
  18.388 +				dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
  18.389 +				list_add_tail(&t->next, &dataex.queued_requests);
  18.390 +				rc = 0;
  18.391 +			}
  18.392 +		}
  18.393 +	} else {
  18.394 +		rc = -ENOMEM;
  18.395 +	}
  18.396 +
  18.397 +exit:
  18.398 +	spin_unlock_irqrestore(&dataex.req_list_lock, flags);
  18.399 +	return rc;
  18.400 +}
  18.401 +
  18.402 +static void tpm_xen_cancel(struct tpm_chip *chip)
  18.403 +{
  18.404 +	unsigned long flags;
  18.405 +	spin_lock_irqsave(&dataex.resp_list_lock,flags);
  18.406 +
  18.407 +	dataex.req_cancelled = dataex.current_request;
  18.408 +
  18.409 +	spin_unlock_irqrestore(&dataex.resp_list_lock,flags);
  18.410 +}
  18.411 +
  18.412 +static u8 tpm_xen_status(struct tpm_chip *chip)
  18.413 +{
  18.414 +	unsigned long flags;
  18.415 +	u8 rc = 0;
  18.416 +	spin_lock_irqsave(&dataex.resp_list_lock, flags);
  18.417 +	/*
  18.418 +	 * Data are available if:
  18.419 +	 *  - there's a current response
  18.420 +	 *  - the last packet was queued only (this is fake, but necessary to
  18.421 +	 *      get the generic TPM layer to call the receive function).
  18.422 +	 */
  18.423 +	if (NULL != dataex.current_response ||
  18.424 +	    0 != (dataex.flags & DATAEX_FLAG_QUEUED_ONLY)) {
  18.425 +		rc = STATUS_DATA_AVAIL;
  18.426 +	}
  18.427 +	spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
  18.428 +	return rc;
  18.429 +}
  18.430 +
  18.431 +static struct file_operations tpm_xen_ops = {
  18.432 +	.owner = THIS_MODULE,
  18.433 +	.llseek = no_llseek,
  18.434 +	.open = tpm_open,
  18.435 +	.read = tpm_read,
  18.436 +	.write = tpm_write,
  18.437 +	.release = tpm_release,
  18.438 +};
  18.439 +
  18.440 +static struct tpm_vendor_specific tpm_xen = {
  18.441 +	.recv = tpm_xen_recv,
  18.442 +	.send = tpm_xen_send,
  18.443 +	.cancel = tpm_xen_cancel,
  18.444 +	.status = tpm_xen_status,
  18.445 +	.req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
  18.446 +	.req_complete_val  = STATUS_DATA_AVAIL,
  18.447 +	.req_canceled = STATUS_READY,
  18.448 +	.base = 0,
  18.449 +	.attr = TPM_DEVICE_ATTRS,
  18.450 +	.miscdev.fops = &tpm_xen_ops,
  18.451 +};
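
[Editorial sketch, not part of the changeset: the req_complete_mask/req_complete_val/
req_canceled fields and the status() callback are consumed by the generic layer in tpm.c
(outside this hunk), which after send() is expected to poll status() until the masked value
matches and then call recv(). Returning STATUS_DATA_AVAIL from tpm_xen_status() even for
queued-only packets makes that poll terminate immediately. The loop below shows the assumed
shape of that polling; the function name and timeout value are illustrative.]

    /* Illustrative sketch of the completion polling assumed to live in tpm_transmit(). */
    static ssize_t example_wait_for_result(struct tpm_chip *chip,
                                           u8 *buf, size_t bufsiz)
    {
            unsigned long stop = jiffies + 2 * 60 * HZ;     /* illustrative timeout */
            u8 status;

            do {
                    status = chip->vendor->status(chip);
                    if ((status & chip->vendor->req_complete_mask) ==
                        chip->vendor->req_complete_val)
                            return chip->vendor->recv(chip, buf, bufsiz);
                    if (status == chip->vendor->req_canceled)
                            return -EIO;    /* command was cancelled */
                    msleep(TPM_TIMEOUT);
            } while (time_before(jiffies, stop));

            chip->vendor->cancel(chip);
            return -EIO;
    }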
  18.452 +
  18.453 +static struct device tpm_device = {
  18.454 +	.bus_id = "vtpm",
  18.455 +};
  18.456 +
  18.457 +static struct tpmfe_device tpmfe = {
  18.458 +	.receive = tpm_recv,
  18.459 +	.status  = tpm_fe_status,
  18.460 +};
  18.461 +
  18.462 +
  18.463 +static int __init init_xen(void)
  18.464 +{
  18.465 +	int rc;
  18.466 +
  18.467 +	/*
  18.468 +	 * Register device with the low-level front-end
  18.469 +	 * driver
  18.470 +	 */
  18.471 +	if ((rc = tpm_fe_register_receiver(&tpmfe)) < 0) {
  18.472 +		return rc;
  18.473 +	}
  18.474 +
  18.475 +	/*
  18.476 +	 * Register our device with the system.
  18.477 +	 */
  18.478 +	if ((rc = device_register(&tpm_device)) < 0) {
  18.479 +		tpm_fe_unregister_receiver();
  18.480 +		return rc;
  18.481 +	}
  18.482 +
  18.483 +	if ((rc = tpm_register_hardware_nopci(&tpm_device, &tpm_xen)) < 0) {
  18.484 +		device_unregister(&tpm_device);
  18.485 +		tpm_fe_unregister_receiver();
  18.486 +		return rc;
  18.487 +	}
  18.488 +
  18.489 +	dataex.current_request = NULL;
  18.490 +	spin_lock_init(&dataex.req_list_lock);
  18.491 +	init_waitqueue_head(&dataex.req_wait_queue);
  18.492 +	INIT_LIST_HEAD(&dataex.queued_requests);
  18.493 +
  18.494 +	dataex.current_response = NULL;
  18.495 +	spin_lock_init(&dataex.resp_list_lock);
  18.496 +	init_waitqueue_head(&dataex.resp_wait_queue);
  18.497 +
  18.498 +	disconnect_time = jiffies;
  18.499 +
  18.500 +	return 0;
  18.501 +}
  18.502 +
  18.503 +static void __exit cleanup_xen(void)
  18.504 +{
  18.505 +	tpm_remove_hardware(&tpm_device);
  18.506 +	device_unregister(&tpm_device);
  18.507 +	tpm_fe_unregister_receiver();
  18.508 +}
  18.509 +
  18.510 +fs_initcall(init_xen);
  18.511 +module_exit(cleanup_xen);
  18.512 +
  18.513 +MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
  18.514 +MODULE_DESCRIPTION("TPM Driver for XEN (shared memory)");
  18.515 +MODULE_VERSION("1.0");
  18.516 +MODULE_LICENSE("GPL");
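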
    19.1 --- a/linux-2.6-xen-sparse/drivers/xen/Makefile	Tue Aug 30 11:39:25 2005 -0800
    19.2 +++ b/linux-2.6-xen-sparse/drivers/xen/Makefile	Tue Aug 30 11:48:08 2005 -0800
    19.3 @@ -8,7 +8,9 @@ obj-y	+= xenbus/
    19.4  
    19.5  obj-$(CONFIG_XEN_BLKDEV_BACKEND)	+= blkback/
    19.6  obj-$(CONFIG_XEN_NETDEV_BACKEND)	+= netback/
    19.7 +obj-$(CONFIG_XEN_TPMDEV_BACKEND)	+= tpmback/
    19.8  obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	+= blkfront/
    19.9  obj-$(CONFIG_XEN_NETDEV_FRONTEND)	+= netfront/
   19.10  obj-$(CONFIG_XEN_BLKDEV_TAP)    	+= blktap/
   19.11 +obj-$(CONFIG_XEN_TPMDEV_FRONTEND)	+= tpmfront/
   19.12  
    20.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/Makefile	Tue Aug 30 11:48:08 2005 -0800
    20.3 @@ -0,0 +1,4 @@
    20.4 +
    20.5 +obj-$(CONFIG_XEN_TPMDEV_BACKEND)	+= tpmbk.o
    20.6 +
    20.7 +tpmbk-y += tpmback.o interface.o xenbus.o
    21.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h	Tue Aug 30 11:48:08 2005 -0800
    21.3 @@ -0,0 +1,89 @@
    21.4 +/******************************************************************************
    21.5 + * drivers/xen/tpmback/common.h
    21.6 + */
    21.7 +
    21.8 +#ifndef __TPMIF__BACKEND__COMMON_H__
    21.9 +#define __TPMIF__BACKEND__COMMON_H__
   21.10 +
   21.11 +#include <linux/config.h>
   21.12 +#include <linux/version.h>
   21.13 +#include <linux/module.h>
   21.14 +#include <linux/interrupt.h>
   21.15 +#include <linux/slab.h>
   21.16 +#include <asm-xen/ctrl_if.h>
   21.17 +#include <asm-xen/evtchn.h>
   21.18 +#include <asm-xen/xen-public/io/tpmif.h>
   21.19 +#include <asm/io.h>
   21.20 +#include <asm/pgalloc.h>
   21.21 +#include <asm-xen/xen-public/io/domain_controller.h>
   21.22 +
   21.23 +#if 0
   21.24 +#define ASSERT(_p) \
   21.25 +    if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
   21.26 +    __LINE__, __FILE__); *(int*)0=0; }
   21.27 +#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
   21.28 +                           __FILE__ , __LINE__ , ## _a )
   21.29 +#else
   21.30 +#define ASSERT(_p) ((void)0)
   21.31 +#define DPRINTK(_f, _a...) ((void)0)
   21.32 +#endif
   21.33 +
   21.34 +typedef struct tpmif_st {
   21.35 +        struct list_head tpmif_list;
   21.36 +	/* Unique identifier for this interface. */
   21.37 +	domid_t domid;
   21.38 +	unsigned int handle;
   21.39 +
   21.40 +	/* Physical parameters of the comms window. */
   21.41 +	unsigned long tx_shmem_frame;
   21.42 +	unsigned int evtchn;
   21.43 +	unsigned int remote_evtchn;
   21.44 +
   21.45 +	/* The shared rings and indexes. */
   21.46 +	tpmif_tx_interface_t *tx;
   21.47 +
   21.48 +	/* Miscellaneous private stuff. */
   21.49 +	enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
   21.50 +	int active;
   21.51 +
   21.52 +	struct tpmif_st *hash_next;
   21.53 +	struct list_head list;	/* scheduling list */
   21.54 +	atomic_t refcnt;
   21.55 +
   21.56 +	long int tpm_instance;
   21.57 +	unsigned long mmap_vstart;
   21.58 +
   21.59 +	struct work_struct work;
   21.60 +
   21.61 +	u16 shmem_handle;
   21.62 +	unsigned long shmem_vaddr;
   21.63 +	grant_ref_t shmem_ref;
   21.64 +
   21.65 +} tpmif_t;
   21.66 +
   21.67 +void tpmif_disconnect_complete(tpmif_t * tpmif);
   21.68 +tpmif_t *tpmif_find(domid_t domid, long int instance);
   21.69 +void tpmif_interface_init(void);
   21.70 +void tpmif_schedule_work(tpmif_t * tpmif);
   21.71 +void tpmif_deschedule_work(tpmif_t * tpmif);
   21.72 +void tpmif_xenbus_init(void);
   21.73 +int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn);
   21.74 +irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs);
   21.75 +int tpmif_vtpm_open(tpmif_t *tpmif, domid_t domain, u32 instance);
   21.76 +int tpmif_vtpm_close(u32 instance);
   21.77 +
   21.78 +int vtpm_release_packets(tpmif_t * tpmif, int send_msgs);
   21.79 +
   21.80 +#define tpmif_get(_b) (atomic_inc(&(_b)->refcnt))
   21.81 +#define tpmif_put(_b)                             \
   21.82 +    do {                                          \
   21.83 +        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
   21.84 +            tpmif_disconnect_complete(_b);        \
   21.85 +    } while (0)
   21.86 +
   21.87 +
   21.88 +extern int num_frontends;
   21.89 +
   21.90 +#define MMAP_VADDR(t,_req) ((t)->mmap_vstart + ((_req) * PAGE_SIZE))
   21.91 +
   21.92 +#endif /* __TPMIF__BACKEND__COMMON_H__ */
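
[Editorial sketch, not part of the changeset: the tpmif_get()/tpmif_put() pair above is the
lifetime mechanism for a tpmif_t: callers pin the interface while they touch its shared ring,
and the final put hands it to tpmif_disconnect_complete() for teardown. The caller name below
is hypothetical; the real users are in tpmback.c and xenbus.c.]

    /* Illustrative sketch only: pinning an interface across request handling. */
    static void example_handle_request(tpmif_t *tpmif)
    {
            tpmif_get(tpmif);       /* pin while we touch the shared ring */

            /* ... copy the request out of the page at MMAP_VADDR(tpmif, 0),
             *     hand it to the userspace vTPM, post the response, ... */

            tpmif_put(tpmif);       /* last put runs tpmif_disconnect_complete() */
    }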
    22.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c	Tue Aug 30 11:48:08 2005 -0800
    22.3 @@ -0,0 +1,200 @@
    22.4 +/******************************************************************************
    22.5 + * drivers/xen/tpmback/interface.c
    22.6 + *
    22.7 + * Virtual TPM interface management.
    22.8 + *
    22.9 + * Copyright (c) 2005, IBM Corporation
   22.10 + *
   22.11 + * Author: Stefan Berger, stefanb@us.ibm.com
   22.12 + *
   22.13 + * This code has been derived from drivers/xen/netback/interface.c
   22.14 + * Copyright (c) 2004, Keir Fraser
   22.15 + */
   22.16 +
   22.17 +#include "common.h"
   22.18 +#include <asm-xen/balloon.h>
   22.19 +
   22.20 +#define VMALLOC_VMADDR(x) ((unsigned long)(x))
   22.21 +
   22.22 +#define TPMIF_HASHSZ (2 << 5)
   22.23 +#define TPMIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(TPMIF_HASHSZ-1))
   22.24 +
   22.25 +static kmem_cache_t *tpmif_cachep;
   22.26 +int num_frontends = 0;
   22.27 +LIST_HEAD(tpmif_list);
   22.28 +
   22.29 +
   22.30 +tpmif_t *alloc_tpmif(domid_t domid, long int instance)
   22.31 +{
   22.32 +    struct page *page;
   22.33 +    tpmif_t *tpmif;
   22.34 +
   22.35 +    tpmif = kmem_cache_alloc(tpmif_cachep, GFP_KERNEL);
   22.36 +    if (!tpmif)
   22.37 +        return ERR_PTR(-ENOMEM);
   22.38 +
   22.39 +    memset(tpmif, 0, sizeof(*tpmif));
   22.40 +    tpmif->domid        = domid;
   22.41 +    tpmif->status       = DISCONNECTED;
   22.42 +    tpmif->tpm_instance = instance;
   22.43 +    atomic_set(&tpmif->refcnt, 1);
   22.44 +
   22.45 +    page = balloon_alloc_empty_page_range(TPMIF_TX_RING_SIZE);
   22.46 +    BUG_ON(page == NULL);
   22.47 +    tpmif->mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
   22.48 +
   22.49 +    list_add(&tpmif->tpmif_list, &tpmif_list);
   22.50 +    num_frontends++;
   22.51 +
   22.52 +    return tpmif;
   22.53 +}
   22.54 +
   22.55 +
   22.56 +void free_tpmif(tpmif_t *tpmif)
   22.57 +{
   22.58 +    num_frontends--;
   22.59 +    list_del(&tpmif->tpmif_list);
   22.60 +    kmem_cache_free(tpmif_cachep, tpmif);
   22.61 +}
   22.62 +
   22.63 +
   22.64 +tpmif_t *tpmif_find(domid_t domid, long int instance)
   22.65 +{
   22.66 +    tpmif_t *tpmif;
   22.67 +
   22.68 +    list_for_each_entry(tpmif, &tpmif_list, tpmif_list) {
   22.69 +        if (tpmif->tpm_instance == instance) {
   22.70 +            if (tpmif->domid == domid) {
   22.71 +                tpmif_get(tpmif);
   22.72 +                return tpmif;
   22.73 +	    } else {
   22.74 +	        return NULL;
   22.75 +	    }
   22.76 +        }
   22.77 +    }
   22.78 +
   22.79 +    return alloc_tpmif(domid, instance);
   22.80 +}
   22.81 +
   22.82 +
   22.83 +static int map_frontend_page(tpmif_t *tpmif, unsigned long localaddr,
   22.84 +			     unsigned long shared_page)
   22.85 +{
   22.86 +    struct gnttab_map_grant_ref op = {
   22.87 +        .host_addr = localaddr,
   22.88 +        .flags     = GNTMAP_host_map,
   22.89 +        .ref       = shared_page,
   22.90 +        .dom       = tpmif->domid,
   22.91 +    };
   22.92 +
   22.93 +    BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
   22.94 +
   22.95 +    if (op.handle < 0) {
   22.96 +	DPRINTK(" Grant table operation failure !\n");
   22.97 +	return op.handle;
   22.98 +    }
   22.99 +
  22.100 +    tpmif->shmem_ref    = shared_page;
  22.101 +    tpmif->shmem_handle = op.handle;
  22.102 +    tpmif->shmem_vaddr  = localaddr;
  22.103 +    return 0;
  22.104 +}
  22.105 +
  22.106 +
  22.107 +static void unmap_frontend_page(tpmif_t *tpmif)
  22.108 +{
  22.109 +    struct gnttab_unmap_grant_ref op;
  22.110 +
  22.111 +    op.host_addr = tpmif->shmem_vaddr;
  22.112 +    op.handle = tpmif->shmem_handle;
  22.113 +    op.dev_bus_addr = 0;
  22.114 +
  22.115 +    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
  22.116 +}
  22.117 +
  22.118 +
  22.119 +int tpmif_map(tpmif_t *tpmif,
  22.120 +              unsigned long shared_page, unsigned int evtchn)
  22.121 +{
  22.122 +    struct vm_struct *vma;
  22.123 +    evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
  22.124 +    int err;
  22.125 +
  22.126 +    BUG_ON(tpmif->remote_evtchn);
  22.127 +
  22.128 +    if ( (vma = get_vm_area(PAGE_SIZE, VM_IOREMAP)) == NULL )
  22.129 +	return -ENOMEM;
  22.130 +
  22.131 +    err = map_frontend_page(tpmif,
  22.132 +                            VMALLOC_VMADDR(vma->addr),
  22.133 +                            shared_page);
  22.134 +    if (err) {
  22.135 +        vfree(vma->addr);
  22.136 +	return err;
  22.137 +    }
  22.138 +
  22.139 +    op.u.bind_interdomain.dom1 = DOMID_SELF;
  22.140 +    op.u.bind_interdomain.dom2 = tpmif->domid;
  22.141 +    op.u.bind_interdomain.port1 = 0;
  22.142 +    op.u.bind_interdomain.port2 = evtchn;
  22.143 +    err = HYPERVISOR_event_channel_op(&op);
  22.144 +    if (err) {
  22.145 +	unmap_frontend_page(tpmif);
  22.146 +	vfree(vma->addr);
  22.147 +	return err;
  22.148 +    }
  22.149 +
  22.150 +    tpmif->evtchn = op.u.bind_interdomain.port1;
  22.151 +    tpmif->remote_evtchn = evtchn;
  22.152 +
  22.153 +    tpmif->tx = (tpmif_tx_interface_t *) vma->addr;
  22.154 +
  22.155 +    bind_evtchn_to_irqhandler(tpmif->evtchn,
  22.156 +                              tpmif_be_int,
  22.157 +                              0,
  22.158 +                              "tpmif-backend",
  22.159 +			      tpmif);
  22.160 +    tpmif->status        = CONNECTED;
  22.161 +    tpmif->shmem_ref     = shared_page;
  22.162 +    tpmif->active        = 1;
  22.163 +
  22.164 +    return 0;
  22.165 +}
  22.166 +
  22.167 +
  22.168 +static void __tpmif_disconnect_complete(void *arg)
  22.169 +{
  22.170 +    evtchn_op_t op = { .cmd = EVTCHNOP_close };
  22.171 +    tpmif_t *tpmif = (tpmif_t *) arg;
  22.172 +
  22.173 +    op.u.close.port = tpmif->evtchn;
  22.174 +    op.u.close.dom  = DOMID_SELF;
  22.175 +    HYPERVISOR_event_channel_op(&op);
  22.176 +    op.u.close.port = tpmif->remote_evtchn;
  22.177 +    op.u.close.dom  = tpmif->domid;
  22.178 +    HYPERVISOR_event_channel_op(&op);
  22.179 +
  22.180 +    if (tpmif->evtchn)
  22.181 +         unbind_evtchn_from_irqhandler(tpmif->evtchn, tpmif);
  22.182 +
  22.183 +    if (tpmif->tx) {
  22.184 +        unmap_frontend_page(tpmif);
  22.185 +        vfree(tpmif->tx);
  22.186 +    }
  22.187 +
  22.188 +    free_tpmif(tpmif);
  22.189 +}
  22.190 +
  22.191 +
  22.192 +void tpmif_disconnect_complete(tpmif_t * tpmif)
  22.193 +{
  22.194 +    INIT_WORK(&tpmif->work, __tpmif_disconnect_complete, (void *)tpmif);
  22.195 +    schedule_work(&tpmif->work);
  22.196 +}
  22.197 +
  22.198 +
  22.199 +void __init tpmif_interface_init(void)
  22.200 +{
  22.201 +    tpmif_cachep = kmem_cache_create("tpmif_cache", sizeof(tpmif_t),
  22.202 +                                     0, 0, NULL, NULL);
  22.203 +}
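A rough summary of the interface life cycle implemented above, derived from
the code; the driving calls come from the xenbus backend in a later file of
this changeset:

    /*
     * tpmif_find(domid, instance)     allocate (or look up) the interface;
     *                                 status = DISCONNECTED, refcnt = 1
     * tpmif_map(tpmif, ring, evtchn)  map the frontend's granted ring page and
     *                                 bind the interdomain event channel;
     *                                 status = CONNECTED
     * tpmif_put(tpmif)                dropping the last reference schedules
     *                                 __tpmif_disconnect_complete(), which
     *                                 closes both event-channel ends, unmaps
     *                                 the ring page and frees the tpmif_t
     */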
    23.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Tue Aug 30 11:48:08 2005 -0800
    23.3 @@ -0,0 +1,1078 @@
    23.4 +/******************************************************************************
    23.5 + * drivers/xen/tpmback/tpmback.c
    23.6 + *
    23.7 + * Copyright (c) 2005, IBM Corporation
    23.8 + *
    23.9 + * Author: Stefan Berger, stefanb@us.ibm.com
   23.10 + * Grant table support: Mahadevan Gomathisankaran
   23.11 + *
   23.12 + * This code has been derived from drivers/xen/netback/netback.c
   23.13 + * Copyright (c) 2002-2004, K A Fraser
   23.14 + *
   23.15 + */
   23.16 +
   23.17 +#include "common.h"
   23.18 +#include <asm-xen/evtchn.h>
   23.19 +
   23.20 +#include <linux/types.h>
   23.21 +#include <linux/list.h>
   23.22 +#include <linux/miscdevice.h>
   23.23 +#include <asm/uaccess.h>
   23.24 +#include <asm-xen/xenbus.h>
   23.25 +#include <asm-xen/xen-public/grant_table.h>
   23.26 +
   23.27 +
   23.28 +struct data_exchange {
   23.29 +	struct list_head pending_pak;
   23.30 +	struct list_head current_pak;
   23.31 +	unsigned int copied_so_far;
   23.32 +	u8 has_opener;
    23.33 +	rwlock_t pak_lock;  /* protects all of the previous fields */
   23.34 +	wait_queue_head_t wait_queue;
   23.35 +};
   23.36 +
   23.37 +struct packet {
   23.38 +	struct list_head next;
   23.39 +	unsigned int data_len;
   23.40 +	u8 *data_buffer;
   23.41 +	tpmif_t *tpmif;
   23.42 +	u32 tpm_instance;
   23.43 +	u8 req_tag;
   23.44 +	u32 last_read;
   23.45 +	u8 flags;
   23.46 +	ctrl_msg_t ctrl_msg;
   23.47 +	struct timer_list processing_timer;
   23.48 +};
   23.49 +
   23.50 +enum {
   23.51 +	PACKET_FLAG_DISCARD_RESPONSE = 1,
   23.52 +	PACKET_FLAG_SEND_CONTROLMESSAGE = 2,
   23.53 +};
   23.54 +
   23.55 +static struct data_exchange dataex;
   23.56 +
   23.57 +/* local function prototypes */
   23.58 +static int vtpm_queue_packet(struct packet *pak);
   23.59 +static int _packet_write(struct packet *pak,
   23.60 +                         const char *data, size_t size,
   23.61 +                         int userbuffer);
   23.62 +static void processing_timeout(unsigned long ptr);
   23.63 +static int  packet_read_shmem(struct packet *pak,
   23.64 +                              tpmif_t *tpmif,
   23.65 +                              u32 offset,
   23.66 +                              char *buffer,
   23.67 +                              int isuserbuffer,
   23.68 +                              u32 left);
   23.69 +
   23.70 +
   23.71 +#define MAX_PENDING_REQS TPMIF_TX_RING_SIZE
   23.72 +
   23.73 +static multicall_entry_t tx_mcl[MAX_PENDING_REQS];
   23.74 +
    23.75 +#define MIN(x,y)  ((x) < (y) ? (x) : (y))
   23.76 +
   23.77 +/***************************************************************
   23.78 + Packet-related functions
   23.79 +***************************************************************/
   23.80 +
   23.81 +static struct packet *
   23.82 +packet_find_instance(struct list_head *head, u32 tpm_instance)
   23.83 +{
   23.84 +	struct packet *pak;
   23.85 +	struct list_head *p;
   23.86 +	/*
   23.87 +	 * traverse the list of packets and return the first
   23.88 +	 * one with the given instance number
   23.89 +	 */
   23.90 +	list_for_each(p, head) {
   23.91 +		pak = list_entry(p, struct packet, next);
   23.92 +		if (pak->tpm_instance == tpm_instance) {
   23.93 +			return pak;
   23.94 +		}
   23.95 +	}
   23.96 +	return NULL;
   23.97 +}
   23.98 +
   23.99 +static struct packet *
  23.100 +packet_find_packet(struct list_head *head, void *packet)
  23.101 +{
  23.102 +	struct packet *pak;
  23.103 +	struct list_head *p;
  23.104 +	/*
   23.105 +	 * traverse the list of packets and return the given
   23.106 +	 * packet if it is still on the list
  23.107 +	 */
  23.108 +	list_for_each(p, head) {
  23.109 +		pak = list_entry(p, struct packet, next);
  23.110 +		if (pak == packet) {
  23.111 +			return pak;
  23.112 +		}
  23.113 +	}
  23.114 +	return NULL;
  23.115 +}
  23.116 +
  23.117 +static struct packet *
  23.118 +packet_alloc(tpmif_t *tpmif, u32 size, u8 req_tag, u8 flags)
  23.119 +{
  23.120 +	struct packet *pak = NULL;
  23.121 +	pak = kmalloc(sizeof(struct packet),
  23.122 +                      GFP_KERNEL);
  23.123 +	if (NULL != pak) {
  23.124 +		memset(pak, 0x0, sizeof(*pak));
  23.125 +		if (tpmif) {
  23.126 +			pak->tpmif = tpmif;
  23.127 +			pak->tpm_instance = tpmif->tpm_instance;
  23.128 +		}
  23.129 +		pak->data_len  = size;
  23.130 +		pak->req_tag   = req_tag;
  23.131 +		pak->last_read = 0;
  23.132 +		pak->flags     = flags;
  23.133 +
  23.134 +		/*
  23.135 +		 * cannot do tpmif_get(tpmif); bad things happen
  23.136 +		 * on the last tpmif_put()
  23.137 +		 */
  23.138 +		init_timer(&pak->processing_timer);
  23.139 +		pak->processing_timer.function = processing_timeout;
  23.140 +		pak->processing_timer.data = (unsigned long)pak;
  23.141 +	}
  23.142 +	return pak;
  23.143 +}
  23.144 +
   23.145 +static inline void
  23.146 +packet_reset(struct packet *pak)
  23.147 +{
  23.148 +	pak->last_read = 0;
  23.149 +}
  23.150 +
   23.151 +static inline void
  23.152 +packet_free(struct packet *pak)
  23.153 +{
  23.154 +	del_singleshot_timer_sync(&pak->processing_timer);
  23.155 +	if (pak->data_buffer) {
  23.156 +		kfree(pak->data_buffer);
  23.157 +	}
  23.158 +	/*
  23.159 +	 * cannot do tpmif_put(pak->tpmif); bad things happen
  23.160 +	 * on the last tpmif_put()
  23.161 +	 */
  23.162 +	kfree(pak);
  23.163 +}
  23.164 +
  23.165 +static int
  23.166 +packet_set(struct packet *pak,
  23.167 +           const unsigned char *buffer, u32 size)
  23.168 +{
  23.169 +	int rc = 0;
  23.170 +	unsigned char *buf = kmalloc(size, GFP_KERNEL);
  23.171 +	if (NULL != buf) {
  23.172 +		pak->data_buffer = buf;
  23.173 +		memcpy(buf, buffer, size);
  23.174 +		pak->data_len = size;
  23.175 +	} else {
  23.176 +		rc = -ENOMEM;
  23.177 +	}
  23.178 +	return rc;
  23.179 +}
  23.180 +
  23.181 +
  23.182 +/*
  23.183 + * Write data to the shared memory and send it to the FE.
  23.184 + */
  23.185 +static int
  23.186 +packet_write(struct packet *pak,
  23.187 +             const char *data, size_t size,
  23.188 +             int userbuffer)
  23.189 +{
  23.190 +	int rc = 0;
  23.191 +
   23.192 +	DPRINTK("Supposed to send %zu bytes to front-end!\n",
  23.193 +	        size);
  23.194 +
  23.195 +	if (0 != (pak->flags & PACKET_FLAG_SEND_CONTROLMESSAGE)) {
  23.196 +#ifdef CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS
  23.197 +		u32 res;
  23.198 +		memcpy(&res, &data[2+4], sizeof(res));
  23.199 +		if (res != 0) {
  23.200 +			/*
  23.201 +			 * Will close down this device and have the
  23.202 +			 * FE notified about closure.
  23.203 +			 */
  23.204 +		}
  23.205 +#endif
  23.206 +	}
  23.207 +
  23.208 +	if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) {
   23.209 +		/* Don't send a response to this packet. Just acknowledge it. */
  23.210 +		rc = size;
  23.211 +	} else {
  23.212 +		rc = _packet_write(pak, data, size, userbuffer);
  23.213 +	}
  23.214 +
  23.215 +	return rc;
  23.216 +}
  23.217 +
  23.218 +
  23.219 +static int
  23.220 +_packet_write(struct packet *pak,
  23.221 +              const char *data, size_t size,
  23.222 +              int userbuffer)
  23.223 +{
  23.224 +	/*
  23.225 +	 * Write into the shared memory pages directly
  23.226 +	 * and send it to the front end.
  23.227 +	 */
  23.228 +	tpmif_t *tpmif = pak->tpmif;
  23.229 +	u16 handle;
  23.230 +	int rc = 0;
  23.231 +	unsigned int i = 0;
  23.232 +	unsigned int offset = 0;
  23.233 +	multicall_entry_t *mcl;
  23.234 +
  23.235 +	if (tpmif == NULL)
  23.236 +		return -EFAULT;
  23.237 +
  23.238 +	if (tpmif->status != CONNECTED) {
  23.239 +		return size;
  23.240 +	}
  23.241 +
  23.242 +	mcl = tx_mcl;
  23.243 +	while (offset < size && i < TPMIF_TX_RING_SIZE) {
  23.244 +		unsigned int tocopy;
  23.245 +		struct gnttab_map_grant_ref map_op;
  23.246 +		struct gnttab_unmap_grant_ref unmap_op;
  23.247 +		tpmif_tx_request_t *tx;
  23.248 +
  23.249 +		tx = &tpmif->tx->ring[i].req;
  23.250 +
  23.251 +		if (0 == tx->addr) {
  23.252 +			DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);
  23.253 +			return 0;
  23.254 +		}
  23.255 +
  23.256 +		map_op.host_addr  = MMAP_VADDR(tpmif, i);
  23.257 +		map_op.flags      = GNTMAP_host_map;
  23.258 +		map_op.ref        = tx->ref;
  23.259 +		map_op.dom        = tpmif->domid;
  23.260 +
  23.261 +		if(unlikely(
  23.262 +		    HYPERVISOR_grant_table_op(
  23.263 +		        GNTTABOP_map_grant_ref,
  23.264 +		        &map_op,
  23.265 +		        1))) {
  23.266 +			BUG();
  23.267 +		}
  23.268 +
  23.269 +		handle = map_op.handle;
  23.270 +
  23.271 +		if (map_op.handle < 0) {
  23.272 +			DPRINTK(" Grant table operation failure !\n");
  23.273 +			return 0;
  23.274 +		}
  23.275 +		phys_to_machine_mapping[__pa(MMAP_VADDR(tpmif,i)) >>
  23.276 +					PAGE_SHIFT] =
  23.277 +			FOREIGN_FRAME(map_op.dev_bus_addr >> PAGE_SHIFT);
  23.278 +
  23.279 +		tocopy = size - offset;
  23.280 +		if (tocopy > PAGE_SIZE) {
  23.281 +			tocopy = PAGE_SIZE;
  23.282 +		}
  23.283 +		if (userbuffer) {
  23.284 +			if (copy_from_user((void *)(MMAP_VADDR(tpmif,i) |
  23.285 +			                           (tx->addr & ~PAGE_MASK)),
  23.286 +			                   (void __user *)&data[offset],
  23.287 +			                   tocopy)) {
  23.288 +				tpmif_put(tpmif);
  23.289 +				return -EFAULT;
  23.290 +			}
  23.291 +		} else {
  23.292 +			memcpy((void *)(MMAP_VADDR(tpmif,i) |
  23.293 +					(tx->addr & ~PAGE_MASK)),
  23.294 +			       &data[offset], tocopy);
  23.295 +		}
  23.296 +		tx->size = tocopy;
  23.297 +
  23.298 +		unmap_op.host_addr    = MMAP_VADDR(tpmif, i);
  23.299 +		unmap_op.handle       = handle;
  23.300 +		unmap_op.dev_bus_addr = 0;
  23.301 +
  23.302 +		if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
  23.303 +		                                      &unmap_op,
  23.304 +		                                      1))) {
  23.305 +			BUG();
  23.306 +		}
  23.307 +
  23.308 +		offset += tocopy;
  23.309 +		i++;
  23.310 +	}
  23.311 +
  23.312 +	rc = offset;
  23.313 +	DPRINTK("Notifying frontend via event channel %d\n",
  23.314 +	        tpmif->evtchn);
  23.315 +	notify_via_evtchn(tpmif->evtchn);
  23.316 +
  23.317 +	return rc;
  23.318 +}
  23.319 +
  23.320 +/*
  23.321 + * Read data from the shared memory and copy it directly into the
  23.322 + * provided buffer. Advance the read_last indicator which tells
  23.323 + * how many bytes have already been read.
  23.324 + */
  23.325 +static int
  23.326 +packet_read(struct packet *pak, size_t numbytes,
  23.327 +            char *buffer, size_t buffersize,
  23.328 +            int userbuffer)
  23.329 +{
  23.330 +	tpmif_t *tpmif = pak->tpmif;
  23.331 +	/*
  23.332 +	 * I am supposed to read 'numbytes' of data from the
  23.333 +	 * buffer.
  23.334 +	 * The first 4 bytes that are read are the instance number in
  23.335 +	 * network byte order, after that comes the data from the
  23.336 +	 * shared memory buffer.
  23.337 +	 */
  23.338 +	u32 to_copy;
  23.339 +	u32 offset = 0;
  23.340 +	u32 room_left = buffersize;
  23.341 +	/*
  23.342 +	 * Ensure that we see the request when we copy it.
  23.343 +	 */
  23.344 +	mb();
  23.345 +
  23.346 +	if (pak->last_read < 4) {
  23.347 +		/*
  23.348 +		 * copy the instance number into the buffer
  23.349 +		 */
  23.350 +		u32 instance_no = htonl(pak->tpm_instance);
  23.351 +		u32 last_read = pak->last_read;
  23.352 +		to_copy = MIN(4 - last_read, numbytes);
  23.353 +
  23.354 +		if (userbuffer) {
  23.355 +			if (copy_to_user(&buffer[0],
  23.356 +			                 &(((u8 *)&instance_no)[last_read]),
  23.357 +			                 to_copy)) {
  23.358 +				return -EFAULT;
  23.359 +			}
  23.360 +		} else {
  23.361 +			memcpy(&buffer[0],
  23.362 +			       &(((u8 *)&instance_no)[last_read]),
  23.363 +			       to_copy);
  23.364 +		}
  23.365 +
  23.366 +		pak->last_read += to_copy;
  23.367 +		offset += to_copy;
  23.368 +		room_left -= to_copy;
  23.369 +	}
  23.370 +
  23.371 +	/*
  23.372 +	 * If the packet has a data buffer appended, read from it...
  23.373 +	 */
  23.374 +
  23.375 +	if (room_left > 0) {
  23.376 +		if (pak->data_buffer) {
  23.377 +			u32 to_copy = MIN(pak->data_len - offset, room_left);
  23.378 +			u32 last_read = pak->last_read - 4;
  23.379 +			if (userbuffer) {
  23.380 +				if (copy_to_user(&buffer[offset],
  23.381 +				                 &pak->data_buffer[last_read],
  23.382 +				                 to_copy)) {
  23.383 +					return -EFAULT;
  23.384 +				}
  23.385 +			} else {
  23.386 +				memcpy(&buffer[offset],
  23.387 +				       &pak->data_buffer[last_read],
  23.388 +				       to_copy);
  23.389 +			}
  23.390 +			pak->last_read += to_copy;
  23.391 +			offset += to_copy;
  23.392 +		} else {
  23.393 +			offset = packet_read_shmem(pak,
  23.394 +			                           tpmif,
  23.395 +			                           offset,
  23.396 +			                           buffer,
  23.397 +			                           userbuffer,
  23.398 +			                           room_left);
  23.399 +		}
  23.400 +	}
  23.401 +	return offset;
  23.402 +}
  23.403 +
  23.404 +
  23.405 +static int
  23.406 +packet_read_shmem(struct packet *pak,
  23.407 +                  tpmif_t *tpmif,
  23.408 +                  u32 offset,
  23.409 +                  char *buffer,
  23.410 +                  int isuserbuffer,
  23.411 +                  u32 room_left) {
  23.412 +	u32 last_read = pak->last_read - 4;
  23.413 +	u32 i = (last_read / PAGE_SIZE);
  23.414 +	u32 pg_offset = last_read & (PAGE_SIZE - 1);
  23.415 +	u32 to_copy;
  23.416 +	u16 handle;
  23.417 +
  23.418 +	tpmif_tx_request_t *tx;
  23.419 +	tx = &tpmif->tx->ring[0].req;
  23.420 +	/*
   23.421 +	 * Start copying data at the page with index 'i'
   23.422 +	 * and within that page at offset 'pg_offset'.
  23.423 +	 * Copy a maximum of 'room_left' bytes.
  23.424 +	 */
  23.425 +	to_copy = MIN(PAGE_SIZE - pg_offset, room_left);
  23.426 +	while (to_copy > 0) {
  23.427 +		void *src;
  23.428 +		struct gnttab_map_grant_ref map_op;
  23.429 +		struct gnttab_unmap_grant_ref unmap_op;
  23.430 +
  23.431 +		tx = &tpmif->tx->ring[i].req;
  23.432 +
  23.433 +		map_op.host_addr = MMAP_VADDR(tpmif, i);
  23.434 +		map_op.flags     = GNTMAP_host_map;
  23.435 +		map_op.ref       = tx->ref;
  23.436 +		map_op.dom       = tpmif->domid;
  23.437 +
  23.438 +		if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
  23.439 +		                                      &map_op,
  23.440 +		                                      1))) {
  23.441 +			BUG();
  23.442 +		}
  23.443 +
  23.444 +		if (map_op.handle < 0) {
  23.445 +			DPRINTK(" Grant table operation failure !\n");
  23.446 +			return -EFAULT;
  23.447 +		}
  23.448 +
  23.449 +		handle = map_op.handle;
  23.450 +
  23.451 +		if (to_copy > tx->size) {
  23.452 +			/*
   23.453 +			 * The caller wants to read more data than this
   23.454 +			 * request holds; clamp to what is available.
  23.456 +			 */
  23.457 +			to_copy = MIN(tx->size, to_copy);
  23.458 +		}
  23.459 +
  23.460 +		DPRINTK("Copying from mapped memory at %08lx\n",
  23.461 +		        (unsigned long)(MMAP_VADDR(tpmif,i) |
  23.462 +			(tx->addr & ~PAGE_MASK)));
  23.463 +
  23.464 +		src = (void *)(MMAP_VADDR(tpmif,i) | ((tx->addr & ~PAGE_MASK) + pg_offset));
  23.465 +		if (isuserbuffer) {
  23.466 +			if (copy_to_user(&buffer[offset],
  23.467 +			                 src,
  23.468 +			                 to_copy)) {
  23.469 +				return -EFAULT;
  23.470 +			}
  23.471 +		} else {
  23.472 +			memcpy(&buffer[offset],
  23.473 +			       src,
  23.474 +			       to_copy);
  23.475 +		}
  23.476 +
  23.477 +
  23.478 +		DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
  23.479 +		        tpmif->domid, buffer[offset], buffer[offset+1],buffer[offset+2],buffer[offset+3]);
  23.480 +
  23.481 +		unmap_op.host_addr    = MMAP_VADDR(tpmif, i);
  23.482 +		unmap_op.handle       = handle;
  23.483 +		unmap_op.dev_bus_addr = 0;
  23.484 +
  23.485 +		if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
  23.486 +		                                      &unmap_op,
  23.487 +		                                      1))) {
  23.488 +			BUG();
  23.489 +		}
  23.490 +
  23.491 +		offset += to_copy;
  23.492 +		pg_offset = 0;
  23.493 +		last_read += to_copy;
  23.494 +		room_left -= to_copy;
  23.495 +
  23.496 +		to_copy = MIN(PAGE_SIZE, room_left);
  23.497 +		i++;
  23.498 +	} /* while (to_copy > 0) */
  23.499 +	/*
  23.500 +	 * Adjust the last_read pointer
  23.501 +	 */
  23.502 +	pak->last_read = last_read + 4;
  23.503 +	return offset;
  23.504 +}
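A worked example of the layout produced by packet_read()/packet_read_shmem(),
with illustrative values: a 30-byte request from vTPM instance 5 reaches the
reader as 34 bytes,

    00 00 00 05            instance number, network byte order (last_read 0..3)
    00 c1 00 00 00 1e ...  the request itself, copied out of the mapped ring
                           pages (last_read 4..33)

so the total handed to userspace runs to data_len + 4, which is exactly the
threshold vtpm_op_read() below checks before moving a packet from pending_pak
to current_pak.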
  23.505 +
  23.506 +
  23.507 +/* ============================================================
  23.508 + * The file layer for reading data from this device
  23.509 + * ============================================================
  23.510 + */
  23.511 +static int
  23.512 +vtpm_op_open(struct inode *inode, struct file *f)
  23.513 +{
  23.514 +	int rc = 0;
  23.515 +	unsigned long flags;
  23.516 +
  23.517 +	write_lock_irqsave(&dataex.pak_lock, flags);
  23.518 +	if (dataex.has_opener == 0) {
  23.519 +		dataex.has_opener = 1;
  23.520 +	} else {
  23.521 +		rc = -EPERM;
  23.522 +	}
  23.523 +	write_unlock_irqrestore(&dataex.pak_lock, flags);
  23.524 +	return rc;
  23.525 +}
  23.526 +
  23.527 +static ssize_t
  23.528 +vtpm_op_read(struct file *file,
  23.529 +	     char __user * data, size_t size, loff_t * offset)
  23.530 +{
  23.531 +	int ret_size = -ENODATA;
  23.532 +	struct packet *pak = NULL;
  23.533 +	unsigned long flags;
  23.534 +
  23.535 +	write_lock_irqsave(&dataex.pak_lock, flags);
  23.536 +
  23.537 +	if (list_empty(&dataex.pending_pak)) {
  23.538 +		write_unlock_irqrestore(&dataex.pak_lock, flags);
  23.539 +		wait_event_interruptible(dataex.wait_queue,
  23.540 +		                         !list_empty(&dataex.pending_pak));
  23.541 +		write_lock_irqsave(&dataex.pak_lock, flags);
  23.542 +	}
  23.543 +
  23.544 +	if (!list_empty(&dataex.pending_pak)) {
  23.545 +		unsigned int left;
  23.546 +		pak = list_entry(dataex.pending_pak.next, struct packet, next);
  23.547 +
  23.548 +		left = pak->data_len - dataex.copied_so_far;
  23.549 +
   23.550 +		DPRINTK("size given by app: %zu, available: %u\n", size, left);
  23.551 +
  23.552 +		ret_size = MIN(size,left);
  23.553 +
  23.554 +		ret_size = packet_read(pak, ret_size, data, size, 1);
  23.555 +		if (ret_size < 0) {
  23.556 +			ret_size = -EFAULT;
  23.557 +		} else {
  23.558 +			DPRINTK("Copied %d bytes to user buffer\n", ret_size);
  23.559 +
  23.560 +			dataex.copied_so_far += ret_size;
  23.561 +			if (dataex.copied_so_far >= pak->data_len + 4) {
  23.562 +				DPRINTK("All data from this packet given to app.\n");
  23.563 +				/* All data given to app */
  23.564 +
  23.565 +				del_singleshot_timer_sync(&pak->processing_timer);
  23.566 +				list_del(&pak->next);
  23.567 +				list_add_tail(&pak->next, &dataex.current_pak);
  23.568 +				/*
   23.569 +				 * The more frontends that are handled at the same time,
  23.570 +				 * the more time we give the TPM to process the request.
  23.571 +				 */
  23.572 +				mod_timer(&pak->processing_timer,
  23.573 +				          jiffies + (num_frontends * 10 * HZ));
  23.574 +				dataex.copied_so_far = 0;
  23.575 +			}
  23.576 +		}
  23.577 +	}
  23.578 +	write_unlock_irqrestore(&dataex.pak_lock, flags);
  23.579 +
  23.580 +	DPRINTK("Returning result from read to app: %d\n", ret_size);
  23.581 +
  23.582 +	return ret_size;
  23.583 +}
  23.584 +
  23.585 +/*
  23.586 + * Write operation - only works after a previous read operation!
  23.587 + */
  23.588 +static ssize_t
  23.589 +vtpm_op_write(struct file *file, const char __user * data, size_t size,
  23.590 +	      loff_t * offset)
  23.591 +{
  23.592 +	struct packet *pak;
  23.593 +	int rc = 0;
  23.594 +	unsigned int off = 4;
  23.595 +	unsigned long flags;
  23.596 +	u32 instance_no = 0;
  23.597 +	u32 len_no = 0;
  23.598 +
  23.599 +	/*
  23.600 +	 * Minimum required packet size is:
  23.601 +	 * 4 bytes for instance number
  23.602 +	 * 2 bytes for tag
  23.603 +	 * 4 bytes for paramSize
  23.604 +	 * 4 bytes for the ordinal
  23.605 +	 * sum: 14 bytes
  23.606 +	 */
  23.607 +	if ( size < off + 10 ) {
  23.608 +		return -EFAULT;
  23.609 +	}
  23.610 +
  23.611 +	if (copy_from_user(&instance_no,
  23.612 +	                   (void __user *)&data[0],
  23.613 +	                   4)) {
  23.614 +		return -EFAULT;
  23.615 +	}
  23.616 +
  23.617 +	if (copy_from_user(&len_no,
  23.618 +	                   (void __user *)&data[off+2],
  23.619 +	                   4) ||
  23.620 +	    (off + ntohl(len_no) != size)) {
  23.621 +		return -EFAULT;
  23.622 +	}
  23.623 +
  23.624 +	write_lock_irqsave(&dataex.pak_lock, flags);
  23.625 +	pak = packet_find_instance(&dataex.current_pak, ntohl(instance_no));
  23.626 +
  23.627 +	if (pak == NULL) {
  23.628 +		write_unlock_irqrestore(&dataex.pak_lock, flags);
  23.629 +		printk(KERN_ALERT "No associated packet!\n");
  23.630 +		return -EFAULT;
  23.631 +	} else {
  23.632 +		del_singleshot_timer_sync(&pak->processing_timer);
  23.633 +		list_del(&pak->next);
  23.634 +	}
  23.635 +
  23.636 +	write_unlock_irqrestore(&dataex.pak_lock, flags);
  23.637 +
  23.638 +	/*
   23.639 +	 * The first 'off' bytes are the instance-number prefix;
   23.640 +	 * strip them before writing the response to the front-end.
  23.641 +	 */
  23.642 +	size -= off;
  23.643 +	data = &data[off];
  23.644 +
  23.645 +	rc = packet_write(pak, data, size, 1);
  23.646 +
  23.647 +	if (rc > 0) {
  23.648 +		/* I neglected the first 4 bytes */
  23.649 +		rc += off;
  23.650 +	}
  23.651 +	packet_free(pak);
  23.652 +	return rc;
  23.653 +}
  23.654 +
  23.655 +static int
  23.656 +vtpm_op_release(struct inode *inode, struct file *file)
  23.657 +{
  23.658 +	unsigned long flags;
  23.659 +	vtpm_release_packets(NULL, 1);
  23.660 +	write_lock_irqsave(&dataex.pak_lock, flags);
  23.661 +	dataex.has_opener = 0;
  23.662 +	write_unlock_irqrestore(&dataex.pak_lock, flags);
  23.663 +	return 0;
  23.664 +}
  23.665 +
  23.666 +static unsigned int
  23.667 +vtpm_op_poll(struct file *file, struct poll_table_struct *pst)
  23.668 +{
  23.669 +	return 0;
  23.670 +}
  23.671 +
  23.672 +static struct file_operations vtpm_ops = {
  23.673 +	.owner = THIS_MODULE,
  23.674 +	.llseek = no_llseek,
  23.675 +	.open = vtpm_op_open,
  23.676 +	.read = vtpm_op_read,
  23.677 +	.write = vtpm_op_write,
  23.678 +	.release = vtpm_op_release,
  23.679 +	.poll = vtpm_op_poll,
  23.680 +};
  23.681 +
  23.682 +static struct miscdevice ibmvtpms_miscdevice = {
  23.683 +	.minor = 225,
  23.684 +	.name = "vtpm",
  23.685 +	.fops = &vtpm_ops,
  23.686 +};
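The misc device registered here is what a userspace vTPM manager opens; only
one opener is allowed at a time (vtpm_op_open() above). The sketch below is
hypothetical (the device path, buffer sizes and the handle_tpm_command()
helper are illustrative), but the framing follows the handlers above: every
request read from the device starts with a 4-byte big-endian instance number,
and the response written back must repeat that prefix and carry a TPM header
whose paramSize matches the remaining length.

    #include <fcntl.h>
    #include <string.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Placeholder for the real TPM emulator; it only returns a minimal
     * 10-byte failure response (tag TPM_TAG_RSP_COMMAND, paramSize 10,
     * return code TPM_FAIL). */
    static size_t handle_tpm_command(const unsigned char *in, size_t in_len,
                                     unsigned char *out, size_t out_max)
    {
        static const unsigned char fail[10] = {
            0x00, 0xc4, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x09
        };
        (void)in; (void)in_len; (void)out_max;
        memcpy(out, fail, sizeof(fail));
        return sizeof(fail);
    }

    int main(void)
    {
        unsigned char req[4 + 4096], resp[4 + 4096];
        int fd = open("/dev/vtpm", O_RDWR);
        ssize_t n;

        if (fd < 0)
            return 1;
        while ((n = read(fd, req, sizeof(req))) > 4) {
            size_t len = handle_tpm_command(req + 4, n - 4,
                                            resp + 4, sizeof(resp) - 4);
            memcpy(resp, req, 4);       /* echo the instance number */
            write(fd, resp, 4 + len);
        }
        close(fd);
        return 0;
    }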
  23.687 +
  23.688 +
  23.689 +/***************************************************************
   23.690 + Virtual TPM functions and data structures
  23.691 +***************************************************************/
  23.692 +
  23.693 +static u8 create_cmd[] = {
   23.694 +        1,193,		/* 0: TPM_TAG_RQU_COMMAND */
  23.695 +        0,0,0,19,	/* 2: length */
  23.696 +        0,0,0,0x1,	/* 6: VTPM_ORD_OPEN */
  23.697 +        0,		/* 10: VTPM type */
  23.698 +        0,0,0,0,	/* 11: domain id */
  23.699 +        0,0,0,0		/* 15: instance id */
  23.700 +};
  23.701 +
  23.702 +static u8 destroy_cmd[] = {
   23.703 +        1,193,		/* 0: TPM_TAG_RQU_COMMAND */
  23.704 +        0,0,0,14,	/* 2: length */
  23.705 +        0,0,0,0x2,	/* 6: VTPM_ORD_CLOSE */
  23.706 +        0,0,0,0		/* 10: instance id */
  23.707 +};
  23.708 +
  23.709 +int tpmif_vtpm_open(tpmif_t *tpmif, domid_t domid, u32 instance)
  23.710 +{
  23.711 +	int rc = 0;
  23.712 +	struct packet *pak = packet_alloc(tpmif, sizeof(create_cmd), create_cmd[0],
  23.713 +	    PACKET_FLAG_DISCARD_RESPONSE|
  23.714 +	    PACKET_FLAG_SEND_CONTROLMESSAGE);
  23.715 +	if (pak) {
  23.716 +		u8 buf[sizeof(create_cmd)];
  23.717 +		u32 domid_no = htonl((u32)domid);
  23.718 +		u32 instance_no = htonl(instance);
  23.719 +		memcpy(buf, create_cmd, sizeof(create_cmd));
  23.720 +
  23.721 +		memcpy(&buf[11], &domid_no, sizeof(u32));
  23.722 +		memcpy(&buf[15], &instance_no, sizeof(u32));
  23.723 +
  23.724 +		/* copy the buffer into the packet */
  23.725 +		rc = packet_set(pak, buf, sizeof(buf));
  23.726 +
  23.727 +		if (rc == 0) {
  23.728 +			pak->tpm_instance = 0;
  23.729 +			rc = vtpm_queue_packet(pak);
  23.730 +		}
  23.731 +		if (rc < 0) {
  23.732 +			/* could not be queued or built */
  23.733 +			packet_free(pak);
  23.734 +		}
  23.735 +	} else {
  23.736 +		rc = -ENOMEM;
  23.737 +	}
  23.738 +	return rc;
  23.739 +}
  23.740 +
  23.741 +int tpmif_vtpm_close(u32 instid)
  23.742 +{
  23.743 +	int rc = 0;
  23.744 +	struct packet *pak;
  23.745 +
  23.746 +	pak = packet_alloc(NULL,
   23.747 +	                   sizeof(destroy_cmd),
   23.748 +	                   destroy_cmd[0],
  23.749 +	                   PACKET_FLAG_DISCARD_RESPONSE|
  23.750 +	                   PACKET_FLAG_SEND_CONTROLMESSAGE);
  23.751 +	if (pak) {
  23.752 +		u8 buf[sizeof(destroy_cmd)];
  23.753 +		u32 instid_no = htonl(instid);
  23.754 +		memcpy(buf, destroy_cmd, sizeof(destroy_cmd));
  23.755 +		memcpy(&buf[10], &instid_no, sizeof(u32));
  23.756 +
  23.757 +		/* copy the buffer into the packet */
  23.758 +		rc = packet_set(pak, buf, sizeof(buf));
  23.759 +
  23.760 +		if (rc == 0) {
  23.761 +			pak->tpm_instance = 0;
  23.762 +			rc = vtpm_queue_packet(pak);
  23.763 +		}
  23.764 +		if (rc < 0) {
  23.765 +			/* could not be queued or built */
  23.766 +			packet_free(pak);
  23.767 +		}
  23.768 +	} else {
  23.769 +		rc = -ENOMEM;
  23.770 +	}
  23.771 +	return rc;
  23.772 +}
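For illustration, tpmif_vtpm_open(tpmif, 3, 2), with example domain and
instance numbers, queues the following 19-byte packet on the control instance
(pak->tpm_instance = 0), so the manager reads it prefixed by 00 00 00 00:

    01 c1             tag
    00 00 00 13       length (19)
    00 00 00 01       VTPM_ORD_OPEN
    00                vTPM type
    00 00 00 03       domain id, htonl(3)
    00 00 00 02       instance id, htonl(2)

tpmif_vtpm_close() builds the corresponding 14-byte VTPM_ORD_CLOSE packet the
same way, with only the instance id filled in at offset 10.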
  23.773 +
  23.774 +
  23.775 +/***************************************************************
  23.776 + Utility functions
  23.777 +***************************************************************/
  23.778 +
  23.779 +static int
  23.780 +tpm_send_fail_message(struct packet *pak, u8 req_tag)
  23.781 +{
  23.782 +	int rc;
  23.783 +	static const unsigned char tpm_error_message_fail[] = {
  23.784 +		0x00, 0x00,
  23.785 +		0x00, 0x00, 0x00, 0x0a,
  23.786 +		0x00, 0x00, 0x00, 0x09 /* TPM_FAIL */
  23.787 +	};
  23.788 +	unsigned char buffer[sizeof(tpm_error_message_fail)];
  23.789 +
  23.790 +	memcpy(buffer, tpm_error_message_fail, sizeof(tpm_error_message_fail));
  23.791 +	/*
  23.792 +	 * Insert the right response tag depending on the given tag
  23.793 +	 * All response tags are '+3' to the request tag.
  23.794 +	 */
  23.795 +	buffer[1] = req_tag + 3;
  23.796 +
  23.797 +	/*
  23.798 +	 * Write the data to shared memory and notify the front-end
  23.799 +	 */
  23.800 +	rc = packet_write(pak, buffer, sizeof(buffer), 0);
  23.801 +
  23.802 +	return rc;
  23.803 +}
  23.804 +
  23.805 +
  23.806 +static void
  23.807 +_vtpm_release_packets(struct list_head *head, tpmif_t *tpmif,
  23.808 +                      int send_msgs)
  23.809 +{
  23.810 +	struct packet *pak;
  23.811 +	struct list_head *pos, *tmp;
  23.812 +
  23.813 +	list_for_each_safe(pos, tmp, head) {
  23.814 +		pak = list_entry(pos, struct packet, next);
  23.815 +		if (tpmif == NULL || pak->tpmif == tpmif) {
  23.816 +			int can_send = 0;
  23.817 +			del_singleshot_timer_sync(&pak->processing_timer);
  23.818 +			list_del(&pak->next);
  23.819 +
  23.820 +			if (pak->tpmif && pak->tpmif->status == CONNECTED) {
  23.821 +				can_send = 1;
  23.822 +			}
  23.823 +
  23.824 +			if (send_msgs && can_send) {
  23.825 +				tpm_send_fail_message(pak, pak->req_tag);
  23.826 +			}
  23.827 +			packet_free(pak);
  23.828 +		}
  23.829 +	}
  23.830 +}
  23.831 +
  23.832 +
  23.833 +int
  23.834 +vtpm_release_packets(tpmif_t *tpmif, int send_msgs)
  23.835 +{
  23.836 +	unsigned long flags;
  23.837 +
  23.838 +	write_lock_irqsave(&dataex.pak_lock, flags);
  23.839 +
  23.840 +	_vtpm_release_packets(&dataex.pending_pak, tpmif, send_msgs);
  23.841 +	_vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);
  23.842 +
  23.843 +	write_unlock_irqrestore(&dataex.pak_lock,
  23.844 +	                        flags);
  23.845 +	return 0;
  23.846 +}
  23.847 +
  23.848 +
  23.849 +static int vtpm_queue_packet(struct packet *pak)
  23.850 +{
  23.851 +	int rc = 0;
  23.852 +	if (dataex.has_opener) {
  23.853 +		unsigned long flags;
  23.854 +		write_lock_irqsave(&dataex.pak_lock, flags);
  23.855 +		list_add_tail(&pak->next, &dataex.pending_pak);
  23.856 +		/* give the TPM some time to pick up the request */
  23.857 +		mod_timer(&pak->processing_timer, jiffies + (10 * HZ));
  23.858 +		write_unlock_irqrestore(&dataex.pak_lock,
  23.859 +		                        flags);
  23.860 +
  23.861 +		wake_up_interruptible(&dataex.wait_queue);
  23.862 +	} else {
  23.863 +		rc = -EFAULT;
  23.864 +	}
  23.865 +	return rc;
  23.866 +}
  23.867 +
  23.868 +
  23.869 +static int vtpm_receive(tpmif_t *tpmif, u32 size)
  23.870 +{
  23.871 +	int rc = 0;
  23.872 +	unsigned char buffer[10];
  23.873 +	__be32 *native_size;
  23.874 +
   23.875 +	struct packet *pak = packet_alloc(tpmif, size, 0, 0);
  23.876 +	if (NULL == pak) {
  23.877 +		return -ENOMEM;
  23.878 +	}
  23.879 +	/*
  23.880 +	 * Read 10 bytes from the received buffer to test its
  23.881 +	 * content for validity.
  23.882 +	 */
  23.883 +	if (sizeof(buffer) != packet_read(pak,
  23.884 +	                                  sizeof(buffer), buffer,
  23.885 +	                                  sizeof(buffer), 0)) {
  23.886 +		goto failexit;
  23.887 +	}
  23.888 +	/*
  23.889 +	 * Reset the packet read pointer so we can read all its
  23.890 +	 * contents again.
  23.891 +	 */
   23.892 +	packet_reset(pak);
          +	/* the request tag is known only now that the header has been read */
          +	pak->req_tag = buffer[4];
  23.893 +
  23.894 +	native_size = (__force __be32 *)(&buffer[4+2]);
  23.895 +	/*
  23.896 +	 * Verify that the size of the packet is correct
  23.897 +	 * as indicated and that there's actually someone reading packets.
  23.898 +	 * The minimum size of the packet is '10' for tag, size indicator
  23.899 +	 * and ordinal.
  23.900 +	 */
  23.901 +	if (size < 10 ||
  23.902 +	    be32_to_cpu(*native_size) != size ||
  23.903 +	    0 == dataex.has_opener) {
  23.904 +	    	rc = -EINVAL;
  23.905 +	    	goto failexit;
  23.906 +	} else {
  23.907 +		if ((rc = vtpm_queue_packet(pak)) < 0) {
  23.908 +			goto failexit;
  23.909 +		}
  23.910 +	}
  23.911 +	return 0;
  23.912 +
  23.913 +failexit:
  23.914 +	if (pak) {
  23.915 +		tpm_send_fail_message(pak, buffer[4+1]);
  23.916 +		packet_free(pak);
  23.917 +	}
  23.918 +	return rc;
  23.919 +}
  23.920 +
  23.921 +
  23.922 +/*
  23.923 + * Timeout function that gets invoked when a packet has not been processed
  23.924 + * during the timeout period.
  23.925 + * The packet must be on a list when this function is invoked. This
   23.926 + * also means that once it is taken off a list, the timer must be
  23.927 + * destroyed as well.
  23.928 + */
  23.929 +static void processing_timeout(unsigned long ptr)
  23.930 +{
  23.931 +	struct packet *pak = (struct packet *)ptr;
  23.932 +	unsigned long flags;
  23.933 +	write_lock_irqsave(&dataex.pak_lock, flags);
  23.934 +	/*
   23.935 +	 * Check whether the packet is still on one of the
   23.936 +	 * two lists; only then may it be removed and freed.
  23.937 +	 */
  23.938 +	if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
  23.939 +	    pak == packet_find_packet(&dataex.current_pak, pak) ) {
  23.940 +		list_del(&pak->next);
  23.941 +		tpm_send_fail_message(pak, pak->req_tag);
  23.942 +		packet_free(pak);
  23.943 +	}
  23.944 +
  23.945 +	write_unlock_irqrestore(&dataex.pak_lock, flags);
  23.946 +}
  23.947 +
  23.948 +
  23.949 +
  23.950 +static void tpm_tx_action(unsigned long unused);
  23.951 +static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);
  23.952 +
  23.953 +#define MAX_PENDING_REQS TPMIF_TX_RING_SIZE
  23.954 +
  23.955 +static struct list_head tpm_schedule_list;
  23.956 +static spinlock_t tpm_schedule_list_lock;
  23.957 +
  23.958 +static inline void
  23.959 +maybe_schedule_tx_action(void)
  23.960 +{
  23.961 +	smp_mb();
  23.962 +	tasklet_schedule(&tpm_tx_tasklet);
  23.963 +}
  23.964 +
  23.965 +static inline int
  23.966 +__on_tpm_schedule_list(tpmif_t * tpmif)
  23.967 +{
  23.968 +	return tpmif->list.next != NULL;
  23.969 +}
  23.970 +
  23.971 +static void
  23.972 +remove_from_tpm_schedule_list(tpmif_t * tpmif)
  23.973 +{
  23.974 +	spin_lock_irq(&tpm_schedule_list_lock);
  23.975 +	if (likely(__on_tpm_schedule_list(tpmif))) {
  23.976 +		list_del(&tpmif->list);
  23.977 +		tpmif->list.next = NULL;
  23.978 +		tpmif_put(tpmif);
  23.979 +	}
  23.980 +	spin_unlock_irq(&tpm_schedule_list_lock);
  23.981 +}
  23.982 +
  23.983 +static void
  23.984 +add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
  23.985 +{
  23.986 +	if (__on_tpm_schedule_list(tpmif))
  23.987 +		return;
  23.988 +
  23.989 +	spin_lock_irq(&tpm_schedule_list_lock);
  23.990 +	if (!__on_tpm_schedule_list(tpmif) && tpmif->active) {
  23.991 +		list_add_tail(&tpmif->list, &tpm_schedule_list);
  23.992 +		tpmif_get(tpmif);
  23.993 +	}
  23.994 +	spin_unlock_irq(&tpm_schedule_list_lock);
  23.995 +}
  23.996 +
  23.997 +void
  23.998 +tpmif_schedule_work(tpmif_t * tpmif)
  23.999 +{
 23.1000 +	add_to_tpm_schedule_list_tail(tpmif);
 23.1001 +	maybe_schedule_tx_action();
 23.1002 +}
 23.1003 +
 23.1004 +void
 23.1005 +tpmif_deschedule_work(tpmif_t * tpmif)
 23.1006 +{
 23.1007 +	remove_from_tpm_schedule_list(tpmif);
 23.1008 +}
 23.1009 +
 23.1010 +
 23.1011 +static void
 23.1012 +tpm_tx_action(unsigned long unused)
 23.1013 +{
 23.1014 +	struct list_head *ent;
 23.1015 +	tpmif_t *tpmif;
 23.1016 +	tpmif_tx_request_t *tx;
 23.1017 +
 23.1018 +	DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);
 23.1019 +
 23.1020 +	while (!list_empty(&tpm_schedule_list)) {
 23.1021 +		/* Get a tpmif from the list with work to do. */
 23.1022 +		ent = tpm_schedule_list.next;
 23.1023 +		tpmif = list_entry(ent, tpmif_t, list);
 23.1024 +		tpmif_get(tpmif);
 23.1025 +		remove_from_tpm_schedule_list(tpmif);
 23.1026 +		/*
 23.1027 +		 * Ensure that we see the request when we read from it.
 23.1028 +		 */
 23.1029 +		mb();
 23.1030 +
 23.1031 +		tx = &tpmif->tx->ring[0].req;
 23.1032 +
 23.1033 +		/* pass it up */
 23.1034 +		vtpm_receive(tpmif, tx->size);
 23.1035 +
 23.1036 +		tpmif_put(tpmif);
 23.1037 +	}
 23.1038 +}
 23.1039 +
 23.1040 +irqreturn_t
 23.1041 +tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
 23.1042 +{
 23.1043 +	tpmif_t *tpmif = dev_id;
 23.1044 +	add_to_tpm_schedule_list_tail(tpmif);
 23.1045 +	maybe_schedule_tx_action();
 23.1046 +	return IRQ_HANDLED;
 23.1047 +}
 23.1048 +
 23.1049 +static int __init
 23.1050 +tpmback_init(void)
 23.1051 +{
 23.1052 +	int rc;
 23.1053 +	if (!(xen_start_info.flags & SIF_TPM_BE_DOMAIN) &&
 23.1054 +	    !(xen_start_info.flags & SIF_INITDOMAIN)) {
 23.1055 +	    	printk(KERN_ALERT "Neither TPM-BE Domain nor INIT domain!\n");
 23.1056 +		return 0;
 23.1057 +	}
 23.1058 +
 23.1059 +	if ((rc = misc_register(&ibmvtpms_miscdevice)) != 0) {
 23.1060 +		printk(KERN_ALERT "Could not register misc device for TPM BE.\n");
 23.1061 +		return rc;
 23.1062 +	}
 23.1063 +
 23.1064 +	INIT_LIST_HEAD(&dataex.pending_pak);
 23.1065 +	INIT_LIST_HEAD(&dataex.current_pak);
 23.1066 +	dataex.has_opener = 0;
 23.1067 +	rwlock_init(&dataex.pak_lock);
 23.1068 +	init_waitqueue_head(&dataex.wait_queue);
 23.1069 +
 23.1070 +	spin_lock_init(&tpm_schedule_list_lock);
 23.1071 +	INIT_LIST_HEAD(&tpm_schedule_list);
 23.1072 +
 23.1073 +	tpmif_interface_init();
 23.1074 +	tpmif_xenbus_init();
 23.1075 +
 23.1076 +	printk(KERN_ALERT "Successfully initialized TPM backend driver.\n");
 23.1077 +
 23.1078 +	return 0;
 23.1079 +}
 23.1080 +
 23.1081 +__initcall(tpmback_init);
    24.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c	Tue Aug 30 11:48:08 2005 -0800
    24.3 @@ -0,0 +1,271 @@
    24.4 +/*  Xenbus code for tpmif backend
    24.5 +    Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
    24.6 +
    24.7 +    This program is free software; you can redistribute it and/or modify
    24.8 +    it under the terms of the GNU General Public License as published by
    24.9 +    the Free Software Foundation; either version 2 of the License, or
   24.10 +    (at your option) any later version.
   24.11 +
   24.12 +    This program is distributed in the hope that it will be useful,
   24.13 +    but WITHOUT ANY WARRANTY; without even the implied warranty of
   24.14 +    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   24.15 +    GNU General Public License for more details.
   24.16 +
   24.17 +    You should have received a copy of the GNU General Public License
   24.18 +    along with this program; if not, write to the Free Software
   24.19 +    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
   24.20 +*/
   24.21 +#include <stdarg.h>
   24.22 +#include <linux/module.h>
   24.23 +#include <asm-xen/xenbus.h>
   24.24 +#include "common.h"
   24.25 +
   24.26 +struct backend_info
   24.27 +{
   24.28 +	struct xenbus_device *dev;
   24.29 +
   24.30 +	/* our communications channel */
   24.31 +	tpmif_t *tpmif;
   24.32 +
   24.33 +	long int frontend_id;
    24.34 +	long int instance; /* instance of TPM */
   24.35 +
   24.36 +	/* watch front end for changes */
   24.37 +	struct xenbus_watch backend_watch;
   24.38 +
   24.39 +	struct xenbus_watch watch;
   24.40 +	char * frontpath;
   24.41 +};
   24.42 +
   24.43 +static int tpmback_remove(struct xenbus_device *dev)
   24.44 +{
   24.45 +	struct backend_info *be = dev->data;
   24.46 +
   24.47 +	if (be->watch.node) {
   24.48 +		unregister_xenbus_watch(&be->watch);
   24.49 +	}
   24.50 +	unregister_xenbus_watch(&be->backend_watch);
   24.51 +
   24.52 +	tpmif_vtpm_close(be->instance);
   24.53 +
   24.54 +	if (be->tpmif) {
   24.55 +		tpmif_put(be->tpmif);
   24.56 +	}
   24.57 +
   24.58 +	if (be->frontpath)
   24.59 +		kfree(be->frontpath);
   24.60 +	kfree(be);
   24.61 +	return 0;
   24.62 +}
   24.63 +
   24.64 +
   24.65 +static void frontend_changed(struct xenbus_watch *watch, const char *node)
   24.66 +{
   24.67 +	unsigned long ringref;
   24.68 +	unsigned int evtchn;
   24.69 +	unsigned long ready = 1;
   24.70 +	int err;
   24.71 +	struct backend_info *be
   24.72 +		= container_of(watch, struct backend_info, watch);
   24.73 +
    24.74 +	/* If the other end is gone, delete ourselves. */
   24.75 +	if (node && !xenbus_exists(be->frontpath, "")) {
   24.76 +		xenbus_rm(be->dev->nodename, "");
   24.77 +		device_unregister(&be->dev->dev);
   24.78 +		return;
   24.79 +	}
   24.80 +
   24.81 +	if (be->tpmif == NULL || be->tpmif->status == CONNECTED)
   24.82 +		return;
   24.83 +
   24.84 +	err = xenbus_gather(be->frontpath,
   24.85 +	                    "ring-ref", "%lu", &ringref,
   24.86 +			    "event-channel", "%u", &evtchn, NULL);
   24.87 +	if (err) {
   24.88 +		xenbus_dev_error(be->dev, err,
   24.89 +				 "reading %s/grant-id and event-channel",
   24.90 +				 be->frontpath);
   24.91 +		return;
   24.92 +	}
   24.93 +
   24.94 +
   24.95 +	/*
   24.96 +	 * Tell the front-end that we are ready to go -
   24.97 +	 * unless something bad happens
   24.98 +	 */
   24.99 +	err = xenbus_transaction_start(be->dev->nodename);
  24.100 +	if (err) {
  24.101 +		xenbus_dev_error(be->dev, err, "starting transaction");
  24.102 +		return;
  24.103 +	}
  24.104 +
  24.105 +	err = xenbus_printf(be->dev->nodename,
  24.106 +	                    "ready", "%lu", ready);
  24.107 +	if (err) {
  24.108 +		xenbus_dev_error(be->dev, err, "writing 'ready'");
  24.109 +		goto abort;
  24.110 +	}
  24.111 +
  24.112 +	err = tpmif_map(be->tpmif, ringref, evtchn);
  24.113 +	if (err) {
  24.114 +		xenbus_dev_error(be->dev, err,
  24.115 +				 "mapping shared-frame %lu port %u",
  24.116 +				 ringref, evtchn);
  24.117 +		goto abort;
  24.118 +	}
  24.119 +
  24.120 +	err = tpmif_vtpm_open(be->tpmif,
  24.121 +	                      be->frontend_id,
  24.122 +	                      be->instance);
  24.123 +	if (err) {
  24.124 +		xenbus_dev_error(be->dev, err,
  24.125 +		                 "queueing vtpm open packet");
  24.126 +		/*
  24.127 +		 * Should close down this device and notify FE
  24.128 +		 * about closure.
  24.129 +		 */
  24.130 +		goto abort;
  24.131 +	}
  24.132 +
  24.133 +	xenbus_transaction_end(0);
  24.134 +	xenbus_dev_ok(be->dev);
  24.135 +	return;
  24.136 +abort:
  24.137 +	xenbus_transaction_end(1);
  24.138 +}
  24.139 +
  24.140 +
  24.141 +static void backend_changed(struct xenbus_watch *watch, const char *node)
  24.142 +{
  24.143 +	int err;
  24.144 +	long int instance;
  24.145 +	struct backend_info *be
  24.146 +		= container_of(watch, struct backend_info, backend_watch);
  24.147 +	struct xenbus_device *dev = be->dev;
  24.148 +
  24.149 +	err = xenbus_scanf(dev->nodename, "instance", "%li", &instance);
  24.150 +	if (XENBUS_EXIST_ERR(err))
  24.151 +		return;
  24.152 +	if (err < 0) {
  24.153 +		xenbus_dev_error(dev, err, "reading 'instance' variable");
  24.154 +		return;
  24.155 +	}
  24.156 +
  24.157 +	if (be->instance != -1 && be->instance != instance) {
  24.158 +		printk(KERN_WARNING
  24.159 +		       "cannot change the instance\n");
  24.160 +		return;
  24.161 +	}
  24.162 +	be->instance = instance;
  24.163 +
  24.164 +	if (be->tpmif == NULL) {
  24.165 +		be->tpmif = tpmif_find(be->frontend_id,
  24.166 +		                       instance);
  24.167 +		if (IS_ERR(be->tpmif)) {
  24.168 +			err = PTR_ERR(be->tpmif);
  24.169 +			be->tpmif = NULL;
  24.170 +			xenbus_dev_error(dev, err, "creating interface");
  24.171 +			return;
  24.172 +		}
  24.173 +
  24.174 +		/* Pass in NULL node to skip exist test. */
  24.175 +		frontend_changed(&be->watch, be->frontpath);
  24.176 +	}
  24.177 +}
  24.178 +
  24.179 +
  24.180 +static int tpmback_probe(struct xenbus_device *dev,
  24.181 +			 const struct xenbus_device_id *id)
  24.182 +{
  24.183 +	struct backend_info *be;
  24.184 +	char *frontend;
  24.185 +	int err;
  24.186 +
  24.187 +	be = kmalloc(sizeof(*be), GFP_KERNEL);
  24.188 +	if (!be) {
  24.189 +		xenbus_dev_error(dev, -ENOMEM, "allocating backend structure");
   24.190 +		return -ENOMEM;
  24.191 +	}
  24.192 +
  24.193 +	memset(be, 0, sizeof(*be));
  24.194 +
  24.195 +	frontend = NULL;
  24.196 +	err = xenbus_gather(dev->nodename,
  24.197 +			    "frontend-id", "%li", &be->frontend_id,
  24.198 +			    "frontend", NULL, &frontend,
  24.199 +			    NULL);
  24.200 +	if (XENBUS_EXIST_ERR(err))
  24.201 +		goto free_be;
  24.202 +	if (err < 0) {
  24.203 +		xenbus_dev_error(dev, err,
  24.204 +				 "reading %s/frontend or frontend-id",
  24.205 +				 dev->nodename);
  24.206 +		goto free_be;
  24.207 +	}
  24.208 +	if (strlen(frontend) == 0 || !xenbus_exists(frontend, "")) {
  24.209 +		/* If we can't get a frontend path and a frontend-id,
  24.210 +		 * then our bus-id is no longer valid and we need to
  24.211 +		 * destroy the backend device.
  24.212 +		 */
  24.213 +		err = -ENOENT;
  24.214 +		goto free_be;
  24.215 +	}
  24.216 +
  24.217 +	be->dev = dev;
  24.218 +	be->backend_watch.node     = dev->nodename;
  24.219 +	be->backend_watch.callback = backend_changed;
  24.220 +	be->instance = -1;
  24.221 +	err = register_xenbus_watch(&be->backend_watch);
  24.222 +	if (err) {
  24.223 +		be->backend_watch.node = NULL;
  24.224 +		xenbus_dev_error(dev, err, "adding backend watch on %s",
  24.225 +				 dev->nodename);
  24.226 +		goto free_be;
  24.227 +	}
  24.228 +
  24.229 +	be->frontpath = frontend;
  24.230 +	be->watch.node = be->frontpath;
  24.231 +	be->watch.callback = frontend_changed;
  24.232 +	err = register_xenbus_watch(&be->watch);
  24.233 +	if (err) {
  24.234 +		be->watch.node = NULL;
  24.235 +		xenbus_dev_error(dev, err,
  24.236 +				 "adding frontend watch on %s",
  24.237 +				 be->frontpath);
  24.238 +		goto free_be;
  24.239 +	}
  24.240 +
  24.241 +	dev->data = be;
  24.242 +
  24.243 +	backend_changed(&be->backend_watch, dev->nodename);
  24.244 +	return err;
  24.245 +
  24.246 +free_be:
  24.247 +	if (be->backend_watch.node)
  24.248 +		unregister_xenbus_watch(&be->backend_watch);
  24.249 +	if (frontend)
  24.250 +		kfree(frontend);
  24.251 +	kfree(be);
  24.252 +	return err;
  24.253 +}
  24.254 +
  24.255 +
  24.256 +static struct xenbus_device_id tpmback_ids[] = {
  24.257 +	{ "vtpm" },
  24.258 +	{ "" }
  24.259 +};
  24.260 +
  24.261 +
  24.262 +static struct xenbus_driver tpmback = {
  24.263 +	.name = "vtpm",
  24.264 +	.owner = THIS_MODULE,
  24.265 +	.ids = tpmback_ids,
  24.266 +	.probe = tpmback_probe,
  24.267 +	.remove = tpmback_remove,
  24.268 +};
  24.269 +
  24.270 +
  24.271 +void tpmif_xenbus_init(void)
  24.272 +{
  24.273 +	xenbus_register_backend(&tpmback);
  24.274 +}
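The xenstore nodes this backend relies on, collected from the reads and
writes above; the key names are exact, while the path prefixes follow the
usual xenbus conventions and are only indicative:

    <backend path = dev->nodename>/
        frontend         path of the frontend's device node
        frontend-id      domid of the frontend
        instance         vTPM instance number (written by whatever created the node)
        ready = 1        written by frontend_changed() once the ring is mapped

    <frontend path>/
        ring-ref         grant reference of the shared tpmif_tx_interface_t page
        event-channel    unbound event channel port offered by the frontend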
    25.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/Makefile	Tue Aug 30 11:48:08 2005 -0800
    25.3 @@ -0,0 +1,2 @@
    25.4 +
    25.5 +obj-$(CONFIG_XEN_TPMDEV_FRONTEND)	+= tpmfront.o
    26.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    26.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Tue Aug 30 11:48:08 2005 -0800
    26.3 @@ -0,0 +1,739 @@
    26.4 +/*
    26.5 + * Copyright (c) 2005, IBM Corporation
    26.6 + *
    26.7 + * Author: Stefan Berger, stefanb@us.ibm.com
    26.8 + * Grant table support: Mahadevan Gomathisankaran
    26.9 + *
   26.10 + * This code has been derived from drivers/xen/netfront/netfront.c
   26.11 + *
   26.12 + * Copyright (c) 2002-2004, K A Fraser
   26.13 + *
   26.14 + * This file may be distributed separately from the Linux kernel, or
   26.15 + * incorporated into other software packages, subject to the following license:
   26.16 + *
   26.17 + * Permission is hereby granted, free of charge, to any person obtaining a copy
   26.18 + * of this source file (the "Software"), to deal in the Software without
   26.19 + * restriction, including without limitation the rights to use, copy, modify,
   26.20 + * merge, publish, distribute, sublicense, and/or sell copies of the Software,
   26.21 + * and to permit persons to whom the Software is furnished to do so, subject to
   26.22 + * the following conditions:
   26.23 + *
   26.24 + * The above copyright notice and this permission notice shall be included in
   26.25 + * all copies or substantial portions of the Software.
   26.26 + *
   26.27 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   26.28 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   26.29 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   26.30 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   26.31 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   26.32 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
   26.33 + * IN THE SOFTWARE.
   26.34 + */
   26.35 +
   26.36 +#include <linux/config.h>
   26.37 +#include <linux/module.h>
   26.38 +#include <linux/version.h>
   26.39 +#include <linux/kernel.h>
   26.40 +#include <linux/slab.h>
   26.41 +#include <linux/errno.h>
   26.42 +#include <linux/interrupt.h>
   26.43 +#include <linux/init.h>
   26.44 +#include <linux/tpmfe.h>
   26.45 +
   26.46 +#include <asm/semaphore.h>
   26.47 +#include <asm/io.h>
   26.48 +#include <asm-xen/evtchn.h>
   26.49 +#include <asm-xen/ctrl_if.h>
   26.50 +#include <asm-xen/xen-public/io/tpmif.h>
   26.51 +#include <asm/uaccess.h>
   26.52 +#include <asm-xen/xenbus.h>
   26.53 +#include <asm-xen/xen-public/io/domain_controller.h>
   26.54 +#include <asm-xen/xen-public/grant_table.h>
   26.55 +
   26.56 +#include "tpmfront.h"
   26.57 +
   26.58 +#undef DEBUG
   26.59 +
   26.60 +#if 1
   26.61 +#define ASSERT(_p) \
   26.62 +    if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
   26.63 +        __LINE__, __FILE__); *(int*)0=0; }
   26.64 +#else
   26.65 +#define ASSERT(_p)
   26.66 +#endif
   26.67 +
   26.68 +/* locally visible variables */
   26.69 +static grant_ref_t gref_head;
   26.70 +static struct tpm_private my_private;
   26.71 +
   26.72 +/* local function prototypes */
   26.73 +static irqreturn_t tpmif_int(int irq,
   26.74 +                             void *tpm_priv,
   26.75 +                             struct pt_regs *ptregs);
   26.76 +static void tpmif_rx_action(unsigned long unused);
   26.77 +static void tpmif_connect(u16 evtchn, domid_t domid);
   26.78 +static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
   26.79 +static int tpm_allocate_buffers(struct tpm_private *tp);
   26.80 +static void tpmif_set_connected_state(struct tpm_private *tp, int newstate);
   26.81 +static int tpm_xmit(struct tpm_private *tp,
   26.82 +                    const u8 * buf, size_t count, int userbuffer,
   26.83 +                    void *remember);
   26.84 +
    26.85 +#ifdef DEBUG
   26.86 +#define DPRINTK(fmt, args...) \
   26.87 +    printk(KERN_ALERT "xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
   26.88 +#else
   26.89 +#define DPRINTK(fmt, args...) ((void)0)
   26.90 +#endif
   26.91 +#define IPRINTK(fmt, args...) \
   26.92 +    printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
   26.93 +#define WPRINTK(fmt, args...) \
   26.94 +    printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
   26.95 +
   26.96 +
   26.97 +static inline int
   26.98 +tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len,
   26.99 +               int isuserbuffer)
  26.100 +{
  26.101 +	int copied = len;
  26.102 +
  26.103 +	if (len > txb->size) {
  26.104 +		copied = txb->size;
  26.105 +	}
  26.106 +	if (isuserbuffer) {
  26.107 +		if (copy_from_user(txb->data,
  26.108 +		                   src,
  26.109 +		                   copied)) {
  26.110 +			return -EFAULT;
  26.111 +		}
  26.112 +	} else {
  26.113 +		memcpy(txb->data, src, copied);
  26.114 +	}
  26.115 +	txb->len = len;
  26.116 +	return copied;
  26.117 +}
  26.118 +
  26.119 +static inline struct tx_buffer *tx_buffer_alloc(void)
  26.120 +{
  26.121 +	struct tx_buffer *txb = kmalloc(sizeof (struct tx_buffer),
  26.122 +					GFP_KERNEL);
  26.123 +
  26.124 +	if (txb) {
  26.125 +		txb->len = 0;
  26.126 +		txb->size = PAGE_SIZE;
  26.127 +		txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
  26.128 +		if (txb->data == NULL) {
  26.129 +			kfree(txb);
  26.130 +			txb = NULL;
  26.131 +		}
  26.132 +	}
  26.133 +	return txb;
  26.134 +}
  26.135 +
  26.136 +
  26.137 +/**************************************************************
  26.138 +
  26.139 + The interface to let the tpm plugin register its callback
   26.140 + function and send data to another domain using this module
  26.141 +
  26.142 +**************************************************************/
  26.143 +
  26.144 +static DECLARE_MUTEX(upperlayer_lock);
  26.145 +static DECLARE_MUTEX(suspend_lock);
  26.146 +static struct tpmfe_device *upperlayer_tpmfe;
  26.147 +
  26.148 +/*
  26.149 + * Send data via this module by calling this function
  26.150 + */
  26.151 +int tpm_fe_send(const u8 * buf, size_t count, void *ptr)
  26.152 +{
  26.153 +	int sent = 0;
  26.154 +	struct tpm_private *tp = &my_private;
  26.155 +
  26.156 +	down(&suspend_lock);
  26.157 +	sent = tpm_xmit(tp, buf, count, 0, ptr);
  26.158 +	up(&suspend_lock);
  26.159 +
  26.160 +	return sent;
  26.161 +}
  26.162 +EXPORT_SYMBOL(tpm_fe_send);
  26.163 +
  26.164 +/*
  26.165 + * Register a callback for receiving data from this module
  26.166 + */
  26.167 +int tpm_fe_register_receiver(struct tpmfe_device *tpmfe_dev)
  26.168 +{
  26.169 +	int rc = 0;
  26.170 +
  26.171 +	down(&upperlayer_lock);
  26.172 +	if (NULL == upperlayer_tpmfe) {
  26.173 +		upperlayer_tpmfe = tpmfe_dev;
  26.174 +		tpmfe_dev->max_tx_size = TPMIF_TX_RING_SIZE * PAGE_SIZE;
  26.175 +	} else {
  26.176 +		rc = -EBUSY;
  26.177 +	}
  26.178 +	up(&upperlayer_lock);
  26.179 +	return rc;
  26.180 +}
  26.181 +EXPORT_SYMBOL(tpm_fe_register_receiver);
  26.182 +
  26.183 +/*
  26.184 + * Unregister the callback for receiving data from this module
  26.185 + */
  26.186 +void tpm_fe_unregister_receiver(void)
  26.187 +{
  26.188 +	down(&upperlayer_lock);
  26.189 +	upperlayer_tpmfe = NULL;
  26.190 +	up(&upperlayer_lock);
  26.191 +}
  26.192 +EXPORT_SYMBOL(tpm_fe_unregister_receiver);
  26.193 +
  26.194 +/*
  26.195 + * Call this function to send data to the upper layer's
  26.196 + * registered receiver function.
  26.197 + */
  26.198 +static int tpm_fe_send_upperlayer(const u8 * buf, size_t count,
  26.199 +                                  const void *ptr)
  26.200 +{
  26.201 +	int rc;
  26.202 +
  26.203 +	down(&upperlayer_lock);
  26.204 +
  26.205 +	if (upperlayer_tpmfe && upperlayer_tpmfe->receive) {
  26.206 +		rc = upperlayer_tpmfe->receive(buf, count, ptr);
  26.207 +	} else {
  26.208 +		rc = 0;
  26.209 +	}
  26.210 +
  26.211 +	up(&upperlayer_lock);
  26.212 +	return rc;
  26.213 +}
  26.214 +
  26.215 +/**************************************************************
  26.216 + XENBUS support code
  26.217 +**************************************************************/
  26.218 +
  26.219 +static void watch_for_status(struct xenbus_watch *watch, const char *node)
  26.220 +{
  26.221 +	struct tpmfront_info *info;
  26.222 +	int err;
  26.223 +	unsigned long ready;
  26.224 +	struct tpm_private *tp = &my_private;
  26.225 +
  26.226 +	info = container_of(watch, struct tpmfront_info, watch);
  26.227 +	node += strlen(watch->node);
  26.228 +
  26.229 +	if (tp->connected)
  26.230 +		return;
  26.231 +
  26.232 +	err = xenbus_gather(watch->node,
  26.233 +	                    "ready", "%lu", &ready,
  26.234 +	                    NULL);
  26.235 +	if (err) {
  26.236 +		xenbus_dev_error(info->dev, err, "reading 'ready' field");
  26.237 +		return;
  26.238 +	}
  26.239 +
  26.240 +	tpmif_set_connected_state(tp, 1);
  26.241 +
  26.242 +	xenbus_dev_ok(info->dev);
  26.243 +}
  26.244 +
  26.245 +
  26.246 +static int setup_tpmring(struct xenbus_device *dev,
  26.247 +                         struct tpmfront_info * info,
  26.248 +                         domid_t backend_id)
  26.249 +{
  26.250 +	tpmif_tx_interface_t *sring;
  26.251 +	struct tpm_private *tp = &my_private;
  26.252 +
  26.253 +	evtchn_op_t op = { .cmd = EVTCHNOP_alloc_unbound };
  26.254 +	int err;
  26.255 +
  26.256 +	sring = (void *)__get_free_page(GFP_KERNEL);
  26.257 +	if (!sring) {
  26.258 +		xenbus_dev_error(dev, -ENOMEM, "allocating shared ring");
  26.259 +		return -ENOMEM;
  26.260 +	}
  26.261 +	tp->tx = sring;
  26.262 +
  26.263 +	tpm_allocate_buffers(tp);
  26.264 +
  26.265 +	info->ring_ref = gnttab_claim_grant_reference(&gref_head);
  26.266 +	ASSERT(info->ring_ref != -ENOSPC);
  26.267 +	gnttab_grant_foreign_access_ref(info->ring_ref,
  26.268 +					backend_id,
  26.269 +					(virt_to_machine(tp->tx) >> PAGE_SHIFT),
  26.270 +					0);
  26.271 +
  26.272 +	op.u.alloc_unbound.dom = backend_id;
  26.273 +	err = HYPERVISOR_event_channel_op(&op);
  26.274 +	if (err) {
  26.275 +		free_page((unsigned long)sring);
   26.276 +		tp->tx = NULL;
  26.277 +		xenbus_dev_error(dev, err, "allocating event channel");
  26.278 +		return err;
  26.279 +	}
  26.280 +	tpmif_connect(op.u.alloc_unbound.port, backend_id);
  26.281 +	return 0;
  26.282 +}
  26.283 +
  26.284 +
  26.285 +static void destroy_tpmring(struct tpmfront_info *info, struct tpm_private *tp)
  26.286 +{
  26.287 +	tpmif_set_connected_state(tp,0);
  26.288 +
  26.289 +	if ( tp->tx != NULL ) {
  26.290 +		free_page((unsigned long)tp->tx);
  26.291 +		tp->tx = NULL;
  26.292 +	}
  26.293 +	unbind_evtchn_from_irqhandler(tp->evtchn, NULL);
  26.294 +	tp->evtchn = 0;
  26.295 +}
  26.296 +
  26.297 +
  26.298 +static int talk_to_backend(struct xenbus_device *dev,
  26.299 +                           struct tpmfront_info *info)
  26.300 +{
  26.301 +	char *backend;
  26.302 +	const char *message;
  26.303 +	int err;
  26.304 +	int backend_id;
  26.305 +
  26.306 +	backend = NULL;
  26.307 +	err = xenbus_gather(dev->nodename,
  26.308 +			    "backend-id", "%i", &backend_id,
  26.309 +			    "backend", NULL, &backend,
  26.310 +			    NULL);
  26.311 +	if (XENBUS_EXIST_ERR(err))
  26.312 +		goto out;
  26.313 +	if (backend && strlen(backend) == 0) {
  26.314 +		err = -ENOENT;
  26.315 +		goto out;
  26.316 +	}
  26.317 +	if (err < 0) {
  26.318 +		xenbus_dev_error(dev, err, "reading %s/backend or backend-id",
  26.319 +				 dev->nodename);
  26.320 +		goto out;
  26.321 +	}
  26.322 +
  26.323 +	info->backend_id      = backend_id;
  26.324 +	my_private.backend_id = backend_id;
  26.325 +
  26.326 +	err = setup_tpmring(dev, info, backend_id);
  26.327 +	if (err) {
  26.328 +		xenbus_dev_error(dev, err, "setting up ring");
  26.329 +		goto out;
  26.330 +	}
  26.331 +
  26.332 +	err = xenbus_transaction_start(dev->nodename);
  26.333 +	if (err) {
  26.334 +		xenbus_dev_error(dev, err, "starting transaction");
  26.335 +		goto destroy_tpmring;
  26.336 +	}
  26.337 +
  26.338 +	err = xenbus_printf(dev->nodename,
  26.339 +	                    "ring-ref","%u", info->ring_ref);
  26.340 +	if (err) {
  26.341 +		message = "writing ring-ref";
  26.342 +		goto abort_transaction;
  26.343 +	}
  26.344 +
  26.345 +	err = xenbus_printf(dev->nodename,
  26.346 +			    "event-channel", "%u", my_private.evtchn);
  26.347 +	if (err) {
  26.348 +		message = "writing event-channel";
  26.349 +		goto abort_transaction;
  26.350 +	}
  26.351 +
  26.352 +	info->backend = backend;
  26.353 +	backend = NULL;
  26.354 +
  26.355 +	info->watch.node = info->backend;
  26.356 +	info->watch.callback = watch_for_status;
  26.357 +	err = register_xenbus_watch(&info->watch);
  26.358 +	if (err) {
  26.359 +		message = "registering watch on backend";
  26.360 +		goto abort_transaction;
  26.361 +	}
  26.362 +
  26.363 +	err = xenbus_transaction_end(0);
  26.364 +	if (err) {
  26.365 +		xenbus_dev_error(dev, err, "completing transaction");
  26.366 +		goto destroy_tpmring;
  26.367 +	}
  26.368 +
  26.369 +out:
  26.370 +	if (backend)
  26.371 +		kfree(backend);
  26.372 +	return err;
  26.373 +
  26.374 +abort_transaction:
  26.375 +	xenbus_transaction_end(1);
  26.376 +	/* Have to do this *outside* transaction.  */
  26.377 +	xenbus_dev_error(dev, err, "%s", message);
  26.378 +destroy_tpmring:
  26.379 +	destroy_tpmring(info, &my_private);
  26.380 +	goto out;
  26.381 +}
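
For orientation, the handshake implemented by talk_to_backend() boils down to a handful of xenstore keys. The key names below come straight from this code, from tpmfront_probe() further down, and from watch_for_status() above; the directory layout is only sketched, since the concrete paths are created by the toolstack:

  <frontend dir>/handle         read by tpmfront_probe()
  <frontend dir>/backend-id     read by talk_to_backend()
  <frontend dir>/backend        read by talk_to_backend(); becomes the node watched for status
  <frontend dir>/ring-ref       written by talk_to_backend(): grant reference of the shared ring page
  <frontend dir>/event-channel  written by talk_to_backend(): unbound event-channel port for the backend
  <backend dir>/ready           written by the backend; watch_for_status() then marks the
                                connection up and calls xenbus_dev_ok()
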
  26.382 +
  26.383 +
  26.384 +static int tpmfront_probe(struct xenbus_device *dev,
  26.385 +                          const struct xenbus_device_id *id)
  26.386 +{
  26.387 +	int err;
  26.388 +	struct tpmfront_info *info;
  26.389 +	int handle;
  26.390 +
  26.391 +	err = xenbus_scanf(dev->nodename,
  26.392 +	                   "handle", "%i", &handle);
  26.393 +	if (XENBUS_EXIST_ERR(err))
  26.394 +		return err;
  26.395 +
  26.396 +	if (err < 0) {
   26.397 +		xenbus_dev_error(dev, err, "reading handle");
  26.398 +		return err;
  26.399 +	}
  26.400 +
  26.401 +	info = kmalloc(sizeof(*info), GFP_KERNEL);
  26.402 +	if (!info) {
   26.403 +		xenbus_dev_error(dev, -ENOMEM, "allocating info structure");
   26.404 +		return -ENOMEM;
  26.405 +	}
  26.406 +	memset(info, 0x0, sizeof(*info));
  26.407 +
  26.408 +	info->dev = dev;
  26.409 +	info->handle = handle;
  26.410 +	dev->data = info;
  26.411 +
  26.412 +	err = talk_to_backend(dev, info);
  26.413 +	if (err) {
  26.414 +		kfree(info);
  26.415 +		dev->data = NULL;
  26.416 +		return err;
  26.417 +	}
  26.418 +
  26.419 +	watch_for_status(&info->watch, info->watch.node);
  26.420 +	return 0;
  26.421 +}
  26.422 +
  26.423 +static int tpmfront_remove(struct xenbus_device *dev)
  26.424 +{
  26.425 +	struct tpmfront_info *info = dev->data;
  26.426 +	if (info->backend)
  26.427 +		unregister_xenbus_watch(&info->watch);
  26.428 +
  26.429 +	destroy_tpmring(info, &my_private);
  26.430 +
  26.431 +	kfree(info->backend);
  26.432 +	kfree(info);
  26.433 +
  26.434 +	return 0;
  26.435 +}
  26.436 +
  26.437 +static int tpmfront_suspend(struct xenbus_device *dev)
  26.438 +{
  26.439 +	struct tpmfront_info *info = dev->data;
  26.440 +	struct tpm_private *tp = &my_private;
  26.441 +
  26.442 +	/* lock so no app can send */
  26.443 +	down(&suspend_lock);
  26.444 +
  26.445 +	while (atomic_read(&tp->tx_busy)) {
   26.446 +		printk(KERN_INFO "---- TPMIF: Outstanding request.\n");
  26.447 +#if 0
  26.448 +		/*
  26.449 +		 * Would like to wait until the outstanding request
  26.450 +		 * has come back, but this does not work properly, yet.
  26.451 +		 */
  26.452 +		interruptible_sleep_on_timeout(&tp->wait_q,
  26.453 +		                               100);
  26.454 +#else
  26.455 +		break;
  26.456 +#endif
  26.457 +	}
  26.458 +
  26.459 +	unregister_xenbus_watch(&info->watch);
  26.460 +
  26.461 +	kfree(info->backend);
  26.462 +	info->backend = NULL;
  26.463 +
  26.464 +	destroy_tpmring(info, tp);
  26.465 +
  26.466 +	return 0;
  26.467 +}
  26.468 +
  26.469 +static int tpmif_recover(void)
  26.470 +{
  26.471 +	return 0;
  26.472 +}
  26.473 +
  26.474 +static int tpmfront_resume(struct xenbus_device *dev)
  26.475 +{
  26.476 +	struct tpmfront_info *info = dev->data;
  26.477 +	int err;
  26.478 +
  26.479 +	err = talk_to_backend(dev, info);
  26.480 +	if (!err) {
  26.481 +		tpmif_recover();
  26.482 +	}
  26.483 +
  26.484 +	/* unlock so apps can resume */
  26.485 +	up(&suspend_lock);
  26.486 +
  26.487 +	return err;
  26.488 +}
  26.489 +
  26.490 +static void tpmif_connect(u16 evtchn, domid_t domid)
  26.491 +{
  26.492 +	int err = 0;
  26.493 +	struct tpm_private *tp = &my_private;
  26.494 +
  26.495 +	tp->evtchn = evtchn;
  26.496 +	tp->backend_id  = domid;
  26.497 +
  26.498 +	err = bind_evtchn_to_irqhandler(
  26.499 +		tp->evtchn,
  26.500 +		tpmif_int, SA_SAMPLE_RANDOM, "tpmif", tp);
  26.501 +	if ( err != 0 ) {
  26.502 +		WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
  26.503 +		return;
  26.504 +	}
  26.505 +}
  26.506 +
  26.507 +static struct xenbus_device_id tpmfront_ids[] = {
  26.508 +	{ "vtpm" },
  26.509 +	{ "" }
  26.510 +};
  26.511 +
  26.512 +static struct xenbus_driver tpmfront = {
  26.513 +	.name = "vtpm",
  26.514 +	.owner = THIS_MODULE,
  26.515 +	.ids = tpmfront_ids,
  26.516 +	.probe = tpmfront_probe,
  26.517 +	.remove =  tpmfront_remove,
  26.518 +	.resume = tpmfront_resume,
  26.519 +	.suspend = tpmfront_suspend,
  26.520 +};
  26.521 +
  26.522 +static void __init init_tpm_xenbus(void)
  26.523 +{
  26.524 +	xenbus_register_device(&tpmfront);
  26.525 +}
  26.526 +
  26.527 +
  26.528 +static int
  26.529 +tpm_allocate_buffers(struct tpm_private *tp)
  26.530 +{
  26.531 +	unsigned int i;
  26.532 +
  26.533 +	i = 0;
  26.534 +	while (i < TPMIF_TX_RING_SIZE) {
  26.535 +		tp->tx_buffers[i] = tx_buffer_alloc();
  26.536 +		i++;
  26.537 +	}
  26.538 +
  26.539 +	return 1;
  26.540 +}
  26.541 +
  26.542 +static void
  26.543 +tpmif_rx_action(unsigned long unused)
  26.544 +{
  26.545 +	struct tpm_private *tp = &my_private;
  26.546 +
  26.547 +	int i = 0;
  26.548 +	unsigned int received;
  26.549 +	unsigned int offset = 0;
  26.550 +	u8 *buffer;
  26.551 +	tpmif_tx_request_t *tx;
  26.552 +	tx = &tp->tx->ring[i].req;
  26.553 +
  26.554 +	received = tx->size;
  26.555 +
  26.556 +	buffer = kmalloc(received, GFP_KERNEL);
  26.557 +	if (NULL == buffer) {
  26.558 +		goto exit;
  26.559 +	}
  26.560 +
  26.561 +	i = 0;
  26.562 +	while (i < TPMIF_TX_RING_SIZE &&
  26.563 +	       offset < received) {
  26.564 +		struct tx_buffer *txb = tp->tx_buffers[i];
  26.565 +		tpmif_tx_request_t *tx;
  26.566 +		unsigned int tocopy;
  26.567 +
  26.568 +		tx = &tp->tx->ring[i].req;
  26.569 +		tocopy = tx->size;
  26.570 +		if (tocopy > PAGE_SIZE) {
  26.571 +			tocopy = PAGE_SIZE;
  26.572 +		}
  26.573 +
  26.574 +		memcpy(&buffer[offset], txb->data, tocopy);
  26.575 +
  26.576 +		gnttab_release_grant_reference(&gref_head, tx->ref);
  26.577 +
  26.578 +		offset += tocopy;
  26.579 +		i++;
  26.580 +	}
  26.581 +
  26.582 +	tpm_fe_send_upperlayer(buffer, received, tp->tx_remember);
  26.583 +	kfree(buffer);
  26.584 +
  26.585 +exit:
  26.586 +	atomic_set(&tp->tx_busy, 0);
  26.587 +	wake_up_interruptible(&tp->wait_q);
  26.588 +}
  26.589 +
  26.590 +
  26.591 +static irqreturn_t
  26.592 +tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
  26.593 +{
  26.594 +	struct tpm_private *tp = tpm_priv;
  26.595 +	unsigned long flags;
  26.596 +
  26.597 +	spin_lock_irqsave(&tp->tx_lock, flags);
  26.598 +	tasklet_schedule(&tpmif_rx_tasklet);
  26.599 +	spin_unlock_irqrestore(&tp->tx_lock, flags);
  26.600 +
  26.601 +	return IRQ_HANDLED;
  26.602 +}
  26.603 +
  26.604 +
  26.605 +static int
  26.606 +tpm_xmit(struct tpm_private *tp,
  26.607 +         const u8 * buf, size_t count, int isuserbuffer,
  26.608 +         void *remember)
  26.609 +{
  26.610 +	tpmif_tx_request_t *tx;
  26.611 +	TPMIF_RING_IDX i;
  26.612 +	unsigned int offset = 0;
  26.613 +
  26.614 +	spin_lock_irq(&tp->tx_lock);
  26.615 +
  26.616 +	if (unlikely(atomic_read(&tp->tx_busy))) {
   26.617 +		printk(KERN_WARNING "There's an outstanding request/response on the way!\n");
  26.618 +		spin_unlock_irq(&tp->tx_lock);
  26.619 +		return -EBUSY;
  26.620 +	}
  26.621 +
  26.622 +	if (tp->connected != 1) {
  26.623 +		spin_unlock_irq(&tp->tx_lock);
  26.624 +		return -EIO;
  26.625 +	}
  26.626 +
  26.627 +	i = 0;
  26.628 +	while (count > 0 && i < TPMIF_TX_RING_SIZE) {
  26.629 +		struct tx_buffer *txb = tp->tx_buffers[i];
  26.630 +		int copied;
  26.631 +
  26.632 +		if (NULL == txb) {
   26.633 +			DPRINTK("txb (i=%d) is NULL. Buffers initialized?\n", i);
   26.634 +			DPRINTK("Not transmitting anything!\n");
  26.635 +			spin_unlock_irq(&tp->tx_lock);
  26.636 +			return -EFAULT;
  26.637 +		}
   26.638 +		copied = tx_buffer_copy(txb, &buf[offset], count, isuserbuffer);
   26.639 +		if (copied < 0) {
   26.640 +			/* An error occurred: do not return with tx_lock held. */
   26.641 +			spin_unlock_irq(&tp->tx_lock);
   26.642 +			return copied;
   26.643 +		}
  26.644 +		count -= copied;
  26.645 +		offset += copied;
  26.646 +
  26.647 +		tx = &tp->tx->ring[i].req;
  26.648 +
  26.649 +		tx->id = i;
  26.650 +		tx->addr = virt_to_machine(txb->data);
  26.651 +		tx->size = txb->len;
  26.652 +
   26.653 +		DPRINTK("First 4 bytes sent by TPM-FE are 0x%02x 0x%02x 0x%02x 0x%02x\n",
  26.654 +		        txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
  26.655 +
   26.656 +		/* get the grant table reference for this page */
   26.657 +		tx->ref = gnttab_claim_grant_reference(&gref_head);
   26.658 +		if (-ENOSPC == tx->ref) {
   26.659 +			DPRINTK("Grant table claim reference failed.\n");
   26.660 +			spin_unlock_irq(&tp->tx_lock);
   26.661 +			return -ENOSPC;
   26.662 +		}
  26.663 +		gnttab_grant_foreign_access_ref( tx->ref,
  26.664 +		                                 tp->backend_id,
  26.665 +		                                 (tx->addr >> PAGE_SHIFT),
  26.666 +		                                 0 /*RW*/);
  26.667 +		i++;
  26.668 +		wmb();
  26.669 +	}
  26.670 +
  26.671 +	atomic_set(&tp->tx_busy, 1);
  26.672 +	tp->tx_remember = remember;
  26.673 +	mb();
  26.674 +
  26.675 +	DPRINTK("Notifying backend via event channel %d\n",
  26.676 +	        tp->evtchn);
  26.677 +
  26.678 +	notify_via_evtchn(tp->evtchn);
  26.679 +
  26.680 +	spin_unlock_irq(&tp->tx_lock);
  26.681 +	return offset;
  26.682 +}
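
To make the chunking in tpm_xmit() concrete: each ring slot carries at most one page, so a single request tops out at TPMIF_TX_RING_SIZE * PAGE_SIZE bytes, which is exactly the max_tx_size advertised to the upper layer in tpm_fe_register_receiver(). A stand-alone user-space sketch of that arithmetic, assuming 4 KiB pages (illustrative only, not part of the driver):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE          4096   /* assumption: 4 KiB pages, as on x86 */
#define TPMIF_TX_RING_SIZE 16     /* matches tpmif.h below */

int main(void)
{
	size_t count = 10000;   /* hypothetical TPM command size in bytes */
	unsigned int slots = (count + PAGE_SIZE - 1) / PAGE_SIZE;

	if (slots > TPMIF_TX_RING_SIZE) {
		printf("too large: the ring carries at most %u bytes\n",
		       TPMIF_TX_RING_SIZE * PAGE_SIZE);
		return 1;
	}
	/* 10000 bytes -> 3 slots: 4096 + 4096 + 1808 */
	printf("%zu bytes use %u ring slots; the last slot holds %zu bytes\n",
	       count, slots, count - (size_t)(slots - 1) * PAGE_SIZE);
	return 0;
}
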
  26.683 +
  26.684 +
  26.685 +static void tpmif_notify_upperlayer(struct tpm_private *tp)
  26.686 +{
  26.687 +	/*
  26.688 +	 * Notify upper layer about the state of the connection
  26.689 +	 * to the BE.
  26.690 +	 */
  26.691 +	down(&upperlayer_lock);
  26.692 +
  26.693 +	if (upperlayer_tpmfe != NULL) {
  26.694 +		switch (tp->connected) {
  26.695 +			case 1:
  26.696 +				upperlayer_tpmfe->status(TPMFE_STATUS_CONNECTED);
  26.697 +			break;
  26.698 +
  26.699 +			default:
  26.700 +				upperlayer_tpmfe->status(0);
  26.701 +			break;
  26.702 +		}
  26.703 +	}
  26.704 +	up(&upperlayer_lock);
  26.705 +}
  26.706 +
  26.707 +
  26.708 +static void tpmif_set_connected_state(struct tpm_private *tp, int newstate)
  26.709 +{
  26.710 +	if (newstate != tp->connected) {
  26.711 +		tp->connected = newstate;
  26.712 +		tpmif_notify_upperlayer(tp);
  26.713 +	}
  26.714 +}
  26.715 +
  26.716 +
  26.717 +/* =================================================================
  26.718 + * Initialization function.
  26.719 + * =================================================================
  26.720 + */
  26.721 +
  26.722 +static int __init
  26.723 +tpmif_init(void)
  26.724 +{
  26.725 +	IPRINTK("Initialising the vTPM driver.\n");
  26.726 +	if ( gnttab_alloc_grant_references ( TPMIF_TX_RING_SIZE,
  26.727 +	                                     &gref_head ) < 0) {
  26.728 +		return -EFAULT;
  26.729 +	}
  26.730 +	/*
   26.731 +	 * Send the driver status unless we are running in the
   26.732 +	 * INIT domain.
  26.733 +	 */
  26.734 +	spin_lock_init(&my_private.tx_lock);
  26.735 +	init_waitqueue_head(&my_private.wait_q);
  26.736 +
  26.737 +	init_tpm_xenbus();
  26.738 +
  26.739 +	return 0;
  26.740 +}
  26.741 +
  26.742 +__initcall(tpmif_init);
    27.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h	Tue Aug 30 11:48:08 2005 -0800
    27.3 @@ -0,0 +1,38 @@
    27.4 +#ifndef TPM_FRONT_H
    27.5 +#define TPM_FRONT_H
    27.6 +
    27.7 +
    27.8 +struct tpm_private {
    27.9 +	tpmif_tx_interface_t *tx;
   27.10 +	unsigned int evtchn;
   27.11 +	int connected;
   27.12 +
   27.13 +	spinlock_t tx_lock;
   27.14 +
   27.15 +	struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
   27.16 +
   27.17 +	atomic_t tx_busy;
   27.18 +	void *tx_remember;
   27.19 +	domid_t backend_id;
   27.20 +	wait_queue_head_t wait_q;
   27.21 +};
   27.22 +
   27.23 +
   27.24 +struct tpmfront_info
   27.25 +{
   27.26 +	struct xenbus_watch watch;
   27.27 +	int handle;
   27.28 +	struct xenbus_device *dev;
   27.29 +	char *backend;
   27.30 +	int ring_ref;
   27.31 +	domid_t backend_id;
   27.32 +};
   27.33 +
   27.34 +
   27.35 +struct tx_buffer {
    27.36 +	unsigned int size;	/* available space in data */
    27.37 +	unsigned int len;	/* used space in data */
    27.38 +	unsigned char *data;	/* pointer to a page */
   27.39 +};
   27.40 +
   27.41 +#endif
    28.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.2 +++ b/linux-2.6-xen-sparse/include/linux/tpmfe.h	Tue Aug 30 11:48:08 2005 -0800
    28.3 @@ -0,0 +1,33 @@
    28.4 +#ifndef TPM_FE_H
    28.5 +#define TPM_FE_H
    28.6 +
    28.7 +struct tpmfe_device {
    28.8 +	/*
    28.9 +	 * Let upper layer receive data from front-end
   28.10 +	 */
   28.11 +	int (*receive)(const u8 *buffer, size_t count, const void *ptr);
   28.12 +	/*
   28.13 +	 * Indicate the status of the front-end to the upper
   28.14 +	 * layer.
   28.15 +	 */
   28.16 +	void (*status)(unsigned int flags);
   28.17 +
   28.18 +	/*
   28.19 +	 * This field indicates the maximum size the driver can
   28.20 +	 * transfer in one chunk. It is filled out by the front-end
   28.21 +	 * driver and should be propagated to the generic tpm driver
   28.22 +	 * for allocation of buffers.
   28.23 +	 */
   28.24 +	unsigned int max_tx_size;
   28.25 +};
   28.26 +
   28.27 +enum {
   28.28 +	TPMFE_STATUS_DISCONNECTED = 0x0,
   28.29 +	TPMFE_STATUS_CONNECTED = 0x1
   28.30 +};
   28.31 +
   28.32 +int tpm_fe_send(const u8 * buf, size_t count, void *ptr);
   28.33 +int tpm_fe_register_receiver(struct tpmfe_device *);
   28.34 +void tpm_fe_unregister_receiver(void);
   28.35 +
   28.36 +#endif
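
The three prototypes above, together with the receive/status callbacks and max_tx_size in struct tpmfe_device, are the whole contract between the front-end and the generic TPM driver that plugs into it. A minimal sketch of how an upper layer might wire itself up; every name here is hypothetical and only illustrates the interface:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/tpmfe.h>

/* Hypothetical upper-layer consumer of the tpm_fe_* interface. */

static int example_receive(const u8 *buffer, size_t count, const void *ptr)
{
	/* 'ptr' is the opaque cookie that was passed to tpm_fe_send(), so the
	 * response can be matched to the request that produced it. */
	return count;
}

static void example_status(unsigned int flags)
{
	if (flags & TPMFE_STATUS_CONNECTED)
		printk(KERN_INFO "example: vTPM front-end connected\n");
	else
		printk(KERN_INFO "example: vTPM front-end disconnected\n");
}

static struct tpmfe_device example_fe = {
	.receive = example_receive,
	.status  = example_status,
};

static int __init example_init(void)
{
	int rc = tpm_fe_register_receiver(&example_fe); /* -EBUSY if already claimed */
	if (rc)
		return rc;
	/* example_fe.max_tx_size is now filled in; requests larger than that
	 * must be split or rejected before calling tpm_fe_send(). */
	return 0;
}
__initcall(example_init);
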
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/xen/include/public/io/tpmif.h	Tue Aug 30 11:48:08 2005 -0800
    29.3 @@ -0,0 +1,42 @@
    29.4 +/******************************************************************************
    29.5 + * tpmif.h
    29.6 + *
    29.7 + * TPM I/O interface for Xen guest OSes.
    29.8 + *
    29.9 + * Copyright (c) 2005, IBM Corporation
   29.10 + *
   29.11 + * Author: Stefan Berger, stefanb@us.ibm.com
   29.12 + * Grant table support: Mahadevan Gomathisankaran
   29.13 + *
   29.14 + * This code has been derived from tools/libxc/xen/io/netif.h
   29.15 + *
   29.16 + * Copyright (c) 2003-2004, Keir Fraser
   29.17 + */
   29.18 +
   29.19 +#ifndef __XEN_PUBLIC_IO_TPMIF_H__
   29.20 +#define __XEN_PUBLIC_IO_TPMIF_H__
   29.21 +
   29.22 +typedef struct {
   29.23 +    unsigned long addr;   /* Machine address of packet.   */
   29.24 +    int      ref;         /* grant table access reference */
   29.25 +    u16      id;          /* Echoed in response message.  */
   29.26 +    u16      size:15;     /* Packet size in bytes.        */
   29.27 +    u16      mapped:1;
   29.28 +} tpmif_tx_request_t;
   29.29 +
   29.30 +/*
   29.31 + * The TPMIF_TX_RING_SIZE defines the number of pages the
   29.32 + * front-end and backend can exchange (= size of array).
   29.33 + */
   29.34 +typedef u32 TPMIF_RING_IDX;
   29.35 +
   29.36 +#define TPMIF_TX_RING_SIZE 16
   29.37 +
   29.38 +/* This structure must fit in a memory page. */
   29.39 +typedef struct {
   29.40 +    union {
   29.41 +        tpmif_tx_request_t  req;
   29.42 +    } ring[TPMIF_TX_RING_SIZE];
   29.43 +} tpmif_tx_interface_t;
   29.44 +
   29.45 +#endif
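
The requirement that the ring structure fit in a memory page is what allows setup_tpmring() in tpmfront.c to grant a single page for the whole ring. A hedged sketch of how that could be verified at build time, assuming it lives in a file that already includes this header and linux/kernel.h (BUILD_BUG_ON is the stock kernel idiom; the check itself is not part of this changeset):

static inline void tpmif_ring_sanity_check(void)
{
	/* The whole transmit ring must fit inside the single granted page. */
	BUILD_BUG_ON(sizeof(tpmif_tx_interface_t) > PAGE_SIZE);
}
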
    30.1 --- a/xen/include/public/xen.h	Tue Aug 30 11:39:25 2005 -0800
    30.2 +++ b/xen/include/public/xen.h	Tue Aug 30 11:48:08 2005 -0800
    30.3 @@ -455,6 +455,7 @@ typedef struct start_info {
    30.4  #define SIF_BLK_BE_DOMAIN (1<<4)  /* Is this a block backend domain? */
    30.5  #define SIF_NET_BE_DOMAIN (1<<5)  /* Is this a net backend domain? */
    30.6  #define SIF_USB_BE_DOMAIN (1<<6)  /* Is this a usb backend domain? */
    30.7 +#define SIF_TPM_BE_DOMAIN (1<<7)  /* Is this a TPM backend domain? */
    30.8  /* For use in guest OSes. */
    30.9  extern shared_info_t *HYPERVISOR_shared_info;
   30.10