+++ /dev/null
-Copyright (c) 2010-2012 United States Government, as represented by
-the Secretary of Defense. All rights reserved.
-November 12 2012
-Authors: Matthew Fioravante (JHUAPL),
-
-This document describes the operation and command line interface
-of vtpm-stubdom. See docs/misc/vtpm.txt for details on the
-vTPM subsystem as a whole.
-
-
-------------------------------
-OPERATION
-------------------------------
-
-The vtpm-stubdom is a mini-OS domain that emulates a TPM for the guest OS to
-use. It is a small wrapper around the Berlios TPM emulator
-version 0.7.4. Commands are passed from the linux guest via the
-mini-os TPM backend driver. vTPM data is encrypted and stored via a disk image
-provided to the virtual machine. The key used to encrypt the data along
-with a hash of the vTPM's data is sent to the vTPM manager for secure storage
-and later retrieval. The vTPM domain communicates with the manager using a
-mini-os tpm front/back device pair.
-
-------------------------------
-COMMAND LINE ARGUMENTS
-------------------------------
-
-Command line arguments are passed to the domain via the 'extra'
-parameter in the VM config file. Each parameter is separated
-by white space. For example:
-
-extra="foo=bar baz"
-
-List of Arguments:
-------------------
-
-loglevel=<LOG>: Controls the amount of logging printed to the console.
- The possible values for <LOG> are:
- error
- info (default)
- debug
-
-clear: Start the Berlios emulator in "clear" mode. (default)
-
-save: Start the Berlios emulator in "save" mode.
-
-deactivated: Start the Berlios emulator in "deactivated" mode.
- See the Berlios TPM emulator documentation for details
- about the startup mode. For all normal use, always use clear,
- which is the default. You should not need to specify any of these.
-
-maintcmds=<1|0>: Enable or disable the TPM maintenance commands.
- These commands are used by TPM manufacturers and thus
- open a security hole. They are disabled by default.
-
-hwinitpcr=<PCRSPEC>: Initialize the virtual Platform Configuration Registers
- (PCRs) with PCR values from the hardware TPM. Each pcr specified by
- <PCRSPEC> will be initialized with the value of that same PCR in TPM
- once at startup. By default all PCRs are zero initialized.
- Valid values of <PCRSPEC> are:
- all: copy all pcrs
- none: copy no pcrs (default)
- <N>: copy pcr n
- <X-Y>: copy pcrs x to y (inclusive)
-
- These can also be combined by comma separation, for example:
- hwinitpcrs=5,12-16
- will copy pcrs 5, 12, 13, 14, 15, and 16.
-
-------------------------------
-REFERENCES
-------------------------------
-
-Berlios TPM Emulator:
-http://tpm-emulator.berlios.de/
XEN_ROOT=../..
TARGET=vtpmmgr.a
-OBJS=vtpmmgr.o vtpm_cmd_handler.o vtpm_storage.o init.o tpmrsa.o tpm.o log.o
+OBJS=vtpmmgr.o vtpm_cmd_handler.o init.o tpmrsa.o tpm.o log.o
+OBJS += vtpm_disk.o disk_tpm.o disk_io.o disk_crypto.o disk_read.o disk_write.o
+OBJS += mgmt_authority.o
CFLAGS+=-Werror -Iutil -Icrypto -Itcs
CFLAGS+=-Wno-declaration-after-statement -Wno-unused-label
+++ /dev/null
-Copyright (c) 2010-2012 United States Government, as represented by
-the Secretary of Defense. All rights reserved.
-November 12 2012
-Authors: Matthew Fioravante (JHUAPL),
-
-This document describes the operation and command line interface
-of vtpmmgr-stubdom. See docs/misc/vtpm.txt for details on the
-vTPM subsystem as a whole.
-
-
-------------------------------
-OPERATION
-------------------------------
-
-The vtpmmgr-stubdom implements a vTPM manager which has two major functions:
-
- - Securely store encryption keys for each of the vTPMs
- - Regulate access to the hardware TPM for the entire system
-
-The manager accepts commands from the vtpm-stubdom domains via the mini-os
-TPM backend driver. The vTPM manager communicates directly with the hardware TPM
-using the mini-os tpm_tis driver.
-
-
-When the manager starts for the first time it will check if the TPM
-has an owner. If the TPM is unowned, it will attempt to take ownership
-with the supplied owner_auth (see below) and then create a TPM
-storage key which will be used to secure vTPM key data. Currently the
-manager only binds vTPM keys to the disk. In the future support
-for sealing to PCRs should be added.
-
-------------------------------
-COMMAND LINE ARGUMENTS
-------------------------------
-
-Command line arguments are passed to the domain via the 'extra'
-parameter in the VM config file. Each parameter is separated
-by white space. For example:
-
-extra="foo=bar baz"
-
-List of Arguments:
-------------------
-
-owner_auth=<AUTHSPEC>: Set the owner auth of the TPM. The default
- is the well known owner auth of all ones.
-
-srk_auth=<AUTHSPEC>: Set the SRK auth for the TPM. The default is
- the well known srk auth of all zeroes.
- The possible values of <AUTHSPEC> are:
- well-known: Use the well known auth (default)
- random: Randomly generate an auth
- hash: <HASH>: Use the given 40-character ASCII hex string
- text: <STR>: Use the SHA-1 hash of <STR>.
-
-tpmdriver=<DRIVER>: Which driver to use to talk to the hardware TPM.
- Don't change this unless you know what you're doing.
- The possible values of <DRIVER> are:
- tpm_tis: Use the tpm_tis driver to talk directly to the TPM.
- The domain must have access to TPM IO memory. (default)
- tpmfront: Use tpmfront to talk to the TPM. The domain must have
- a tpmfront device setup to talk to another domain
- which provides access to the TPM.
-
-The following options only apply to the tpm_tis driver:
-
-tpmiomem=<ADDR>: The base address of the hardware memory pages of the
- TPM (default 0xfed40000).
-
-tpmirq=<IRQ>: The irq of the hardware TPM if using interrupts. A value of
- "probe" can be set to probe for the irq. A value of 0
- disables interrupts and uses polling (default 0).
-
-tpmlocality=<LOC>: Attempt to use locality <LOC> of the hardware TPM.
- (default 0)
--- /dev/null
+#include <inttypes.h>
+#include <mini-os/byteorder.h>
+#include <polarssl/aes.h>
+#include <polarssl/sha2.h>
+#include <polarssl/ctr_drbg.h>
+
+#include "log.h"
+#include "vtpmmgr.h"
+#include "vtpm_disk.h"
+#include "disk_io.h"
+#include "disk_crypto.h"
+
+// XXX defining this stubs out all disk encryption for easier debugging
+#undef DISK_IS_PLAINTEXT
+
+void do_random(void *buf, size_t size)
+{
+ int rc = ctr_drbg_random(&vtpm_globals.ctr_drbg, buf, size);
+ if (rc) abort();
+}
+
+void aes_setup(aes_context *ctx, const struct key128 *key)
+{
+ aes_setkey_enc(ctx, (void*)key, 128);
+}
+
+static void aes_encrypt_ecb(void *target, const void *src, const aes_context *key_e)
+{
+ aes_crypt_ecb((void*)key_e, AES_ENCRYPT, src, target);
+}
+
+void aes_encrypt_one(void *target, const void *src, const struct key128 *key)
+{
+ aes_context ctx;
+ aes_setkey_enc(&ctx, (void*)key, 128);
+ aes_crypt_ecb(&ctx, AES_ENCRYPT, src, target);
+}
+
+void aes_decrypt_one(void *target, const void *src, const struct key128 *key)
+{
+ aes_context ctx;
+ aes_setkey_dec(&ctx, (void*)key, 128);
+ aes_crypt_ecb(&ctx, AES_DECRYPT, src, target);
+}
+
+static void aes_ctr_one(uint64_t out[2], uint64_t ctr[2], const aes_context *key_e)
+{
+#ifdef DISK_IS_PLAINTEXT
+ memset(out, 0, 16);
+#else
+ aes_encrypt_ecb(out, ctr, key_e);
+#endif
+ ctr[1]++;
+}
+
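+/*
+ * Encrypt one record in AES-CTR mode. A random 16-byte counter block is
+ * generated and stored as the first 16 bytes of the target; the plaintext is
+ * XORed with the keystream, and any target space beyond the plaintext is
+ * filled with raw keystream so the whole buffer is always rewritten.
+ * Aborts if the plaintext does not fit in the target.
+ */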
+void aes_encrypt_ctr(void *target, size_t target_size, const void *srcv, size_t pt_size, const aes_context *key_e)
+{
+ uint64_t ctr[2];
+ uint64_t tmp[2];
+ uint64_t *dst = target;
+ const uint64_t *src = srcv;
+
+ do_random(ctr, sizeof(ctr));
+ dst[0] = ctr[0];
+ dst[1] = ctr[1];
+ dst += 2;
+ target_size -= 16;
+
+ if (pt_size > target_size)
+ abort(); // invalid argument: target too small for plaintext
+
+ while (pt_size >= 16) {
+ aes_ctr_one(tmp, ctr, key_e);
+
+ dst[0] = tmp[0] ^ src[0];
+ dst[1] = tmp[1] ^ src[1];
+
+ dst += 2;
+ src += 2;
+ pt_size -= 16;
+ target_size -= 16;
+ }
+ if (pt_size) {
+ uint64_t stmp[2];
+ uint64_t dtmp[2];
+ memset(stmp, 0, 16);
+ memcpy(stmp, src, pt_size);
+
+ aes_ctr_one(tmp, ctr, key_e);
+
+ dtmp[0] = tmp[0] ^ stmp[0];
+ dtmp[1] = tmp[1] ^ stmp[1];
+ if (target_size < 16) {
+ memcpy(dst, dtmp, target_size);
+ return;
+ } else {
+ memcpy(dst, dtmp, 16);
+ target_size -= 16;
+ }
+ }
+ while (target_size >= 16) {
+ aes_ctr_one(dst, ctr, key_e);
+
+ dst += 2;
+ target_size -= 16;
+ }
+ if (target_size)
+ abort(); // invalid argument: overlarge target size is not a full block
+}
+
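+/*
+ * Inverse of aes_encrypt_ctr: read the counter block from the first 16 bytes
+ * of the source, regenerate the keystream, and recover pt_size bytes of
+ * plaintext. The keystream padding at the end of the source is ignored.
+ */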
+void aes_decrypt_ctr(void *target, size_t pt_size, const void *srcv, size_t src_size, const aes_context *key_e)
+{
+ uint64_t ctr[2];
+ uint64_t tmp[2];
+ uint64_t *dst = target;
+ const uint64_t *src = srcv;
+
+ ctr[0] = src[0];
+ ctr[1] = src[1];
+ src += 2;
+ src_size -= 16;
+
+ if (pt_size > src_size)
+ abort(); // invalid argument: source too small for plaintext
+ // we discard src_size now
+
+ while (pt_size >= 16) {
+ aes_ctr_one(tmp, ctr, key_e);
+ dst[0] = tmp[0] ^ src[0];
+ dst[1] = tmp[1] ^ src[1];
+
+ dst += 2;
+ src += 2;
+ pt_size -= 16;
+ }
+ if (pt_size) {
+ uint64_t stmp[2];
+ uint64_t dtmp[2];
+ memset(stmp, 0, 16);
+ memcpy(stmp, src, pt_size);
+
+ aes_ctr_one(tmp, ctr, key_e);
+
+ dtmp[0] = tmp[0] ^ stmp[0];
+ dtmp[1] = tmp[1] ^ stmp[1];
+ memcpy(dst, dtmp, pt_size);
+ }
+}
+
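+/*
+ * Doubling step in GF(2^128) used for AES-CMAC subkey generation
+ * (NIST SP 800-38B / RFC 4493): shift left by one bit and, if the top bit
+ * was set, XOR the reduction constant 0x87 into the last byte.
+ */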
+static void shl_128_mod_hex87(struct mac128 *dst, const struct mac128 *src)
+{
+ int i;
+ int carry = 0x87 * !!(src->bits[0] & 0x80);
+ for(i=0; i < 15; i++)
+ dst->bits[i] = (src->bits[i] << 1) | (src->bits[i+1] >> 7);
+ dst->bits[15] = (src->bits[15] << 1) ^ carry;
+}
+
+static void xor128(struct mac128 *dst, const struct mac128 *s1, const struct mac128 *s2)
+{
+ int i;
+ for(i=0; i < 16; i++)
+ dst->bits[i] = s1->bits[i] ^ s2->bits[i];
+}
+
+void aes_cmac(struct mac128 *target, const void *src, size_t size, const aes_context *key)
+{
+ const struct mac128 *M = src;
+ struct mac128 x, y, L, K1, K2;
+ int i;
+ size_t bsize = (size - 1) / 16;
+
+ memset(&x, 0, sizeof(x));
+ aes_encrypt_ecb(&L, &x, key);
+ shl_128_mod_hex87(&K1, &L);
+ shl_128_mod_hex87(&K2, &K1);
+
+ for(i=0; i < bsize; i++) {
+ xor128(&y, &x, &M[i]);
+ aes_encrypt_ecb(&x, &y, key);
+ }
+ if (size & 0xF) {
+ struct mac128 z;
+ memset(&z, 0, sizeof(z));
+ memcpy(&z, M + bsize, size & 0xF);
+ xor128(&y, &x, &K2);
+ xor128(&x, &y, &z);
+ } else {
+ xor128(&y, &x, &K1);
+ xor128(&x, &y, M + bsize);
+ }
+ aes_encrypt_ecb(target, &x, key);
+}
+
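+/*
+ * Compare two 128-bit values without an early exit (volatile reads keep the
+ * compiler from short-circuiting the comparison). Returns 0 on match,
+ * 1 on mismatch.
+ */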
+static int verify_128(const void *a, const void* b)
+{
+ const volatile uint64_t *x = a;
+ const volatile uint64_t *y = b;
+ if ((x[0] ^ y[0]) | (x[1] ^ y[1]))
+ return 1;
+ return 0;
+}
+
+int aes_cmac_verify(const struct mac128 *target, const void *src, size_t size, const aes_context *key)
+{
+ struct mac128 mac;
+ aes_cmac(&mac, src, size, key);
+ return verify_128(&mac, target);
+}
+
+static int verify_256(const void *a, const void* b)
+{
+ const volatile uint64_t *x = a;
+ const volatile uint64_t *y = b;
+ if ((x[0] ^ y[0]) | (x[1] ^ y[1]) | (x[2] ^ y[2]) | (x[3] ^ y[3]))
+ return 1;
+ return 0;
+}
+
+void sha256(struct hash256 *target, const void *src, size_t size)
+{
+ void* dst = target;
+ sha2(src, size, dst, 0);
+}
+
+int sha256_verify(const struct hash256 *targ, const void *data, size_t size)
+{
+ struct hash256 hash;
+ sha256(&hash, data, size);
+ return verify_256(&hash, targ);
+}
--- /dev/null
+#ifndef __VTPMMGR_DISK_CRYPTO_H
+#define __VTPMMGR_DISK_CRYPTO_H
+
+void do_random(void *buf, size_t size);
+void aes_encrypt_one(void *target, const void *src, const struct key128 *key);
+void aes_decrypt_one(void *target, const void *src, const struct key128 *key);
+
+void aes_setup(aes_context *ctx, const struct key128 *key);
+void aes_encrypt_ctr(void *target, size_t target_size, const void *srcv, size_t src_size, const aes_context *key_e);
+void aes_decrypt_ctr(void *target, size_t target_size, const void *srcv, size_t src_size, const aes_context *key_e);
+void aes_cmac(struct mac128 *target, const void *src, size_t size, const aes_context *key);
+int aes_cmac_verify(const struct mac128 *target, const void *src, size_t size, const aes_context *key);
+
+void sha256(struct hash256 *target, const void *src, size_t size);
+int sha256_verify(const struct hash256 *targ, const void *data, size_t size);
+
+#endif
--- /dev/null
+#ifndef __VTPMMGR_DISK_FORMAT_H
+#define __VTPMMGR_DISK_FORMAT_H
+
+static const uint8_t TPM_MGR_MAGIC[12] = {
+ 'T','P','M',0xfe,'M','G','R',0xdd,'D','O','M',0x00
+};
+
+/**
+ * Sector 0 on disk: stored in plaintext
+ */
+struct disk_header {
+ char magic[12];
+#define TPM_MGR_VERSION 0
+ be32_t version;
+};
+
+/**
+ * Raw contents of disk sectors that need both encryption and authentication
+ */
+struct disk_crypt_sector_plain {
+ struct mac128 mac;
+ union {
+ struct {
+ uint8_t iv[16];
+ char data[4096-32];
+ };
+ uint8_t iv_data[4096-16];
+ };
+};
+
+/**
+ * Contents of the sealed blob in the root seal list
+ */
+struct disk_root_sealed_data {
+#define DISK_ROOT_BOUND_MAGIC "Root"
+ char magic[4];
+ uuid_t tpm_manager_uuid;
+
+ be32_t nvram_slot;
+ struct tpm_authdata nvram_auth;
+ be32_t counter_index;
+ struct tpm_authdata counter_auth;
+
+ /* encrypted (AES-ECB) with key from NVRAM */
+ struct key128 tm_key;
+};
+
+/**
+ * Contents of the sealed blob in a group's seal list
+ */
+struct disk_group_sealed_data {
+#define DISK_GROUP_BOUND_MAGIC "TGrp"
+ char magic[4];
+ uuid_t tpm_manager_uuid;
+ struct tpm_authdata aik_authdata;
+
+ struct key128 group_key;
+ struct key128 rollback_mac_key;
+};
+
+/**
+ * Contents of the seal_list_N sectors on disk (plaintext, linked list)
+ *
+ * The hdr field is unused except in sector 0
+ */
+struct disk_seal_list {
+ struct disk_header hdr;
+ be32_t length;
+ sector_t next;
+#define SEALS_PER_ROOT_SEAL_LIST 13
+ struct disk_seal_entry entry[SEALS_PER_ROOT_SEAL_LIST];
+};
+
+/**
+ * TODO - overflow for struct disk_group_boot_config_list
+ */
+struct disk_group_seal_list {
+ sector_t next;
+#define SEALS_PER_GROUP_SEAL_LIST 13
+ struct disk_seal_entry entry[SEALS_PER_GROUP_SEAL_LIST];
+};
+
+/**
+ * Rollback detection MAC entry
+ */
+struct disk_rb_mac_entry {
+ be32_t id;
+ struct mac128 mac;
+};
+
+#define NR_ENTRIES_PER_ROOT 16
+/**
+ * The area of the root sector protected by rollback MACs
+ */
+struct disk_root_sector_mac1_area {
+ be64_t sequence;
+ be32_t tpm_counter_value;
+
+ be32_t nr_groups;
+ struct hash256 group_hash[NR_ENTRIES_PER_ROOT];
+};
+
+/**
+ * Decrypted contents of the root sector (sector 1 and 2) on disk
+ */
+struct disk_root_sector {
+ struct disk_root_sector_mac1_area v;
+
+ sector_t group_loc[NR_ENTRIES_PER_ROOT];
+
+ uint8_t pad[8];
+
+ /* Rollback detection MACs */
+ be32_t nr_rb_macs;
+ sector_t rb_next_loc;
+ /* used if rb_macs overflows */
+ struct hash256 rb_next_hash;
+
+#define NR_RB_MACS_PER_ROOT 128
+ struct disk_rb_mac_entry rb_macs[NR_RB_MACS_PER_ROOT];
+};
+
+/**
+ * Hash tree for list expansion. Used for the list of groups in the root and for
+ * the list of vTPMs in a group.
+ */
+struct disk_itree_sector {
+#define NR_ENTRIES_PER_ITREE 112
+ sector_t location[NR_ENTRIES_PER_ITREE];
+ /* SECTOR-HASH { */
+ struct hash256 hash[NR_ENTRIES_PER_ITREE];
+ /* SECTOR-HASH } */
+};
+
+#define NR_ENTRIES_PER_GROUP_BASE 16
+/**
+ * Data that must remain constant if a group is not open
+ */
+struct disk_group_sector_mac3_area {
+ struct group_id_data id_data; /* MAC2 */
+ struct group_details details;
+ struct disk_group_boot_config_list boot_configs;
+
+ be32_t nr_vtpms;
+ struct hash256 vtpm_hash[NR_ENTRIES_PER_GROUP_BASE];
+};
+
+/**
+ * Group metadata sector
+ *
+ * Encrypted with TM_KEY - takes 16 bytes for IV; integrity from parent.
+ */
+struct disk_group_sector {
+ /* SECTOR-HASH { */
+ struct disk_group_sector_mac3_area v;
+
+ /* MAC(MAC3, group_key) */
+ struct mac128 group_mac;
+ /* SECTOR-HASH } */
+
+ sector_t vtpm_location[NR_ENTRIES_PER_GROUP_BASE];
+ sector_t boot_configs_next;
+};
+
+/**
+ * Data on a vTPM which is available when its group is not open
+ */
+struct disk_vtpm_plain {
+ uuid_t uuid;
+ be32_t flags;
+};
+
+/**
+ * Data on a vTPM which is only available when its group is open
+ */
+struct disk_vtpm_secret {
+ uint8_t data[64];
+};
+
+/**
+ * Contents of a vTPM data disk sector
+ *
+ * Encrypted with TM_KEY - takes 16 bytes for IV
+ */
+struct disk_vtpm_sector {
+ /* SECTOR-HASH { */
+ struct disk_vtpm_plain header[VTPMS_PER_SECTOR];
+ struct mac128 iv;
+ struct disk_vtpm_secret data[VTPMS_PER_SECTOR];
+ /* SECTOR-HASH } */
+};
+
+#endif
--- /dev/null
+#include <blkfront.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <mini-os/byteorder.h>
+
+#include "vtpm_manager.h"
+#include "log.h"
+#include "uuid.h"
+
+#include "vtpmmgr.h"
+#include "vtpm_disk.h"
+#include "disk_tpm.h"
+#include "disk_io.h"
+
+static uint8_t disk_staging_buf[4096] __attribute__((aligned(4096)));
+
+static struct blkfront_dev* blkdev;
+static int blkfront_fd = -1;
+
+int vtpm_storage_init(void) {
+ struct blkfront_info info;
+ blkdev = init_blkfront(NULL, &info);
+ if (blkdev == NULL)
+ return -1;
+ blkfront_fd = blkfront_open(blkdev);
+ if (blkfront_fd < 0)
+ return -1;
+ return 0;
+}
+
+void* disk_read_sector(sector_t sector)
+{
+ uint32_t pos = be32_native(sector);
+ int rc;
+ vtpmloginfo(VTPM_LOG_VTPM, "disk_read_sector %x\n", pos);
+ lseek(blkfront_fd, pos * 4096, SEEK_SET);
+ rc = read(blkfront_fd, disk_staging_buf, 4096);
+ if (rc != 4096)
+ abort();
+ return disk_staging_buf;
+}
+
+void* disk_write_buf(void) { return disk_staging_buf; }
+
+void disk_write_sector(sector_t sector, void* buf, size_t siz)
+{
+ int rc;
+ uint32_t pos = be32_native(sector);
+ lseek(blkfront_fd, pos * 4096, SEEK_SET);
+ if (siz < 4096) {
+ if (buf != disk_staging_buf)
+ memcpy(disk_staging_buf, buf, siz);
+ memset(disk_staging_buf + siz, 0, 4096 - siz);
+ buf = disk_staging_buf;
+ } else if (siz > 4096)
+ abort();
+
+ rc = write(blkfront_fd, buf, 4096);
+ if (rc != 4096)
+ abort();
+}
+
+void disk_write_barrier(void)
+{
+ blkfront_sync(blkdev);
+}
+
+enum inuse_value {
+ UNUSED,
+ SLOT_1,
+ SLOT_2,
+ SHARED
+};
+
+/* TODO make this dynamic to support using more than 2MB of disk */
+#define DISK_MAX_SECTOR 0x200
+
+/* The first 4 sectors are statically allocated:
+ * 0 - disk header (copy 1)
+ * 1 - disk header (copy 2)
+ * 2 - root sector (copy 1)
+ * 3 - root sector (copy 2)
+ */
+#define FIRST_DYNAMIC_SECTOR 4
+
+static uint8_t sector_inuse_map[DISK_MAX_SECTOR];
+
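+/*
+ * Each root slot owns one bit in sector_inuse_map: bit 0 for slot 1 and
+ * bit 1 for slot 2 (see enum inuse_value). A sector referenced by both
+ * slots is SHARED.
+ */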
+static int active_slot(const struct mem_tpm_mgr *mgr)
+{
+ return 1 + mgr->active_root;
+}
+
+void disk_set_used(sector_t loc, const struct mem_tpm_mgr *mgr)
+{
+ uint32_t s = be32_native(loc);
+ if (s >= DISK_MAX_SECTOR) {
+ printk("Attempted disk_set_used %x\n", s);
+ return;
+ }
+ sector_inuse_map[s] |= active_slot(mgr);
+}
+
+void disk_flush_slot(const struct mem_tpm_mgr *mgr)
+{
+ int i;
+ for(i = FIRST_DYNAMIC_SECTOR; i < DISK_MAX_SECTOR; i++)
+ sector_inuse_map[i] &= ~active_slot(mgr);
+}
+
+sector_t disk_find_free(const struct mem_tpm_mgr *mgr)
+{
+ int i;
+ for(i = FIRST_DYNAMIC_SECTOR; i < DISK_MAX_SECTOR; i++) {
+ if (sector_inuse_map[i])
+ continue;
+ sector_inuse_map[i] = active_slot(mgr);
+ return native_be32(i);
+ }
+ // TODO more graceful error handling (in callers)
+ abort();
+}
--- /dev/null
+#ifndef __VTPMMGR_DISK_IO_H
+#define __VTPMMGR_DISK_IO_H
+
+void* disk_read_sector(sector_t sector);
+void disk_write_sector(sector_t sector, void* buf, size_t siz);
+void* disk_write_buf(void);
+void disk_write_barrier(void);
+
+sector_t disk_find_free(const struct mem_tpm_mgr *mgr);
+void disk_flush_slot(const struct mem_tpm_mgr *mgr);
+void disk_set_used(sector_t loc, const struct mem_tpm_mgr *mgr);
+
+void disk_write_all(struct mem_tpm_mgr *mgr);
+
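+/*
+ * Locations of the statically allocated sectors (see disk_io.c): the seal
+ * list (which embeds the disk header) lives in sector 0 or 1 and the root
+ * sector in sector 2 or 3, selected by the active slot.
+ */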
+static inline sector_t seal_loc(struct mem_tpm_mgr *mgr)
+{
+ return native_be32(mgr->active_root);
+}
+
+static inline sector_t root_loc(struct mem_tpm_mgr *mgr)
+{
+ return native_be32(2 + mgr->active_root);
+}
+
+#endif
--- /dev/null
+#include <console.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <mini-os/byteorder.h>
+
+#include "vtpm_manager.h"
+#include "log.h"
+#include "uuid.h"
+
+#include "vtpmmgr.h"
+#include "vtpm_disk.h"
+#include "disk_tpm.h"
+#include "disk_io.h"
+#include "disk_crypto.h"
+#include "disk_format.h"
+
+static int disk_read_crypt_sector(void *data, size_t size, sector_t block, const struct mem_tpm_mgr *mgr)
+{
+ struct disk_crypt_sector_plain *sector = disk_read_sector(block);
+ if (!sector)
+ return 2;
+
+ if (aes_cmac_verify(&sector->mac, sector->data, sizeof(sector->data), &mgr->tm_key_e))
+ return 2;
+
+ aes_decrypt_ctr(data, size, sector->iv_data, sizeof(sector->iv_data), &mgr->tm_key_e);
+ return 0;
+}
+
+static void group_free(struct mem_group *group)
+{
+ int i, j;
+ if (!group)
+ return;
+ if (group->data) {
+ for (i = 0; i < group->nr_pages; i++) {
+ for (j = 0; j < group->data[i].size; j++) {
+ free(group->data[i].vtpms[j]);
+ }
+ }
+ free(group->data);
+ }
+ free(group->seals);
+ free(group);
+}
+
+static void mgr_free(struct mem_tpm_mgr *mgr)
+{
+ int i;
+ if (!mgr)
+ return;
+ if (mgr->groups) {
+ for(i=0; i < mgr->nr_groups; i++)
+ group_free(mgr->groups[i].v);
+ free(mgr->groups);
+ }
+ free(mgr);
+}
+
+/* Open the group keys from one of the sealed structures */
+static int find_group_key(struct mem_group *dst,
+ const struct disk_group_sector *group,
+ const struct mem_tpm_mgr *parent)
+{
+ int i, rc, rv = 1;
+ struct hash160 buf;
+ struct disk_group_sealed_data sealed;
+
+ dst->nr_seals = be32_native(group->v.boot_configs.nr_cfgs);
+ if (dst->nr_seals > NR_SEALS_PER_GROUP)
+ return 3; // TODO support spill to extra pages
+
+ dst->seals = calloc(dst->nr_seals, sizeof(dst->seals[0]));
+ if (!dst->seals) {
+ vtpmlogerror(VTPM_LOG_VTPM, "find_group_key alloc %x\n", dst->nr_seals);
+ return 2;
+ }
+
+ for(i=0; i < dst->nr_seals; i++) {
+ const struct disk_seal_entry *cfg = &group->v.boot_configs.entry[i];
+ dst->seals[i].pcr_selection = cfg->pcr_selection;
+ memcpy(&dst->seals[i].digest_release, &cfg->digest_release, 20);
+
+ TPM_pcr_digest(&buf, cfg->pcr_selection);
+ if (memcmp(&buf, &cfg->digest_release, 20))
+ continue;
+ rc = TPM_disk_unseal(&sealed, sizeof(sealed), cfg);
+ if (rc)
+ continue;
+ if (memcmp(&sealed.magic, DISK_GROUP_BOUND_MAGIC, 4))
+ continue;
+ if (memcmp(sealed.tpm_manager_uuid, parent->uuid, 16))
+ continue;
+
+ memcpy(&dst->rollback_mac_key, &sealed.rollback_mac_key, 16);
+ memcpy(&dst->group_key, &sealed.group_key, 16);
+ memcpy(&dst->aik_authdata, &sealed.aik_authdata, 20);
+ rv = 0;
+ }
+
+ // cache the list to allow writes without touching the TPM
+ memcpy(&dst->seal_bits, &group->v.boot_configs, sizeof(dst->seal_bits));
+ dst->flags |= MEM_GROUP_FLAG_SEAL_VALID;
+
+ return rv;
+}
+
+static int parse_root_key(struct mem_tpm_mgr *dst, struct disk_seal_entry *src)
+{
+ int rc;
+ struct disk_root_sealed_data sealed;
+
+ rc = TPM_disk_unseal(&sealed, sizeof(sealed), src);
+ if (rc)
+ return rc;
+
+ if (memcmp(&sealed.magic, DISK_ROOT_BOUND_MAGIC, 4))
+ return 1;
+
+ rc = TPM_disk_nvread(&dst->nv_key, 16, sealed.nvram_slot, sealed.nvram_auth);
+ if (rc)
+ return rc;
+
+ // TODO when an NV slot in the physical TPM is used to populate nv_key,
+ // that value should be used to mask the master key so that the value
+ // can be changed to revoke old disk state
+#if 0
+ aes_decrypt_one(&dst->tm_key, &sealed.tm_key, &dst->nv_key);
+#else
+ memcpy(&dst->tm_key, &sealed.tm_key, 16);
+#endif
+
+ memcpy(dst->uuid, sealed.tpm_manager_uuid, 16);
+ dst->nvram_slot = sealed.nvram_slot;
+ memcpy(&dst->nvram_auth, &sealed.nvram_auth, sizeof(struct tpm_authdata));
+ dst->counter_index = sealed.counter_index;
+ memcpy(&dst->counter_auth, &sealed.counter_auth, sizeof(struct tpm_authdata));
+
+ return 0;
+}
+
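+/*
+ * Walk the seal list chain in the given root slot looking for an entry whose
+ * PCR composite matches the current platform state and whose sealed blob
+ * unseals into a valid root key. Returns NULL if no usable entry is found.
+ */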
+static struct mem_tpm_mgr *find_root_key(int active_root)
+{
+ sector_t seal_list = native_be32(active_root);
+ struct disk_seal_list *seal = disk_read_sector(seal_list);
+ struct hash160 buf;
+ int i, rc, nr;
+ struct mem_tpm_mgr *dst;
+
+ if (memcmp(seal->hdr.magic, TPM_MGR_MAGIC, 12))
+ return NULL;
+
+ if (be32_native(seal->hdr.version) != TPM_MGR_VERSION)
+ return NULL;
+
+ dst = calloc(1, sizeof(*dst));
+ dst->active_root = active_root;
+
+ for (nr = 0; nr < 100; nr++) {
+ disk_set_used(seal_list, dst);
+ uint32_t nr_seals = be32_native(seal->length);
+ if (nr_seals > SEALS_PER_ROOT_SEAL_LIST)
+ break;
+ for (i = 0; i < nr_seals; i++) {
+ struct disk_seal_entry *src = &seal->entry[i];
+
+ TPM_pcr_digest(&buf, src->pcr_selection);
+ if (memcmp(&buf, &src->digest_release, 20))
+ continue;
+
+ rc = parse_root_key(dst, src);
+ if (rc)
+ continue;
+ return dst;
+ }
+ seal_list = seal->next;
+ if (seal_list.value == 0)
+ break;
+ seal = disk_read_sector(seal_list);
+ }
+ mgr_free(dst);
+ return NULL;
+}
+
+/* Load and verify one sector's worth of vTPMs. This loads all the vTPM entries
+ * and decrypts their state data into memory.
+ */
+static int load_verify_vtpm_page(struct mem_vtpm_page *dst, int base,
+ const struct mem_tpm_mgr *mgr, const aes_context *group_key)
+{
+ struct disk_vtpm_sector pt;
+ int i, rc;
+
+ disk_set_used(dst->disk_loc, mgr);
+
+ rc = disk_read_crypt_sector(&pt, sizeof(pt), dst->disk_loc, mgr);
+ if (rc) {
+ printk("Malformed sector %d\n", be32_native(dst->disk_loc));
+ return rc;
+ }
+
+ rc = sha256_verify(&dst->disk_hash, &pt, sizeof(pt));
+ if (rc) {
+ printk("Hash mismatch in sector %d\n", be32_native(dst->disk_loc));
+ return rc;
+ }
+
+ if (!group_key)
+ return 0;
+
+ aes_decrypt_ctr(pt.data, sizeof(pt.data), &pt.iv, sizeof(pt.data) + 16, group_key);
+
+ for (i = 0; i < dst->size; i++) {
+ struct mem_vtpm *vtpm = calloc(1, sizeof(*vtpm));
+ dst->vtpms[i] = vtpm;
+ memcpy(vtpm->uuid, pt.header[i].uuid, 16);
+ memcpy(vtpm->data, pt.data[i].data, 64);
+ vtpm->flags = be32_native(pt.header[i].flags);
+ vtpm->index_in_parent = i + base;
+ }
+ return 0;
+}
+
+static int load_verify_vtpm_pages(struct mem_group *group, int base, int size,
+ const struct hash256 *hash, const sector_t *loc,
+ const struct mem_tpm_mgr *mgr, const aes_context *group_key)
+{
+ int i, rc;
+ struct mem_vtpm_page *page = group->data + base;
+
+ /* base was in terms of sectors; convert to vtpms */
+ base *= VTPMS_PER_SECTOR;
+
+ for (i = 0; i < size; i++) {
+ page->disk_hash = hash[i];
+ page->disk_loc = loc[i];
+ if (group->nr_vtpms - base > VTPMS_PER_SECTOR)
+ page->size = VTPMS_PER_SECTOR;
+ else
+ page->size = group->nr_vtpms - base;
+ rc = load_verify_vtpm_page(page, base, mgr, group_key);
+ if (rc)
+ return rc;
+ base += VTPMS_PER_SECTOR;
+ }
+
+ return 0;
+}
+
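+/*
+ * Walk the hash tree of vTPM pages for one group. The parent provides hsize
+ * (hash, location) pairs; when more than hsize pages exist, each pair refers
+ * to an itree sector covering 'incr' leaf pages, which is verified against
+ * its hash and descended recursively. Every sector visited is recorded in
+ * hdr->disk_inuse so it can be preserved on the next write.
+ */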
+static int load_verify_vtpm_itree(struct mem_group_hdr *hdr, int base, int nr_entries,
+ const struct hash256 *hash, const sector_t *loc, int hsize,
+ const struct mem_tpm_mgr *mgr, const aes_context *group_key);
+
+static int load_verify_vtpm_itree(struct mem_group_hdr *hdr, int base, int nr_entries,
+ const struct hash256 *hash, const sector_t *loc, int hsize,
+ const struct mem_tpm_mgr *mgr, const aes_context *group_key)
+{
+ int i, rc, incr = 1, inuse_base = hdr->disk_nr_inuse, lsize;
+
+ // increase tree depth until all entries fit
+ while (nr_entries > incr * hsize)
+ incr *= NR_ENTRIES_PER_ITREE;
+
+ // save the list of used sectors (itree and vtpm) in the header
+ lsize = 1 + (nr_entries - 1) / incr;
+ hdr->disk_nr_inuse += lsize;
+ hdr->disk_inuse = realloc(hdr->disk_inuse, hdr->disk_nr_inuse * sizeof(sector_t));
+ memcpy(&hdr->disk_inuse[inuse_base], loc, lsize * sizeof(sector_t));
+
+ // if the entries already fit, process vtpm pages
+ if (nr_entries <= hsize)
+ return load_verify_vtpm_pages(hdr->v, base, nr_entries, hash, loc, mgr, group_key);
+
+ for (i = 0; i * incr < nr_entries; i++) {
+ struct disk_itree_sector pt;
+ int child_entries = incr;
+
+ // the last sector is not completely full
+ if (nr_entries - i * incr < incr)
+ child_entries = nr_entries - i * incr;
+
+ disk_set_used(loc[i], mgr);
+ hdr->disk_inuse[inuse_base++] = loc[i];
+
+ rc = disk_read_crypt_sector(&pt, sizeof(pt), loc[i], mgr);
+ if (rc) {
+ printk("Malformed sector %d\n", be32_native(loc[i]));
+ return rc;
+ }
+
+ rc = sha256_verify(&hash[i], pt.hash, sizeof(pt.hash));
+ if (rc) {
+ printk("Hash mismatch in sector %d\n", be32_native(loc[i]));
+ return rc;
+ }
+
+ rc = load_verify_vtpm_itree(hdr, base, child_entries, pt.hash, pt.location,
+ NR_ENTRIES_PER_ITREE, mgr, group_key);
+ if (rc)
+ return rc;
+
+ base += incr;
+ }
+
+ return 0;
+}
+
+/* Load and verify one group's data structure, including its vTPMs.
+ */
+static int load_verify_group(struct mem_group_hdr *dst, const struct mem_tpm_mgr *mgr)
+{
+ struct mem_group *group;
+ struct disk_group_sector disk;
+ int rc;
+ aes_context key_e;
+ aes_context *opened_key = NULL;
+
+ disk_set_used(dst->disk_loc, mgr);
+
+ rc = disk_read_crypt_sector(&disk, sizeof(disk), dst->disk_loc, mgr);
+ if (rc) {
+ printk("Malformed sector %d\n", be32_native(dst->disk_loc));
+ return rc;
+ }
+
+ rc = sha256_verify(&dst->disk_hash, &disk.v, sizeof(disk.v) + sizeof(disk.group_mac));
+ if (rc) {
+ printk("Hash mismatch in sector %d\n", be32_native(dst->disk_loc));
+ return rc;
+ }
+
+ dst->v = group = calloc(1, sizeof(*group));
+
+ rc = find_group_key(group, &disk, mgr);
+ if (rc == 0) {
+ opened_key = &key_e;
+ /* Verify the group with the group's own key */
+ aes_setup(opened_key, &group->group_key);
+ if (aes_cmac_verify(&disk.group_mac, &disk.v, sizeof(disk.v), opened_key)) {
+ printk("Group CMAC failed\n");
+ return 2;
+ }
+
+ memcpy(&group->id_data, &disk.v.id_data, sizeof(group->id_data));
+ memcpy(&group->details, &disk.v.details, sizeof(group->details));
+ } else if (rc == 1) {
+ // still need to walk the vtpm list
+ rc = 0;
+ } else {
+ printk("Group key unsealing failed\n");
+ return rc;
+ }
+
+ group->nr_vtpms = be32_native(disk.v.nr_vtpms);
+ group->nr_pages = (group->nr_vtpms + VTPMS_PER_SECTOR - 1) / VTPMS_PER_SECTOR;
+
+ group->data = calloc(group->nr_pages, sizeof(group->data[0]));
+
+ rc = load_verify_vtpm_itree(dst, 0, group->nr_pages, disk.v.vtpm_hash,
+ disk.vtpm_location, NR_ENTRIES_PER_GROUP_BASE, mgr, opened_key);
+
+ if (!opened_key) {
+ /* remove the struct */
+ free(group->data);
+ free(group->seals);
+ free(group);
+ dst->v = NULL;
+ }
+
+ return rc;
+}
+
+static int load_root_pre(struct disk_root_sector *root, struct mem_tpm_mgr *dst)
+{
+ int rc;
+
+ aes_setup(&dst->tm_key_e, &dst->tm_key);
+
+ rc = disk_read_crypt_sector(root, sizeof(*root), root_loc(dst), dst);
+
+ if (rc) {
+ vtpmloginfo(VTPM_LOG_VTPM, "root cmac verify failed in slot %d\n", dst->active_root);
+ return 2;
+ }
+
+ dst->root_seals_valid = 1 + dst->active_root;
+ dst->sequence = be64_native(root->v.sequence);
+
+ return 0;
+}
+
+static int load_verify_group_itree(struct mem_tpm_mgr *dst, int base, int nr_entries,
+ const struct hash256 *hash, const sector_t *loc, int hsize);
+
+static int load_verify_group_itree(struct mem_tpm_mgr *dst, int base, int nr_entries,
+ const struct hash256 *hash, const sector_t *loc, int hsize)
+{
+ int i, rc, incr = 1;
+
+ if (nr_entries <= hsize) {
+ for(i=0; i < nr_entries; i++) {
+ struct mem_group_hdr *group = dst->groups + base + i;
+ group->disk_loc = loc[i];
+ memcpy(&group->disk_hash, &hash[i], sizeof(group->disk_hash));
+ rc = load_verify_group(group, dst);
+ if (rc) {
+ printk("Error loading group %d\n", base + i);
+ return rc;
+ }
+ }
+ return 0;
+ }
+
+ // increase tree depth until all entries fit
+ while (nr_entries > incr * hsize)
+ incr *= NR_ENTRIES_PER_ITREE;
+
+ for (i = 0; i * incr < nr_entries; i++) {
+ struct disk_itree_sector pt;
+ int child_entries = incr;
+
+ // the last sector is not completely full
+ if (nr_entries - i * incr < incr)
+ child_entries = nr_entries - i * incr;
+
+ disk_set_used(loc[i], dst);
+
+ rc = disk_read_crypt_sector(&pt, sizeof(pt), loc[i], dst);
+ if (rc) {
+ printk("Malformed sector %d\n", be32_native(loc[i]));
+ return rc;
+ }
+
+ rc = sha256_verify(&hash[i], pt.hash, sizeof(pt.hash));
+ if (rc) {
+ printk("Hash mismatch in sector %d\n", be32_native(loc[i]));
+ return rc;
+ }
+
+ rc = load_verify_group_itree(dst, base, child_entries, pt.hash, pt.location, NR_ENTRIES_PER_ITREE);
+ if (rc)
+ return rc;
+
+ base += incr;
+ }
+
+ return 0;
+}
+
+static int load_root_post(struct mem_tpm_mgr *dst, const struct disk_root_sector *root)
+{
+ int rc, i, j;
+ uint32_t nr_disk_rbs = be32_native(root->nr_rb_macs);
+
+ rc = TPM_disk_check_counter(dst->counter_index, dst->counter_auth,
+ root->v.tpm_counter_value);
+ if (rc)
+ return 2;
+ dst->counter_value = root->v.tpm_counter_value;
+
+ dst->nr_groups = be32_native(root->v.nr_groups);
+ dst->groups = calloc(sizeof(dst->groups[0]), dst->nr_groups);
+
+ if (!dst->groups) {
+ vtpmlogerror(VTPM_LOG_VTPM, "load_root_post alloc %x\n", dst->nr_groups);
+ return 2;
+ }
+
+ rc = load_verify_group_itree(dst, 0, dst->nr_groups,
+ root->v.group_hash, root->group_loc, NR_ENTRIES_PER_ROOT);
+ if (rc)
+ return rc;
+
+ /* Sanity check: group0 must be open */
+ if (!dst->groups[0].v) {
+ printk("Error opening group 0\n");
+ return 2;
+ }
+
+ /* TODO support for spilling rollback list */
+ if (nr_disk_rbs > NR_RB_MACS_PER_ROOT)
+ return 3;
+
+ i = 0;
+ j = 0;
+ while (i < dst->nr_groups) {
+ aes_context key_e;
+ struct mem_group_hdr *group = &dst->groups[i];
+ struct mem_group *groupv = group->v;
+ const struct disk_rb_mac_entry *ent = &root->rb_macs[j];
+
+ if (!groupv) {
+ i++;
+ // this group is not open - no need to verify now
+ continue;
+ }
+
+ if (be32_native(ent->id) < i) {
+ // this entry is for a group that is not open
+ j++;
+ continue;
+ }
+
+ if (j >= nr_disk_rbs || be32_native(ent->id) != i) {
+ // TODO allow delegation
+ if (!(groupv->details.flags.value & FLAG_ROLLBACK_DETECTED)) {
+ groupv->details.flags.value |= FLAG_ROLLBACK_DETECTED;
+ group->disk_loc.value = 0;
+ }
+ i++;
+ continue;
+ }
+
+ aes_setup(&key_e, &groupv->rollback_mac_key);
+ if (aes_cmac_verify(&ent->mac, &root->v, sizeof(root->v), &key_e)) {
+ if (!(groupv->details.flags.value & FLAG_ROLLBACK_DETECTED)) {
+ groupv->details.flags.value |= FLAG_ROLLBACK_DETECTED;
+ group->disk_loc.value = 0;
+ }
+ }
+ i++; j++;
+ }
+
+ return 0;
+}
+
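+/*
+ * Load the TPM Manager's state from disk. Both root slots are tried; the
+ * slot with the higher sequence number that unseals and verifies cleanly is
+ * used, falling back to the other slot on failure.
+ */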
+int vtpm_load_disk(void)
+{
+ struct disk_root_sector root1, root2;
+ int rc = 0;
+ TPM_read_pcrs();
+
+ printk("TPM Manager - disk format %d\n", TPM_MGR_VERSION);
+ printk(" root seal: %lu; sector of %d: %lu\n",
+ sizeof(struct disk_root_sealed_data), SEALS_PER_ROOT_SEAL_LIST, sizeof(struct disk_seal_list));
+ printk(" root: %lu v=%lu\n", sizeof(root1), sizeof(root1.v));
+ printk(" itree: %lu; sector of %d: %lu\n",
+ 4 + 32, NR_ENTRIES_PER_ITREE, sizeof(struct disk_itree_sector));
+ printk(" group: %lu v=%lu id=%lu md=%lu\n",
+ sizeof(struct disk_group_sector), sizeof(struct disk_group_sector_mac3_area),
+ sizeof(struct group_id_data), sizeof(struct group_details));
+ printk(" group seal: %lu; %d in parent: %lu; sector of %d: %lu\n",
+ sizeof(struct disk_group_sealed_data), NR_SEALS_PER_GROUP, sizeof(struct disk_group_boot_config_list),
+ SEALS_PER_GROUP_SEAL_LIST, sizeof(struct disk_group_seal_list));
+ printk(" vtpm: %lu+%lu; sector of %d: %lu\n",
+ sizeof(struct disk_vtpm_plain), sizeof(struct disk_vtpm_secret),
+ VTPMS_PER_SECTOR, sizeof(struct disk_vtpm_sector));
+
+ struct mem_tpm_mgr *mgr1 = find_root_key(0);
+ struct mem_tpm_mgr *mgr2 = find_root_key(1);
+
+ rc = mgr1 ? load_root_pre(&root1, mgr1) : 0;
+ if (rc) {
+ mgr_free(mgr1);
+ mgr1 = NULL;
+ }
+
+ rc = mgr2 ? load_root_pre(&root2, mgr2) : 0;
+ if (rc) {
+ mgr_free(mgr2);
+ mgr2 = NULL;
+ }
+
+ printk("load_root_pre: %c/%c\n", mgr1 ? 'y' : 'n', mgr2 ? 'y' : 'n');
+
+ if (!mgr1 && !mgr2)
+ return 2;
+
+ if (mgr1 && mgr2 && mgr2->sequence > mgr1->sequence) {
+ rc = load_root_post(mgr2, &root2);
+ if (rc) {
+ mgr_free(mgr2);
+ mgr2 = NULL;
+ } else {
+ mgr_free(mgr1);
+ g_mgr = mgr2;
+ return 0;
+ }
+ }
+ if (mgr1) {
+ rc = load_root_post(mgr1, &root1);
+ if (rc) {
+ mgr_free(mgr1);
+ } else {
+ mgr_free(mgr2);
+ g_mgr = mgr1;
+ return 0;
+ }
+ }
+ if (mgr2) {
+ rc = load_root_post(mgr2, &root2);
+ if (rc) {
+ mgr_free(mgr2);
+ } else {
+ g_mgr = mgr2;
+ return 0;
+ }
+ }
+ printk("Could not read vTPM disk\n");
+
+ return 2;
+}
--- /dev/null
+/* TPM disk interface */
+#include <blkfront.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <mini-os/byteorder.h>
+#include <polarssl/aes.h>
+#include <polarssl/sha1.h>
+
+#include "tpm.h"
+#include "tcg.h"
+
+#include "vtpmmgr.h"
+#include "vtpm_disk.h"
+#include "disk_tpm.h"
+
+// Print out input/output of seal/unseal operations (includes keys)
+#undef DEBUG_SEAL_OPS
+
+#ifdef DEBUG_SEAL_OPS
+#include "marshal.h"
+#endif
+
+struct pcr_list {
+ TPM_DIGEST pcrs[24];
+};
+
+static struct pcr_list hwtpm;
+
+void TPM_read_pcrs(void)
+{
+ int i;
+ for(i=0; i < 24; i++)
+ TPM_PCR_Read(i, &hwtpm.pcrs[i]);
+}
+
+struct pcr_composite_3 {
+ be16_t sel_size;
+ uint8_t sel[3];
+ be32_t val_size;
+ uint8_t val[0];
+} __attribute__((packed));
+
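+/*
+ * Compute the composite hash of the selected PCRs: a TPM_PCR_COMPOSITE
+ * structure (2-byte select size, 3-byte selection bitmap, 4-byte value size,
+ * then the selected 20-byte PCR values) is built from the cached hardware
+ * PCRs and hashed with SHA-1. Used to compare against digest_release.
+ */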
+void TPM_pcr_digest(struct hash160 *buf, le32_t selection)
+{
+ int i;
+ int count = 0;
+ uint32_t sel = le32_native(selection);
+ struct pcr_composite_3 *v;
+ for(i=0; i < 24; i++) {
+ if (sel & (1 << i))
+ count++;
+ }
+ v = alloca(sizeof(*v) + 20 * count);
+ v->sel_size = native_be16(3);
+ memcpy(v->sel, &selection, 3);
+ v->val_size = native_be32(20 * count);
+
+ count = 0;
+ for(i=0; i < 24; i++) {
+ if (sel & (1 << i)) {
+ memcpy(v->val + 20 * count, &hwtpm.pcrs[i], 20);
+ count++;
+ }
+ }
+
+ sha1((void*)v, sizeof(*v) + 20 * count, buf->bits);
+}
+
+
+int TPM_disk_seal(struct disk_seal_entry *dst, const void* src, size_t size)
+{
+ uint32_t rc;
+ TPM_PCR_INFO info;
+ TPM_STORED_DATA out;
+ TPM_AUTH_SESSION osap = TPM_AUTH_SESSION_INIT;
+ TPM_AUTHDATA sharedsecret;
+ TPM_AUTHDATA auth;
+
+ rc = TPM_OSAP(TPM_ET_KEYHANDLE, TPM_SRK_KEYHANDLE, (void*)&vtpm_globals.srk_auth,
+ &sharedsecret, &osap);
+
+ if (rc) abort();
+
+#ifdef DEBUG_SEAL_OPS
+ int i;
+ printk("to-seal:");
+ for(i=0; i < size; i++)
+ printk(" %02x", ((uint8_t*)src)[i]);
+ printk("\n");
+#endif
+
+ memset(auth, 0, 20);
+ info.pcrSelection.sizeOfSelect = 3;
+ info.pcrSelection.pcrSelect = (void*)&dst->pcr_selection;
+ memcpy(&info.digestAtCreation, &dst->digest_at_seal, 20);
+ memcpy(&info.digestAtRelease, &dst->digest_release, 20);
+
+ rc = TPM_Seal(TPM_SRK_KEYHANDLE, 45, &info, size, src, &out,
+ (void*)&sharedsecret, (void*)&auth, &osap);
+
+ TPM_TerminateHandle(osap.AuthHandle);
+
+#ifdef DEBUG_SEAL_OPS
+ printk("TPM_Seal rc=%d encDataSize=%d sealInfoSize=%d\n", rc, out.encDataSize, out.sealInfoSize);
+#endif
+ if (!rc)
+ memcpy(dst->sealed_data, out.encData, 256);
+
+#ifdef DEBUG_SEAL_OPS
+ uint8_t buf[512];
+ uint8_t *start = buf;
+ uint8_t *end = pack_TPM_STORED_DATA(buf, &out);
+ printk("stored_data:");
+ while (start != end) {
+ printk(" %02x", *start);
+ start++;
+ }
+ printk("\n");
+#endif
+
+ free_TPM_STORED_DATA(&out);
+ return rc;
+}
+
+int TPM_disk_unseal(void *dst, size_t size, const struct disk_seal_entry *src)
+{
+ uint32_t rc;
+ TPM_STORED_DATA in;
+ TPM_AUTH_SESSION oiap = TPM_AUTH_SESSION_INIT;
+ TPM_AUTHDATA auth;
+ uint32_t outSize = 0;
+ uint8_t *out = NULL;
+
+ rc = TPM_OIAP(&oiap);
+ if (rc) abort();
+
+ memset(auth, 0, 20);
+
+ in.ver = TPM_STRUCT_VER_1_1;
+ in.sealInfoSize = 45;
+ in.sealInfo.pcrSelection.sizeOfSelect = 3;
+ in.sealInfo.pcrSelection.pcrSelect = (void*)&src->pcr_selection;
+ memcpy(&in.sealInfo.digestAtCreation, &src->digest_at_seal, 20);
+ memcpy(&in.sealInfo.digestAtRelease, &src->digest_release, 20);
+ in.encDataSize = 256;
+ in.encData = (void*)src->sealed_data;
+
+#ifdef DEBUG_SEAL_OPS
+ uint8_t buf[512];
+ uint8_t *start = buf;
+ uint8_t *end = pack_TPM_STORED_DATA(buf, &in);
+ printk("stored_data:");
+ while (start != end) {
+ printk(" %02x", *start);
+ start++;
+ }
+ printk("\n");
+#endif
+
+ rc = TPM_Unseal(TPM_SRK_KEYHANDLE, &in, &outSize, &out,
+ (void*)&vtpm_globals.srk_auth, (void*)&auth, &vtpm_globals.oiap, &oiap);
+
+ TPM_TerminateHandle(oiap.AuthHandle);
+
+#ifdef DEBUG_SEAL_OPS
+ printk("TPM_Unseal rc=%d outSize=%d size=%d\n", rc, outSize, size);
+#endif
+ if (!rc) {
+ memcpy(dst, out, size);
+#ifdef DEBUG_SEAL_OPS
+ printk("unsealed:");
+ int i;
+ for(i=0; i < size; i++)
+ printk(" %02x", ((uint8_t*)dst)[i]);
+ printk("\n");
+#endif
+ }
+
+ free(out);
+
+ return rc;
+}
+
+int TPM_disk_nvalloc(be32_t *nvram_slot, struct tpm_authdata auth)
+{
+ // TODO-3
+ nvram_slot->value = 0;
+ return 0;
+}
+
+int TPM_disk_nvread(void *buf, size_t bufsiz, be32_t nvram_slot, struct tpm_authdata auth)
+{
+ // TODO-3
+ memset(buf, 0, bufsiz);
+ return 0;
+}
+
+int TPM_disk_nvwrite(void *buf, size_t bufsiz, be32_t nvram_slot, struct tpm_authdata auth)
+{
+ // TODO-3
+ return 0;
+}
+
+int TPM_disk_nvchange(be32_t nvram_slot, struct tpm_authdata old, struct tpm_authdata noo)
+{
+ // TODO-3
+ return 0;
+}
+
+int TPM_disk_alloc_counter(be32_t *slot, struct tpm_authdata auth, be32_t *value)
+{
+ // TODO-3
+ slot->value = 0;
+ value->value = 0;
+ return 0;
+}
+
+int TPM_disk_check_counter(be32_t slot, struct tpm_authdata auth, be32_t value)
+{
+ // TODO-3
+ return 0;
+}
+
+int TPM_disk_incr_counter(be32_t slot, struct tpm_authdata auth)
+{
+ // TODO-3
+ return 0;
+}
+
+int TPM_disk_change_counter(be32_t slot, struct tpm_authdata old, struct tpm_authdata noo)
+{
+ // TODO-3
+ return 0;
+}
--- /dev/null
+#ifndef __VTPMMGR_DISK_VTPM_H
+#define __VTPMMGR_DISK_VTPM_H
+#include "vtpm_disk.h"
+
+/* Read PCR values to determine which unseal to try */
+void TPM_read_pcrs(void);
+void TPM_pcr_digest(struct hash160 *buf, le32_t selection);
+
+/* Sealing for key storage */
+int TPM_disk_seal(struct disk_seal_entry *dst, const void* src, size_t size);
+int TPM_disk_unseal(void *dst, size_t size, const struct disk_seal_entry *src);
+
+/* NVRAM to allow revocation of TM-KEY */
+int TPM_disk_nvalloc(be32_t *nvram_slot, struct tpm_authdata auth);
+int TPM_disk_nvread(void *buf, size_t bufsiz, be32_t nvram_slot, struct tpm_authdata auth);
+int TPM_disk_nvwrite(void *buf, size_t bufsiz, be32_t nvram_slot, struct tpm_authdata auth);
+int TPM_disk_nvchange(be32_t nvram_slot, struct tpm_authdata old, struct tpm_authdata noo);
+
+/* Monotonic counters to detect rollback */
+int TPM_disk_alloc_counter(be32_t *slot, struct tpm_authdata auth, be32_t *value);
+int TPM_disk_check_counter(be32_t slot, struct tpm_authdata auth, be32_t value);
+int TPM_disk_incr_counter(be32_t slot, struct tpm_authdata auth);
+int TPM_disk_change_counter(be32_t slot, struct tpm_authdata old, struct tpm_authdata noo);
+
+#endif
--- /dev/null
+#include <console.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <mini-os/byteorder.h>
+
+#include "vtpm_manager.h"
+#include "log.h"
+#include "uuid.h"
+
+#include "vtpmmgr.h"
+#include "vtpm_disk.h"
+#include "disk_tpm.h"
+#include "disk_io.h"
+#include "disk_crypto.h"
+#include "disk_format.h"
+#include "mgmt_authority.h"
+
+static void disk_write_crypt_sector(sector_t *dst, const void *data, size_t size, const struct mem_tpm_mgr *mgr)
+{
+ struct disk_crypt_sector_plain *sector = disk_write_buf();
+ *dst = disk_find_free(mgr);
+ aes_encrypt_ctr(sector->iv_data, sizeof(sector->iv_data), data, size, &mgr->tm_key_e);
+ aes_cmac(&sector->mac, sector->data, sizeof(sector->data), &mgr->tm_key_e);
+ disk_write_sector(*dst, sector, sizeof(*sector));
+}
+
+/*
+ * Mark unchanged sectors on disk as being used
+ */
+static void disk_populate_used_vtpm(const struct mem_vtpm_page *src, const struct mem_tpm_mgr *mgr)
+{
+ if (be32_native(src->disk_loc) != 0)
+ disk_set_used(src->disk_loc, mgr);
+}
+
+/*
+ * Write out a vTPM page to disk, doing nothing if the existing copy is valid
+ */
+static void disk_write_vtpm_page(struct mem_vtpm_page *dst, const aes_context *auth_key,
+ const struct mem_tpm_mgr *mgr)
+{
+ struct disk_vtpm_sector pt;
+ int i;
+ memset(&pt, 0, sizeof(pt));
+ if (be32_native(dst->disk_loc) != 0)
+ return;
+
+ for(i=0; i < dst->size; i++) {
+ memcpy(pt.header[i].uuid, dst->vtpms[i]->uuid, 16);
+ memcpy(pt.data[i].data, dst->vtpms[i]->data, 64);
+ pt.header[i].flags = native_be32(dst->vtpms[i]->flags & VTPM_FLAG_DISK_MASK);
+ }
+ aes_encrypt_ctr(&pt.iv, sizeof(pt.data) + 16, &pt.data, sizeof(pt.data), auth_key);
+
+ sha256(&dst->disk_hash, &pt, sizeof(pt));
+
+ disk_write_crypt_sector(&dst->disk_loc, &pt, sizeof(pt), mgr);
+}
+
+/*
+ * Generate TPM seal blobs for a group's keys; do nothing if existing copy is valid
+ */
+static void generate_group_seals(struct mem_group *src, const struct mem_tpm_mgr *parent)
+{
+ int i;
+ struct disk_group_sealed_data sblob;
+
+ // previous seals are still valid, skip talking to the TPM
+ if (src->flags & MEM_GROUP_FLAG_SEAL_VALID)
+ return;
+
+ memcpy(&sblob.magic, DISK_GROUP_BOUND_MAGIC, 4);
+ memcpy(sblob.tpm_manager_uuid, parent->uuid, 16);
+ memcpy(&sblob.aik_authdata, &src->aik_authdata, 20);
+ memcpy(&sblob.group_key, &src->group_key, 16);
+ memcpy(&sblob.rollback_mac_key, &src->rollback_mac_key, 16);
+
+ /* TODO support for more than NR_SEALS_PER_GROUP seals */
+ if (src->nr_seals > NR_SEALS_PER_GROUP)
+ abort();
+
+ for(i=0; i < src->nr_seals; i++) {
+ struct disk_seal_entry *dst = &src->seal_bits.entry[i];
+ dst->pcr_selection = src->seals[i].pcr_selection;
+ memcpy(&dst->digest_release, &src->seals[i].digest_release, 20);
+ TPM_pcr_digest(&dst->digest_at_seal, dst->pcr_selection);
+ TPM_disk_seal(dst, &sblob, sizeof(sblob));
+ }
+ src->seal_bits.nr_cfgs = native_be32(src->nr_seals);
+
+ src->flags |= MEM_GROUP_FLAG_SEAL_VALID;
+}
+
+/*
+ * Mark unchanged sectors on disk as being used
+ */
+static void disk_populate_used_group(const struct mem_group_hdr *src, const struct mem_tpm_mgr *mgr)
+{
+ int i;
+ struct mem_group *group = src->v;
+ if (be32_native(src->disk_loc) != 0) {
+ // entire group is unchanged - mark group, itree, and vtpm sectors
+ // TODO mark other children (seal)
+ disk_set_used(src->disk_loc, mgr);
+ for(i = 0; i < src->disk_nr_inuse; i++)
+ disk_set_used(src->disk_inuse[i], mgr);
+ return;
+ }
+
+ // unopened groups should never have been invalidated
+ if (!group)
+ abort();
+
+ for (i = 0; i < group->nr_pages; i++)
+ disk_populate_used_vtpm(&group->data[i], mgr);
+}
+
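+/*
+ * Write out the vTPM pages of a group, building itree sectors bottom-up when
+ * more pages exist than fit in the parent's table. The sectors written are
+ * recorded in hdr->disk_inuse, mirroring load_verify_vtpm_itree.
+ */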
+static void disk_write_vtpm_itree(struct mem_group_hdr *hdr, int base, int nr_entries,
+ struct hash256 *hash, sector_t *loc, int hsize,
+ const aes_context *group_key, const struct mem_tpm_mgr *mgr);
+
+static void disk_write_vtpm_itree(struct mem_group_hdr *hdr, int base, int nr_entries,
+ struct hash256 *hash, sector_t *loc, int hsize,
+ const aes_context *group_key, const struct mem_tpm_mgr *mgr)
+{
+ int i, incr = 1, inuse_base, lsize;
+
+ while (nr_entries > incr * hsize)
+ incr *= NR_ENTRIES_PER_ITREE;
+
+ if (nr_entries <= hsize) {
+ struct mem_group *group = hdr->v;
+ for (i = 0; i < nr_entries; i++) {
+ struct mem_vtpm_page *page = group->data + base + i;
+ disk_write_vtpm_page(page, group_key, mgr);
+ loc[i] = page->disk_loc;
+ hash[i] = page->disk_hash;
+ }
+ } else {
+ for (i = 0; i * incr < nr_entries; i++) {
+ struct disk_itree_sector pt;
+ int child_entries = incr;
+
+ // the last sector is not completely full
+ if (nr_entries - i * incr < incr)
+ child_entries = nr_entries - i * incr;
+
+ disk_write_vtpm_itree(hdr, base, child_entries, pt.hash, pt.location,
+ NR_ENTRIES_PER_ITREE, group_key, mgr);
+
+ sha256(&hash[i], &pt.hash, sizeof(pt.hash));
+ disk_write_crypt_sector(&loc[i], &pt, sizeof(pt), mgr);
+
+ base += incr;
+ }
+ }
+
+ // save the list of used sectors (itree and vtpm) in the header
+ inuse_base = hdr->disk_nr_inuse;
+ lsize = 1 + (nr_entries - 1) / incr;
+ hdr->disk_nr_inuse += lsize;
+ hdr->disk_inuse = realloc(hdr->disk_inuse, hdr->disk_nr_inuse * sizeof(sector_t));
+ memcpy(&hdr->disk_inuse[inuse_base], loc, lsize * sizeof(sector_t));
+}
+
+/*
+ * Write out a vTPM group sector and its children
+ */
+static void disk_write_group_sector(struct mem_group_hdr *src,
+ const struct mem_tpm_mgr *mgr)
+{
+ struct disk_group_sector disk;
+ struct mem_group *group = src->v;
+ aes_context key_e;
+
+ /* Don't write if the data hasn't changed */
+ if (be32_native(src->disk_loc) != 0)
+ return;
+
+ // if the group was not opened, it should not have been changed
+ if (!group)
+ abort();
+
+ memset(&disk, 0, sizeof(disk));
+ memcpy(&disk.v.id_data, &group->id_data, sizeof(disk.v.id_data));
+ memcpy(&disk.v.details, &group->details, sizeof(disk.v.details));
+
+ aes_setup(&key_e, &group->group_key);
+
+ disk.v.nr_vtpms = native_be32(group->nr_vtpms);
+
+ // regenerated
+ src->disk_nr_inuse = 0;
+
+ disk_write_vtpm_itree(src, 0, group->nr_pages, disk.v.vtpm_hash, disk.vtpm_location,
+ NR_ENTRIES_PER_GROUP_BASE, &key_e, mgr);
+
+ generate_group_seals(group, mgr);
+ memcpy(&disk.v.boot_configs, &group->seal_bits, sizeof(group->seal_bits));
+
+ aes_cmac(&disk.group_mac, &disk.v, sizeof(disk.v), &key_e);
+ sha256(&src->disk_hash, &disk.v, sizeof(disk.v) + sizeof(disk.group_mac));
+ disk_write_crypt_sector(&src->disk_loc, &disk, sizeof(disk), mgr);
+}
+
+/*
+ * Write TPM seal blobs for the manager's keys, using the given group's list
+ * of valid configurations
+ */
+static void disk_write_seal_list(struct mem_tpm_mgr *mgr, struct mem_group *group)
+{
+ int i;
+ struct disk_seal_list *seal = disk_write_buf();
+ struct disk_root_sealed_data sblob;
+
+ if (mgr->root_seals_valid & (1 + mgr->active_root))
+ return;
+
+ memcpy(&sblob.magic, DISK_ROOT_BOUND_MAGIC, 4);
+ memcpy(sblob.tpm_manager_uuid, mgr->uuid, 16);
+ memcpy(&sblob.nvram_slot, &mgr->nvram_slot, 4);
+ memcpy(&sblob.nvram_auth, &mgr->nvram_auth, 20);
+ memcpy(&sblob.counter_index, &mgr->counter_index, 4);
+ memcpy(&sblob.counter_auth, &mgr->counter_auth, 20);
+
+ // TODO when an NV slot in the physical TPM is used to populate nv_key,
+ // that value should be used to mask the master key so that the value
+ // can be changed to revoke old disk state
+#if 0
+ aes_encrypt_one(&sblob.tm_key, &mgr->tm_key, &mgr->nv_key);
+#else
+ memcpy(&sblob.tm_key, &mgr->tm_key, 16);
+#endif
+
+ memset(seal, 0, sizeof(*seal));
+ seal->length = native_be32(group->nr_seals);
+
+ // TODO support for more entries
+ if (group->nr_seals > SEALS_PER_ROOT_SEAL_LIST)
+ abort();
+
+ for(i=0; i < group->nr_seals; i++) {
+ struct mem_seal *src = &group->seals[i];
+ struct disk_seal_entry *dst = &seal->entry[i];
+ dst->pcr_selection = src->pcr_selection;
+ memcpy(&dst->digest_release, &src->digest_release, 20);
+ TPM_pcr_digest(&dst->digest_at_seal, dst->pcr_selection);
+
+ TPM_disk_seal(dst, &sblob, sizeof(sblob));
+ }
+
+ memcpy(seal->hdr.magic, TPM_MGR_MAGIC, 12);
+ seal->hdr.version = native_be32(TPM_MGR_VERSION);
+
+ disk_write_sector(seal_loc(mgr), seal, sizeof(*seal));
+ mgr->root_seals_valid |= 1 + mgr->active_root;
+}
+
+/*
+ * Mark unchanged sectors on disk as being used
+ */
+static void disk_populate_used_mgr(const struct mem_tpm_mgr *mgr)
+{
+ int i;
+
+ // TODO walk the linked lists for seals, rb_macs here (when supported)
+
+ for(i=0; i < mgr->nr_groups; i++)
+ disk_populate_used_group(&mgr->groups[i], mgr);
+}
+
+static void disk_write_group_itree(struct mem_tpm_mgr *mgr, int base, int nr_entries,
+ struct hash256 *hash, sector_t *loc, int hsize);
+
+static void disk_write_group_itree(struct mem_tpm_mgr *mgr, int base, int nr_entries,
+ struct hash256 *hash, sector_t *loc, int hsize)
+{
+ int i, incr = 1;
+
+ if (nr_entries <= hsize) {
+ for(i=0; i < nr_entries; i++) {
+ struct mem_group_hdr *group = mgr->groups + base + i;
+ disk_write_group_sector(group, mgr);
+ loc[i] = group->disk_loc;
+ hash[i] = group->disk_hash;
+ }
+ return;
+ }
+
+ while (nr_entries > incr * hsize)
+ incr *= NR_ENTRIES_PER_ITREE;
+
+ for (i = 0; i * incr < nr_entries; i++) {
+ struct disk_itree_sector pt;
+ int child_entries = incr;
+
+ // the last sector is not completely full
+ if (nr_entries - i * incr < incr)
+ child_entries = nr_entries - i * incr;
+
+ disk_write_group_itree(mgr, base, child_entries, pt.hash, pt.location, NR_ENTRIES_PER_ITREE);
+
+ sha256(&hash[i], &pt.hash, sizeof(pt.hash));
+ disk_write_crypt_sector(&loc[i], &pt, sizeof(pt), mgr);
+
+ base += incr;
+ }
+}
+
+/*
+ * Write out the root TPM Manager sector and its children
+ */
+static void disk_write_root_sector(struct mem_tpm_mgr *mgr)
+{
+ int i, j;
+ struct disk_root_sector root;
+ memset(&root, 0, sizeof(root));
+ root.v.sequence = native_be64(mgr->sequence);
+ root.v.tpm_counter_value = mgr->counter_value;
+
+ root.v.nr_groups = native_be32(mgr->nr_groups);
+
+ disk_write_group_itree(mgr, 0, mgr->nr_groups, root.v.group_hash, root.group_loc, NR_ENTRIES_PER_ROOT);
+
+ i = 0;
+ j = 0;
+ while (i < mgr->nr_groups) {
+ aes_context key_e;
+ struct mem_group_hdr *group = &mgr->groups[i];
+ struct mem_group *groupv = group->v;
+
+ if (!groupv) {
+ i++;
+ continue;
+ }
+ if (groupv->details.flags.value & FLAG_ROLLBACK_DETECTED) {
+ i++;
+ continue;
+ }
+ if (j >= NR_RB_MACS_PER_ROOT)
+ break; // TODO support for nr_rb_macs > 128
+
+ aes_setup(&key_e, &groupv->rollback_mac_key);
+ root.rb_macs[j].id = native_be32(i);
+ aes_cmac(&root.rb_macs[j].mac, &root.v, sizeof(root.v), &key_e);
+ i++; j++;
+ }
+ root.nr_rb_macs = native_be32(j);
+
+ struct disk_crypt_sector_plain *root_sect = disk_write_buf();
+ aes_encrypt_ctr(root_sect->iv_data, sizeof(root_sect->iv_data), &root, sizeof(root), &mgr->tm_key_e);
+ aes_cmac(&root_sect->mac, &root_sect->data, sizeof(root_sect->data), &mgr->tm_key_e);
+ disk_write_sector(root_loc(mgr), root_sect, sizeof(*root_sect));
+}
+
+/*
+ * Write out changes to disk
+ */
+void disk_write_all(struct mem_tpm_mgr *mgr)
+{
+ disk_flush_slot(mgr);
+ disk_populate_used_mgr(mgr);
+ disk_write_root_sector(mgr);
+
+ disk_write_seal_list(mgr, mgr->groups[0].v);
+
+ disk_write_barrier();
+}
+
+/*
+ * Create a new (blank) TPM Manager disk image.
+ *
+ * Does not actually write anything to disk.
+ */
+int vtpm_new_disk(void)
+{
+ int rc;
+ struct mem_tpm_mgr *mgr = calloc(1, sizeof(*mgr));
+
+ do_random(mgr->uuid, 16);
+ do_random(&mgr->tm_key, 16);
+ do_random(&mgr->nvram_auth, 20);
+ do_random(&mgr->counter_auth, 20);
+ do_random(&mgr->nv_key, 16);
+
+ aes_setup(&mgr->tm_key_e, &mgr->tm_key);
+
+ // TODO postpone these allocs until first write?
+ rc = TPM_disk_nvalloc(&mgr->nvram_slot, mgr->nvram_auth);
+ if (rc)
+ return rc;
+
+ rc = TPM_disk_alloc_counter(&mgr->counter_index, mgr->counter_auth, &mgr->counter_value);
+ if (rc)
+ return rc;
+
+ mgr->nr_groups = 1;
+ mgr->groups = calloc(1, sizeof(mgr->groups[0]));
+ mgr->groups[0].v = vtpm_new_group(NULL);
+
+ TPM_disk_nvwrite(&mgr->nv_key, 16, mgr->nvram_slot, mgr->nvram_auth);
+
+ g_mgr = mgr;
+
+ return 0;
+}
--- /dev/null
+#ifndef __VTPMMGR_ENDIAN_INT_H
+#define __VTPMMGR_ENDIAN_INT_H
+
+#include <mini-os/byteorder.h>
+
+/* These wrapper structs force the use of endian-to-CPU conversions */
+
+typedef struct be_int16 {
+ uint16_t value;
+} be16_t;
+
+typedef struct be_int32 {
+ uint32_t value;
+} be32_t;
+
+typedef struct le_int32 {
+ uint32_t value;
+} le32_t;
+
+typedef struct be_int64 {
+ uint64_t value;
+} be64_t;
+
+static inline uint16_t be16_native(be16_t v)
+{
+ return be16_to_cpu(v.value);
+}
+
+static inline uint32_t le32_native(le32_t v)
+{
+ return le32_to_cpu(v.value);
+}
+
+static inline uint32_t be32_native(be32_t v)
+{
+ return be32_to_cpu(v.value);
+}
+
+static inline uint64_t be64_native(be64_t v)
+{
+ return be64_to_cpu(v.value);
+}
+
+static inline be16_t native_be16(uint16_t v)
+{
+ be16_t rv;
+ rv.value = cpu_to_be16(v);
+ return rv;
+}
+
+static inline le32_t native_le32(uint32_t v)
+{
+ le32_t rv;
+ rv.value = cpu_to_le32(v);
+ return rv;
+}
+
+static inline be32_t native_be32(uint32_t v)
+{
+ be32_t rv;
+ rv.value = cpu_to_be32(v);
+ return rv;
+}
+
+static inline be64_t native_be64(uint64_t v)
+{
+ be64_t rv;
+ rv.value = cpu_to_be64(v);
+ return rv;
+}
+
+#endif
#include "log.h"
#include "vtpmmgr.h"
-#include "vtpm_storage.h"
+#include "vtpm_disk.h"
#include "tpm.h"
#include "marshal.h"
};
// --------------------------- Well Known Auths --------------------------
-const TPM_AUTHDATA WELLKNOWN_SRK_AUTH = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+const TPM_AUTHDATA WELLKNOWN_AUTH = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-const TPM_AUTHDATA WELLKNOWN_OWNER_AUTH = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
struct vtpm_globals vtpm_globals = {
.tpm_fd = -1,
- .storage_key = TPM_KEY_INIT,
- .storage_key_handle = 0,
.oiap = { .AuthHandle = 0 }
};
return status;
}
-static void init_storage_key(TPM_KEY* key) {
- key->ver.major = 1;
- key->ver.minor = 1;
- key->ver.revMajor = 0;
- key->ver.revMinor = 0;
-
- key->keyUsage = TPM_KEY_BIND;
- key->keyFlags = 0;
- key->authDataUsage = TPM_AUTH_ALWAYS;
-
- TPM_KEY_PARMS* p = &key->algorithmParms;
- p->algorithmID = TPM_ALG_RSA;
- p->encScheme = TPM_ES_RSAESOAEP_SHA1_MGF1;
- p->sigScheme = TPM_SS_NONE;
- p->parmSize = 12;
-
- TPM_RSA_KEY_PARMS* r = &p->parms.rsa;
- r->keyLength = RSA_KEY_SIZE;
- r->numPrimes = 2;
- r->exponentSize = 0;
- r->exponent = NULL;
-
- key->PCRInfoSize = 0;
- key->encDataSize = 0;
- key->encData = NULL;
-}
-
-static int parse_auth_string(char* authstr, BYTE* target, const TPM_AUTHDATA wellknown, int allowrandom) {
+static int parse_auth_string(char* authstr, BYTE* target) {
int rc;
/* well known owner auth */
if(!strcmp(authstr, "well-known")) {
- memcpy(target, wellknown, sizeof(TPM_AUTHDATA));
- }
- /* Create a randomly generated owner auth */
- else if(allowrandom && !strcmp(authstr, "random")) {
- return 1;
+ return 0;
}
/* owner auth is a raw hash */
else if(!strncmp(authstr, "hash:", 5)) {
int i;
//Set defaults
- memcpy(vtpm_globals.owner_auth, WELLKNOWN_OWNER_AUTH, sizeof(TPM_AUTHDATA));
- memcpy(vtpm_globals.srk_auth, WELLKNOWN_SRK_AUTH, sizeof(TPM_AUTHDATA));
+ memcpy(vtpm_globals.owner_auth, WELLKNOWN_AUTH, sizeof(TPM_AUTHDATA));
+ memcpy(vtpm_globals.srk_auth, WELLKNOWN_AUTH, sizeof(TPM_AUTHDATA));
for(i = 1; i < argc; ++i) {
if(!strncmp(argv[i], "owner_auth:", 10)) {
- if((rc = parse_auth_string(argv[i] + 10, vtpm_globals.owner_auth, WELLKNOWN_OWNER_AUTH, 1)) < 0) {
+ if((rc = parse_auth_string(argv[i] + 10, vtpm_globals.owner_auth)) < 0) {
goto err_invalid;
}
if(rc == 1) {
}
}
else if(!strncmp(argv[i], "srk_auth:", 8)) {
- if((rc = parse_auth_string(argv[i] + 8, vtpm_globals.srk_auth, WELLKNOWN_SRK_AUTH, 0)) != 0) {
+ if((rc = parse_auth_string(argv[i] + 8, vtpm_globals.srk_auth)) != 0) {
goto err_invalid;
}
}
TPMTRYRETURN(try_take_ownership());
// Generate storage key's auth
- memset(&vtpm_globals.storage_key_usage_auth, 0, sizeof(TPM_AUTHDATA));
-
TPMTRYRETURN( TPM_OSAP(
TPM_ET_KEYHANDLE,
TPM_SRK_KEYHANDLE,
&sharedsecret,
&osap) );
- init_storage_key(&vtpm_globals.storage_key);
-
- //initialize the storage key
- TPMTRYRETURN( TPM_CreateWrapKey(
- TPM_SRK_KEYHANDLE,
- (const TPM_AUTHDATA*)&sharedsecret,
- (const TPM_AUTHDATA*)&vtpm_globals.storage_key_usage_auth,
- (const TPM_AUTHDATA*)&vtpm_globals.storage_key_usage_auth,
- &vtpm_globals.storage_key,
- &osap) );
-
- //Load Storage Key
- TPMTRYRETURN( TPM_LoadKey(
- TPM_SRK_KEYHANDLE,
- &vtpm_globals.storage_key,
- &vtpm_globals.storage_key_handle,
- (const TPM_AUTHDATA*) &vtpm_globals.srk_auth,
- &vtpm_globals.oiap));
-
//Make sure TPM has committed changes
TPMTRYRETURN( TPM_SaveState() );
//Create new disk image
- TPMTRYRETURN(vtpm_storage_new_header());
+ TPMTRYRETURN(vtpm_new_disk());
goto egress;
abort_egress:
return status;
}
-/* Set up the opaque field to contain a pointer to the UUID */
-static void set_opaque_to_uuid(domid_t domid, unsigned int handle)
+static void set_opaque(domid_t domid, unsigned int handle)
+{
+ struct tpm_opaque* opq;
+
+ opq = calloc(1, sizeof(*opq));
+ opq->uuid = (uuid_t*)tpmback_get_uuid(domid, handle);
+ tpmback_set_opaque(domid, handle, opq);
+}
+
+static void free_opaque(domid_t domid, unsigned int handle)
{
- tpmback_set_opaque(domid, handle, tpmback_get_uuid(domid, handle));
+ struct tpm_opaque* opq = tpmback_get_opaque(domid, handle);
+ if (opq && opq->vtpm)
+ opq->vtpm->flags &= ~VTPM_FLAG_OPEN;
+ free(opq);
}
TPM_RESULT vtpmmgr_init(int argc, char** argv) {
}
//Setup tpmback device
- init_tpmback(set_opaque_to_uuid, NULL);
+ init_tpmback(set_opaque, free_opaque);
//Setup tpm access
switch(opts.tpmdriver) {
TPMTRYRETURN( TPM_OIAP(&vtpm_globals.oiap) );
/* Load the Manager data, if it fails create a new manager */
- if (vtpm_storage_load_header() != TPM_SUCCESS) {
+ // TODO handle upgrade recovery of auth0
+ if (vtpm_load_disk()) {
/* If the OIAP session was closed by an error, create a new one */
if(vtpm_globals.oiap.AuthHandle == 0) {
TPMTRYRETURN( TPM_OIAP(&vtpm_globals.oiap) );
void vtpmmgr_shutdown(void)
{
- /* Cleanup resources */
- free_TPM_KEY(&vtpm_globals.storage_key);
-
/* Cleanup TPM resources */
- TPM_EvictKey(vtpm_globals.storage_key_handle);
TPM_TerminateHandle(vtpm_globals.oiap.AuthHandle);
/* Close tpmback */
shutdown_tpmback();
- /* Close the storage system and blkfront */
- vtpm_storage_shutdown();
-
/* Close tpmfront/tpm_tis */
close(vtpm_globals.tpm_fd);
#define SETBIT(num,idx) (num) |= BITMASK(idx)
#define CLEARBIT(num,idx) (num) &= ( ~ BITMASK(idx) )
+void printk(const char *fmt, ...);
+
#define vtpmloginfo(module, fmt, args...) \
if (GETBIT (LOGGING_MODULES, module) == 1) { \
- fprintf (stdout, "INFO[%s]: " fmt, module_names[module], ##args); \
+ printk("INFO[%s]: " fmt, module_names[module], ##args); \
}
#define vtpmloginfomore(module, fmt, args...) \
if (GETBIT (LOGGING_MODULES, module) == 1) { \
- fprintf (stdout, fmt,##args); \
+ printk(fmt,##args); \
}
#define vtpmlogerror(module, fmt, args...) \
- fprintf (stderr, "ERROR[%s]: " fmt, module_names[module], ##args);
+ printk("ERROR[%s]: " fmt, module_names[module], ##args);
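+
+/*
+ * Usage sketch (module and message are illustrative):
+ *     vtpmloginfo(VTPM_LOG_TPM, "Opened %d vTPM groups\n", nr_groups);
+ */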
//typedef UINT32 tpm_size_t;
#include "tcg.h"
typedef enum UnpackPtr {
- UNPACK_ALIAS,
- UNPACK_ALLOC
+ UNPACK_ALIAS,
+ UNPACK_ALLOC
} UnpackPtr;
inline BYTE* pack_BYTE(BYTE* ptr, BYTE t) {
- ptr[0] = t;
- return ++ptr;
+ ptr[0] = t;
+ return ++ptr;
}
inline BYTE* unpack_BYTE(BYTE* ptr, BYTE* t) {
- t[0] = ptr[0];
- return ++ptr;
+ t[0] = ptr[0];
+ return ++ptr;
}
+inline int unpack3_BYTE(BYTE* ptr, UINT32* pos, UINT32 max, BYTE *t)
+{
+ if (*pos + 1 > max)
+ return TPM_SIZE;
+ unpack_BYTE(ptr + *pos, t);
+ *pos += 1;
+ return 0;
+}
+
+
#define pack_BOOL(p, t) pack_BYTE(p, t)
#define unpack_BOOL(p, t) unpack_BYTE(p, t)
+#define unpack3_BOOL(p, x, m, t) unpack3_BYTE(p, x, m, t)
+#define sizeof_BOOL(t) 1
-inline BYTE* pack_UINT16(BYTE* ptr, UINT16 t) {
- BYTE* b = (BYTE*)&t;
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- ptr[0] = b[1];
- ptr[1] = b[0];
-#elif __BYTE_ORDER == __BIG_ENDIAN
- ptr[0] = b[0];
- ptr[1] = b[1];
-#endif
- return ptr + sizeof(UINT16);
+inline BYTE* pack_UINT16(void* ptr, UINT16 t) {
+ UINT16* p = ptr;
+ *p = cpu_to_be16(t);
+ return ptr + sizeof(UINT16);
}
-inline BYTE* unpack_UINT16(BYTE* ptr, UINT16* t) {
- BYTE* b = (BYTE*)t;
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- b[0] = ptr[1];
- b[1] = ptr[0];
-#elif __BYTE_ORDER == __BIG_ENDIAN
- b[0] = ptr[0];
- b[1] = ptr[1];
-#endif
- return ptr + sizeof(UINT16);
-}
-
-inline BYTE* pack_UINT32(BYTE* ptr, UINT32 t) {
- BYTE* b = (BYTE*)&t;
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- ptr[3] = b[0];
- ptr[2] = b[1];
- ptr[1] = b[2];
- ptr[0] = b[3];
-#elif __BYTE_ORDER == __BIG_ENDIAN
- ptr[0] = b[0];
- ptr[1] = b[1];
- ptr[2] = b[2];
- ptr[3] = b[3];
-#endif
- return ptr + sizeof(UINT32);
-}
-
-inline BYTE* unpack_UINT32(BYTE* ptr, UINT32* t) {
- BYTE* b = (BYTE*)t;
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- b[0] = ptr[3];
- b[1] = ptr[2];
- b[2] = ptr[1];
- b[3] = ptr[0];
-#elif __BYTE_ORDER == __BIG_ENDIAN
- b[0] = ptr[0];
- b[1] = ptr[1];
- b[2] = ptr[2];
- b[3] = ptr[3];
-#endif
- return ptr + sizeof(UINT32);
+inline BYTE* unpack_UINT16(void* ptr, UINT16* t) {
+ UINT16* p = ptr;
+ *t = be16_to_cpu(*p);
+ return ptr + sizeof(UINT16);
+}
+
+inline int unpack3_UINT16(BYTE* ptr, UINT32* pos, UINT32 max, UINT16 *t)
+{
+ if (*pos + 2 > max)
+ return TPM_SIZE;
+ unpack_UINT16(ptr + *pos, t);
+ *pos += 2;
+ return 0;
+}
+
+inline BYTE* pack_UINT32(void* ptr, UINT32 t) {
+ UINT32* p = ptr;
+ *p = cpu_to_be32(t);
+ return ptr + sizeof(UINT32);
+}
+
+inline BYTE* unpack_UINT32(void* ptr, UINT32* t) {
+ UINT32* p = ptr;
+ *t = be32_to_cpu(*p);
+ return ptr + sizeof(UINT32);
+}
+
+inline int unpack3_UINT32(BYTE* ptr, UINT32* pos, UINT32 max, UINT32 *t)
+{
+ if (*pos + 4 > max)
+ return TPM_SIZE;
+ unpack_UINT32(ptr + *pos, t);
+ *pos += 4;
+ return 0;
}
+#define sizeof_BYTE(x) 1
+#define sizeof_UINT16(x) 2
+#define sizeof_UINT32(x) 4
+
#define pack_TPM_RESULT(p, t) pack_UINT32(p, t)
#define pack_TPM_PCRINDEX(p, t) pack_UINT32(p, t)
#define pack_TPM_DIRINDEX(p, t) pack_UINT32(p, t)
#define unpack_TPM_PHYSICAL_PRESENCE(p, t) unpack_UINT16(p, t)
#define unpack_TPM_KEY_FLAGS(p, t) unpack_UINT32(p, t)
-#define pack_TPM_AUTH_HANDLE(p, t) pack_UINT32(p, t);
-#define pack_TCS_CONTEXT_HANDLE(p, t) pack_UINT32(p, t);
-#define pack_TCS_KEY_HANDLE(p, t) pack_UINT32(p, t);
+#define unpack3_TPM_RESULT(p, l, m, t) unpack3_UINT32(p, l, m, t)
+#define unpack3_TPM_PCRINDEX(p, l, m, t) unpack3_UINT32(p, l, m, t)
+#define unpack3_TPM_DIRINDEX(p, l, m, t) unpack3_UINT32(p, l, m, t)
+#define unpack3_TPM_HANDLE(p, l, m, t) unpack3_UINT32(p, l, m, t)
+#define unpack3_TPM_AUTHHANDLE(p, l, m, t) unpack3_TPM_HANDLE(p, l, m, t)
+#define unpack3_TCPA_HASHHANDLE(p, l, m, t) unpack3_TPM_HANDLE(p, l, m, t)
+#define unpack3_TCPA_HMACHANDLE(p, l, m, t) unpack3_TPM_HANDLE(p, l, m, t)
+#define unpack3_TCPA_ENCHANDLE(p, l, m, t) unpack3_TPM_HANDLE(p, l, m, t)
+#define unpack3_TPM_KEY_HANDLE(p, l, m, t) unpack3_TPM_HANDLE(p, l, m, t)
+#define unpack3_TCPA_ENTITYHANDLE(p, l, m, t) unpack3_TPM_HANDLE(p, l, m, t)
+#define unpack3_TPM_RESOURCE_TYPE(p, l, m, t) unpack3_UINT32(p, l, m, t)
+#define unpack3_TPM_COMMAND_CODE(p, l, m, t) unpack3_UINT32(p, l, m, t)
+#define unpack3_TPM_PROTOCOL_ID(p, l, m, t) unpack3_UINT16(p, l, m, t)
+#define unpack3_TPM_AUTH_DATA_USAGE(p, l, m, t) unpack3_BYTE(p, l, m, t)
+#define unpack3_TPM_ENTITY_TYPE(p, l, m, t) unpack3_UINT16(p, l, m, t)
+#define unpack3_TPM_ALGORITHM_ID(p, l, m, t) unpack3_UINT32(p, l, m, t)
+#define unpack3_TPM_KEY_USAGE(p, l, m, t) unpack3_UINT16(p, l, m, t)
+#define unpack3_TPM_STARTUP_TYPE(p, l, m, t) unpack3_UINT16(p, l, m, t)
+#define unpack3_TPM_CAPABILITY_AREA(p, l, m, t) unpack3_UINT32(p, l, m, t)
+#define unpack3_TPM_ENC_SCHEME(p, l, m, t) unpack3_UINT16(p, l, m, t)
+#define unpack3_TPM_SIG_SCHEME(p, l, m, t) unpack3_UINT16(p, l, m, t)
+#define unpack3_TPM_MIGRATE_SCHEME(p, l, m, t) unpack3_UINT16(p, l, m, t)
+#define unpack3_TPM_PHYSICAL_PRESENCE(p, l, m, t) unpack3_UINT16(p, l, m, t)
+#define unpack3_TPM_KEY_FLAGS(p, l, m, t) unpack3_UINT32(p, l, m, t)
+
+#define sizeof_TPM_RESULT(t) sizeof_UINT32(t)
+#define sizeof_TPM_PCRINDEX(t) sizeof_UINT32(t)
+#define sizeof_TPM_DIRINDEX(t) sizeof_UINT32(t)
+#define sizeof_TPM_HANDLE(t) sizeof_UINT32(t)
+#define sizeof_TPM_AUTHHANDLE(t) sizeof_TPM_HANDLE(t)
+#define sizeof_TCPA_HASHHANDLE(t) sizeof_TPM_HANDLE(t)
+#define sizeof_TCPA_HMACHANDLE(t) sizeof_TPM_HANDLE(t)
+#define sizeof_TCPA_ENCHANDLE(t) sizeof_TPM_HANDLE(t)
+#define sizeof_TPM_KEY_HANDLE(t) sizeof_TPM_HANDLE(t)
+#define sizeof_TCPA_ENTITYHANDLE(t) sizeof_TPM_HANDLE(t)
+#define sizeof_TPM_RESOURCE_TYPE(t) sizeof_UINT32(t)
+#define sizeof_TPM_COMMAND_CODE(t) sizeof_UINT32(t)
+#define sizeof_TPM_PROTOCOL_ID(t) sizeof_UINT16(t)
+#define sizeof_TPM_AUTH_DATA_USAGE(t) sizeof_BYTE(t)
+#define sizeof_TPM_ENTITY_TYPE(t) sizeof_UINT16(t)
+#define sizeof_TPM_ALGORITHM_ID(t) sizeof_UINT32(t)
+#define sizeof_TPM_KEY_USAGE(t) sizeof_UINT16(t)
+#define sizeof_TPM_STARTUP_TYPE(t) sizeof_UINT16(t)
+#define sizeof_TPM_CAPABILITY_AREA(t) sizeof_UINT32(t)
+#define sizeof_TPM_ENC_SCHEME(t) sizeof_UINT16(t)
+#define sizeof_TPM_SIG_SCHEME(t) sizeof_UINT16(t)
+#define sizeof_TPM_MIGRATE_SCHEME(t) sizeof_UINT16(t)
+#define sizeof_TPM_PHYSICAL_PRESENCE(t) sizeof_UINT16(t)
+#define sizeof_TPM_KEY_FLAGS(t) sizeof_UINT32(t)
+
+#define pack_TPM_AUTH_HANDLE(p, t) pack_UINT32(p, t)
+#define pack_TCS_CONTEXT_HANDLE(p, t) pack_UINT32(p, t)
+#define pack_TCS_KEY_HANDLE(p, t) pack_UINT32(p, t)
+
+#define unpack_TPM_AUTH_HANDLE(p, t) unpack_UINT32(p, t)
+#define unpack_TCS_CONTEXT_HANDLE(p, t) unpack_UINT32(p, t)
+#define unpack_TCS_KEY_HANDLE(p, t) unpack_UINT32(p, t)
+
+#define sizeof_TPM_AUTH_HANDLE(t) sizeof_UINT32(t)
+#define sizeof_TCS_CONTEXT_HANDLE(t) sizeof_UINT32(t)
+#define sizeof_TCS_KEY_HANDLE(t) sizeof_UINT32(t)
-#define unpack_TPM_AUTH_HANDLE(p, t) unpack_UINT32(p, t);
-#define unpack_TCS_CONTEXT_HANDLE(p, t) unpack_UINT32(p, t);
-#define unpack_TCS_KEY_HANDLE(p, t) unpack_UINT32(p, t);
inline BYTE* pack_BUFFER(BYTE* ptr, const BYTE* buf, UINT32 size) {
- memcpy(ptr, buf, size);
- return ptr + size;
+ memcpy(ptr, buf, size);
+ return ptr + size;
}
inline BYTE* unpack_BUFFER(BYTE* ptr, BYTE* buf, UINT32 size) {
- memcpy(buf, ptr, size);
- return ptr + size;
+ memcpy(buf, ptr, size);
+ return ptr + size;
+}
+
+inline int unpack3_BUFFER(BYTE* ptr, UINT32* pos, UINT32 max, BYTE* buf, UINT32 size) {
+ if (*pos + size > max)
+ return TPM_SIZE;
+ memcpy(buf, ptr + *pos, size);
+ *pos += size;
+ return 0;
}
+#define sizeof_BUFFER(b, s) s
+
inline BYTE* unpack_ALIAS(BYTE* ptr, BYTE** buf, UINT32 size) {
- *buf = ptr;
- return ptr + size;
+ *buf = ptr;
+ return ptr + size;
}
inline BYTE* unpack_ALLOC(BYTE* ptr, BYTE** buf, UINT32 size) {
- if(size) {
- *buf = malloc(size);
- memcpy(*buf, ptr, size);
- } else {
- *buf = NULL;
- }
- return ptr + size;
+ if(size) {
+ *buf = malloc(size);
+ memcpy(*buf, ptr, size);
+ } else {
+ *buf = NULL;
+ }
+ return ptr + size;
}
inline BYTE* unpack_PTR(BYTE* ptr, BYTE** buf, UINT32 size, UnpackPtr alloc) {
- if(alloc == UNPACK_ALLOC) {
- return unpack_ALLOC(ptr, buf, size);
- } else {
- return unpack_ALIAS(ptr, buf, size);
- }
-}
+ if(alloc == UNPACK_ALLOC) {
+ return unpack_ALLOC(ptr, buf, size);
+ } else {
+ return unpack_ALIAS(ptr, buf, size);
+ }
+}
+
+inline int unpack3_PTR(BYTE* ptr, UINT32* pos, UINT32 max, BYTE** buf, UINT32 size, UnpackPtr alloc) {
+ if (size > max || *pos + size > max)
+ return TPM_SIZE;
+ if (alloc == UNPACK_ALLOC) {
+ unpack_ALLOC(ptr + *pos, buf, size);
+ } else {
+ unpack_ALIAS(ptr + *pos, buf, size);
+ }
+ *pos += size;
+ return 0;
+}
+#define unpack3_VPTR(ptr, pos, max, buf, size, alloc) unpack3_PTR(ptr, pos, max, (void*)(buf), size, alloc)
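+
+/*
+ * The unpack3_* helpers share one cursor (*pos) and one hard bound (max) and
+ * return nonzero (TPM_SIZE) on overrun, so callers can chain them with || and
+ * bail out on the first short buffer. A minimal sketch (names illustrative):
+ *
+ *     UINT32 pos = 0;
+ *     UINT32 value;
+ *     TPM_AUTHDATA auth;
+ *     if (unpack3_UINT32(buf, &pos, len, &value) ||
+ *         unpack3_TPM_AUTHDATA(buf, &pos, len, &auth))
+ *         return TPM_SIZE;
+ */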
inline BYTE* pack_TPM_AUTHDATA(BYTE* ptr, const TPM_AUTHDATA* d) {
- return pack_BUFFER(ptr, *d, TPM_DIGEST_SIZE);
+ return pack_BUFFER(ptr, *d, TPM_DIGEST_SIZE);
}
inline BYTE* unpack_TPM_AUTHDATA(BYTE* ptr, TPM_AUTHDATA* d) {
- return unpack_BUFFER(ptr, *d, TPM_DIGEST_SIZE);
+ return unpack_BUFFER(ptr, *d, TPM_DIGEST_SIZE);
+}
+
+inline int unpack3_TPM_AUTHDATA(BYTE* ptr, UINT32* pos, UINT32 len, TPM_AUTHDATA* d) {
+ return unpack3_BUFFER(ptr, pos, len, *d, TPM_DIGEST_SIZE);
}
+#define sizeof_TPM_AUTHDATA(d) TPM_DIGEST_SIZE
+
#define pack_TPM_SECRET(p, t) pack_TPM_AUTHDATA(p, t)
#define pack_TPM_ENCAUTH(p, t) pack_TPM_AUTHDATA(p, t)
#define pack_TPM_PAYLOAD_TYPE(p, t) pack_BYTE(p, t)
#define unpack_TPM_TAG(p, t) unpack_UINT16(p, t)
#define unpack_TPM_STRUCTURE_TAG(p, t) unpack_UINT16(p, t)
+#define sizeof_TPM_SECRET(t) sizeof_TPM_AUTHDATA(t)
+#define sizeof_TPM_ENCAUTH(t) sizeof_TPM_AUTHDATA(t)
+#define sizeof_TPM_PAYLOAD_TYPE(t) sizeof_BYTE(t)
+#define sizeof_TPM_TAG(t) sizeof_UINT16(t)
+#define sizeof_TPM_STRUCTURE_TAG(t) sizeof_UINT16(t)
+
inline BYTE* pack_TPM_VERSION(BYTE* ptr, const TPM_VERSION* t) {
- ptr[0] = t->major;
- ptr[1] = t->minor;
- ptr[2] = t->revMajor;
- ptr[3] = t->revMinor;
- return ptr + 4;
+ ptr[0] = t->major;
+ ptr[1] = t->minor;
+ ptr[2] = t->revMajor;
+ ptr[3] = t->revMinor;
+ return ptr + 4;
}
inline BYTE* unpack_TPM_VERSION(BYTE* ptr, TPM_VERSION* t) {
- t->major = ptr[0];
- t->minor = ptr[1];
- t->revMajor = ptr[2];
- t->revMinor = ptr[3];
- return ptr + 4;
+ t->major = ptr[0];
+ t->minor = ptr[1];
+ t->revMajor = ptr[2];
+ t->revMinor = ptr[3];
+ return ptr + 4;
+}
+
+inline int unpack3_TPM_VERSION(BYTE* ptr, UINT32 *pos, UINT32 max, TPM_VERSION* t) {
+ if (*pos + 4 > max)
+ return TPM_SIZE;
+ ptr += *pos;
+ t->major = ptr[0];
+ t->minor = ptr[1];
+ t->revMajor = ptr[2];
+ t->revMinor = ptr[3];
+ *pos += 4;
+ return 0;
}
+#define sizeof_TPM_VERSION(x) 4
+
inline BYTE* pack_TPM_CAP_VERSION_INFO(BYTE* ptr, const TPM_CAP_VERSION_INFO* v) {
- ptr = pack_TPM_STRUCTURE_TAG(ptr, v->tag);
- ptr = pack_TPM_VERSION(ptr, &v->version);
- ptr = pack_UINT16(ptr, v->specLevel);
- ptr = pack_BYTE(ptr, v->errataRev);
- ptr = pack_BUFFER(ptr, v->tpmVendorID, sizeof(v->tpmVendorID));
- ptr = pack_UINT16(ptr, v->vendorSpecificSize);
- ptr = pack_BUFFER(ptr, v->vendorSpecific, v->vendorSpecificSize);
- return ptr;
+ ptr = pack_TPM_STRUCTURE_TAG(ptr, v->tag);
+ ptr = pack_TPM_VERSION(ptr, &v->version);
+ ptr = pack_UINT16(ptr, v->specLevel);
+ ptr = pack_BYTE(ptr, v->errataRev);
+ ptr = pack_BUFFER(ptr, v->tpmVendorID, sizeof(v->tpmVendorID));
+ ptr = pack_UINT16(ptr, v->vendorSpecificSize);
+ ptr = pack_BUFFER(ptr, v->vendorSpecific, v->vendorSpecificSize);
+ return ptr;
}
inline BYTE* unpack_TPM_CAP_VERSION_INFO(BYTE* ptr, TPM_CAP_VERSION_INFO* v, UnpackPtr alloc) {
- ptr = unpack_TPM_STRUCTURE_TAG(ptr, &v->tag);
- ptr = unpack_TPM_VERSION(ptr, &v->version);
- ptr = unpack_UINT16(ptr, &v->specLevel);
- ptr = unpack_BYTE(ptr, &v->errataRev);
- ptr = unpack_BUFFER(ptr, v->tpmVendorID, sizeof(v->tpmVendorID));
- ptr = unpack_UINT16(ptr, &v->vendorSpecificSize);
- ptr = unpack_PTR(ptr, &v->vendorSpecific, v->vendorSpecificSize, alloc);
- return ptr;
+ ptr = unpack_TPM_STRUCTURE_TAG(ptr, &v->tag);
+ ptr = unpack_TPM_VERSION(ptr, &v->version);
+ ptr = unpack_UINT16(ptr, &v->specLevel);
+ ptr = unpack_BYTE(ptr, &v->errataRev);
+ ptr = unpack_BUFFER(ptr, v->tpmVendorID, sizeof(v->tpmVendorID));
+ ptr = unpack_UINT16(ptr, &v->vendorSpecificSize);
+ ptr = unpack_PTR(ptr, &v->vendorSpecific, v->vendorSpecificSize, alloc);
+ return ptr;
}
inline BYTE* pack_TPM_DIGEST(BYTE* ptr, const TPM_DIGEST* d) {
- return pack_BUFFER(ptr, d->digest, TPM_DIGEST_SIZE);
+ return pack_BUFFER(ptr, d->digest, TPM_DIGEST_SIZE);
}
inline BYTE* unpack_TPM_DIGEST(BYTE* ptr, TPM_DIGEST* d) {
- return unpack_BUFFER(ptr, d->digest, TPM_DIGEST_SIZE);
+ return unpack_BUFFER(ptr, d->digest, TPM_DIGEST_SIZE);
+}
+
+inline int unpack3_TPM_DIGEST(BYTE* ptr, UINT32* pos, UINT32 max, TPM_DIGEST* d) {
+ return unpack3_BUFFER(ptr, pos, max, d->digest, TPM_DIGEST_SIZE);
}
-#define pack_TPM_PCRVALUE(ptr, d) pack_TPM_DIGEST(ptr, d);
-#define unpack_TPM_PCRVALUE(ptr, d) unpack_TPM_DIGEST(ptr, d);
+#define sizeof_TPM_DIGEST(d) TPM_DIGEST_SIZE
-#define pack_TPM_COMPOSITE_HASH(ptr, d) pack_TPM_DIGEST(ptr, d);
-#define unpack_TPM_COMPOSITE_HASH(ptr, d) unpack_TPM_DIGEST(ptr, d);
+#define pack_TPM_PCRVALUE(ptr, d) pack_TPM_DIGEST(ptr, d)
+#define unpack_TPM_PCRVALUE(ptr, d) unpack_TPM_DIGEST(ptr, d)
+#define unpack3_TPM_PCRVALUE(p...) unpack3_TPM_DIGEST(p)
-#define pack_TPM_DIRVALUE(ptr, d) pack_TPM_DIGEST(ptr, d);
-#define unpack_TPM_DIRVALUE(ptr, d) unpack_TPM_DIGEST(ptr, d);
+#define pack_TPM_COMPOSITE_HASH(ptr, d) pack_TPM_DIGEST(ptr, d)
+#define unpack_TPM_COMPOSITE_HASH(ptr, d) unpack_TPM_DIGEST(ptr, d)
+#define unpack3_TPM_COMPOSITE_HASH(ptr, p, m, d) unpack3_TPM_DIGEST(ptr, p, m, d)
+#define sizeof_TPM_COMPOSITE_HASH(d) TPM_DIGEST_SIZE
-#define pack_TPM_HMAC(ptr, d) pack_TPM_DIGEST(ptr, d);
-#define unpack_TPM_HMAC(ptr, d) unpack_TPM_DIGEST(ptr, d);
+#define pack_TPM_DIRVALUE(ptr, d) pack_TPM_DIGEST(ptr, d)
+#define unpack_TPM_DIRVALUE(ptr, d) unpack_TPM_DIGEST(ptr, d)
-#define pack_TPM_CHOSENID_HASH(ptr, d) pack_TPM_DIGEST(ptr, d);
-#define unpack_TPM_CHOSENID_HASH(ptr, d) unpack_TPM_DIGEST(ptr, d);
+#define pack_TPM_HMAC(ptr, d) pack_TPM_DIGEST(ptr, d)
+#define unpack_TPM_HMAC(ptr, d) unpack_TPM_DIGEST(ptr, d)
+
+#define pack_TPM_CHOSENID_HASH(ptr, d) pack_TPM_DIGEST(ptr, d)
+#define unpack_TPM_CHOSENID_HASH(ptr, d) unpack_TPM_DIGEST(ptr, d)
inline BYTE* pack_TPM_NONCE(BYTE* ptr, const TPM_NONCE* n) {
- return pack_BUFFER(ptr, n->nonce, TPM_DIGEST_SIZE);
+ return pack_BUFFER(ptr, n->nonce, TPM_DIGEST_SIZE);
}
inline BYTE* unpack_TPM_NONCE(BYTE* ptr, TPM_NONCE* n) {
- return unpack_BUFFER(ptr, n->nonce, TPM_DIGEST_SIZE);
+ return unpack_BUFFER(ptr, n->nonce, TPM_DIGEST_SIZE);
+}
+
+#define sizeof_TPM_NONCE(x) TPM_DIGEST_SIZE
+
+inline int unpack3_TPM_NONCE(BYTE* ptr, UINT32* pos, UINT32 max, TPM_NONCE* n) {
+ return unpack3_BUFFER(ptr, pos, max, n->nonce, TPM_DIGEST_SIZE);
}
inline BYTE* pack_TPM_SYMMETRIC_KEY_PARMS(BYTE* ptr, const TPM_SYMMETRIC_KEY_PARMS* k) {
- ptr = pack_UINT32(ptr, k->keyLength);
- ptr = pack_UINT32(ptr, k->blockSize);
- ptr = pack_UINT32(ptr, k->ivSize);
- return pack_BUFFER(ptr, k->IV, k->ivSize);
+ ptr = pack_UINT32(ptr, k->keyLength);
+ ptr = pack_UINT32(ptr, k->blockSize);
+ ptr = pack_UINT32(ptr, k->ivSize);
+ return pack_BUFFER(ptr, k->IV, k->ivSize);
+}
+
+inline BYTE* pack_TPM_SYMMETRIC_KEY(BYTE* ptr, const TPM_SYMMETRIC_KEY* k) {
+ ptr = pack_UINT32(ptr, k->algId);
+ ptr = pack_UINT16(ptr, k->encScheme);
+ ptr = pack_UINT16(ptr, k->size);
+ return pack_BUFFER(ptr, k->data, k->size);
+}
+
+inline int unpack3_TPM_SYMMETRIC_KEY_PARMS(BYTE* ptr, UINT32* pos, UINT32 max, TPM_SYMMETRIC_KEY_PARMS* k, UnpackPtr alloc) {
+ return unpack3_UINT32(ptr, pos, max, &k->keyLength) ||
+ unpack3_UINT32(ptr, pos, max, &k->blockSize) ||
+ unpack3_UINT32(ptr, pos, max, &k->ivSize) ||
+ unpack3_PTR(ptr, pos, max, &k->IV, k->ivSize, alloc);
}
-inline BYTE* unpack_TPM_SYMMETRIC_KEY_PARMS(BYTE* ptr, TPM_SYMMETRIC_KEY_PARMS* k, UnpackPtr alloc) {
- ptr = unpack_UINT32(ptr, &k->keyLength);
- ptr = unpack_UINT32(ptr, &k->blockSize);
- ptr = unpack_UINT32(ptr, &k->ivSize);
- return unpack_PTR(ptr, &k->IV, k->ivSize, alloc);
+inline int sizeof_TPM_SYMMETRIC_KEY_PARMS(const TPM_SYMMETRIC_KEY_PARMS* k) {
+ return 12 + k->ivSize;
+}
+
+inline int unpack3_TPM_SYMMETRIC_KEY(BYTE* ptr, UINT32* pos, UINT32 max, TPM_SYMMETRIC_KEY* k, UnpackPtr alloc) {
+ return unpack3_UINT32(ptr, pos, max, &k->algId) ||
+ unpack3_UINT16(ptr, pos, max, &k->encScheme) ||
+ unpack3_UINT16(ptr, pos, max, &k->size) ||
+ unpack3_PTR(ptr, pos, max, &k->data, k->size, alloc);
}
inline BYTE* pack_TPM_RSA_KEY_PARMS(BYTE* ptr, const TPM_RSA_KEY_PARMS* k) {
- ptr = pack_UINT32(ptr, k->keyLength);
- ptr = pack_UINT32(ptr, k->numPrimes);
- ptr = pack_UINT32(ptr, k->exponentSize);
- return pack_BUFFER(ptr, k->exponent, k->exponentSize);
+ ptr = pack_UINT32(ptr, k->keyLength);
+ ptr = pack_UINT32(ptr, k->numPrimes);
+ ptr = pack_UINT32(ptr, k->exponentSize);
+ return pack_BUFFER(ptr, k->exponent, k->exponentSize);
+}
+
+inline int unpack3_TPM_RSA_KEY_PARMS(BYTE* ptr, UINT32* pos, UINT32 max, TPM_RSA_KEY_PARMS* k, UnpackPtr alloc) {
+ return unpack3_UINT32(ptr, pos, max, &k->keyLength) ||
+ unpack3_UINT32(ptr, pos, max, &k->numPrimes) ||
+ unpack3_UINT32(ptr, pos, max, &k->exponentSize) ||
+ unpack3_PTR(ptr, pos, max, &k->exponent, k->exponentSize, alloc);
}
-inline BYTE* unpack_TPM_RSA_KEY_PARMS(BYTE* ptr, TPM_RSA_KEY_PARMS* k, UnpackPtr alloc) {
- ptr = unpack_UINT32(ptr, &k->keyLength);
- ptr = unpack_UINT32(ptr, &k->numPrimes);
- ptr = unpack_UINT32(ptr, &k->exponentSize);
- return unpack_PTR(ptr, &k->exponent, k->exponentSize, alloc);
+inline int sizeof_TPM_RSA_KEY_PARMS(const TPM_RSA_KEY_PARMS* k) {
+ return 12 + k->exponentSize;
}
+
inline BYTE* pack_TPM_KEY_PARMS(BYTE* ptr, const TPM_KEY_PARMS* k) {
- ptr = pack_TPM_ALGORITHM_ID(ptr, k->algorithmID);
- ptr = pack_TPM_ENC_SCHEME(ptr, k->encScheme);
- ptr = pack_TPM_SIG_SCHEME(ptr, k->sigScheme);
- ptr = pack_UINT32(ptr, k->parmSize);
-
- if(k->parmSize) {
- switch(k->algorithmID) {
- case TPM_ALG_RSA:
- return pack_TPM_RSA_KEY_PARMS(ptr, &k->parms.rsa);
- case TPM_ALG_AES128:
- case TPM_ALG_AES192:
- case TPM_ALG_AES256:
- return pack_TPM_SYMMETRIC_KEY_PARMS(ptr, &k->parms.sym);
- }
- }
- return ptr;
-}
-
-inline BYTE* unpack_TPM_KEY_PARMS(BYTE* ptr, TPM_KEY_PARMS* k, UnpackPtr alloc) {
- ptr = unpack_TPM_ALGORITHM_ID(ptr, &k->algorithmID);
- ptr = unpack_TPM_ENC_SCHEME(ptr, &k->encScheme);
- ptr = unpack_TPM_SIG_SCHEME(ptr, &k->sigScheme);
- ptr = unpack_UINT32(ptr, &k->parmSize);
-
- if(k->parmSize) {
- switch(k->algorithmID) {
- case TPM_ALG_RSA:
- return unpack_TPM_RSA_KEY_PARMS(ptr, &k->parms.rsa, alloc);
- case TPM_ALG_AES128:
- case TPM_ALG_AES192:
- case TPM_ALG_AES256:
- return unpack_TPM_SYMMETRIC_KEY_PARMS(ptr, &k->parms.sym, alloc);
- }
- }
- return ptr;
+ ptr = pack_TPM_ALGORITHM_ID(ptr, k->algorithmID);
+ ptr = pack_TPM_ENC_SCHEME(ptr, k->encScheme);
+ ptr = pack_TPM_SIG_SCHEME(ptr, k->sigScheme);
+ ptr = pack_UINT32(ptr, k->parmSize);
+
+ if(k->parmSize) {
+ switch(k->algorithmID) {
+ case TPM_ALG_RSA:
+ return pack_TPM_RSA_KEY_PARMS(ptr, &k->parms.rsa);
+ case TPM_ALG_AES128:
+ case TPM_ALG_AES192:
+ case TPM_ALG_AES256:
+ return pack_TPM_SYMMETRIC_KEY_PARMS(ptr, &k->parms.sym);
+ }
+ }
+ return ptr;
+}
+
+inline int unpack3_TPM_KEY_PARMS(BYTE* ptr, UINT32* pos, UINT32 len, TPM_KEY_PARMS* k, UnpackPtr alloc) {
+ int rc = unpack3_TPM_ALGORITHM_ID(ptr, pos, len, &k->algorithmID) ||
+ unpack3_TPM_ENC_SCHEME(ptr, pos, len, &k->encScheme) ||
+ unpack3_TPM_SIG_SCHEME(ptr, pos, len, &k->sigScheme) ||
+ unpack3_UINT32(ptr, pos, len, &k->parmSize);
+ if (rc || k->parmSize == 0)
+ return rc;
+ switch(k->algorithmID) {
+ case TPM_ALG_RSA:
+ return unpack3_TPM_RSA_KEY_PARMS(ptr, pos, len, &k->parms.rsa, alloc);
+ case TPM_ALG_AES128:
+ case TPM_ALG_AES192:
+ case TPM_ALG_AES256:
+ return unpack3_TPM_SYMMETRIC_KEY_PARMS(ptr, pos, len, &k->parms.sym, alloc);
+ }
+ return TPM_FAIL;
+}
+
+inline int sizeof_TPM_KEY_PARMS(const TPM_KEY_PARMS* k) {
+ int rc = 0;
+ rc += sizeof_TPM_ALGORITHM_ID(&k->algorithmID);
+ rc += sizeof_TPM_ENC_SCHEME(&k->encScheme);
+ rc += sizeof_TPM_SIG_SCHEME(&k->sigScheme);
+ rc += sizeof_UINT32(&k->parmSize);
+ if (!k->parmSize)
+ return rc;
+ switch(k->algorithmID) {
+ case TPM_ALG_RSA:
+ rc += sizeof_TPM_RSA_KEY_PARMS(&k->parms.rsa);
+ break;
+ case TPM_ALG_AES128:
+ case TPM_ALG_AES192:
+ case TPM_ALG_AES256:
+ rc += sizeof_TPM_SYMMETRIC_KEY_PARMS(&k->parms.sym);
+ break;
+ }
+ return rc;
}
inline BYTE* pack_TPM_STORE_PUBKEY(BYTE* ptr, const TPM_STORE_PUBKEY* k) {
- ptr = pack_UINT32(ptr, k->keyLength);
- ptr = pack_BUFFER(ptr, k->key, k->keyLength);
- return ptr;
+ ptr = pack_UINT32(ptr, k->keyLength);
+ ptr = pack_BUFFER(ptr, k->key, k->keyLength);
+ return ptr;
+}
+
+inline int unpack3_TPM_STORE_PUBKEY(BYTE* ptr, UINT32* pos, UINT32 max, TPM_STORE_PUBKEY* k, UnpackPtr alloc) {
+ return unpack3_UINT32(ptr, pos, max, &k->keyLength) ||
+ unpack3_PTR(ptr, pos, max, &k->key, k->keyLength, alloc);
}
-inline BYTE* unpack_TPM_STORE_PUBKEY(BYTE* ptr, TPM_STORE_PUBKEY* k, UnpackPtr alloc) {
- ptr = unpack_UINT32(ptr, &k->keyLength);
- ptr = unpack_PTR(ptr, &k->key, k->keyLength, alloc);
- return ptr;
+inline int sizeof_TPM_STORE_PUBKEY(const TPM_STORE_PUBKEY* k) {
+ return 4 + k->keyLength;
}
inline BYTE* pack_TPM_PUBKEY(BYTE* ptr, const TPM_PUBKEY* k) {
- ptr = pack_TPM_KEY_PARMS(ptr, &k->algorithmParms);
- return pack_TPM_STORE_PUBKEY(ptr, &k->pubKey);
+ ptr = pack_TPM_KEY_PARMS(ptr, &k->algorithmParms);
+ return pack_TPM_STORE_PUBKEY(ptr, &k->pubKey);
}
-inline BYTE* unpack_TPM_PUBKEY(BYTE* ptr, TPM_PUBKEY* k, UnpackPtr alloc) {
- ptr = unpack_TPM_KEY_PARMS(ptr, &k->algorithmParms, alloc);
- return unpack_TPM_STORE_PUBKEY(ptr, &k->pubKey, alloc);
+inline int unpack3_TPM_PUBKEY(BYTE* ptr, UINT32* pos, UINT32 len, TPM_PUBKEY* k, UnpackPtr alloc) {
+ return unpack3_TPM_KEY_PARMS(ptr, pos, len, &k->algorithmParms, alloc) ||
+ unpack3_TPM_STORE_PUBKEY(ptr, pos, len, &k->pubKey, alloc);
}
inline BYTE* pack_TPM_PCR_SELECTION(BYTE* ptr, const TPM_PCR_SELECTION* p) {
- ptr = pack_UINT16(ptr, p->sizeOfSelect);
- ptr = pack_BUFFER(ptr, p->pcrSelect, p->sizeOfSelect);
- return ptr;
+ ptr = pack_UINT16(ptr, p->sizeOfSelect);
+ ptr = pack_BUFFER(ptr, p->pcrSelect, p->sizeOfSelect);
+ return ptr;
}
inline BYTE* unpack_TPM_PCR_SELECTION(BYTE* ptr, TPM_PCR_SELECTION* p, UnpackPtr alloc) {
- ptr = unpack_UINT16(ptr, &p->sizeOfSelect);
- ptr = unpack_PTR(ptr, &p->pcrSelect, p->sizeOfSelect, alloc);
- return ptr;
+ ptr = unpack_UINT16(ptr, &p->sizeOfSelect);
+ ptr = unpack_PTR(ptr, &p->pcrSelect, p->sizeOfSelect, alloc);
+ return ptr;
+}
+
+inline int unpack3_TPM_PCR_SELECTION(BYTE* ptr, UINT32* pos, UINT32 max, TPM_PCR_SELECTION* p, UnpackPtr alloc) {
+ return unpack3_UINT16(ptr, pos, max, &p->sizeOfSelect) ||
+ unpack3_PTR(ptr, pos, max, &p->pcrSelect, p->sizeOfSelect, alloc);
+}
+
+inline int sizeof_TPM_PCR_SELECTION(const TPM_PCR_SELECTION* p) {
+ return 2 + p->sizeOfSelect;
}
inline BYTE* pack_TPM_PCR_INFO(BYTE* ptr, const TPM_PCR_INFO* p) {
- ptr = pack_TPM_PCR_SELECTION(ptr, &p->pcrSelection);
- ptr = pack_TPM_COMPOSITE_HASH(ptr, &p->digestAtRelease);
- ptr = pack_TPM_COMPOSITE_HASH(ptr, &p->digestAtCreation);
- return ptr;
+ ptr = pack_TPM_PCR_SELECTION(ptr, &p->pcrSelection);
+ ptr = pack_TPM_COMPOSITE_HASH(ptr, &p->digestAtRelease);
+ ptr = pack_TPM_COMPOSITE_HASH(ptr, &p->digestAtCreation);
+ return ptr;
}
-inline BYTE* unpack_TPM_PCR_INFO(BYTE* ptr, TPM_PCR_INFO* p, UnpackPtr alloc) {
- ptr = unpack_TPM_PCR_SELECTION(ptr, &p->pcrSelection, alloc);
- ptr = unpack_TPM_COMPOSITE_HASH(ptr, &p->digestAtRelease);
- ptr = unpack_TPM_COMPOSITE_HASH(ptr, &p->digestAtCreation);
- return ptr;
+inline int unpack3_TPM_PCR_INFO(BYTE* ptr, UINT32* pos, UINT32 max, TPM_PCR_INFO* p, UnpackPtr alloc) {
+ return unpack3_TPM_PCR_SELECTION(ptr, pos, max, &p->pcrSelection, alloc) ||
+ unpack3_TPM_COMPOSITE_HASH(ptr, pos, max, &p->digestAtRelease) ||
+ unpack3_TPM_COMPOSITE_HASH(ptr, pos, max, &p->digestAtCreation);
}
+inline int sizeof_TPM_PCR_INFO(const TPM_PCR_INFO* p) {
+ int rc = 0;
+ rc += sizeof_TPM_PCR_SELECTION(&p->pcrSelection);
+ rc += sizeof_TPM_COMPOSITE_HASH(&p->digestAtRelease);
+ rc += sizeof_TPM_COMPOSITE_HASH(&p->digestAtCreation);
+ return rc;
+}
+
+
inline BYTE* pack_TPM_PCR_COMPOSITE(BYTE* ptr, const TPM_PCR_COMPOSITE* p) {
- ptr = pack_TPM_PCR_SELECTION(ptr, &p->select);
- ptr = pack_UINT32(ptr, p->valueSize);
- ptr = pack_BUFFER(ptr, (const BYTE*)p->pcrValue, p->valueSize);
- return ptr;
+ ptr = pack_TPM_PCR_SELECTION(ptr, &p->select);
+ ptr = pack_UINT32(ptr, p->valueSize);
+ ptr = pack_BUFFER(ptr, (const BYTE*)p->pcrValue, p->valueSize);
+ return ptr;
}
-inline BYTE* unpack_TPM_PCR_COMPOSITE(BYTE* ptr, TPM_PCR_COMPOSITE* p, UnpackPtr alloc) {
- ptr = unpack_TPM_PCR_SELECTION(ptr, &p->select, alloc);
- ptr = unpack_UINT32(ptr, &p->valueSize);
- ptr = unpack_PTR(ptr, (BYTE**)&p->pcrValue, p->valueSize, alloc);
- return ptr;
+inline int unpack3_TPM_PCR_COMPOSITE(BYTE* ptr, UINT32* pos, UINT32 max, TPM_PCR_COMPOSITE* p, UnpackPtr alloc) {
+ return unpack3_TPM_PCR_SELECTION(ptr, pos, max, &p->select, alloc) ||
+ unpack3_UINT32(ptr, pos, max, &p->valueSize) ||
+ unpack3_PTR(ptr, pos, max, (BYTE**)&p->pcrValue, p->valueSize, alloc);
}
inline BYTE* pack_TPM_KEY(BYTE* ptr, const TPM_KEY* k) {
- ptr = pack_TPM_VERSION(ptr, &k->ver);
- ptr = pack_TPM_KEY_USAGE(ptr, k->keyUsage);
- ptr = pack_TPM_KEY_FLAGS(ptr, k->keyFlags);
- ptr = pack_TPM_AUTH_DATA_USAGE(ptr, k->authDataUsage);
- ptr = pack_TPM_KEY_PARMS(ptr, &k->algorithmParms);
- ptr = pack_UINT32(ptr, k->PCRInfoSize);
- if(k->PCRInfoSize) {
- ptr = pack_TPM_PCR_INFO(ptr, &k->PCRInfo);
- }
- ptr = pack_TPM_STORE_PUBKEY(ptr, &k->pubKey);
- ptr = pack_UINT32(ptr, k->encDataSize);
- return pack_BUFFER(ptr, k->encData, k->encDataSize);
-}
-
-inline BYTE* unpack_TPM_KEY(BYTE* ptr, TPM_KEY* k, UnpackPtr alloc) {
- ptr = unpack_TPM_VERSION(ptr, &k->ver);
- ptr = unpack_TPM_KEY_USAGE(ptr, &k->keyUsage);
- ptr = unpack_TPM_KEY_FLAGS(ptr, &k->keyFlags);
- ptr = unpack_TPM_AUTH_DATA_USAGE(ptr, &k->authDataUsage);
- ptr = unpack_TPM_KEY_PARMS(ptr, &k->algorithmParms, alloc);
- ptr = unpack_UINT32(ptr, &k->PCRInfoSize);
- if(k->PCRInfoSize) {
- ptr = unpack_TPM_PCR_INFO(ptr, &k->PCRInfo, alloc);
- }
- ptr = unpack_TPM_STORE_PUBKEY(ptr, &k->pubKey, alloc);
- ptr = unpack_UINT32(ptr, &k->encDataSize);
- return unpack_PTR(ptr, &k->encData, k->encDataSize, alloc);
+ ptr = pack_TPM_VERSION(ptr, &k->ver);
+ ptr = pack_TPM_KEY_USAGE(ptr, k->keyUsage);
+ ptr = pack_TPM_KEY_FLAGS(ptr, k->keyFlags);
+ ptr = pack_TPM_AUTH_DATA_USAGE(ptr, k->authDataUsage);
+ ptr = pack_TPM_KEY_PARMS(ptr, &k->algorithmParms);
+ ptr = pack_UINT32(ptr, k->PCRInfoSize);
+ if(k->PCRInfoSize) {
+ ptr = pack_TPM_PCR_INFO(ptr, &k->PCRInfo);
+ }
+ ptr = pack_TPM_STORE_PUBKEY(ptr, &k->pubKey);
+ ptr = pack_UINT32(ptr, k->encDataSize);
+ return pack_BUFFER(ptr, k->encData, k->encDataSize);
+}
+
+inline int unpack3_TPM_KEY(BYTE* ptr, UINT32* pos, UINT32 max, TPM_KEY* k, UnpackPtr alloc) {
+ int rc = unpack3_TPM_VERSION(ptr, pos, max, &k->ver) ||
+ unpack3_TPM_KEY_USAGE(ptr, pos, max, &k->keyUsage) ||
+ unpack3_TPM_KEY_FLAGS(ptr, pos, max, &k->keyFlags) ||
+ unpack3_TPM_AUTH_DATA_USAGE(ptr, pos, max, &k->authDataUsage) ||
+ unpack3_TPM_KEY_PARMS(ptr, pos, max, &k->algorithmParms, alloc) ||
+ unpack3_UINT32(ptr, pos, max, &k->PCRInfoSize);
+ if (rc) return rc;
+ if(k->PCRInfoSize) {
+ rc = unpack3_TPM_PCR_INFO(ptr, pos, max, &k->PCRInfo, alloc);
+ }
+ if (rc) return rc;
+ return unpack3_TPM_STORE_PUBKEY(ptr, pos, max, &k->pubKey, alloc) ||
+ unpack3_UINT32(ptr, pos, max, &k->encDataSize) ||
+ unpack3_PTR(ptr, pos, max, &k->encData, k->encDataSize, alloc);
+}
+
+inline int sizeof_TPM_KEY(const TPM_KEY* k) {
+ int rc = 0;
+ rc += sizeof_TPM_VERSION(&k->ver);
+ rc += sizeof_TPM_KEY_USAGE(k->keyUsage);
+ rc += sizeof_TPM_KEY_FLAGS(k->keyFlags);
+ rc += sizeof_TPM_AUTH_DATA_USAGE(k->authDataUsage);
+ rc += sizeof_TPM_KEY_PARMS(&k->algorithmParms);
+ rc += sizeof_UINT32(k->PCRInfoSize);
+ if(k->PCRInfoSize) {
+ rc += sizeof_TPM_PCR_INFO(&k->PCRInfo);
+ }
+ rc += sizeof_TPM_STORE_PUBKEY(&k->pubKey);
+ rc += sizeof_UINT32(k->encDataSize);
+ rc += k->encDataSize;
+ return rc;
}
inline BYTE* pack_TPM_BOUND_DATA(BYTE* ptr, const TPM_BOUND_DATA* b, UINT32 payloadSize) {
- ptr = pack_TPM_VERSION(ptr, &b->ver);
- ptr = pack_TPM_PAYLOAD_TYPE(ptr, b->payload);
- return pack_BUFFER(ptr, b->payloadData, payloadSize);
+ ptr = pack_TPM_VERSION(ptr, &b->ver);
+ ptr = pack_TPM_PAYLOAD_TYPE(ptr, b->payload);
+ return pack_BUFFER(ptr, b->payloadData, payloadSize);
}
inline BYTE* unpack_TPM_BOUND_DATA(BYTE* ptr, TPM_BOUND_DATA* b, UINT32 payloadSize, UnpackPtr alloc) {
- ptr = unpack_TPM_VERSION(ptr, &b->ver);
- ptr = unpack_TPM_PAYLOAD_TYPE(ptr, &b->payload);
- return unpack_PTR(ptr, &b->payloadData, payloadSize, alloc);
+ ptr = unpack_TPM_VERSION(ptr, &b->ver);
+ ptr = unpack_TPM_PAYLOAD_TYPE(ptr, &b->payload);
+ return unpack_PTR(ptr, &b->payloadData, payloadSize, alloc);
}
inline BYTE* pack_TPM_STORED_DATA(BYTE* ptr, const TPM_STORED_DATA* d) {
- ptr = pack_TPM_VERSION(ptr, &d->ver);
- ptr = pack_UINT32(ptr, d->sealInfoSize);
- if(d->sealInfoSize) {
- ptr = pack_TPM_PCR_INFO(ptr, &d->sealInfo);
- }
- ptr = pack_UINT32(ptr, d->encDataSize);
- ptr = pack_BUFFER(ptr, d->encData, d->encDataSize);
- return ptr;
-}
-
-inline BYTE* unpack_TPM_STORED_DATA(BYTE* ptr, TPM_STORED_DATA* d, UnpackPtr alloc) {
- ptr = unpack_TPM_VERSION(ptr, &d->ver);
- ptr = unpack_UINT32(ptr, &d->sealInfoSize);
- if(d->sealInfoSize) {
- ptr = unpack_TPM_PCR_INFO(ptr, &d->sealInfo, alloc);
- }
- ptr = unpack_UINT32(ptr, &d->encDataSize);
- ptr = unpack_PTR(ptr, &d->encData, d->encDataSize, alloc);
- return ptr;
+ ptr = pack_TPM_VERSION(ptr, &d->ver);
+ ptr = pack_UINT32(ptr, d->sealInfoSize);
+ if(d->sealInfoSize) {
+ ptr = pack_TPM_PCR_INFO(ptr, &d->sealInfo);
+ }
+ ptr = pack_UINT32(ptr, d->encDataSize);
+ ptr = pack_BUFFER(ptr, d->encData, d->encDataSize);
+ return ptr;
+}
+
+inline int sizeof_TPM_STORED_DATA(const TPM_STORED_DATA* d) {
+ int rv = sizeof_TPM_VERSION(&d->ver) + sizeof_UINT32(d->sealInfoSize);
+ if (d->sealInfoSize) {
+ rv += sizeof_TPM_PCR_INFO(&d->sealInfo);
+ }
+ rv += sizeof_UINT32(d->encDataSize);
+ rv += sizeof_BUFFER(d->encData, d->encDataSize);
+ return rv;
+}
+
+inline int unpack3_TPM_STORED_DATA(BYTE* ptr, UINT32* pos, UINT32 len, TPM_STORED_DATA* d, UnpackPtr alloc) {
+ int rc = unpack3_TPM_VERSION(ptr, pos, len, &d->ver) ||
+ unpack3_UINT32(ptr, pos, len, &d->sealInfoSize);
+ if (rc)
+ return rc;
+ if (d->sealInfoSize)
+ rc = unpack3_TPM_PCR_INFO(ptr, pos, len, &d->sealInfo, alloc);
+ if (rc)
+ return rc;
+ rc = unpack3_UINT32(ptr, pos, len, &d->encDataSize) ||
+ unpack3_PTR(ptr, pos, len, &d->encData, d->encDataSize, alloc);
+ return rc;
}
inline BYTE* pack_TPM_AUTH_SESSION(BYTE* ptr, const TPM_AUTH_SESSION* auth) {
- ptr = pack_TPM_AUTH_HANDLE(ptr, auth->AuthHandle);
- ptr = pack_TPM_NONCE(ptr, &auth->NonceOdd);
- ptr = pack_BOOL(ptr, auth->fContinueAuthSession);
- ptr = pack_TPM_AUTHDATA(ptr, &auth->HMAC);
- return ptr;
+ ptr = pack_TPM_AUTH_HANDLE(ptr, auth->AuthHandle);
+ ptr = pack_TPM_NONCE(ptr, &auth->NonceOdd);
+ ptr = pack_BOOL(ptr, auth->fContinueAuthSession);
+ ptr = pack_TPM_AUTHDATA(ptr, &auth->HMAC);
+ return ptr;
}
inline BYTE* unpack_TPM_AUTH_SESSION(BYTE* ptr, TPM_AUTH_SESSION* auth) {
- ptr = unpack_TPM_NONCE(ptr, &auth->NonceEven);
- ptr = unpack_BOOL(ptr, &auth->fContinueAuthSession);
- ptr = unpack_TPM_AUTHDATA(ptr, &auth->HMAC);
- return ptr;
+ ptr = unpack_TPM_NONCE(ptr, &auth->NonceEven);
+ ptr = unpack_BOOL(ptr, &auth->fContinueAuthSession);
+ ptr = unpack_TPM_AUTHDATA(ptr, &auth->HMAC);
+ return ptr;
}
-inline BYTE* pack_TPM_RQU_HEADER(BYTE* ptr,
- TPM_TAG tag,
- UINT32 size,
- TPM_COMMAND_CODE ord) {
- ptr = pack_UINT16(ptr, tag);
- ptr = pack_UINT32(ptr, size);
- return pack_UINT32(ptr, ord);
+inline int unpack3_TPM_AUTH_SESSION(BYTE* ptr, UINT32* pos, UINT32 len, TPM_AUTH_SESSION* auth) {
+ return unpack3_TPM_NONCE(ptr, pos, len, &auth->NonceEven) ||
+ unpack3_BOOL(ptr, pos, len, &auth->fContinueAuthSession) ||
+ unpack3_TPM_AUTHDATA(ptr, pos, len, &auth->HMAC);
}
-inline BYTE* unpack_TPM_RQU_HEADER(BYTE* ptr,
- TPM_TAG* tag,
- UINT32* size,
- TPM_COMMAND_CODE* ord) {
- ptr = unpack_UINT16(ptr, tag);
- ptr = unpack_UINT32(ptr, size);
- ptr = unpack_UINT32(ptr, ord);
- return ptr;
+
+inline int sizeof_TPM_AUTH_SESSION(const TPM_AUTH_SESSION* auth) {
+ int rv = 0;
+ rv += sizeof_TPM_AUTH_HANDLE(auth->AuthHandle);
+ rv += sizeof_TPM_NONCE(&auth->NonceOdd);
+ rv += sizeof_BOOL(auth->fContinueAuthSession);
+ rv += sizeof_TPM_AUTHDATA(&auth->HMAC);
+ return rv;
}
-#define pack_TPM_RSP_HEADER(p, t, s, r) pack_TPM_RQU_HEADER(p, t, s, r);
-#define unpack_TPM_RSP_HEADER(p, t, s, r) unpack_TPM_RQU_HEADER(p, t, s, r);
+inline BYTE* pack_TPM_RQU_HEADER(BYTE* ptr,
+ TPM_TAG tag,
+ UINT32 size,
+ TPM_COMMAND_CODE ord) {
+ ptr = pack_UINT16(ptr, tag);
+ ptr = pack_UINT32(ptr, size);
+ return pack_UINT32(ptr, ord);
+}
+
+inline BYTE* unpack_TPM_RQU_HEADER(BYTE* ptr,
+ TPM_TAG* tag,
+ UINT32* size,
+ TPM_COMMAND_CODE* ord) {
+ ptr = unpack_UINT16(ptr, tag);
+ ptr = unpack_UINT32(ptr, size);
+ ptr = unpack_UINT32(ptr, ord);
+ return ptr;
+}
+
+inline int unpack3_TPM_RQU_HEADER(BYTE* ptr, UINT32* pos, UINT32 max,
+ TPM_TAG* tag, UINT32* size, TPM_COMMAND_CODE* ord) {
+ return
+ unpack3_UINT16(ptr, pos, max, tag) ||
+ unpack3_UINT32(ptr, pos, max, size) ||
+ unpack3_UINT32(ptr, pos, max, ord);
+}
+
+#define pack_TPM_RSP_HEADER(p, t, s, r) pack_TPM_RQU_HEADER(p, t, s, r)
+#define unpack_TPM_RSP_HEADER(p, t, s, r) unpack_TPM_RQU_HEADER(p, t, s, r)
+#define unpack3_TPM_RSP_HEADER(p, l, m, t, s, r) unpack3_TPM_RQU_HEADER(p, l, m, t, s, r)
#endif
--- /dev/null
+#include <console.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <mini-os/byteorder.h>
+#include <polarssl/sha1.h>
+
+#include "vtpm_manager.h"
+#include "log.h"
+#include "uuid.h"
+
+#include "tpm.h"
+#include "tcg.h"
+#include "marshal.h"
+#include "vtpmmgr.h"
+#include "vtpm_disk.h"
+#include "disk_tpm.h"
+#include "disk_io.h"
+#include "disk_crypto.h"
+#include "disk_format.h"
+#include "mgmt_authority.h"
+
+static int do_provision_aik(struct mem_group *group,
+ const struct tpm_authdata *privCADigest)
+{
+ TPM_KEY kinfo = {
+ .ver = TPM_STRUCT_VER_1_1,
+ .keyUsage = TPM_KEY_IDENTITY,
+ .keyFlags = 0,
+ .authDataUsage = TPM_AUTH_ALWAYS,
+ .algorithmParms = {
+ .algorithmID = TPM_ALG_RSA,
+ .encScheme = TPM_ES_NONE,
+ .sigScheme = TPM_SS_RSASSAPKCS1v15_SHA1,
+ .parmSize = 12,
+ .parms.rsa = {
+ .keyLength = RSA_KEY_SIZE,
+ .numPrimes = 2,
+ .exponentSize = 0,
+ .exponent = NULL,
+ },
+ },
+ .PCRInfoSize = 0,
+ .pubKey.keyLength = 0,
+ .encDataSize = 0,
+ };
+
+ TPM_AUTH_SESSION srkAuth = TPM_AUTH_SESSION_INIT;
+ TPM_AUTH_SESSION ownAuth = TPM_AUTH_SESSION_INIT;
+ TPM_SECRET osapMask;
+
+ TPM_KEY key = TPM_KEY_INIT;
+ UINT32 identityBindingSize;
+ BYTE* identityBinding = NULL;
+
+ TPM_RESULT rc;
+
+ rc = TPM_OSAP(TPM_ET_OWNER, 0, (void*)&vtpm_globals.owner_auth, &osapMask, &ownAuth);
+ if (rc)
+ return rc;
+
+ rc = TPM_OIAP(&srkAuth);
+ if (rc)
+ return rc;
+
+ rc = TPM_MakeIdentity((void*)&group->aik_authdata, (void*)privCADigest, &kinfo,
+ (void*)&vtpm_globals.srk_auth, (void*)&osapMask, &srkAuth, &ownAuth,
+ &key, &identityBindingSize, &identityBinding);
+
+ TPM_TerminateHandle(srkAuth.AuthHandle);
+ TPM_TerminateHandle(ownAuth.AuthHandle);
+
+ if (rc) {
+ printk("TPM_MakeIdentity failed: %d\n", rc);
+ return rc;
+ }
+
+ if (key.pubKey.keyLength != 256)
+ rc = TPM_FAIL;
+ if (key.encDataSize != 256)
+ rc = TPM_FAIL;
+ if (identityBindingSize != 256)
+ rc = TPM_FAIL;
+ if (rc) {
+ printk("TPM_MakeIdentity TPM_KEY mismatch: %d %d %d\n",
+ key.pubKey.keyLength, key.encDataSize, identityBindingSize);
+ } else {
+ memcpy(group->id_data.tpm_aik_public, key.pubKey.key, 256);
+ memcpy(group->id_data.tpm_aik_edata, key.encData, 256);
+ memcpy(group->details.recovery_data, identityBinding, 256);
+ }
+
+ free_TPM_KEY(&key);
+ free(identityBinding);
+
+ return rc;
+}
+
+static int do_load_aik(struct mem_group *group, TPM_HANDLE *handle)
+{
+ TPM_KEY key = {
+ .ver = TPM_STRUCT_VER_1_1,
+ .keyUsage = TPM_KEY_IDENTITY,
+ .keyFlags = 0,
+ .authDataUsage = TPM_AUTH_ALWAYS,
+ .algorithmParms = {
+ .algorithmID = TPM_ALG_RSA,
+ .encScheme = TPM_ES_NONE,
+ .sigScheme = TPM_SS_RSASSAPKCS1v15_SHA1,
+ .parmSize = 12,
+ .parms.rsa = {
+ .keyLength = RSA_KEY_SIZE,
+ .numPrimes = 2,
+ .exponentSize = 0,
+ .exponent = NULL,
+ },
+ },
+ .PCRInfoSize = 0,
+ .pubKey.keyLength = 256,
+ .pubKey.key = group->id_data.tpm_aik_public,
+ .encDataSize = 256,
+ .encData = group->id_data.tpm_aik_edata,
+ };
+
+ return TPM_LoadKey(TPM_SRK_KEYHANDLE, &key, handle, (void*)&vtpm_globals.srk_auth, &vtpm_globals.oiap);
+}
+
+/*
+ * Sets up resettable PCRs for a vTPM deep quote request
+ */
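+/*
+ * As implemented below: PCR20 <- SHA-1 of the group's SAA public key,
+ * PCR21 <- hash over the group's configuration sequence and seal list,
+ * PCR22 <- the caller-supplied kernel hash (when present), and
+ * PCR23 <- the group UUID, then the requesting vTPM's UUID (when present).
+ */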
+static int do_pcr_setup(struct mem_group *group, const void* uuid, const uint8_t* kern_hash)
+{
+ uint32_t reset_sel = (1 << 20) | (1 << 21) | (1 << 22) | (1 << 23);
+ sha1_context ctx;
+ TPM_DIGEST extended;
+ TPM_PCR_SELECTION sel = {
+ .sizeOfSelect = 3,
+ .pcrSelect = (void*)&reset_sel,
+ };
+ int rc;
+ int i;
+
+ rc = TPM_Reset(&sel);
+ if (rc)
+ return rc;
+
+ sha1((void*)&group->id_data.saa_pubkey, sizeof(group->id_data.saa_pubkey), extended.digest);
+ rc = TPM_Extend(20, &extended, &extended);
+ if (rc)
+ return rc;
+
+ sha1_starts(&ctx);
+ sha1_update(&ctx, (void*)&group->details.cfg_seq, 8);
+ sha1_update(&ctx, (void*)&group->seal_bits.nr_cfgs, 4);
+ for(i=0; i < group->nr_seals; i++)
+ sha1_update(&ctx, (void*)&group->seals[i].digest_release, 20);
+ sha1_update(&ctx, (void*)&group->seal_bits.nr_kerns, 4);
+ sha1_update(&ctx, (void*)&group->seal_bits.kernels, 20 * be32_native(group->seal_bits.nr_kerns));
+ sha1_finish(&ctx, extended.digest);
+ rc = TPM_Extend(21, &extended, &extended);
+ if (rc)
+ return rc;
+
+ if (kern_hash) {
+ rc = TPM_Extend(22, (void*)kern_hash, &extended);
+ if (rc)
+ return rc;
+ }
+
+ memset(&extended, 0, 20);
+ memcpy(&extended, group->id_data.uuid, 16);
+ rc = TPM_Extend(23, &extended, &extended);
+ if (rc)
+ return rc;
+
+ if (uuid) {
+ memset(&extended, 0, 20);
+ memcpy(&extended, uuid, 16);
+ rc = TPM_Extend(23, &extended, &extended);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
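+/*
+ * Create a new vTPM group. When no privacy-CA digest is available yet (the
+ * boot-time group 0 case), the freshly generated group is parked in
+ * group0_delayed and AIK provisioning is deferred until this function is
+ * called again with a digest.
+ */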
+struct mem_group *vtpm_new_group(const struct tpm_authdata *privCADigest)
+{
+ static struct mem_group* group0_delayed = NULL;
+ struct mem_group *group;
+
+ if (group0_delayed) {
+ group = group0_delayed;
+ group0_delayed = NULL;
+ } else {
+ group = calloc(1, sizeof(*group));
+
+ group->flags = MEM_GROUP_FLAG_FIRSTBOOT;
+
+ do_random(&group->id_data.uuid, 16);
+ do_random(&group->group_key, 16);
+ do_random(&group->rollback_mac_key, 16);
+ do_random(&group->aik_authdata, 20);
+
+ group->id_data.uuid[6] = 0x40 | (group->id_data.uuid[6] & 0x0F);
+ group->id_data.uuid[8] = 0x80 | (group->id_data.uuid[8] & 0x3F);
+ }
+
+ if (privCADigest) {
+ int rc;
+ rc = do_provision_aik(group, privCADigest);
+ if (rc) {
+ free(group);
+ return NULL;
+ }
+ } else {
+ group0_delayed = group;
+ }
+
+ return group;
+}
+
+int group_do_activate(struct mem_group *group, void* blob, int blobSize,
+ void* resp, unsigned int *rlen)
+{
+ int rc;
+ TPM_HANDLE handle;
+ TPM_AUTH_SESSION aikAuth = TPM_AUTH_SESSION_INIT;
+ TPM_AUTH_SESSION ownAuth = TPM_AUTH_SESSION_INIT;
+ TPM_SYMMETRIC_KEY symKey;
+
+ /* ActivateIdentity with TPM_EK_BLOB_ACTIVATE can check PCRs */
+ rc = do_pcr_setup(group, NULL, NULL);
+ if (rc)
+ return rc;
+
+ rc = do_load_aik(group, &handle);
+ if (rc)
+ return rc;
+
+ rc = TPM_OIAP(&aikAuth);
+ if (rc) {
+ TPM_TerminateHandle(handle);
+ return rc;
+ }
+
+ rc = TPM_OIAP(&ownAuth);
+ if (rc) {
+ TPM_TerminateHandle(aikAuth.AuthHandle);
+ TPM_TerminateHandle(handle);
+ return rc;
+ }
+
+ rc = TPM_ActivateIdentity(handle, blob, blobSize, (void*)&group->aik_authdata,
+ (void*)&vtpm_globals.owner_auth, &aikAuth, &ownAuth, &symKey);
+
+ TPM_TerminateHandle(ownAuth.AuthHandle);
+ TPM_TerminateHandle(aikAuth.AuthHandle);
+ TPM_TerminateHandle(handle);
+
+ if (rc)
+ return rc;
+
+ pack_TPM_SYMMETRIC_KEY(resp + *rlen, &symKey);
+ *rlen += 8 + symKey.size;
+ free(symKey.data);
+
+ return rc;
+}
+
+int vtpm_do_quote(struct mem_group *group, const uuid_t uuid,
+ const uint8_t* kern_hash, const struct tpm_authdata *data, TPM_PCR_SELECTION *sel,
+ void* pcr_out, uint32_t *pcr_size, void* sig_out)
+{
+ TPM_HANDLE handle;
+ TPM_AUTH_SESSION oiap = TPM_AUTH_SESSION_INIT;
+ TPM_PCR_COMPOSITE pcrs;
+ BYTE* sig;
+ UINT32 size;
+ int rc;
+
+ rc = do_pcr_setup(group, uuid, kern_hash);
+ if (rc)
+ return rc;
+
+ rc = do_load_aik(group, &handle);
+ if (rc)
+ return rc;
+
+ rc = TPM_OIAP(&oiap);
+ if (rc) {
+ TPM_TerminateHandle(handle);
+ return rc;
+ }
+
+ rc = TPM_Quote(handle, (void*)data, sel, (void*)&group->aik_authdata, &oiap, &pcrs, &sig, &size);
+ printk("TPM_Quote: %d\n", rc);
+
+ TPM_TerminateHandle(oiap.AuthHandle);
+ TPM_FlushSpecific(handle, TPM_RT_KEY);
+
+ if (rc)
+ return rc;
+ if (size != 256) {
+ printk("Bad size\n");
+ return TPM_FAIL;
+ }
+
+ if (pcr_out) {
+ *pcr_size = pcrs.valueSize;
+ memcpy(pcr_out, pcrs.pcrValue, *pcr_size);
+ }
+
+ memcpy(sig_out, sig, size);
+
+ free_TPM_PCR_COMPOSITE(&pcrs);
+ free(sig);
+
+ return rc;
+}
--- /dev/null
+#ifndef __VTPMMGR_MGMT_AUTHORITY_H
+#define __VTPMMGR_MGMT_AUTHORITY_H
+
+struct mem_group *vtpm_new_group(const struct tpm_authdata *privCADigest);
+int group_do_activate(struct mem_group *group, void* blob, int blobSize,
+ void* resp, unsigned int *rlen);
+int vtpm_do_quote(struct mem_group *group, const uuid_t uuid,
+ const uint8_t* kern_hash, const struct tpm_authdata *data, TPM_PCR_SELECTION *sel,
+ void* pcr_out, uint32_t *pcr_size, void* sig_out);
+
+#endif
BYTE* IV;
} TPM_SYMMETRIC_KEY_PARMS;
+typedef struct TPM_SYMMETRIC_KEY {
+ UINT32 algId;
+ UINT16 encScheme;
+ UINT16 size;
+ BYTE* data;
+} TPM_SYMMETRIC_KEY;
+
inline void free_TPM_SYMMETRIC_KEY_PARMS(TPM_SYMMETRIC_KEY_PARMS* p) {
free(p->IV);
p->IV = NULL;
#define TCPA_MAX_BUFFER_LENGTH 0x2000
-#define TPM_BEGIN(TAG, ORD) \
- const TPM_TAG intag = TAG;\
-TPM_TAG tag = intag;\
-UINT32 paramSize;\
-const TPM_COMMAND_CODE ordinal = ORD;\
-TPM_RESULT status = TPM_SUCCESS;\
-BYTE in_buf[TCPA_MAX_BUFFER_LENGTH];\
-BYTE out_buf[TCPA_MAX_BUFFER_LENGTH];\
-UINT32 out_len = sizeof(out_buf);\
-BYTE* ptr = in_buf;\
-/*Print a log message */\
-vtpmloginfo(VTPM_LOG_TPM, "%s\n", __func__);\
-/* Pack the header*/\
-ptr = pack_TPM_TAG(ptr, tag);\
-ptr += sizeof(UINT32);\
-ptr = pack_TPM_COMMAND_CODE(ptr, ordinal)\
-
-#define TPM_AUTH_BEGIN() \
- sha1_context sha1_ctx;\
-BYTE* authbase = ptr - sizeof(TPM_COMMAND_CODE);\
-TPM_DIGEST paramDigest;\
-sha1_starts(&sha1_ctx)
-
-#define TPM_AUTH1_GEN(HMACkey, auth) do {\
- sha1_finish(&sha1_ctx, paramDigest.digest);\
- generateAuth(&paramDigest, HMACkey, auth);\
- ptr = pack_TPM_AUTH_SESSION(ptr, auth);\
-} while(0)
-
-#define TPM_AUTH2_GEN(HMACkey, auth) do {\
- generateAuth(&paramDigest, HMACkey, auth);\
- ptr = pack_TPM_AUTH_SESSION(ptr, auth);\
-} while(0)
-
-#define TPM_TRANSMIT() do {\
- /* Pack the command size */\
- paramSize = ptr - in_buf;\
- pack_UINT32(in_buf + sizeof(TPM_TAG), paramSize);\
- if((status = TPM_TransmitData(in_buf, paramSize, out_buf, &out_len)) != TPM_SUCCESS) {\
- goto abort_egress;\
- }\
-} while(0)
-
-#define TPM_AUTH_VERIFY_BEGIN() do {\
- UINT32 buf[2] = { cpu_to_be32(status), cpu_to_be32(ordinal) };\
- sha1_starts(&sha1_ctx);\
- sha1_update(&sha1_ctx, (unsigned char*)buf, sizeof(buf));\
- authbase = ptr;\
-} while(0)
-
-#define TPM_AUTH1_VERIFY(HMACkey, auth) do {\
- sha1_finish(&sha1_ctx, paramDigest.digest);\
- ptr = unpack_TPM_AUTH_SESSION(ptr, auth);\
- if((status = verifyAuth(&paramDigest, HMACkey, auth)) != TPM_SUCCESS) {\
- goto abort_egress;\
- }\
-} while(0)
-
-#define TPM_AUTH2_VERIFY(HMACkey, auth) do {\
- ptr = unpack_TPM_AUTH_SESSION(ptr, auth);\
- if((status = verifyAuth(&paramDigest, HMACkey, auth)) != TPM_SUCCESS) {\
- goto abort_egress;\
- }\
-} while(0)
-
-
-
-#define TPM_UNPACK_VERIFY() do { \
- ptr = out_buf;\
- ptr = unpack_TPM_RSP_HEADER(ptr, \
- &(tag), &(paramSize), &(status));\
- if((status) != TPM_SUCCESS || (tag) != (intag +3)) { \
- vtpmlogerror(VTPM_LOG_TPM, "Failed with return code %s\n", tpm_get_error_name(status));\
- goto abort_egress;\
- }\
-} while(0)
-
-#define TPM_AUTH_HASH() do {\
- sha1_update(&sha1_ctx, authbase, ptr - authbase);\
- authbase = ptr;\
-} while(0)
-
-#define TPM_AUTH_SKIP() do {\
- authbase = ptr;\
-} while(0)
+#define TPM_BEGIN_CMD(ord) \
+ const TPM_COMMAND_CODE ordinal = ord; \
+ TPM_RESULT status = TPM_SUCCESS; \
+ BYTE _io_buffer[TCPA_MAX_BUFFER_LENGTH]; \
+ UINT32 _io_bufsize_in; \
+ UINT32 _io_bufsize_out; \
+ vtpmloginfo(VTPM_LOG_TPM, "%s\n", __func__); \
+ do { \
+ BYTE *in_buf = _io_buffer; \
+ UINT32 in_pos = 6; \
+ PACK_IN(UINT32, ordinal);
+
+#define IN_PTR (in_buf + in_pos)
+
+#define PACK_IN(type, item...) do { \
+ UINT32 isize = sizeof_ ## type(item); \
+ if (isize + in_pos > TCPA_MAX_BUFFER_LENGTH) { \
+ status = TPM_SIZE; \
+ goto abort_egress; \
+ } \
+ pack_ ## type (IN_PTR, item); \
+ in_pos += isize; \
+} while (0)
+
+#define TPM_HASH_IN_BEGIN \
+ sha1_context sha1_ctx; \
+ sha1_starts(&sha1_ctx); \
+ sha1_update(&sha1_ctx, in_buf + 6, 4); \
+ TPM_HASH_IN_START
+
+#define TPM_HASH_IN_START \
+ do { \
+ UINT32 _hash_in_start = in_pos;
+
+#define TPM_HASH_IN_STOP \
+ sha1_update(&sha1_ctx, in_buf + _hash_in_start, in_pos - _hash_in_start); \
+ } while (0)
+
+#define TPM_TAG_COMMON(req_tag) \
+ _io_bufsize_in = in_pos; \
+ pack_TPM_TAG(in_buf, req_tag); \
+ pack_UINT32(in_buf + sizeof(TPM_TAG), in_pos); \
+ } while (0); \
+ _io_bufsize_out = TCPA_MAX_BUFFER_LENGTH; \
+ status = TPM_TransmitData(_io_buffer, _io_bufsize_in, _io_buffer, &_io_bufsize_out); \
+ if (status != TPM_SUCCESS) { \
+ goto abort_egress; \
+ } \
+ do { \
+ BYTE *out_buf = _io_buffer; \
+ UINT32 out_pos = 0; \
+ UINT32 out_len = _io_bufsize_out; \
+ do { \
+ TPM_TAG rsp_tag; \
+ UINT32 rsp_len; \
+ UINT32 rsp_status; \
+ UNPACK_OUT(TPM_RSP_HEADER, &rsp_tag, &rsp_len, &rsp_status); \
+ if (rsp_status != TPM_SUCCESS) { \
+ vtpmlogerror(VTPM_LOG_TPM, "Failed with return code %s\n", tpm_get_error_name(rsp_status)); \
+ status = rsp_status; \
+ goto abort_egress; \
+ } \
+ if (rsp_tag != req_tag + 3 || rsp_len != out_len) { \
+ status = TPM_FAIL; \
+ goto abort_egress; \
+ } \
+ } while(0)
+
+#define OUT_PTR (out_buf + out_pos)
+
+#define UNPACK_OUT(type, item...) do { \
+ if (unpack3_ ## type (out_buf, &out_pos, TCPA_MAX_BUFFER_LENGTH, item)) { \
+ status = TPM_SIZE; \
+ goto abort_egress; \
+ } \
+} while (0)
+
+#define TPM_XMIT_REQ() \
+ TPM_TAG_COMMON(TPM_TAG_RQU_COMMAND)
+
+#define TPM_XMIT_AUTH1(sec1, auth1) \
+ TPM_HASH_IN_STOP; \
+ do { \
+ TPM_DIGEST paramDigest; \
+ sha1_finish(&sha1_ctx, paramDigest.digest); \
+ generateAuth(&paramDigest, sec1, auth1); \
+ PACK_IN(TPM_AUTH_SESSION, auth1); \
+ } while (0); \
+ TPM_TAG_COMMON(TPM_TAG_RQU_AUTH1_COMMAND); \
+ TPM_HASH_OUT_BEGIN
+
+#define TPM_XMIT_AUTH2(sec1, auth1, sec2, auth2) \
+ TPM_HASH_IN_STOP; \
+ do { \
+ TPM_DIGEST paramDigest; \
+ sha1_finish(&sha1_ctx, paramDigest.digest); \
+ generateAuth(&paramDigest, sec1, auth1); \
+ PACK_IN(TPM_AUTH_SESSION, auth1); \
+ generateAuth(&paramDigest, sec2, auth2); \
+ PACK_IN(TPM_AUTH_SESSION, auth2); \
+ } while (0); \
+ TPM_TAG_COMMON(TPM_TAG_RQU_AUTH2_COMMAND); \
+ TPM_HASH_OUT_BEGIN
+
+#define TPM_HASH_OUT_BEGIN \
+ sha1_context sha1_ctx; \
+ sha1_starts(&sha1_ctx); \
+ do { \
+ UINT32 buf[2] = { cpu_to_be32(status), cpu_to_be32(ordinal) }; \
+ sha1_update(&sha1_ctx, (BYTE*)buf, sizeof(buf)); \
+ } while(0); \
+ TPM_HASH_OUT_START
+
+#define TPM_HASH_OUT_START \
+ do { \
+ UINT32 _hash_out_start = out_pos;
+
+#define TPM_HASH_OUT_STOP \
+ sha1_update(&sha1_ctx, out_buf + _hash_out_start, out_pos - _hash_out_start); \
+ } while (0)
+
+#define TPM_END_AUTH1(sec1, auth1) \
+ TPM_HASH_OUT_STOP; \
+ do { \
+ TPM_DIGEST paramDigest; \
+ sha1_finish(&sha1_ctx, paramDigest.digest); \
+ UNPACK_OUT(TPM_AUTH_SESSION, auth1); \
+ status = verifyAuth(&paramDigest, sec1, auth1); \
+ if (status != TPM_SUCCESS) \
+ goto abort_egress; \
+ } while(0); \
+ TPM_END_COMMON
+
+#define TPM_END_AUTH2(sec1, auth1, sec2, auth2) \
+ TPM_HASH_OUT_STOP; \
+ do { \
+ TPM_DIGEST paramDigest; \
+ sha1_finish(&sha1_ctx, paramDigest.digest); \
+ UNPACK_OUT(TPM_AUTH_SESSION, auth1); \
+ status = verifyAuth(&paramDigest, sec1, auth1); \
+ if (status != TPM_SUCCESS) \
+ goto abort_egress; \
+ UNPACK_OUT(TPM_AUTH_SESSION, auth2); \
+ status = verifyAuth(&paramDigest, sec2, auth2); \
+ if (status != TPM_SUCCESS) \
+ goto abort_egress; \
+ } while(0); \
+ TPM_END_COMMON
+
+#define TPM_END() TPM_END_COMMON
+
+#define TPM_END_COMMON \
+ if (out_pos != out_len) { \
+ vtpmloginfo(VTPM_LOG_TPM, "Response too long (%d != %d)", out_pos, out_len);\
+ status = TPM_SIZE; \
+ goto abort_egress; \
+ } \
+ } while (0); \
#define TPM_AUTH_ERR_CHECK(auth) do {\
- if(status != TPM_SUCCESS || auth->fContinueAuthSession == FALSE) {\
- vtpmloginfo(VTPM_LOG_TPM, "Auth Session: 0x%x closed by TPM\n", auth->AuthHandle);\
- auth->AuthHandle = 0;\
- }\
+ if(status != TPM_SUCCESS || auth->fContinueAuthSession == FALSE) {\
+ vtpmloginfo(VTPM_LOG_TPM, "Auth Session: 0x%x closed by TPM\n", auth->AuthHandle);\
+ auth->AuthHandle = 0;\
+ }\
} while(0)
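+
+/*
+ * Sketch of how a command wrapper is assembled from the macros above
+ * (TPM_Example, TPM_ORD_Example, sec and out are hypothetical):
+ *
+ *     TPM_RESULT TPM_Example(UINT32 arg, const TPM_SECRET *sec,
+ *                            TPM_AUTH_SESSION *auth, UINT32 *out)
+ *     {
+ *         TPM_BEGIN_CMD(TPM_ORD_Example);
+ *         TPM_HASH_IN_BEGIN;
+ *         PACK_IN(UINT32, arg);
+ *         TPM_XMIT_AUTH1(sec, auth);
+ *         UNPACK_OUT(UINT32, out);
+ *         TPM_END_AUTH1(sec, auth);
+ *     abort_egress:
+ *         TPM_AUTH_ERR_CHECK(auth);
+ *         return status;
+ *     }
+ */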
static void xorEncrypt(const TPM_SECRET* sharedSecret,
- TPM_NONCE* nonce,
- const TPM_AUTHDATA* inAuth0,
- TPM_ENCAUTH outAuth0,
- const TPM_AUTHDATA* inAuth1,
- TPM_ENCAUTH outAuth1) {
- BYTE XORbuffer[sizeof(TPM_SECRET) + sizeof(TPM_NONCE)];
- BYTE XORkey[TPM_DIGEST_SIZE];
- BYTE* ptr = XORbuffer;
- ptr = pack_TPM_SECRET(ptr, sharedSecret);
- ptr = pack_TPM_NONCE(ptr, nonce);
-
- sha1(XORbuffer, ptr - XORbuffer, XORkey);
-
- if(inAuth0) {
- for(int i = 0; i < TPM_DIGEST_SIZE; ++i) {
- outAuth0[i] = XORkey[i] ^ (*inAuth0)[i];
- }
- }
- if(inAuth1) {
- for(int i = 0; i < TPM_DIGEST_SIZE; ++i) {
- outAuth1[i] = XORkey[i] ^ (*inAuth1)[i];
- }
- }
+ TPM_NONCE* nonce,
+ const TPM_AUTHDATA* inAuth0,
+ TPM_ENCAUTH outAuth0,
+ const TPM_AUTHDATA* inAuth1,
+ TPM_ENCAUTH outAuth1) {
+ BYTE XORbuffer[sizeof(TPM_SECRET) + sizeof(TPM_NONCE)];
+ BYTE XORkey[TPM_DIGEST_SIZE];
+ BYTE* ptr = XORbuffer;
+ ptr = pack_TPM_SECRET(ptr, sharedSecret);
+ ptr = pack_TPM_NONCE(ptr, nonce);
+
+ sha1(XORbuffer, ptr - XORbuffer, XORkey);
+
+ if(inAuth0) {
+ for(int i = 0; i < TPM_DIGEST_SIZE; ++i) {
+ outAuth0[i] = XORkey[i] ^ (*inAuth0)[i];
+ }
+ }
+ if(inAuth1) {
+ for(int i = 0; i < TPM_DIGEST_SIZE; ++i) {
+ outAuth1[i] = XORkey[i] ^ (*inAuth1)[i];
+ }
+ }
}
static void generateAuth(const TPM_DIGEST* paramDigest,
- const TPM_SECRET* HMACkey,
- TPM_AUTH_SESSION *auth)
+ const TPM_SECRET* HMACkey,
+ TPM_AUTH_SESSION *auth)
{
- //Generate new OddNonce
- vtpmmgr_rand((BYTE*)auth->NonceOdd.nonce, sizeof(TPM_NONCE));
+ //Generate new OddNonce
+ vtpmmgr_rand((BYTE*)auth->NonceOdd.nonce, sizeof(TPM_NONCE));
- // Create HMAC text. (Concat inParamsDigest with inAuthSetupParams).
- BYTE hmacText[sizeof(TPM_DIGEST) + (2 * sizeof(TPM_NONCE)) + sizeof(BOOL)];
- BYTE* ptr = hmacText;
+ // Create HMAC text. (Concat inParamsDigest with inAuthSetupParams).
+ BYTE hmacText[sizeof(TPM_DIGEST) + (2 * sizeof(TPM_NONCE)) + sizeof(BOOL)];
+ BYTE* ptr = hmacText;
- ptr = pack_TPM_DIGEST(ptr, paramDigest);
- ptr = pack_TPM_NONCE(ptr, &auth->NonceEven);
- ptr = pack_TPM_NONCE(ptr, &auth->NonceOdd);
- ptr = pack_BOOL(ptr, auth->fContinueAuthSession);
+ ptr = pack_TPM_DIGEST(ptr, paramDigest);
+ ptr = pack_TPM_NONCE(ptr, &auth->NonceEven);
+ ptr = pack_TPM_NONCE(ptr, &auth->NonceOdd);
+ ptr = pack_BOOL(ptr, auth->fContinueAuthSession);
- sha1_hmac((BYTE *) HMACkey, sizeof(TPM_DIGEST),
- (BYTE *) hmacText, sizeof(hmacText),
- auth->HMAC);
+ sha1_hmac((BYTE *) HMACkey, sizeof(TPM_DIGEST),
+ (BYTE *) hmacText, sizeof(hmacText),
+ auth->HMAC);
}
static TPM_RESULT verifyAuth(const TPM_DIGEST* paramDigest,
- /*[IN]*/ const TPM_SECRET *HMACkey,
- /*[IN,OUT]*/ TPM_AUTH_SESSION *auth)
+ /*[IN]*/ const TPM_SECRET *HMACkey,
+ /*[IN,OUT]*/ TPM_AUTH_SESSION *auth)
{
- // Create HMAC text. (Concat inParamsDigest with inAuthSetupParams).
- TPM_AUTHDATA hm;
- BYTE hmacText[sizeof(TPM_DIGEST) + (2 * sizeof(TPM_NONCE)) + sizeof(BOOL)];
- BYTE* ptr = hmacText;
-
- ptr = pack_TPM_DIGEST(ptr, paramDigest);
- ptr = pack_TPM_NONCE(ptr, &auth->NonceEven);
- ptr = pack_TPM_NONCE(ptr, &auth->NonceOdd);
- ptr = pack_BOOL(ptr, auth->fContinueAuthSession);
-
- sha1_hmac( (BYTE *) HMACkey, sizeof(TPM_DIGEST),
- (BYTE *) hmacText, sizeof(hmacText),
- hm);
-
- // Compare correct HMAC with provided one.
- if (memcmp(hm, auth->HMAC, sizeof(TPM_DIGEST)) == 0) { // 0 indicates equality
- return TPM_SUCCESS;
- } else {
- vtpmlogerror(VTPM_LOG_TPM, "Auth Session verification failed!\n");
- return TPM_AUTHFAIL;
- }
+ // Create HMAC text. (Concat inParamsDigest with inAuthSetupParams).
+ TPM_AUTHDATA hm;
+ BYTE hmacText[sizeof(TPM_DIGEST) + (2 * sizeof(TPM_NONCE)) + sizeof(BOOL)];
+ BYTE* ptr = hmacText;
+
+ ptr = pack_TPM_DIGEST(ptr, paramDigest);
+ ptr = pack_TPM_NONCE(ptr, &auth->NonceEven);
+ ptr = pack_TPM_NONCE(ptr, &auth->NonceOdd);
+ ptr = pack_BOOL(ptr, auth->fContinueAuthSession);
+
+ sha1_hmac( (BYTE *) HMACkey, sizeof(TPM_DIGEST),
+ (BYTE *) hmacText, sizeof(hmacText),
+ hm);
+
+ // Compare correct HMAC with provided one.
+ if (memcmp(hm, auth->HMAC, sizeof(TPM_DIGEST)) == 0) { // 0 indicates equality
+ return TPM_SUCCESS;
+ } else {
+ vtpmlogerror(VTPM_LOG_TPM, "Auth Session verification failed! %x %x\n",
+ *(UINT32*)auth->HMAC, *(UINT32*)hm);
+ return TPM_AUTHFAIL;
+ }
}
// Authorization Commands
// ------------------------------------------------------------------
-TPM_RESULT TPM_OIAP(TPM_AUTH_SESSION* auth) // out
+TPM_RESULT TPM_OIAP(TPM_AUTH_SESSION* auth) // out
{
- TPM_BEGIN(TPM_TAG_RQU_COMMAND, TPM_ORD_OIAP);
+ TPM_BEGIN_CMD(TPM_ORD_OIAP);
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
+ TPM_XMIT_REQ();
- memset(&auth->HMAC, 0, sizeof(TPM_DIGEST));
- auth->fContinueAuthSession = TRUE;
+ memset(&auth->HMAC, 0, sizeof(TPM_DIGEST));
+ auth->fContinueAuthSession = TRUE;
- ptr = unpack_UINT32(ptr, &auth->AuthHandle);
- ptr = unpack_TPM_NONCE(ptr, &auth->NonceEven);
+ UNPACK_OUT(UINT32, &auth->AuthHandle);
+ UNPACK_OUT(TPM_NONCE, &auth->NonceEven);
+ TPM_END();
- vtpmloginfo(VTPM_LOG_TPM, "Auth Session: 0x%x opened by TPM_OIAP.\n", auth->AuthHandle);
+ vtpmloginfo(VTPM_LOG_TPM, "Auth Session: 0x%x opened by TPM_OIAP.\n", auth->AuthHandle);
abort_egress:
- return status;
+ return status;
}
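+/*
+ * TPM_OSAP opens an object-specific authorization session.  The session's
+ * shared secret is HMAC-SHA1(usageAuth, nonceEvenOSAP || nonceOddOSAP) and is
+ * derived locally from the two OSAP nonces; it is never sent over the wire.
+ */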
TPM_RESULT TPM_OSAP(TPM_ENTITY_TYPE entityType, // in
- UINT32 entityValue, // in
- const TPM_AUTHDATA* usageAuth, //in
- TPM_SECRET *sharedSecret, //out
- TPM_AUTH_SESSION *auth)
+ UINT32 entityValue, // in
+ const TPM_AUTHDATA* usageAuth, //in
+ TPM_SECRET *sharedSecret, //out
+ TPM_AUTH_SESSION *auth)
{
- BYTE* nonceOddOSAP;
- TPM_BEGIN(TPM_TAG_RQU_COMMAND, TPM_ORD_OSAP);
+ TPM_DIGEST nonceOddOSAP;
+ vtpmmgr_rand(nonceOddOSAP.digest, TPM_DIGEST_SIZE);
+ TPM_BEGIN_CMD(TPM_ORD_OSAP);
- ptr = pack_TPM_ENTITY_TYPE(ptr, entityType);
- ptr = pack_UINT32(ptr, entityValue);
+ PACK_IN(TPM_ENTITY_TYPE, entityType);
+ PACK_IN(UINT32, entityValue);
+ PACK_IN(TPM_DIGEST, &nonceOddOSAP);
- //nonce Odd OSAP
- nonceOddOSAP = ptr;
- vtpmmgr_rand(ptr, TPM_DIGEST_SIZE);
- ptr += TPM_DIGEST_SIZE;
+ TPM_XMIT_REQ();
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
+ UNPACK_OUT(UINT32, &auth->AuthHandle);
+ UNPACK_OUT(TPM_NONCE, &auth->NonceEven);
- ptr = unpack_UINT32(ptr, &auth->AuthHandle);
- ptr = unpack_TPM_NONCE(ptr, &auth->NonceEven);
+ //Calculate session secret
+ sha1_context ctx;
+ sha1_hmac_starts(&ctx, *usageAuth, TPM_DIGEST_SIZE);
+ sha1_hmac_update(&ctx, OUT_PTR, TPM_DIGEST_SIZE); // nonceEvenOSAP
+ sha1_hmac_update(&ctx, nonceOddOSAP.digest, TPM_DIGEST_SIZE);
+ sha1_hmac_finish(&ctx, *sharedSecret);
- //Calculate session secret
- sha1_context ctx;
- sha1_hmac_starts(&ctx, *usageAuth, TPM_DIGEST_SIZE);
- sha1_hmac_update(&ctx, ptr, TPM_DIGEST_SIZE); //ptr = nonceEvenOSAP
- sha1_hmac_update(&ctx, nonceOddOSAP, TPM_DIGEST_SIZE);
- sha1_hmac_finish(&ctx, *sharedSecret);
+ out_pos += TPM_DIGEST_SIZE;
+ TPM_END();
- memset(&auth->HMAC, 0, sizeof(TPM_DIGEST));
- auth->fContinueAuthSession = FALSE;
+ memset(&auth->HMAC, 0, sizeof(TPM_DIGEST));
+ auth->fContinueAuthSession = FALSE;
- vtpmloginfo(VTPM_LOG_TPM, "Auth Session: 0x%x opened by TPM_OSAP.\n", auth->AuthHandle);
+ vtpmloginfo(VTPM_LOG_TPM, "Auth Session: 0x%x opened by TPM_OSAP.\n", auth->AuthHandle);
abort_egress:
- return status;
+ return status;
}
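+/*
+ * TPM_TakeOwnership: the owner and SRK AuthData are RSA-OAEP encrypted under
+ * the endorsement key's public key before being packed, so only the physical
+ * TPM can recover them.  'auth' must be an open session keyed with ownerAuth.
+ */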
TPM_RESULT TPM_TakeOwnership(
- const TPM_PUBKEY *pubEK, //in
- const TPM_AUTHDATA* ownerAuth, //in
- const TPM_AUTHDATA* srkAuth, //in
- const TPM_KEY* inSrk, //in
- TPM_KEY* outSrk, //out, optional
- TPM_AUTH_SESSION* auth) // in, out
+ const TPM_PUBKEY *pubEK, //in
+ const TPM_AUTHDATA* ownerAuth, //in
+ const TPM_AUTHDATA* srkAuth, //in
+ const TPM_KEY* inSrk, //in
+ TPM_KEY* outSrk, //out, optional
+ TPM_AUTH_SESSION* auth) // in, out
{
- int keyAlloced = 0;
- tpmrsa_context ek_rsa = TPMRSA_CTX_INIT;
-
- TPM_BEGIN(TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_TakeOwnership);
- TPM_AUTH_BEGIN();
-
- tpmrsa_set_pubkey(&ek_rsa,
- pubEK->pubKey.key, pubEK->pubKey.keyLength,
- pubEK->algorithmParms.parms.rsa.exponent,
- pubEK->algorithmParms.parms.rsa.exponentSize);
-
- /* Pack the protocol ID */
- ptr = pack_UINT16(ptr, TPM_PID_OWNER);
-
- /* Pack the encrypted owner auth */
- ptr = pack_UINT32(ptr, pubEK->algorithmParms.parms.rsa.keyLength / 8);
- tpmrsa_pub_encrypt_oaep(&ek_rsa,
- ctr_drbg_random, &vtpm_globals.ctr_drbg,
- sizeof(TPM_SECRET),
- (BYTE*) ownerAuth,
- ptr);
- ptr += pubEK->algorithmParms.parms.rsa.keyLength / 8;
-
- /* Pack the encrypted srk auth */
- ptr = pack_UINT32(ptr, pubEK->algorithmParms.parms.rsa.keyLength / 8);
- tpmrsa_pub_encrypt_oaep(&ek_rsa,
- ctr_drbg_random, &vtpm_globals.ctr_drbg,
- sizeof(TPM_SECRET),
- (BYTE*) srkAuth,
- ptr);
- ptr += pubEK->algorithmParms.parms.rsa.keyLength / 8;
-
- /* Pack the Srk key */
- ptr = pack_TPM_KEY(ptr, inSrk);
-
- /* Hash everything up to here */
- TPM_AUTH_HASH();
-
- /* Generate the authorization */
- TPM_AUTH1_GEN(ownerAuth, auth);
-
- /* Send the command to the tpm*/
- TPM_TRANSMIT();
- /* Unpack and validate the header */
- TPM_UNPACK_VERIFY();
- TPM_AUTH_VERIFY_BEGIN();
-
- if(outSrk != NULL) {
- /* If the user wants a copy of the srk we give it to them */
- keyAlloced = 1;
- ptr = unpack_TPM_KEY(ptr, outSrk, UNPACK_ALLOC);
- } else {
- /*otherwise just parse past it */
- TPM_KEY temp;
- ptr = unpack_TPM_KEY(ptr, &temp, UNPACK_ALIAS);
- }
-
- /* Hash the output key */
- TPM_AUTH_HASH();
-
- /* Verify authorizaton */
- TPM_AUTH1_VERIFY(ownerAuth, auth);
-
- goto egress;
+ int keyAlloced = 0;
+ tpmrsa_context ek_rsa = TPMRSA_CTX_INIT;
+
+ TPM_BEGIN_CMD(TPM_ORD_TakeOwnership);
+ TPM_HASH_IN_BEGIN;
+
+ tpmrsa_set_pubkey(&ek_rsa,
+ pubEK->pubKey.key, pubEK->pubKey.keyLength,
+ pubEK->algorithmParms.parms.rsa.exponent,
+ pubEK->algorithmParms.parms.rsa.exponentSize);
+
+ /* Pack the protocol ID */
+ PACK_IN(UINT16, TPM_PID_OWNER);
+
+ /* Pack the encrypted owner auth */
+ PACK_IN(UINT32, pubEK->algorithmParms.parms.rsa.keyLength / 8);
+ tpmrsa_pub_encrypt_oaep(&ek_rsa,
+ ctr_drbg_random, &vtpm_globals.ctr_drbg,
+ sizeof(TPM_SECRET),
+ (BYTE*) ownerAuth,
+ IN_PTR);
+ in_pos += pubEK->algorithmParms.parms.rsa.keyLength / 8;
+
+ /* Pack the encrypted srk auth */
+ PACK_IN(UINT32, pubEK->algorithmParms.parms.rsa.keyLength / 8);
+ tpmrsa_pub_encrypt_oaep(&ek_rsa,
+ ctr_drbg_random, &vtpm_globals.ctr_drbg,
+ sizeof(TPM_SECRET),
+ (BYTE*) srkAuth,
+ IN_PTR);
+ in_pos += pubEK->algorithmParms.parms.rsa.keyLength / 8;
+
+ PACK_IN(TPM_KEY, inSrk);
+
+ TPM_XMIT_AUTH1(ownerAuth, auth);
+
+ if (outSrk != NULL) {
+ /* If the user wants a copy of the srk we give it to them */
+ keyAlloced = 1;
+ UNPACK_OUT(TPM_KEY, outSrk, UNPACK_ALLOC);
+ } else {
+ /*otherwise just parse past it */
+ TPM_KEY temp;
+ UNPACK_OUT(TPM_KEY, &temp, UNPACK_ALIAS);
+ }
+
+ TPM_END_AUTH1(ownerAuth, auth);
+
+ goto egress;
abort_egress:
- if(keyAlloced) {
- free_TPM_KEY(outSrk);
- }
+ if(keyAlloced) {
+ free_TPM_KEY(outSrk);
+ }
egress:
- tpmrsa_free(&ek_rsa);
- TPM_AUTH_ERR_CHECK(auth);
- return status;
+ tpmrsa_free(&ek_rsa);
+ TPM_AUTH_ERR_CHECK(auth);
+ return status;
}
TPM_RESULT TPM_DisablePubekRead (
- const TPM_AUTHDATA* ownerAuth,
- TPM_AUTH_SESSION* auth)
+ const TPM_AUTHDATA* ownerAuth,
+ TPM_AUTH_SESSION* auth)
{
- TPM_BEGIN(TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_DisablePubekRead);
- TPM_AUTH_BEGIN();
-
- TPM_AUTH_HASH();
+ TPM_BEGIN_CMD(TPM_ORD_DisablePubekRead);
+ TPM_HASH_IN_BEGIN;
- TPM_AUTH1_GEN(ownerAuth, auth);
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
- TPM_AUTH_VERIFY_BEGIN();
+ TPM_XMIT_AUTH1(ownerAuth, auth);
- TPM_AUTH1_VERIFY(ownerAuth, auth);
+ TPM_END_AUTH1(ownerAuth, auth);
abort_egress:
- TPM_AUTH_ERR_CHECK(auth);
- return status;
+ TPM_AUTH_ERR_CHECK(auth);
+ return status;
}
TPM_RESULT TPM_TerminateHandle(TPM_AUTHHANDLE handle) // in
{
- if(handle == 0) {
- return TPM_SUCCESS;
- }
+ if(handle == 0) {
+ return TPM_SUCCESS;
+ }
- TPM_BEGIN(TPM_TAG_RQU_COMMAND, TPM_ORD_Terminate_Handle);
+ TPM_BEGIN_CMD(TPM_ORD_Terminate_Handle);
- ptr = pack_TPM_AUTHHANDLE(ptr, handle);
+ PACK_IN(TPM_AUTHHANDLE, handle);
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
+ TPM_XMIT_REQ();
+ TPM_END();
- vtpmloginfo(VTPM_LOG_TPM, "Auth Session: 0x%x closed by TPM_TerminateHandle\n", handle);
+ vtpmloginfo(VTPM_LOG_TPM, "Auth Session: 0x%x closed by TPM_TerminateHandle\n", handle);
abort_egress:
- return status;
+ return status;
}
TPM_RESULT TPM_Extend( TPM_PCRINDEX pcrNum, // in
- TPM_DIGEST inDigest, // in
- TPM_PCRVALUE* outDigest) // out
+ TPM_DIGEST* inDigest, // in
+ TPM_PCRVALUE* outDigest) // out
{
- TPM_BEGIN(TPM_TAG_RQU_COMMAND, TPM_ORD_Extend);
+ TPM_BEGIN_CMD(TPM_ORD_Extend);
+
+ PACK_IN(TPM_PCRINDEX, pcrNum);
+ PACK_IN(TPM_DIGEST, inDigest);
- ptr = pack_TPM_PCRINDEX(ptr, pcrNum);
- ptr = pack_TPM_DIGEST(ptr, &inDigest);
+ TPM_XMIT_REQ();
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
+ UNPACK_OUT(TPM_PCRVALUE, outDigest);
- ptr = unpack_TPM_PCRVALUE(ptr, outDigest);
+ TPM_END();
abort_egress:
- return status;
+ return status;
}
-TPM_RESULT TPM_Seal(
- TPM_KEY_HANDLE keyHandle, // in
- UINT32 pcrInfoSize, // in
- TPM_PCR_INFO* pcrInfo, // in
- UINT32 inDataSize, // in
- const BYTE* inData, // in
- TPM_STORED_DATA* sealedData, //out
- const TPM_SECRET* osapSharedSecret, //in
- const TPM_AUTHDATA* sealedDataAuth, //in
- TPM_AUTH_SESSION* pubAuth // in, out
- )
+TPM_RESULT TPM_Reset(TPM_PCR_SELECTION *sel)
{
- int dataAlloced = 0;
- TPM_BEGIN(TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_Seal);
- TPM_AUTH_BEGIN();
-
- TPM_AUTH_HASH();
-
- ptr = pack_TPM_KEY_HANDLE(ptr, keyHandle);
-
- TPM_AUTH_SKIP();
-
- xorEncrypt(osapSharedSecret, &pubAuth->NonceEven,
- sealedDataAuth, ptr,
- NULL, NULL);
- ptr += sizeof(TPM_ENCAUTH);
+ TPM_BEGIN_CMD(TPM_ORD_PCR_Reset);
+ PACK_IN(TPM_PCR_SELECTION, sel);
+ TPM_XMIT_REQ();
+ TPM_END();
+abort_egress:
+ return status;
+}
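+/*
+ * TPM_Seal: the new blob's usage auth (sealedDataAuth) is XOR-encrypted with
+ * the OSAP shared secret before transmission (see xorEncrypt above); pcrInfo
+ * is optional and only packed when pcrInfoSize is non-zero.  On failure any
+ * TPM_STORED_DATA allocated while unpacking the reply is released.
+ */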
- ptr = pack_UINT32(ptr, pcrInfoSize);
- ptr = pack_TPM_PCR_INFO(ptr, pcrInfo);
+TPM_RESULT TPM_Seal(
+ TPM_KEY_HANDLE keyHandle, // in
+ UINT32 pcrInfoSize, // in
+ TPM_PCR_INFO* pcrInfo, // in
+ UINT32 inDataSize, // in
+ const BYTE* inData, // in
+ TPM_STORED_DATA* sealedData, //out
+ const TPM_SECRET* osapSharedSecret, //in
+ const TPM_AUTHDATA* sealedDataAuth, //in
+ TPM_AUTH_SESSION* pubAuth // in, out
+ )
+{
+ memset(sealedData, 0, sizeof(*sealedData));
+ TPM_BEGIN_CMD(TPM_ORD_Seal);
+ PACK_IN(TPM_KEY_HANDLE, keyHandle);
+ TPM_HASH_IN_BEGIN;
+
+ xorEncrypt(osapSharedSecret, &pubAuth->NonceEven, sealedDataAuth, IN_PTR, NULL, NULL);
+ in_pos += sizeof(TPM_ENCAUTH);
+ PACK_IN(UINT32, pcrInfoSize);
+ if (pcrInfoSize)
+ PACK_IN(TPM_PCR_INFO, pcrInfo);
+ PACK_IN(UINT32, inDataSize);
+ PACK_IN(BUFFER, inData, inDataSize);
+
+ TPM_XMIT_AUTH1(osapSharedSecret, pubAuth);
+
+ UNPACK_OUT(TPM_STORED_DATA, sealedData, UNPACK_ALLOC);
+
+ TPM_END_AUTH1(osapSharedSecret, pubAuth);
+
+ abort_egress:
+ if (status)
+ free_TPM_STORED_DATA(sealedData);
+ TPM_AUTH_ERR_CHECK(pubAuth);
+ return status;
+}
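+/*
+ * TPM_Unseal is an AUTH2 command: the first session authorizes use of the
+ * parent key, the second authorizes use of the sealed blob itself; both
+ * sessions are verified against the response by TPM_END_AUTH2.
+ */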
- ptr = pack_UINT32(ptr, inDataSize);
- ptr = pack_BUFFER(ptr, inData, inDataSize);
+TPM_RESULT TPM_Unseal(
+ TPM_KEY_HANDLE parentHandle, // in
+ const TPM_STORED_DATA* sealedData,
+ UINT32* outSize, // out
+ BYTE** out, //out
+ const TPM_AUTHDATA* key_usage_auth, //in
+ const TPM_AUTHDATA* data_usage_auth, //in
+ TPM_AUTH_SESSION* keyAuth, // in, out
+ TPM_AUTH_SESSION* dataAuth // in, out
+ )
+{
+ TPM_BEGIN_CMD(TPM_ORD_Unseal);
- TPM_AUTH_HASH();
+ PACK_IN(TPM_KEY_HANDLE, parentHandle);
- TPM_AUTH1_GEN(osapSharedSecret, pubAuth);
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
- TPM_AUTH_VERIFY_BEGIN();
+ TPM_HASH_IN_BEGIN;
+ PACK_IN(TPM_STORED_DATA, sealedData);
- ptr = unpack_TPM_STORED_DATA(ptr, sealedData, UNPACK_ALLOC);
- dataAlloced = 1;
+ TPM_XMIT_AUTH2(key_usage_auth, keyAuth, data_usage_auth, dataAuth);
- TPM_AUTH_HASH();
+ UNPACK_OUT(UINT32, outSize);
+ UNPACK_OUT(PTR, out, *outSize, UNPACK_ALLOC);
- TPM_AUTH1_VERIFY(osapSharedSecret, pubAuth);
+ TPM_END_AUTH2(key_usage_auth, keyAuth, data_usage_auth, dataAuth);
- goto egress;
abort_egress:
- if(dataAlloced) {
- free_TPM_STORED_DATA(sealedData);
- }
-egress:
- TPM_AUTH_ERR_CHECK(pubAuth);
- return status;
+ TPM_AUTH_ERR_CHECK(keyAuth);
+ TPM_AUTH_ERR_CHECK(dataAuth);
+ return status;
}
-TPM_RESULT TPM_Unseal(
- TPM_KEY_HANDLE parentHandle, // in
- const TPM_STORED_DATA* sealedData,
- UINT32* outSize, // out
- BYTE** out, //out
- const TPM_AUTHDATA* key_usage_auth, //in
- const TPM_AUTHDATA* data_usage_auth, //in
- TPM_AUTH_SESSION* keyAuth, // in, out
- TPM_AUTH_SESSION* dataAuth // in, out
- )
+TPM_RESULT TPM_LoadKey(
+ TPM_KEY_HANDLE parentHandle, //
+ const TPM_KEY* key, //in
+ TPM_HANDLE* keyHandle, // out
+ const TPM_AUTHDATA* usage_auth,
+ TPM_AUTH_SESSION* auth)
{
- TPM_BEGIN(TPM_TAG_RQU_AUTH2_COMMAND, TPM_ORD_Unseal);
- TPM_AUTH_BEGIN();
+ TPM_BEGIN_CMD(TPM_ORD_LoadKey);
+ PACK_IN(TPM_KEY_HANDLE, parentHandle);
- TPM_AUTH_HASH();
+ TPM_HASH_IN_BEGIN;
- ptr = pack_TPM_KEY_HANDLE(ptr, parentHandle);
+ PACK_IN(TPM_KEY, key);
- TPM_AUTH_SKIP();
+ TPM_XMIT_AUTH1(usage_auth, auth);
- ptr = pack_TPM_STORED_DATA(ptr, sealedData);
+ UNPACK_OUT(UINT32, keyHandle);
- TPM_AUTH_HASH();
+ TPM_END_AUTH1(usage_auth, auth);
- TPM_AUTH1_GEN(key_usage_auth, keyAuth);
- TPM_AUTH2_GEN(data_usage_auth, dataAuth);
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
- TPM_AUTH_VERIFY_BEGIN();
+ vtpmloginfo(VTPM_LOG_TPM, "Key Handle: 0x%x opened by TPM_LoadKey\n", *keyHandle);
+abort_egress:
+ TPM_AUTH_ERR_CHECK(auth);
+ return status;
+}
- ptr = unpack_UINT32(ptr, outSize);
- ptr = unpack_ALLOC(ptr, out, *outSize);
+TPM_RESULT TPM_FlushSpecific(TPM_HANDLE handle,
+ TPM_RESOURCE_TYPE rt) {
+ if(handle == 0) {
+ return TPM_SUCCESS;
+ }
+ TPM_BEGIN_CMD(TPM_ORD_FlushSpecific);
- TPM_AUTH_HASH();
+ PACK_IN(TPM_HANDLE, handle);
+ PACK_IN(TPM_RESOURCE_TYPE, rt);
- TPM_AUTH1_VERIFY(key_usage_auth, keyAuth);
- TPM_AUTH2_VERIFY(data_usage_auth, dataAuth);
+ TPM_XMIT_REQ();
+ TPM_END();
abort_egress:
- TPM_AUTH_ERR_CHECK(keyAuth);
- TPM_AUTH_ERR_CHECK(dataAuth);
- return status;
+ return status;
}
-TPM_RESULT TPM_Bind(
- const TPM_KEY* key,
- const BYTE* in,
- UINT32 ilen,
- BYTE* out)
+TPM_RESULT TPM_GetRandom( UINT32* bytesRequested, // in, out
+ BYTE* randomBytes) // out
{
- TPM_RESULT status;
- tpmrsa_context rsa = TPMRSA_CTX_INIT;
- TPM_BOUND_DATA boundData;
- uint8_t plain[TCPA_MAX_BUFFER_LENGTH];
- BYTE* ptr = plain;
-
- vtpmloginfo(VTPM_LOG_TPM, "%s\n", __func__);
-
- tpmrsa_set_pubkey(&rsa,
- key->pubKey.key, key->pubKey.keyLength,
- key->algorithmParms.parms.rsa.exponent,
- key->algorithmParms.parms.rsa.exponentSize);
-
- // Fill boundData's accessory information
- boundData.ver = TPM_STRUCT_VER_1_1;
- boundData.payload = TPM_PT_BIND;
- boundData.payloadData = (BYTE*)in;
-
- //marshall the bound data object
- ptr = pack_TPM_BOUND_DATA(ptr, &boundData, ilen);
-
- // Encrypt the data
- TPMTRYRETURN(tpmrsa_pub_encrypt_oaep(&rsa,
- ctr_drbg_random, &vtpm_globals.ctr_drbg,
- ptr - plain,
- plain,
- out));
+ UINT32 req_len = *bytesRequested;
+ TPM_BEGIN_CMD(TPM_ORD_GetRandom);
+ PACK_IN(UINT32, req_len);
-abort_egress:
- tpmrsa_free(&rsa);
- return status;
+ TPM_XMIT_REQ();
+
+ UNPACK_OUT(UINT32, bytesRequested);
+ if (*bytesRequested > req_len)
+ return TPM_FAIL;
+ UNPACK_OUT(BUFFER, randomBytes, *bytesRequested);
+ TPM_END();
+abort_egress:
+ return status;
}
-TPM_RESULT TPM_UnBind(
- TPM_KEY_HANDLE keyHandle, // in
- UINT32 ilen, //in
- const BYTE* in, //
- UINT32* olen, //
- BYTE* out, //out
- const TPM_AUTHDATA* usage_auth,
- TPM_AUTH_SESSION* auth //in, out
- )
+
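+/*
+ * TPM_ReadPubek: the TPM returns the public EK followed by
+ * SHA1(pubEK || antiReplay).  The digest is recomputed locally over the raw
+ * response bytes (kptr..OUT_PTR) and compared to detect a tampered reply.
+ */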
+TPM_RESULT TPM_ReadPubek(
+ TPM_PUBKEY* pubEK //out
+ )
{
- TPM_BEGIN(TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_UnBind);
- TPM_AUTH_BEGIN();
+ TPM_DIGEST antiReplay;
+ BYTE* kptr = NULL;
+ BYTE digest[TPM_DIGEST_SIZE];
+ sha1_context ctx;
- TPM_AUTH_HASH();
+ vtpmmgr_rand(antiReplay.digest, TPM_DIGEST_SIZE);
- ptr = pack_TPM_KEY_HANDLE(ptr, keyHandle);
+ TPM_BEGIN_CMD(TPM_ORD_ReadPubek);
- TPM_AUTH_SKIP();
+ PACK_IN(TPM_DIGEST, &antiReplay);
- ptr = pack_UINT32(ptr, ilen);
- ptr = pack_BUFFER(ptr, in, ilen);
+ TPM_XMIT_REQ();
- TPM_AUTH_HASH();
+ //unpack and allocate the key
+ kptr = OUT_PTR;
+ UNPACK_OUT(TPM_PUBKEY, pubEK, UNPACK_ALLOC);
- TPM_AUTH1_GEN(usage_auth, auth);
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
- TPM_AUTH_VERIFY_BEGIN();
+ //Verify the checksum
+ sha1_starts(&ctx);
+ sha1_update(&ctx, kptr, OUT_PTR - kptr);
+ sha1_update(&ctx, antiReplay.digest, TPM_DIGEST_SIZE);
+ sha1_finish(&ctx, digest);
- ptr = unpack_UINT32(ptr, olen);
- if(*olen > ilen) {
- vtpmlogerror(VTPM_LOG_TPM, "Output length < input length!\n");
- status = TPM_IOERROR;
- goto abort_egress;
- }
- ptr = unpack_BUFFER(ptr, out, *olen);
+ UNPACK_OUT(TPM_DIGEST, &antiReplay);
- TPM_AUTH_HASH();
+ TPM_END();
- TPM_AUTH1_VERIFY(usage_auth, auth);
+	//Compare our locally computed digest with the checksum returned by the TPM
+ if(memcmp(digest, antiReplay.digest, TPM_DIGEST_SIZE)) {
+ vtpmlogerror(VTPM_LOG_TPM, "TPM_ReadPubek: Checksum returned by TPM was invalid!\n");
+ status = TPM_FAIL;
+ goto abort_egress;
+ }
+ goto egress;
abort_egress:
+ if(kptr != NULL) { //If we unpacked the pubEK, we have to free it
+ free_TPM_PUBKEY(pubEK);
+ }
egress:
- TPM_AUTH_ERR_CHECK(auth);
- return status;
+ return status;
}
-TPM_RESULT TPM_CreateWrapKey(
- TPM_KEY_HANDLE hWrappingKey, // in
- const TPM_AUTHDATA* osapSharedSecret,
- const TPM_AUTHDATA* dataUsageAuth, //in
- const TPM_AUTHDATA* dataMigrationAuth, //in
- TPM_KEY* key, //in, out
- TPM_AUTH_SESSION* pAuth) // in, out
+TPM_RESULT TPM_PCR_Read(UINT32 pcr, TPM_DIGEST *value)
{
- int keyAlloced = 0;
- TPM_BEGIN(TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_CreateWrapKey);
- TPM_AUTH_BEGIN();
-
- TPM_AUTH_HASH();
-
- ptr = pack_TPM_KEY_HANDLE(ptr, hWrappingKey);
-
- TPM_AUTH_SKIP();
-
- //Encrypted auths
- xorEncrypt(osapSharedSecret, &pAuth->NonceEven,
- dataUsageAuth, ptr,
- dataMigrationAuth, ptr + sizeof(TPM_ENCAUTH));
- ptr += sizeof(TPM_ENCAUTH) * 2;
-
- ptr = pack_TPM_KEY(ptr, key);
-
- TPM_AUTH_HASH();
-
- TPM_AUTH1_GEN(osapSharedSecret, pAuth);
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
- TPM_AUTH_VERIFY_BEGIN();
-
- keyAlloced = 1;
- ptr = unpack_TPM_KEY(ptr, key, UNPACK_ALLOC);
-
- TPM_AUTH_HASH();
-
- TPM_AUTH1_VERIFY(osapSharedSecret, pAuth);
-
- goto egress;
+ TPM_BEGIN_CMD(TPM_ORD_PcrRead);
+ PACK_IN(UINT32, pcr);
+ TPM_XMIT_REQ();
+ UNPACK_OUT(TPM_DIGEST, value);
+ TPM_END();
abort_egress:
- if(keyAlloced) {
- free_TPM_KEY(key);
- }
-egress:
- TPM_AUTH_ERR_CHECK(pAuth);
- return status;
+ return status;
}
-TPM_RESULT TPM_LoadKey(
- TPM_KEY_HANDLE parentHandle, //
- const TPM_KEY* key, //in
- TPM_HANDLE* keyHandle, // out
- const TPM_AUTHDATA* usage_auth,
- TPM_AUTH_SESSION* auth)
+TPM_RESULT TPM_SaveState(void)
{
- TPM_BEGIN(TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_LoadKey);
- TPM_AUTH_BEGIN();
-
- TPM_AUTH_HASH();
-
- ptr = pack_TPM_KEY_HANDLE(ptr, parentHandle);
-
- TPM_AUTH_SKIP();
-
- ptr = pack_TPM_KEY(ptr, key);
-
- TPM_AUTH_HASH();
-
- TPM_AUTH1_GEN(usage_auth, auth);
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
- TPM_AUTH_VERIFY_BEGIN();
-
- ptr = unpack_UINT32(ptr, keyHandle);
-
- TPM_AUTH_HASH();
-
- TPM_AUTH1_VERIFY(usage_auth, auth);
-
- vtpmloginfo(VTPM_LOG_TPM, "Key Handle: 0x%x opened by TPM_LoadKey\n", *keyHandle);
+ TPM_BEGIN_CMD(TPM_ORD_SaveState);
+ TPM_XMIT_REQ();
+ TPM_END();
abort_egress:
- TPM_AUTH_ERR_CHECK(auth);
- return status;
+ return status;
}
-TPM_RESULT TPM_EvictKey( TPM_KEY_HANDLE hKey) // in
+TPM_RESULT TPM_GetCapability(
+ TPM_CAPABILITY_AREA capArea,
+ UINT32 subCapSize,
+ const BYTE* subCap,
+ UINT32* respSize,
+ BYTE** resp)
{
- if(hKey == 0) {
- return TPM_SUCCESS;
- }
+ TPM_BEGIN_CMD(TPM_ORD_GetCapability);
- TPM_BEGIN(TPM_TAG_RQU_COMMAND, TPM_ORD_EvictKey);
+ PACK_IN(TPM_CAPABILITY_AREA, capArea);
+ PACK_IN(UINT32, subCapSize);
+ PACK_IN(BUFFER, subCap, subCapSize);
- ptr = pack_TPM_KEY_HANDLE(ptr, hKey);
+ TPM_XMIT_REQ();
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
+ UNPACK_OUT(UINT32, respSize);
+ UNPACK_OUT(PTR, resp, *respSize, UNPACK_ALLOC);
- vtpmloginfo(VTPM_LOG_TPM, "Key handle: 0x%x closed by TPM_EvictKey\n", hKey);
+ TPM_END();
abort_egress:
- return status;
+ return status;
}
-TPM_RESULT TPM_FlushSpecific(TPM_HANDLE handle,
- TPM_RESOURCE_TYPE rt) {
- if(handle == 0) {
- return TPM_SUCCESS;
- }
-
- TPM_BEGIN(TPM_TAG_RQU_COMMAND, TPM_ORD_FlushSpecific);
-
- ptr = pack_TPM_HANDLE(ptr, handle);
- ptr = pack_TPM_RESOURCE_TYPE(ptr, rt);
-
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
+TPM_RESULT TPM_CreateEndorsementKeyPair(
+ const TPM_KEY_PARMS* keyInfo,
+ TPM_PUBKEY* pubEK)
+{
+ BYTE* kptr = NULL;
+ sha1_context ctx;
+ TPM_DIGEST checksum;
+ TPM_DIGEST hash;
+ TPM_NONCE antiReplay;
+ TPM_BEGIN_CMD(TPM_ORD_CreateEndorsementKeyPair);
-abort_egress:
- return status;
-}
+ //Make anti replay nonce
+ vtpmmgr_rand(antiReplay.nonce, sizeof(antiReplay.nonce));
-TPM_RESULT TPM_GetRandom( UINT32* bytesRequested, // in, out
- BYTE* randomBytes) // out
-{
- TPM_BEGIN(TPM_TAG_RQU_COMMAND, TPM_ORD_GetRandom);
+ PACK_IN(TPM_NONCE, &antiReplay);
+ PACK_IN(TPM_KEY_PARMS, keyInfo);
- // check input params
- if (bytesRequested == NULL || randomBytes == NULL){
- return TPM_BAD_PARAMETER;
- }
+ TPM_XMIT_REQ();
- ptr = pack_UINT32(ptr, *bytesRequested);
+ kptr = OUT_PTR;
+ UNPACK_OUT(TPM_PUBKEY, pubEK, UNPACK_ALLOC);
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
+ /* Hash the pub key blob */
+ sha1_starts(&ctx);
+ sha1_update(&ctx, kptr, OUT_PTR - kptr);
+ sha1_update(&ctx, antiReplay.nonce, sizeof(antiReplay.nonce));
+ sha1_finish(&ctx, hash.digest);
- ptr = unpack_UINT32(ptr, bytesRequested);
- ptr = unpack_BUFFER(ptr, randomBytes, *bytesRequested);
+ UNPACK_OUT(TPM_DIGEST, &checksum);
-abort_egress:
- return status;
-}
+ TPM_END();
+ if (memcmp(checksum.digest, hash.digest, TPM_DIGEST_SIZE)) {
+		vtpmloginfo(VTPM_LOG_VTPM, "TPM_CreateEndorsementKey: Checksum verification failed!\n");
+ status = TPM_FAIL;
+ goto abort_egress;
+ }
-TPM_RESULT TPM_ReadPubek(
- TPM_PUBKEY* pubEK //out
- )
-{
- BYTE* antiReplay = NULL;
- BYTE* kptr = NULL;
- BYTE digest[TPM_DIGEST_SIZE];
- sha1_context ctx;
-
- TPM_BEGIN(TPM_TAG_RQU_COMMAND, TPM_ORD_ReadPubek);
-
- //antiReplay nonce
- vtpmmgr_rand(ptr, TPM_DIGEST_SIZE);
- antiReplay = ptr;
- ptr += TPM_DIGEST_SIZE;
-
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
-
- //unpack and allocate the key
- kptr = ptr;
- ptr = unpack_TPM_PUBKEY(ptr, pubEK, UNPACK_ALLOC);
-
- //Verify the checksum
- sha1_starts(&ctx);
- sha1_update(&ctx, kptr, ptr - kptr);
- sha1_update(&ctx, antiReplay, TPM_DIGEST_SIZE);
- sha1_finish(&ctx, digest);
-
- //ptr points to the checksum computed by TPM
- if(memcmp(digest, ptr, TPM_DIGEST_SIZE)) {
- vtpmlogerror(VTPM_LOG_TPM, "TPM_ReadPubek: Checksum returned by TPM was invalid!\n");
- status = TPM_FAIL;
- goto abort_egress;
- }
-
- goto egress;
+ goto egress;
abort_egress:
- if(kptr != NULL) { //If we unpacked the pubEK, we have to free it
- free_TPM_PUBKEY(pubEK);
- }
+ if(kptr) {
+ free_TPM_PUBKEY(pubEK);
+ }
egress:
- return status;
+ return status;
}
-
-TPM_RESULT TPM_SaveState(void)
+TPM_RESULT TPM_MakeIdentity(
+ const TPM_AUTHDATA* identityAuth, // in
+ const TPM_AUTHDATA* privCADigest, // in
+ const TPM_KEY* kinfo, // in
+ const TPM_AUTHDATA* srk_auth, // in
+ const TPM_AUTHDATA* own_auth, // in
+ TPM_AUTH_SESSION* srkAuth, // in,out
+ TPM_AUTH_SESSION* ownAuth, // in,out
+ TPM_KEY* key, // out
+ UINT32* identityBindingSize, // out
+ BYTE** identityBinding) // out
{
- TPM_BEGIN(TPM_TAG_RQU_COMMAND, TPM_ORD_SaveState);
+ TPM_BEGIN_CMD(TPM_ORD_MakeIdentity);
+ TPM_HASH_IN_BEGIN;
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
+ xorEncrypt(own_auth, &ownAuth->NonceEven, identityAuth, IN_PTR, NULL, NULL);
+ in_pos += sizeof(TPM_ENCAUTH);
-abort_egress:
- return status;
-}
-
-TPM_RESULT TPM_GetCapability(
- TPM_CAPABILITY_AREA capArea,
- UINT32 subCapSize,
- const BYTE* subCap,
- UINT32* respSize,
- BYTE** resp)
-{
- TPM_BEGIN(TPM_TAG_RQU_COMMAND, TPM_ORD_GetCapability);
+ PACK_IN(TPM_AUTHDATA, privCADigest);
+ PACK_IN(TPM_KEY, kinfo);
- ptr = pack_TPM_CAPABILITY_AREA(ptr, capArea);
- ptr = pack_UINT32(ptr, subCapSize);
- ptr = pack_BUFFER(ptr, subCap, subCapSize);
+ TPM_XMIT_AUTH2(srk_auth, srkAuth, own_auth, ownAuth);
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
+ UNPACK_OUT(TPM_KEY, key, UNPACK_ALLOC);
+ UNPACK_OUT(UINT32, identityBindingSize);
+ UNPACK_OUT(PTR, identityBinding, *identityBindingSize, UNPACK_ALLOC);
- ptr = unpack_UINT32(ptr, respSize);
- ptr = unpack_ALLOC(ptr, resp, *respSize);
+ TPM_END_AUTH2(srk_auth, srkAuth, own_auth, ownAuth);
abort_egress:
- return status;
+ TPM_AUTH_ERR_CHECK(srkAuth);
+ TPM_AUTH_ERR_CHECK(ownAuth);
+ return status;
}
-TPM_RESULT TPM_CreateEndorsementKeyPair(
- const TPM_KEY_PARMS* keyInfo,
- TPM_PUBKEY* pubEK)
+TPM_RESULT TPM_ActivateIdentity(
+ TPM_KEY_HANDLE aikHandle, // in
+ BYTE* blob, // in
+ UINT32 blobSize, // in
+ const TPM_AUTHDATA* aik_auth, // in
+ const TPM_AUTHDATA* owner_auth, // in
+ TPM_AUTH_SESSION* aikAuth, // in,out
+ TPM_AUTH_SESSION* ownAuth, // in,out
+ TPM_SYMMETRIC_KEY* symKey) // out
{
- BYTE* kptr = NULL;
- sha1_context ctx;
- TPM_DIGEST checksum;
- TPM_DIGEST hash;
- TPM_NONCE antiReplay;
- TPM_BEGIN(TPM_TAG_RQU_COMMAND, TPM_ORD_CreateEndorsementKeyPair);
+ TPM_BEGIN_CMD(TPM_ORD_ActivateIdentity);
+ PACK_IN(TPM_KEY_HANDLE, aikHandle);
+ TPM_HASH_IN_BEGIN;
+ PACK_IN(UINT32, blobSize);
+ PACK_IN(BUFFER, blob, blobSize);
+
+ TPM_XMIT_AUTH2(aik_auth, aikAuth, owner_auth, ownAuth);
- //Make anti replay nonce
- vtpmmgr_rand(antiReplay.nonce, sizeof(antiReplay.nonce));
+ UNPACK_OUT(TPM_SYMMETRIC_KEY, symKey, UNPACK_ALLOC);
- ptr = pack_TPM_NONCE(ptr, &antiReplay);
- ptr = pack_TPM_KEY_PARMS(ptr, keyInfo);
+ TPM_END_AUTH2(aik_auth, aikAuth, owner_auth, ownAuth);
- TPM_TRANSMIT();
- TPM_UNPACK_VERIFY();
-
- sha1_starts(&ctx);
+abort_egress:
+ TPM_AUTH_ERR_CHECK(aikAuth);
+ TPM_AUTH_ERR_CHECK(ownAuth);
+ return status;
+}
- kptr = ptr;
- ptr = unpack_TPM_PUBKEY(ptr, pubEK, UNPACK_ALLOC);
+TPM_RESULT TPM_Quote(
+ TPM_KEY_HANDLE keyh, // in
+ const TPM_NONCE* data, // in
+ const TPM_PCR_SELECTION *pcrSelect, // in
+ const TPM_AUTHDATA* auth, // in
+ TPM_AUTH_SESSION* oiap, // in,out
+ TPM_PCR_COMPOSITE *pcrs, // out
+ BYTE** sig, // out
+ UINT32* sigSize) // out
+{
+ TPM_BEGIN_CMD(TPM_ORD_Quote);
+ PACK_IN(TPM_KEY_HANDLE, keyh);
+ TPM_HASH_IN_BEGIN;
+ PACK_IN(TPM_NONCE, data);
+ PACK_IN(TPM_PCR_SELECTION, pcrSelect);
- /* Hash the pub key blob */
- sha1_update(&ctx, kptr, ptr - kptr);
- ptr = unpack_TPM_DIGEST(ptr, &checksum);
+ TPM_XMIT_AUTH1(auth, oiap);
- sha1_update(&ctx, antiReplay.nonce, sizeof(antiReplay.nonce));
+ UNPACK_OUT(TPM_PCR_COMPOSITE, pcrs, UNPACK_ALLOC);
+ UNPACK_OUT(UINT32, sigSize);
+ UNPACK_OUT(PTR, sig, *sigSize, UNPACK_ALLOC);
- sha1_finish(&ctx, hash.digest);
- if(memcmp(checksum.digest, hash.digest, TPM_DIGEST_SIZE)) {
- vtpmloginfo(VTPM_LOG_VTPM, "TPM_CreateEndorsementKey: Checkum verification failed!\n");
- status = TPM_FAIL;
- goto abort_egress;
- }
+ TPM_END_AUTH1(auth, oiap);
- goto egress;
abort_egress:
- if(kptr) {
- free_TPM_PUBKEY(pubEK);
- }
-egress:
- return status;
+ TPM_AUTH_ERR_CHECK(oiap);
+ return status;
}
TPM_RESULT TPM_TransmitData(
- BYTE* in,
- UINT32 insize,
- BYTE* out,
- UINT32* outsize) {
- TPM_RESULT status = TPM_SUCCESS;
-
- UINT32 i;
- vtpmloginfo(VTPM_LOG_TXDATA, "Sending buffer = 0x");
- for(i = 0 ; i < insize ; i++)
- vtpmloginfomore(VTPM_LOG_TXDATA, "%2.2x ", in[i]);
-
- vtpmloginfomore(VTPM_LOG_TXDATA, "\n");
-
- ssize_t size = 0;
-
- // send the request
- size = write (vtpm_globals.tpm_fd, in, insize);
- if (size < 0) {
- vtpmlogerror(VTPM_LOG_TXDATA, "write() failed : %s\n", strerror(errno));
- ERRORDIE (TPM_IOERROR);
- }
- else if ((UINT32) size < insize) {
- vtpmlogerror(VTPM_LOG_TXDATA, "Wrote %d instead of %d bytes!\n", (int) size, insize);
- ERRORDIE (TPM_IOERROR);
- }
-
- // read the response
- size = read (vtpm_globals.tpm_fd, out, *outsize);
- if (size < 0) {
- vtpmlogerror(VTPM_LOG_TXDATA, "read() failed : %s\n", strerror(errno));
- ERRORDIE (TPM_IOERROR);
- }
-
- vtpmloginfo(VTPM_LOG_TXDATA, "Receiving buffer = 0x");
- for(i = 0 ; i < size ; i++)
- vtpmloginfomore(VTPM_LOG_TXDATA, "%2.2x ", out[i]);
-
- vtpmloginfomore(VTPM_LOG_TXDATA, "\n");
-
- *outsize = size;
- goto egress;
+ BYTE* in,
+ UINT32 insize,
+ BYTE* out,
+ UINT32* outsize) {
+ TPM_RESULT status = TPM_SUCCESS;
+
+ UINT32 i;
+ vtpmloginfo(VTPM_LOG_TXDATA, "Sending buffer = 0x");
+ for(i = 0 ; i < insize ; i++)
+ vtpmloginfomore(VTPM_LOG_TXDATA, "%2.2x ", in[i]);
+
+ vtpmloginfomore(VTPM_LOG_TXDATA, "\n");
+
+ ssize_t size = 0;
+
+ // send the request
+ size = write (vtpm_globals.tpm_fd, in, insize);
+ if (size < 0) {
+ vtpmlogerror(VTPM_LOG_TXDATA, "write() failed : %s\n", strerror(errno));
+ ERRORDIE (TPM_IOERROR);
+ }
+ else if ((UINT32) size < insize) {
+ vtpmlogerror(VTPM_LOG_TXDATA, "Wrote %d instead of %d bytes!\n", (int) size, insize);
+ ERRORDIE (TPM_IOERROR);
+ }
+
+ // read the response
+ size = read (vtpm_globals.tpm_fd, out, *outsize);
+ if (size < 0) {
+ vtpmlogerror(VTPM_LOG_TXDATA, "read() failed : %s\n", strerror(errno));
+ ERRORDIE (TPM_IOERROR);
+ }
+
+ vtpmloginfo(VTPM_LOG_TXDATA, "Receiving buffer = 0x");
+ for(i = 0 ; i < size ; i++)
+ vtpmloginfomore(VTPM_LOG_TXDATA, "%2.2x ", out[i]);
+
+ vtpmloginfomore(VTPM_LOG_TXDATA, "\n");
+
+ *outsize = size;
+ goto egress;
abort_egress:
egress:
- return status;
+ return status;
}
// TPM Mandatory
TPM_RESULT TPM_Extend ( TPM_PCRINDEX pcrNum, // in
- TPM_DIGEST inDigest, // in
+ TPM_DIGEST* inDigest, // in
TPM_PCRVALUE* outDigest // out
);
-TPM_RESULT TPM_PcrRead ( TPM_PCRINDEX pcrNum, // in
- TPM_PCRVALUE* outDigest // out
- );
-
-TPM_RESULT TPM_Quote ( TCS_KEY_HANDLE keyHandle, // in
- TPM_NONCE antiReplay, // in
- UINT32* PcrDataSize, // in, out
- BYTE** PcrData, // in, out
- TPM_AUTH_SESSION* privAuth, // in, out
- UINT32* sigSize, // out
- BYTE** sig // out
- );
+TPM_RESULT TPM_Reset(TPM_PCR_SELECTION *sel);
TPM_RESULT TPM_Seal(
TCS_KEY_HANDLE keyHandle, // in
TPM_AUTH_SESSION* dataAuth // in, out
);
-TPM_RESULT TPM_DirWriteAuth ( TPM_DIRINDEX dirIndex, // in
- TPM_DIRVALUE newContents, // in
- TPM_AUTH_SESSION* ownerAuth // in, out
- );
-
-TPM_RESULT TPM_DirRead ( TPM_DIRINDEX dirIndex, // in
- TPM_DIRVALUE* dirValue // out
- );
-
-TPM_RESULT TPM_Bind(
- const TPM_KEY* key, //in
- const BYTE* in, //in
- UINT32 ilen, //in
- BYTE* out //out, must be at least cipher block size
- );
-
-TPM_RESULT TPM_UnBind (
- TCS_KEY_HANDLE keyHandle, // in
- UINT32 ilen, //in
- const BYTE* in, //
- UINT32* outDataSize, // out
- BYTE* outData, //out
- const TPM_AUTHDATA* usage_auth,
- TPM_AUTH_SESSION* auth //in, out
- );
-
-TPM_RESULT TPM_CreateWrapKey (
- TCS_KEY_HANDLE hWrappingKey, // in
- const TPM_AUTHDATA* osapSharedSecret,
- const TPM_AUTHDATA* dataUsageAuth, //in
- const TPM_AUTHDATA* dataMigrationAuth, //in
- TPM_KEY* key, //in
- TPM_AUTH_SESSION* pAuth // in, out
- );
-
TPM_RESULT TPM_LoadKey (
TPM_KEY_HANDLE parentHandle, //
const TPM_KEY* key, //in
TPM_AUTH_SESSION* auth
);
-TPM_RESULT TPM_GetPubKey ( TCS_KEY_HANDLE hKey, // in
- TPM_AUTH_SESSION* pAuth, // in, out
- UINT32* pcPubKeySize, // out
- BYTE** prgbPubKey // out
- );
-
-TPM_RESULT TPM_EvictKey ( TCS_KEY_HANDLE hKey // in
- );
-
TPM_RESULT TPM_FlushSpecific(TPM_HANDLE handle, //in
TPM_RESOURCE_TYPE rt //in
);
-TPM_RESULT TPM_Sign ( TCS_KEY_HANDLE keyHandle, // in
- UINT32 areaToSignSize, // in
- BYTE* areaToSign, // in
- TPM_AUTH_SESSION* privAuth, // in, out
- UINT32* sigSize, // out
- BYTE** sig // out
- );
-
TPM_RESULT TPM_GetRandom ( UINT32* bytesRequested, // in, out
BYTE* randomBytes // out
);
-TPM_RESULT TPM_StirRandom ( UINT32 inDataSize, // in
- BYTE* inData // in
- );
-
TPM_RESULT TPM_ReadPubek (
TPM_PUBKEY* pubEK //out
);
UINT32* respSize,
BYTE** resp);
+TPM_RESULT TPM_PCR_Read(UINT32 pcr, TPM_DIGEST *value);
TPM_RESULT TPM_SaveState(void);
TPM_RESULT TPM_CreateEndorsementKeyPair(
const TPM_KEY_PARMS* keyInfo,
TPM_PUBKEY* pubEK);
+TPM_RESULT TPM_MakeIdentity(
+ const TPM_AUTHDATA* identityAuth, // in
+ const TPM_AUTHDATA* privCADigest, // in
+ const TPM_KEY* kinfo, // in
+ const TPM_AUTHDATA* srk_auth, // in
+ const TPM_AUTHDATA* owner_auth, // in
+ TPM_AUTH_SESSION* srkAuth, // in,out
+ TPM_AUTH_SESSION* ownAuth, // in,out
+ TPM_KEY* key, // out
+ UINT32* identityBindingSize, // out
+ BYTE** identityBinding); // out
+
+TPM_RESULT TPM_ActivateIdentity(
+ TPM_KEY_HANDLE aikHandle, // in
+ BYTE* blob, // in
+ UINT32 blobSize, // in
+ const TPM_AUTHDATA* aik_auth, // in
+ const TPM_AUTHDATA* owner_auth, // in
+ TPM_AUTH_SESSION* aikAuth, // in,out
+ TPM_AUTH_SESSION* ownAuth, // in,out
+ TPM_SYMMETRIC_KEY* symKey); // out
+
+TPM_RESULT TPM_Quote(
+ TPM_KEY_HANDLE keyh, // in
+ const TPM_NONCE* data, // in
+ const TPM_PCR_SELECTION *pcrSelect, // in
+ const TPM_AUTHDATA* auth, // in
+ TPM_AUTH_SESSION* oiap, // in,out
+ TPM_PCR_COMPOSITE *pcrs, // out
+ BYTE** sig, // out
+ UINT32* sigSize); // out
+
TPM_RESULT TPM_TransmitData(
BYTE* in,
UINT32 insize,
return TPM_SUCCESS;
}
+static const unsigned char rsa_der_header[] = {
+ 0x00, 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03, 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14,
+};
+
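+/*
+ * Verify a PKCS#1 v1.5 signature over a SHA-1 digest: after the raw RSA
+ * public operation the block must be 00 01 FF..FF 00 || DigestInfo(SHA-1) ||
+ * 20-byte hash.  The fixed offsets (220/236) assume a 2048-bit modulus,
+ * matching the 256-byte SAA keys used by the manager.
+ */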
+TPM_RESULT tpmrsa_sigcheck(tpmrsa_context *ctx, const unsigned char *input, const unsigned char *sha1)
+{
+ unsigned char *tmp = alloca(ctx->len);
+ TPM_RESULT rv;
+ int i;
+ rv = tpmrsa_public(ctx, input, tmp);
+ if (rv)
+ return rv;
+ if (tmp[0] != 0 || tmp[1] != 1)
+ return TPM_INAPPROPRIATE_SIG;
+ for(i=2; i < 220; i++) {
+ if (tmp[i] != 0xFF)
+ return TPM_INAPPROPRIATE_SIG;
+ }
+ if (memcmp(tmp + 220, rsa_der_header, sizeof(rsa_der_header)))
+ return TPM_INAPPROPRIATE_SIG;
+ if (memcmp(tmp + 236, sha1, 20))
+ return TPM_DECRYPT_ERROR;
+ return TPM_SUCCESS;
+}
+
static void mgf_mask( unsigned char *dst, int dlen, unsigned char *src, int slen)
{
unsigned char mask[HASH_LEN];
const unsigned char* exponent,
int explen);
+/* Check an RSA signature */
+TPM_RESULT tpmrsa_sigcheck(tpmrsa_context *ctx, const unsigned char *input, const unsigned char *sha1);
+
/* Do rsa public crypto */
TPM_RESULT tpmrsa_pub_encrypt_oaep( tpmrsa_context *ctx,
int (*f_rng)(void *, unsigned char *, size_t),
#include <inttypes.h>
#include <string.h>
#include <stdlib.h>
+#include <mini-os/console.h>
+#include <polarssl/sha1.h>
+#include <polarssl/sha2.h>
#include "marshal.h"
#include "log.h"
-#include "vtpm_storage.h"
+#include "vtpm_disk.h"
#include "vtpmmgr.h"
#include "tpm.h"
+#include "tpmrsa.h"
#include "tcg.h"
+#include "mgmt_authority.h"
+#include "disk_crypto.h"
-static TPM_RESULT vtpmmgr_SaveHashKey(
- const uuid_t uuid,
- tpmcmd_t* tpmcmd)
+static void gen_random_uuid(uuid_t uuid)
{
- TPM_RESULT status = TPM_SUCCESS;
+ do_random(uuid, 16);
+ // make the 128-bit random number a valid UUID (122 bits remain)
+ uuid[6] = 0x40 | (uuid[6] & 0x0F);
+ uuid[8] = 0x80 | (uuid[8] & 0x3F);
+}
+
+static TPM_RESULT find_vtpm_khash(int domid, struct tpm_opaque *opq)
+{
+ // TODO getting the build hashes requires a domain builder to report them
+ memset(opq->kern_hash, 0, sizeof(opq->kern_hash));
+ return TPM_SUCCESS;
+}
+
+static TPM_RESULT find_vtpm_verified(int domid, struct tpm_opaque *opq)
+{
+ TPM_RESULT rc;
+ int i;
+ if (opq->vtpm)
+ return TPM_SUCCESS;
+
+ rc = find_vtpm(&opq->group, &opq->vtpm, *opq->uuid);
+ if (rc)
+ return TPM_BAD_PARAMETER;
- if(tpmcmd->req_len != VTPM_COMMAND_HEADER_SIZE + HASHKEYSZ) {
- vtpmlogerror(VTPM_LOG_VTPM, "VTPM_ORD_SAVEHASHKEY hashkey too short!\n");
- status = TPM_BAD_PARAMETER;
- goto abort_egress;
- }
+ if (opq->vtpm->flags & VTPM_FLAG_OPEN) {
+ printk("Attempted to open vTPM twice!\n");
+ opq->vtpm = NULL;
+ opq->group = NULL;
+ return TPM_DISABLED;
+ }
- /* Do the command */
- TPMTRYRETURN(vtpm_storage_save_hashkey(uuid, tpmcmd->req + VTPM_COMMAND_HEADER_SIZE));
+ rc = find_vtpm_khash(domid, opq);
+ if (rc)
+ return rc;
+
+ for(i=0; i < be32_native(opq->group->seal_bits.nr_kerns); i++) {
+ if (!memcmp(opq->group->seal_bits.kernels[i].bits, opq->kern_hash, 20)) {
+ opq->vtpm->flags |= VTPM_FLAG_OPEN;
+ return TPM_SUCCESS;
+ }
+ }
+ printk("Unauthorized vTPM kernel image used!\n");
+ return TPM_DISABLED;
+}
+
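+/*
+ * VTPM_ORD_SAVEHASHKEY: store the vTPM's encryption key + state hash blob.
+ * Blobs between 52 and 64 bytes are accepted and zero-padded to the fixed
+ * 64-byte slot; LoadHashKey below returns the short 52-byte form when the
+ * padding is still all zero, presumably so older clients that expect the
+ * original 52-byte blob keep working.
+ */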
+static TPM_RESULT vtpmmgr_SaveHashKey(struct tpm_opaque *opq, tpmcmd_t* tpmcmd)
+{
+ TPM_RESULT status = TPM_SUCCESS;
+ int rc = 0;
+
+ size_t bufsize = tpmcmd->req_len - VTPM_COMMAND_HEADER_SIZE;
+ const void *buf = tpmcmd->req + VTPM_COMMAND_HEADER_SIZE;
+
+ if (bufsize < 52) {
+ vtpmlogerror(VTPM_LOG_VTPM, "VTPM_ORD_SAVEHASHKEY hashkey too short!\n");
+ status = TPM_BAD_PARAMETER;
+ goto abort_egress;
+ }
+ if (bufsize > 64) {
+ vtpmlogerror(VTPM_LOG_VTPM, "VTPM_ORD_SAVEHASHKEY hashkey too long!\n");
+ status = TPM_BAD_PARAMETER;
+ goto abort_egress;
+ }
+
+ vtpmloginfo(VTPM_LOG_VTPM, "vtpmmgr_SaveHashKey\n");
+ status = find_vtpm_verified(tpmcmd->domid, opq);
+
+ // auto-create vTPMs in group0 when saving a new UUID
+ // TODO restrict to certain UUIDs (such as all-zero)
+ // this is not done yet to simplify use of the TPM Manager
+ if (status == TPM_BAD_PARAMETER) {
+ opq->group = g_mgr->groups[0].v;
+ rc = create_vtpm(opq->group, &opq->vtpm, *opq->uuid);
+ if (rc) {
+ status = TPM_BAD_PARAMETER;
+ goto abort_egress;
+ }
+ if (opq->group->nr_vtpms == 1)
+ opq->vtpm->flags = VTPM_FLAG_ADMIN;
+ printk("SaveHashKey with unknown UUID="UUID_FMT" - creating in auth0 (f=%d)\n",
+ UUID_BYTES((*opq->uuid)), opq->vtpm->flags);
+ status = TPM_SUCCESS;
+ }
+ if (status)
+ goto abort_egress;
+
+ memcpy(opq->vtpm->data, buf, bufsize);
+ memset(opq->vtpm->data + bufsize, 0, 64 - bufsize);
+
+ vtpm_sync(opq->group, opq->vtpm);
abort_egress:
- pack_TPM_RSP_HEADER(tpmcmd->resp,
- VTPM_TAG_RSP, VTPM_COMMAND_HEADER_SIZE, status);
- tpmcmd->resp_len = VTPM_COMMAND_HEADER_SIZE;
+ pack_TPM_RSP_HEADER(tpmcmd->resp, VTPM_TAG_RSP, VTPM_COMMAND_HEADER_SIZE, status);
+ tpmcmd->resp_len = VTPM_COMMAND_HEADER_SIZE;
- return status;
+ return status;
}
-static TPM_RESULT vtpmmgr_LoadHashKey(
- const uuid_t uuid,
- tpmcmd_t* tpmcmd) {
- TPM_RESULT status = TPM_SUCCESS;
+static TPM_RESULT vtpmmgr_LoadHashKey(struct tpm_opaque *opq, tpmcmd_t* tpmcmd)
+{
+ TPM_RESULT status = TPM_SUCCESS;
+ int i;
+ uint8_t *buf = tpmcmd->resp + VTPM_COMMAND_HEADER_SIZE;
+
+ vtpmloginfo(VTPM_LOG_VTPM, "vtpmmgr_LoadHashKey\n");
+ tpmcmd->resp_len = VTPM_COMMAND_HEADER_SIZE;
- tpmcmd->resp_len = VTPM_COMMAND_HEADER_SIZE;
+ status = find_vtpm_verified(tpmcmd->domid, opq);
+ if (status)
+ goto abort_egress;
- TPMTRYRETURN(vtpm_storage_load_hashkey(uuid, tpmcmd->resp + VTPM_COMMAND_HEADER_SIZE));
+ memcpy(buf, opq->vtpm->data, 64);
- tpmcmd->resp_len += HASHKEYSZ;
+ for(i=52; i < 64; i++) {
+ if (buf[i]) {
+ tpmcmd->resp_len += 64;
+ goto abort_egress;
+ }
+ }
+ tpmcmd->resp_len += 52;
abort_egress:
- pack_TPM_RSP_HEADER(tpmcmd->resp,
- VTPM_TAG_RSP, tpmcmd->resp_len, status);
+ pack_TPM_RSP_HEADER(tpmcmd->resp, VTPM_TAG_RSP, tpmcmd->resp_len, status);
+
+ return status;
+}
+
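+/*
+ * Helper macros for the management commands below: CMD_BEGIN/CMD_END set up
+ * 'status' and the response header, UNPACK_IN/UNPACK_DONE parse the request
+ * with bounds checking, UNPACK_GROUP resolves a caller-supplied group index,
+ * and PACK_OUT/PACK_BUF append data to the response buffer.
+ */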
+#define CMD_BEGIN \
+ TPM_RESULT status = TPM_SUCCESS; \
+ uint32_t in_pos = VTPM_COMMAND_HEADER_SIZE; \
+ tpmcmd->resp_len = VTPM_COMMAND_HEADER_SIZE; \
+ vtpmloginfo(VTPM_LOG_TPM, "%s\n", __func__);
+
+#define CMD_END \
+ abort_egress: \
+ if (status) \
+ tpmcmd->resp_len = VTPM_COMMAND_HEADER_SIZE; \
+ pack_TPM_RSP_HEADER(tpmcmd->resp, VTPM_TAG_RSP, tpmcmd->resp_len, status); \
+ return status
+
+#define UNPACK_IN(type, item...) do { \
+ status = unpack3_ ## type (tpmcmd->req, &in_pos, tpmcmd->req_len, item); \
+ if (status) { \
+ status = TPM_BAD_PARAMETER; \
+ goto abort_egress; \
+ } \
+} while (0)
+
+#define UNPACK_GROUP(group) do { \
+ uint32_t group_idx; \
+ UNPACK_IN(UINT32, &group_idx); \
+ if (group_idx >= g_mgr->nr_groups) { \
+ status = TPM_BADINDEX; \
+ goto abort_egress; \
+ } \
+ group = g_mgr->groups[group_idx].v; \
+ if (!group) { \
+ status = TPM_AUTHFAIL; \
+ goto abort_egress; \
+ } \
+} while (0)
+
+#define UNPACK_DONE() do { \
+ if (in_pos != tpmcmd->req_len) { \
+ status = TPM_BAD_PARAMETER; \
+ goto abort_egress; \
+ } \
+} while (0)
+
+#define PACK_OUT(type, item...) do { \
+ UINT32 isize = sizeof_ ## type(item); \
+ if (isize + tpmcmd->resp_len > TCPA_MAX_BUFFER_LENGTH) { \
+ status = TPM_SIZE; \
+ goto abort_egress; \
+ } \
+ pack_ ## type (tpmcmd->resp + tpmcmd->resp_len, item); \
+ tpmcmd->resp_len += isize; \
+} while (0)
+
+#define PACK_BUF ((void*)(tpmcmd->resp + tpmcmd->resp_len))
+
+static TPM_RESULT vtpmmgr_GetBootHash(struct tpm_opaque *opq, tpmcmd_t* tpmcmd)
+{
+ CMD_BEGIN;
+ UNPACK_DONE();
+
+ PACK_OUT(BUFFER, opq->kern_hash, 20);
+
+ CMD_END;
+}
+
+static TPM_RESULT vtpmmgr_GetQuote(struct tpm_opaque *opq, tpmcmd_t* tpmcmd)
+{
+ CMD_BEGIN;
+ int i;
+ void *ibuf;
+ uint32_t pcr_size;
+ TPM_PCR_SELECTION sel;
+
+ UNPACK_IN(VPTR, &ibuf, 20, UNPACK_ALIAS);
+ UNPACK_IN(TPM_PCR_SELECTION, &sel, UNPACK_ALIAS);
+ UNPACK_DONE();
+
+ if (!opq->vtpm) {
+ status = TPM_BAD_PARAMETER;
+ goto abort_egress;
+ }
+
+ printk("ibuf: ");
+ for (i=0; i < 20; i++)
+ printk("%02x", ((uint8_t*)ibuf)[i]);
+ printk("\n");
+
+ status = vtpm_do_quote(opq->group, *opq->uuid, opq->kern_hash, ibuf, &sel, PACK_BUF + 256, &pcr_size, PACK_BUF);
+ if (status)
+ goto abort_egress;
+ tpmcmd->resp_len += 256 + pcr_size;
+
+ CMD_END;
+}
+
+static TPM_RESULT vtpmmgr_GroupList(tpmcmd_t* tpmcmd)
+{
+ CMD_BEGIN;
+ UNPACK_DONE();
+ PACK_OUT(UINT32, g_mgr->nr_groups);
+ CMD_END;
+}
+
+static TPM_RESULT vtpmmgr_GroupNew(tpmcmd_t* tpmcmd)
+{
+ void *privCADigest;
+ BYTE *pubkey;
+ struct mem_group *group;
+ uint32_t group_idx;
+ CMD_BEGIN;
+
+ UNPACK_IN(VPTR, &privCADigest, 20, UNPACK_ALIAS);
+ UNPACK_IN(PTR, &pubkey, 256, UNPACK_ALIAS);
+ UNPACK_DONE();
+
+ group = vtpm_new_group(privCADigest);
+ if (!group) {
+ status = TPM_FAIL;
+ goto abort_egress;
+ }
+
+ memcpy(group->id_data.saa_pubkey, pubkey, 256);
+
+ PACK_OUT(BUFFER, group->id_data.uuid, 16);
+ PACK_OUT(BUFFER, group->id_data.tpm_aik_public, 256);
+ PACK_OUT(BUFFER, group->details.recovery_data, 256);
+
+ memset(group->details.recovery_data, 0, 256);
+
+ group->details.sequence = native_be64(g_mgr->sequence);
+
+ if (group != g_mgr->groups[0].v) {
+ group_idx = g_mgr->nr_groups;
+ g_mgr->nr_groups++;
+ g_mgr->groups = realloc(g_mgr->groups, g_mgr->nr_groups*sizeof(struct mem_group_hdr));
+ memset(&g_mgr->groups[group_idx], 0, sizeof(g_mgr->groups[0]));
+ g_mgr->groups[group_idx].v = group;
+ }
+
+ vtpm_sync_group(group, SEQ_UPDATE);
+ CMD_END;
+}
+
+static TPM_RESULT vtpmmgr_GroupDel(tpmcmd_t* tpmcmd)
+{
+ CMD_BEGIN;
+ struct mem_group *group;
+ uint32_t group_idx, nr_mov;
+
+ UNPACK_IN(UINT32, &group_idx);
+ UNPACK_DONE();
+
+ if (group_idx > g_mgr->nr_groups) {
+ status = TPM_BADINDEX;
+ goto abort_egress;
+ }
+ group = g_mgr->groups[group_idx].v;
+
+ if (group) {
+ int i, j;
+ for (i = 0; i < group->nr_pages; i++) {
+ for (j = 0; j < group->data[i].size; j++) {
+ if (group->data[i].vtpms[j]->flags & VTPM_FLAG_OPEN) {
+ status = TPM_FAIL;
+ goto abort_egress;
+ }
+ }
+ }
+
+ for (i = 0; i < group->nr_pages; i++) {
+ for (j = 0; j < group->data[i].size; j++) {
+ free(group->data[i].vtpms[j]);
+ }
+ }
+ free(group->data);
+ free(group->seals);
+ free(group);
+ }
+
+ g_mgr->nr_groups--;
+ nr_mov = g_mgr->nr_groups - group_idx;
+ memmove(&g_mgr->groups[group_idx], &g_mgr->groups[group_idx + 1], nr_mov * sizeof(g_mgr->groups[0]));
+
+ vtpm_sync_disk(g_mgr, CTR_UPDATE);
+
+ CMD_END;
+}
+
+static int pack_cfg_list(void* buf, struct mem_group *group)
+{
+ int i;
+ void *bstart = buf;
+ memcpy(buf, &group->details.cfg_seq, 8); buf += 8;
+ buf = pack_UINT32(buf, group->nr_seals);
+ for(i=0; i < group->nr_seals; i++) {
+ memcpy(buf, &group->seals[i].digest_release, 20);
+ buf += 20;
+ }
+ memcpy(buf, &group->seal_bits.nr_kerns, 4); buf += 4;
+ memcpy(buf, &group->seal_bits.kernels, 20 * be32_native(group->seal_bits.nr_kerns));
+ return buf - bstart + 20 * be32_native(group->seal_bits.nr_kerns);
+}
+
+static TPM_RESULT vtpmmgr_GroupShow(tpmcmd_t* tpmcmd)
+{
+ CMD_BEGIN;
+ struct mem_group *group;
+
+ UNPACK_GROUP(group);
+ UNPACK_DONE();
+
+ // TODO show is read-only access, need to hit disk if group is NULL
+
+ PACK_OUT(BUFFER, group->id_data.uuid, 16);
+ PACK_OUT(BUFFER, group->id_data.saa_pubkey, 256);
+ tpmcmd->resp_len += pack_cfg_list(PACK_BUF, group);
+
+ CMD_END;
+}
+
+static TPM_RESULT vtpmmgr_GroupActivate(tpmcmd_t* tpmcmd)
+{
+ CMD_BEGIN;
+ struct mem_group *group;
+ uint32_t blobSize;
+ void *blob;
+
+ UNPACK_GROUP(group);
+ UNPACK_IN(UINT32, &blobSize);
+ UNPACK_IN(VPTR, &blob, blobSize, UNPACK_ALIAS);
+ UNPACK_DONE();
+
+ status = group_do_activate(group, blob, blobSize, tpmcmd->resp, &tpmcmd->resp_len);
+
+ CMD_END;
+}
+
+/* 2048-bit MODP Group from RFC3526:
+ * 2^2048 - 2^1984 - 1 + 2^64 * { [2^1918 pi] + 124476 }
+ * mpi objects use little endian word ordering
+ */
+static t_uint Pp[256 / sizeof(t_uint)] = {
+ 0xFFFFFFFFFFFFFFFFUL, 0x15728E5A8AACAA68UL, 0x15D2261898FA0510UL,
+ 0x3995497CEA956AE5UL, 0xDE2BCBF695581718UL, 0xB5C55DF06F4C52C9UL,
+ 0x9B2783A2EC07A28FUL, 0xE39E772C180E8603UL, 0x32905E462E36CE3BUL,
+ 0xF1746C08CA18217CUL, 0x670C354E4ABC9804UL, 0x9ED529077096966DUL,
+ 0x1C62F356208552BBUL, 0x83655D23DCA3AD96UL, 0x69163FA8FD24CF5FUL,
+ 0x98DA48361C55D39AUL, 0xC2007CB8A163BF05UL, 0x49286651ECE45B3DUL,
+ 0xAE9F24117C4B1FE6UL, 0xEE386BFB5A899FA5UL, 0x0BFF5CB6F406B7EDUL,
+ 0xF44C42E9A637ED6BUL, 0xE485B576625E7EC6UL, 0x4FE1356D6D51C245UL,
+ 0x302B0A6DF25F1437UL, 0xEF9519B3CD3A431BUL, 0x514A08798E3404DDUL,
+ 0x020BBEA63B139B22UL, 0x29024E088A67CC74UL, 0xC4C6628B80DC1CD1UL,
+ 0xC90FDAA22168C234UL, 0xFFFFFFFFFFFFFFFFUL,
+};
+static t_uint Gp[] = { 2 };
+
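+/*
+ * Anonymous Diffie-Hellman over the RFC 3526 group above: dhkx1 holds the
+ * peer's 256-byte public value, dhkx2 receives our g^x mod p, and 'out'
+ * receives SHA-256 of the 256-byte shared secret, later used to mask the
+ * group key in vtpmmgr_GroupRegister.
+ */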
+static void tm_dhkx_gen(void* dhkx1, void* dhkx2, void* out)
+{
+ mpi GX = { 0 }, GY = { 0 }, K = { 0 }, RP = { 0 };
+
+ t_uint Xp[256 / sizeof(t_uint)];
+ mpi X = {
+ .s = 1,
+ .n = sizeof(Xp)/sizeof(Xp[0]),
+ .p = Xp
+ };
+ mpi P = {
+ .s = 1,
+ .n = sizeof(Pp)/sizeof(Pp[0]),
+ .p = Pp,
+ };
+ mpi G = {
+ .s = 1,
+ .n = 1,
+ .p = Gp,
+ };
+
+ do_random(Xp, sizeof(Xp));
+ while (Xp[31] == 0 || Xp[31] == -1UL)
+ do_random(Xp + 31, sizeof(Xp[31]));
+
+ mpi_exp_mod(&GX, &G, &X, &P, &RP);
+ mpi_write_binary(&GX, dhkx2, 256);
+ mpi_free(&GX);
+
+ mpi_read_binary(&GY, dhkx1, 256);
+ mpi_exp_mod(&K, &GY, &X, &P, &RP);
+ mpi_free(&RP);
+ mpi_free(&GY);
+
+ mpi_write_binary(&K, (void*)Xp, 256);
+ mpi_free(&K);
+ sha2((void*)Xp, 256, out, 0);
+}
+
+static void xor2_256b(void *xv, const void* yv)
+{
+ int i;
+ uint64_t *x = xv;
+ const uint64_t *y = yv;
+ for(i=0; i < 4; i++)
+ x[i] ^= y[i];
+}
+
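+/*
+ * Group registration: the SAA signs its DH public value (dhkx1); once the
+ * signature checks out, a shared key is derived via tm_dhkx_gen, XORed with
+ * the group key, and the reply (dhkx2 || masked key) is bound to the current
+ * platform state by a quote over SHA1("REGR" || dhkx1 || dhkx2 || masked key).
+ */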
+static TPM_RESULT vtpmmgr_GroupRegister(tpmcmd_t* tpmcmd)
+{
+ CMD_BEGIN;
+ struct mem_group *group = NULL;
+ tpmrsa_context saa_rsa = TPMRSA_CTX_INIT;
+ struct tpm_authdata digest;
+ sha1_context ctx;
+ TPM_PCR_SELECTION sel;
+ void *dhkx1, *dhkx2, *gk, *sig;
+
+ UNPACK_GROUP(group);
+ UNPACK_IN(VPTR, &dhkx1, 256, UNPACK_ALIAS);
+ UNPACK_IN(VPTR, &sig, 256, UNPACK_ALIAS);
+ UNPACK_IN(TPM_PCR_SELECTION, &sel, UNPACK_ALIAS);
+ UNPACK_DONE();
+
+ /* Only generating this quote during the same boot that this group was
+ * created in allows the quote to prove that the group key has never
+ * been available outside a configuration approved by its SAA.
+ */
+ if (!(group->flags & MEM_GROUP_FLAG_FIRSTBOOT)) {
+ status = TPM_FAIL;
+ goto abort_egress;
+ }
+
+ sha1(dhkx1, 256, digest.bits);
+ tpmrsa_set_pubkey(&saa_rsa, group->id_data.saa_pubkey, 256, 0, 0);
+ if (tpmrsa_sigcheck(&saa_rsa, sig, digest.bits))
+ status = TPM_FAIL;
+ tpmrsa_free(&saa_rsa);
+ if (status)
+ goto abort_egress;
+
+ dhkx2 = PACK_BUF;
+ tpmcmd->resp_len += 256;
+ gk = PACK_BUF;
+ tpmcmd->resp_len += 32;
+
+ tm_dhkx_gen(dhkx1, dhkx2, gk);
+ xor2_256b(gk, &group->group_key);
+
+ sha1_starts(&ctx);
+ sha1_update(&ctx, (void*)"REGR", 4);
+ sha1_update(&ctx, dhkx1, 256);
+ sha1_update(&ctx, dhkx2, 256 + 32);
+ sha1_finish(&ctx, digest.bits);
+
+ status = vtpm_do_quote(group, NULL, NULL, &digest, &sel, NULL, NULL, PACK_BUF);
+ tpmcmd->resp_len += 256;
+
+ CMD_END;
+}
+
+static TPM_RESULT vtpmmgr_GroupUpdate(tpmcmd_t* tpmcmd)
+{
+ CMD_BEGIN;
+ struct mem_group *group;
+ int i;
+ int hstart;
+ uint32_t nr_kerns, nr_seals;
+ uint64_t old_seq, new_seq;
+ struct mem_seal *seals = NULL;
+ tpmrsa_context saa_rsa = TPMRSA_CTX_INIT;
+ unsigned char digest[20];
+ TPM_RESULT rc;
+ void *sig, *seal_bits, *kern_bits;
+
+ UNPACK_GROUP(group);
+ UNPACK_IN(VPTR, &sig, 256, UNPACK_ALIAS);
+
+ // Hash starts here
+ hstart = in_pos;
+
+ new_seq = be64_native(*(be64_t*)(tpmcmd->req + in_pos));
+ old_seq = be64_native(group->details.cfg_seq);
+ in_pos += 8;
+ if (old_seq > new_seq) {
+ status = TPM_FAIL;
+ goto abort_egress;
+ }
+
+ UNPACK_IN(UINT32, &nr_seals);
+ UNPACK_IN(VPTR, &seal_bits, nr_seals * 20, UNPACK_ALIAS);
+
+ UNPACK_IN(UINT32, &nr_kerns);
+ UNPACK_IN(VPTR, &kern_bits, nr_kerns * 20, UNPACK_ALIAS);
+
+ // TODO handle saving larger lists on disk
+ if (nr_seals > NR_SEALS_PER_GROUP) {
+ status = TPM_SIZE;
+ goto abort_egress;
+ }
+
+ if (nr_kerns > NR_KERNS_PER_GROUP) {
+ status = TPM_SIZE;
+ goto abort_egress;
+ }
+
+ sha1(tpmcmd->req + hstart, in_pos - hstart, digest);
- return status;
+ seals = calloc(nr_seals, sizeof(seals[0]));
+
+ for(i=0; i < nr_seals; i++) {
+ TPM_PCR_SELECTION sel;
+ UNPACK_IN(TPM_PCR_SELECTION, &sel, UNPACK_ALIAS);
+ memcpy(&seals[i].digest_release, seal_bits, 20);
+ seal_bits += 20;
+ if (sel.sizeOfSelect > 4) {
+ status = TPM_BAD_PARAMETER;
+ goto abort_egress;
+ }
+ seals[i].pcr_selection = native_le32(0);
+ memcpy(&seals[i].pcr_selection, sel.pcrSelect, sel.sizeOfSelect);
+ }
+
+ UNPACK_DONE();
+
+ tpmrsa_set_pubkey(&saa_rsa, group->id_data.saa_pubkey, 256, 0, 0);
+ rc = tpmrsa_sigcheck(&saa_rsa, sig, digest);
+ tpmrsa_free(&saa_rsa);
+ if (rc) {
+ printk("sigcheck failed: %d\n", rc);
+ status = rc;
+ goto abort_egress;
+ }
+
+ // Commit
+ free(group->seals);
+
+ memcpy(&group->seal_bits.kernels, kern_bits, 20 * nr_kerns);
+ group->details.cfg_seq = native_be64(new_seq);
+ group->nr_seals = nr_seals;
+ group->seals = seals;
+ group->seal_bits.nr_kerns = native_be32(nr_kerns);
+
+ seals = NULL;
+
+ group->flags &= ~MEM_GROUP_FLAG_SEAL_VALID;
+ if (group == g_mgr->groups[0].v)
+ g_mgr->root_seals_valid = 0;
+
+ // TODO use GROUP_KEY_UPDATE or MGR_KEY_UPDATE here?
+ // only required if this update was to address a potential key leak
+ vtpm_sync_group(group, SEQ_UPDATE);
+
+ abort_egress:
+ free(seals);
+
+ pack_TPM_RSP_HEADER(tpmcmd->resp, VTPM_TAG_RSP, tpmcmd->resp_len, status);
+ return status;
+}
+
+static TPM_RESULT vtpmmgr_VtpmList(tpmcmd_t* tpmcmd)
+{
+ CMD_BEGIN;
+ struct mem_group *group;
+ uint32_t vtpm_offset;
+ int i, j;
+
+ UNPACK_GROUP(group);
+ UNPACK_IN(UINT32, &vtpm_offset);
+
+ PACK_OUT(UINT32, group->nr_vtpms);
+ if (vtpm_offset > group->nr_vtpms)
+ goto egress;
+
+ for(i=0; i < group->nr_pages; i++) {
+ struct mem_vtpm_page *pg = &group->data[i];
+ for(j=0; j < pg->size; j++) {
+ if (vtpm_offset) {
+ // TODO a proper seek would be far faster
+ vtpm_offset--;
+ continue;
+ }
+ memcpy(PACK_BUF, pg->vtpms[j]->uuid, 16);
+ tpmcmd->resp_len += 16;
+ if (tpmcmd->resp_len + 16 > TCPA_MAX_BUFFER_LENGTH)
+ goto egress;
+ }
+ }
+
+ egress:
+ CMD_END;
+}
+
+static TPM_RESULT vtpmmgr_VtpmNew(tpmcmd_t* tpmcmd)
+{
+ CMD_BEGIN;
+ struct mem_group *group;
+ struct mem_vtpm *vtpm;
+ uuid_t newuuid;
+ int rc;
+
+ UNPACK_GROUP(group);
+
+ // XXX allow non-random UUIDs for testing
+ if (tpmcmd->req_len == 14 + 16)
+ UNPACK_IN(BUFFER, newuuid, 16);
+ else
+ gen_random_uuid(newuuid);
+ UNPACK_DONE();
+
+ rc = create_vtpm(group, &vtpm, newuuid);
+ if (rc) {
+ status = TPM_FAIL;
+ goto abort_egress;
+ }
+ memset(vtpm->data, 0, 64);
+ vtpm_sync(group, vtpm);
+
+ PACK_OUT(BUFFER, newuuid, 16);
+ CMD_END;
}
+static TPM_RESULT vtpmmgr_VtpmDel(tpmcmd_t* tpmcmd)
+{
+ CMD_BEGIN;
+ uuid_t uuid;
+ struct mem_group *group;
+ struct mem_vtpm *vtpm;
+ int rc;
+
+ UNPACK_IN(BUFFER, uuid, 16);
+ UNPACK_DONE();
+ rc = find_vtpm(&group, &vtpm, uuid);
+ if (rc) {
+ status = TPM_FAIL;
+ goto abort_egress;
+ }
+
+ if (vtpm->flags & VTPM_FLAG_OPEN) {
+ status = TPM_FAIL;
+ goto abort_egress;
+ }
+
+ delete_vtpm(group, vtpm);
+
+ CMD_END;
+}
+
+static int vtpmmgr_permcheck(struct tpm_opaque *opq)
+{
+ if (!opq->vtpm)
+ return 1;
+ if (opq->vtpm->flags & VTPM_FLAG_ADMIN)
+ return 0;
+ return 1;
+}
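+/*
+ * Top-level dispatch: VTPM_TAG_REQ carries per-vTPM commands from a vtpm
+ * domain, VTPM_TAG_REQ2 carries management commands and is rejected unless
+ * the caller's vTPM has VTPM_FLAG_ADMIN, and the TPM_TAG_RQU_* tags are a
+ * small whitelist of hardware-TPM passthrough ordinals.
+ */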
TPM_RESULT vtpmmgr_handle_cmd(
- const uuid_t uuid,
- tpmcmd_t* tpmcmd)
-{
- TPM_RESULT status = TPM_SUCCESS;
- TPM_TAG tag;
- UINT32 size;
- TPM_COMMAND_CODE ord;
-
- unpack_TPM_RQU_HEADER(tpmcmd->req,
- &tag, &size, &ord);
-
- /* Handle the command now */
- switch(tag) {
- case VTPM_TAG_REQ:
- //This is a vTPM command
- switch(ord) {
- case VTPM_ORD_SAVEHASHKEY:
- return vtpmmgr_SaveHashKey(uuid, tpmcmd);
- case VTPM_ORD_LOADHASHKEY:
- return vtpmmgr_LoadHashKey(uuid, tpmcmd);
- default:
- vtpmlogerror(VTPM_LOG_VTPM, "Invalid vTPM Ordinal %" PRIu32 "\n", ord);
- status = TPM_BAD_ORDINAL;
- }
- break;
- case TPM_TAG_RQU_COMMAND:
- case TPM_TAG_RQU_AUTH1_COMMAND:
- case TPM_TAG_RQU_AUTH2_COMMAND:
- //This is a TPM passthrough command
- switch(ord) {
- case TPM_ORD_GetRandom:
- vtpmloginfo(VTPM_LOG_VTPM, "Passthrough: TPM_GetRandom\n");
- break;
- case TPM_ORD_PcrRead:
- vtpmloginfo(VTPM_LOG_VTPM, "Passthrough: TPM_PcrRead\n");
- break;
- default:
- vtpmlogerror(VTPM_LOG_VTPM, "TPM Disallowed Passthrough ord=%" PRIu32 "\n", ord);
- status = TPM_DISABLED_CMD;
- goto abort_egress;
- }
-
- size = TCPA_MAX_BUFFER_LENGTH;
- TPMTRYRETURN(TPM_TransmitData(tpmcmd->req, tpmcmd->req_len, tpmcmd->resp, &size));
- tpmcmd->resp_len = size;
-
- unpack_TPM_RESULT(tpmcmd->resp + sizeof(TPM_TAG) + sizeof(UINT32), &status);
- return status;
-
- break;
- default:
- vtpmlogerror(VTPM_LOG_VTPM, "Invalid tag=%" PRIu16 "\n", tag);
- status = TPM_BADTAG;
- }
+ struct tpm_opaque *opaque,
+ tpmcmd_t* tpmcmd)
+{
+ TPM_RESULT status = TPM_SUCCESS;
+ TPM_TAG tag;
+ UINT32 size;
+ TPM_COMMAND_CODE ord;
+
+ unpack_TPM_RQU_HEADER(tpmcmd->req,
+ &tag, &size, &ord);
+
+ /* Handle the command now */
+ switch(tag) {
+ case VTPM_TAG_REQ:
+ // This is a vTPM command
+ switch(ord) {
+ case VTPM_ORD_SAVEHASHKEY:
+ return vtpmmgr_SaveHashKey(opaque, tpmcmd);
+ case VTPM_ORD_LOADHASHKEY:
+ return vtpmmgr_LoadHashKey(opaque, tpmcmd);
+ case VTPM_ORD_GET_BOOT_HASH:
+ return vtpmmgr_GetBootHash(opaque, tpmcmd);
+ case VTPM_ORD_GET_QUOTE:
+ return vtpmmgr_GetQuote(opaque, tpmcmd);
+ default:
+ vtpmlogerror(VTPM_LOG_VTPM, "Invalid vTPM Ordinal %" PRIu32 "\n", ord);
+ status = TPM_BAD_ORDINAL;
+ }
+ break;
+ case VTPM_TAG_REQ2:
+ // This is a management command
+ if (vtpmmgr_permcheck(opaque)) {
+ status = TPM_AUTHFAIL;
+ vtpmlogerror(VTPM_LOG_VTPM, "Rejected attempt to use management command from client\n");
+ break;
+ }
+ switch (ord) {
+ case VTPM_ORD_GROUP_LIST:
+ return vtpmmgr_GroupList(tpmcmd);
+ case VTPM_ORD_GROUP_NEW:
+ return vtpmmgr_GroupNew(tpmcmd);
+ case VTPM_ORD_GROUP_DEL:
+ return vtpmmgr_GroupDel(tpmcmd);
+ case VTPM_ORD_GROUP_ACTIVATE:
+ return vtpmmgr_GroupActivate(tpmcmd);
+ case VTPM_ORD_GROUP_REGISTER:
+ return vtpmmgr_GroupRegister(tpmcmd);
+ case VTPM_ORD_GROUP_UPDATE:
+ return vtpmmgr_GroupUpdate(tpmcmd);
+ case VTPM_ORD_GROUP_SHOW:
+ return vtpmmgr_GroupShow(tpmcmd);
+ case VTPM_ORD_VTPM_LIST:
+ return vtpmmgr_VtpmList(tpmcmd);
+ case VTPM_ORD_VTPM_NEW:
+ return vtpmmgr_VtpmNew(tpmcmd);
+ case VTPM_ORD_VTPM_DEL:
+ return vtpmmgr_VtpmDel(tpmcmd);
+ default:
+ vtpmlogerror(VTPM_LOG_VTPM, "Invalid TM Ordinal %" PRIu32 "\n", ord);
+ status = TPM_BAD_ORDINAL;
+ }
+ break;
+ case TPM_TAG_RQU_COMMAND:
+ case TPM_TAG_RQU_AUTH1_COMMAND:
+ case TPM_TAG_RQU_AUTH2_COMMAND:
+ //This is a TPM passthrough command
+ switch(ord) {
+ case TPM_ORD_GetRandom:
+ vtpmloginfo(VTPM_LOG_VTPM, "Passthrough: TPM_GetRandom\n");
+ break;
+ case TPM_ORD_PcrRead:
+ vtpmloginfo(VTPM_LOG_VTPM, "Passthrough: TPM_PcrRead\n");
+ // Quotes also need to be restricted to hide PCR values
+ break;
+ case TPM_ORD_Extend:
+ // TODO allow to certain clients? A malicious client
+ // could scramble PCRs and make future quotes invalid.
+ if (vtpmmgr_permcheck(opaque)) {
+ vtpmlogerror(VTPM_LOG_VTPM, "Disallowed TPM_Extend\n");
+ status = TPM_DISABLED_CMD;
+ goto abort_egress;
+ } else {
+ vtpmloginfo(VTPM_LOG_VTPM, "Passthrough: TPM_Extend\n");
+ }
+ break;
+ default:
+ vtpmlogerror(VTPM_LOG_VTPM, "TPM Disallowed Passthrough ord=%" PRIu32 "\n", ord);
+ status = TPM_DISABLED_CMD;
+ goto abort_egress;
+ }
+
+ size = TCPA_MAX_BUFFER_LENGTH;
+ TPMTRYRETURN(TPM_TransmitData(tpmcmd->req, tpmcmd->req_len, tpmcmd->resp, &size));
+ tpmcmd->resp_len = size;
+
+ return TPM_SUCCESS;
+ default:
+ vtpmlogerror(VTPM_LOG_VTPM, "Invalid tag=%" PRIu16 "\n", tag);
+ status = TPM_BADTAG;
+ }
abort_egress:
- tpmcmd->resp_len = VTPM_COMMAND_HEADER_SIZE;
- pack_TPM_RSP_HEADER(tpmcmd->resp,
- tag + 3, tpmcmd->resp_len, status);
+ tpmcmd->resp_len = VTPM_COMMAND_HEADER_SIZE;
+ pack_TPM_RSP_HEADER(tpmcmd->resp, tag + 3, tpmcmd->resp_len, status);
- return status;
+ return status;
}
--- /dev/null
+#include <console.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <mini-os/byteorder.h>
+
+#include "vtpm_manager.h"
+#include "log.h"
+#include "uuid.h"
+
+#include "vtpmmgr.h"
+#include "vtpm_disk.h"
+#include "disk_crypto.h"
+#include "disk_format.h"
+#include "disk_io.h"
+#include "disk_tpm.h"
+
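+/* In-memory TPM Manager state shared across the vTPM disk code */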
+struct mem_tpm_mgr *g_mgr;
+
+int vtpm_sync_disk(struct mem_tpm_mgr *mgr, int depth)
+{
+ int old_active_root = mgr->active_root;
+ int new_active_root = !old_active_root;
+ int rc = 0;
+ struct tpm_authdata prev;
+ struct mem_group *group0 = mgr->groups[0].v;
+
+ // don't bother writing if we will never be able to read
+ if (!group0 || !group0->nr_seals)
+ return 0;
+
+ mgr->sequence++;
+ mgr->active_root = new_active_root;
+
+ switch (depth) {
+ case CTR_UPDATE:
+ {
+ uint32_t ctr = be32_native(mgr->counter_value);
+ mgr->counter_value = native_be32(ctr + 1);
+ break;
+ }
+ case MGR_KEY_UPDATE:
+ {
+ int i;
+ mgr->root_seals_valid = 0;
+ do_random(&mgr->tm_key, 16);
+ aes_setup(&mgr->tm_key_e, &mgr->tm_key);
+ do_random(&mgr->nv_key, 16);
+ for(i=0; i < mgr->nr_groups; i++) {
+ abort(); // TODO use raw re-encryption to handle unopened groups
+ }
+ break;
+ }
+ case CTR_AUTH_UPDATE:
+ mgr->root_seals_valid = 0;
+ memcpy(&prev, &mgr->counter_auth, 20);
+ do_random(&mgr->counter_auth, 20);
+ break;
+ case NV_AUTH_UPDATE:
+ mgr->root_seals_valid = 0;
+ memcpy(&prev, &mgr->nvram_auth, 20);
+ do_random(&mgr->nvram_auth, 20);
+ break;
+ }
+
+ disk_write_all(mgr);
+
+ switch (depth) {
+ case SEQ_UPDATE:
+ break;
+
+ case CTR_UPDATE:
+ rc = TPM_disk_incr_counter(mgr->counter_index, mgr->counter_auth);
+ if (rc) {
+ uint32_t ctr = be32_native(mgr->counter_value);
+ mgr->counter_value = native_be32(ctr - 1);
+ mgr->active_root = old_active_root;
+ return rc;
+ }
+ break;
+
+ case MGR_KEY_UPDATE:
+ rc = TPM_disk_nvwrite(&mgr->nv_key, 16, mgr->nvram_slot, mgr->nvram_auth);
+ if (rc)
+ abort();
+ break;
+
+ case CTR_AUTH_UPDATE:
+ rc = TPM_disk_change_counter(mgr->counter_index, prev, mgr->counter_auth);
+ if (rc)
+ abort();
+ break;
+
+ case NV_AUTH_UPDATE:
+ rc = TPM_disk_nvchange(mgr->nvram_slot, prev, mgr->nvram_auth);
+ if (rc)
+ abort();
+ break;
+ }
+
+ return rc;
+}
+
+static struct mem_group_hdr* find_mem_group_hdr(struct mem_tpm_mgr *mgr, struct mem_group *group)
+{
+ int i;
+ for (i = 0; i < mgr->nr_groups; i++) {
+ struct mem_group_hdr *hdr = mgr->groups + i;
+ if (hdr->v == group)
+ return hdr;
+ }
+ return NULL;
+}
+
+int vtpm_sync_group(struct mem_group *group, int depth)
+{
+ struct mem_group_hdr* hdr = find_mem_group_hdr(g_mgr, group);
+ uint64_t seq = be64_native(group->details.sequence);
+
+ if (!hdr)
+ abort();
+
+ hdr->disk_loc.value = 0;
+ group->details.sequence = native_be64(1 + seq);
+
+ if (depth == GROUP_KEY_UPDATE) {
+ int i;
+ do_random(&group->group_key, 16);
+ do_random(&group->rollback_mac_key, 16);
+ group->flags &= ~MEM_GROUP_FLAG_SEAL_VALID;
+ for (i = 0; i < group->nr_pages; i++)
+ group->data[i].disk_loc.value = 0;
+ depth = CTR_UPDATE;
+ }
+
+ return vtpm_sync_disk(g_mgr, depth);
+}
+
+static struct mem_vtpm_page* find_mem_vtpm_page(struct mem_group *group, struct mem_vtpm *vtpm)
+{
+ int pgidx = vtpm->index_in_parent / VTPMS_PER_SECTOR;
+ return group->data + pgidx;
+}
+
+int vtpm_sync(struct mem_group *group, struct mem_vtpm *vtpm)
+{
+ struct mem_vtpm_page *pg = find_mem_vtpm_page(group, vtpm);
+ if (!pg)
+ return 1;
+ pg->disk_loc.value = 0;
+ return vtpm_sync_group(group, SEQ_UPDATE);
+}
+
+/************************************************************************/
+
+int create_vtpm(struct mem_group *group, struct mem_vtpm **vtpmp, const uuid_t uuid)
+{
+ int pgidx = group->nr_vtpms / VTPMS_PER_SECTOR;
+ int vtidx = group->nr_vtpms % VTPMS_PER_SECTOR;
+ struct mem_vtpm *vtpm = calloc(1, sizeof(*vtpm));
+
+ struct mem_vtpm_page *page = group->data + pgidx;
+ if (pgidx >= group->nr_pages) {
+ if (pgidx != group->nr_pages)
+ abort(); // nr_vtpms inconsistent with nr_pages
+ group->nr_pages++;
+ group->data = realloc(group->data, group->nr_pages * sizeof(*page));
+ page = group->data + pgidx;
+ memset(page, 0, sizeof(*page));
+ }
+ if (page->size != vtidx)
+ abort(); // nr_vtpms inconsistent with page->size
+ page->size++;
+
+ page->vtpms[vtidx] = vtpm;
+ vtpm->index_in_parent = group->nr_vtpms;
+ vtpm->flags = 0;
+
+ group->nr_vtpms++;
+
+ memcpy(vtpm->uuid, uuid, 16);
+ *vtpmp = vtpm;
+ return 0;
+}
+
+int delete_vtpm(struct mem_group *group, struct mem_vtpm *vtpm)
+{
+ struct mem_vtpm_page *pg = find_mem_vtpm_page(group, vtpm);
+ struct mem_vtpm_page *last_pg = group->data + (group->nr_pages - 1);
+ struct mem_vtpm *last = last_pg->vtpms[last_pg->size - 1];
+ int vtidx = vtpm->index_in_parent % VTPMS_PER_SECTOR;
+
+ if (vtpm->flags & VTPM_FLAG_OPEN)
+ return 1;
+
+ last->index_in_parent = vtpm->index_in_parent;
+ pg->vtpms[vtidx] = last;
+ pg->disk_loc.value = 0;
+
+ last_pg->vtpms[last_pg->size - 1] = NULL;
+ last_pg->disk_loc.value = 0;
+ last_pg->size--;
+
+ if (last_pg->size == 0)
+ group->nr_pages--;
+ group->nr_vtpms--;
+ free(vtpm);
+ return 0;
+}
+
+int find_vtpm(struct mem_group **groupp, struct mem_vtpm **vtpmp, const uuid_t uuid)
+{
+ struct mem_group *group;
+ int i, j, k;
+
+ for (i = 0; i < g_mgr->nr_groups; i++) {
+ group = g_mgr->groups[i].v;
+ if (!group)
+ continue;
+ for (j = 0; j < group->nr_pages; j++) {
+ struct mem_vtpm_page *pg = &group->data[j];
+ for (k = 0; k < pg->size; k++) {
+ struct mem_vtpm *vt = pg->vtpms[k];
+ if (!memcmp(uuid, vt->uuid, 16)) {
+ *groupp = group;
+ *vtpmp = vt;
+ return 0;
+ }
+ }
+ }
+ }
+
+ return 1;
+}
--- /dev/null
+#ifndef __VTPM_DISK_H
+#define __VTPM_DISK_H
+
+#include "uuid.h"
+#include <polarssl/aes.h>
+#include "endian_int.h"
+
+/* Type for disk sector indexes */
+typedef be32_t sector_t;
+
+/* A TPM authdata entry (160 random bits) */
+struct tpm_authdata {
+ uint8_t bits[20];
+};
+
+/* 160-bit hash (SHA-1) */
+struct hash160 {
+ uint8_t bits[20];
+};
+
+/* 256-bit hash (either SHA256 or SHA512-256) */
+struct hash256 {
+ uint8_t bits[32];
+};
+
+/* 128-bit MAC (AES-128 CMAC) */
+struct mac128 {
+ uint8_t bits[16];
+};
+
+struct key128 {
+ uint8_t bits[16];
+};
+
+/********************************************************************/
+
+/**
+ * Unique identifying information for a vTPM group. Once a group has been
+ * created, this data will be constant.
+ *
+ * This structure is a component of struct disk_group_sector, stored directly.
+ */
+struct group_id_data {
+ uuid_t uuid;
+ uint8_t saa_pubkey[256];
+ uint8_t tpm_aik_public[256];
+ uint8_t tpm_aik_edata[256];
+ struct hash256 rollback_pubkey_hash;
+};
+
+/**
+ * Details of a vTPM group that change during normal operation.
+ *
+ * This structure is a component of struct disk_group_sector, stored directly.
+ */
+struct group_details {
+ be64_t sequence;
+ be64_t cfg_seq;
+ be64_t flags;
+#define FLAG_ROLLBACK_DETECTED 1
+
+ /* Seal(recovery_seal, PCR16 = H(RECOVERY_KEY)) */
+ uint8_t recovery_data[256];
+};
+
+/**
+ * The required input to TPM_Unseal to obtain key data
+ *
+ * This structure is a component of several disk structures, stored directly.
+ */
+struct disk_seal_entry {
+ le32_t pcr_selection;
+ struct hash160 digest_at_seal;
+ struct hash160 digest_release;
+ uint8_t sealed_data[256];
+};
+
+/**
+ * A vTPM group's configuration list and sealed key data
+ *
+ * This structure is a component of struct disk_group_sector, stored directly.
+ */
+struct disk_group_boot_config_list {
+#define NR_SEALS_PER_GROUP 5
+ be32_t nr_cfgs;
+ struct disk_seal_entry entry[NR_SEALS_PER_GROUP];
+#define NR_KERNS_PER_GROUP 16
+ be32_t nr_kerns;
+ struct hash160 kernels[NR_KERNS_PER_GROUP];
+
+ /* TODO support overflow of either nr_cfgs or nr_kerns */
+ struct hash256 next;
+};
+
+/********************************************************************/
+
+#define VTPM_FLAG_ADMIN 1
+#define VTPM_FLAG_DISK_MASK (0xFFFF)
+#define VTPM_FLAG_OPEN (1UL<<31)
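+/* Note (inferred from the names above): flag bits within VTPM_FLAG_DISK_MASK
+ * are the per-vTPM flags that persist on disk, while VTPM_FLAG_OPEN above the
+ * mask is runtime-only state (see the open-flag checks in VtpmDel and
+ * delete_vtpm). */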
+
+/**
+ * A single vTPM's in-memory data. Do not free if the open flag is set.
+ */
+struct mem_vtpm {
+ uuid_t uuid;
+ uint8_t data[64];
+ uint32_t flags;
+ uint32_t index_in_parent;
+};
+
+/**
+ * Shortened form of struct disk_seal_entry
+ */
+struct mem_seal {
+ le32_t pcr_selection;
+ struct hash160 digest_release;
+};
+
+/**
+ * Maximum number of vTPMs in one sector on the disk.
+ *
+ * 20 + 64 = 84 bytes per vTPM; 32 bytes overhead from IVs
+ * 48*84 + 32 = 4064 bytes
+ */
+#define VTPMS_PER_SECTOR 48
+
+/**
+ * Decrypted and unpacked version of struct disk_vtpm_sector
+ */
+struct mem_vtpm_page {
+ struct hash256 disk_hash;
+ sector_t disk_loc;
+ int size;
+
+ struct mem_vtpm *vtpms[VTPMS_PER_SECTOR];
+};
+
+/**
+ * In-memory representation of an open vTPM group
+ */
+struct mem_group {
+ struct group_id_data id_data;
+ struct group_details details;
+
+ /* Obtained from sealed data */
+ struct tpm_authdata aik_authdata;
+ struct key128 group_key;
+ struct key128 rollback_mac_key;
+
+ int nr_vtpms;
+ int nr_pages;
+ struct mem_vtpm_page *data;
+
+ int flags;
+#define MEM_GROUP_FLAG_SEAL_VALID 1
+#define MEM_GROUP_FLAG_FIRSTBOOT 2
+ int nr_seals;
+ struct mem_seal *seals;
+
+ sector_t seal_next_loc;
+ struct disk_group_boot_config_list seal_bits;
+};
+
+/**
+ * In-memory representation of a vTPM group (open or not)
+ */
+struct mem_group_hdr {
+ sector_t disk_loc;
+ struct hash256 disk_hash;
+
+ int disk_nr_inuse;
+ sector_t *disk_inuse;
+
+ struct mem_group *v;
+};
+
+/**
+ * In-memory representation of the TPM Manager's permanent data
+ */
+struct mem_tpm_mgr {
+ struct key128 tm_key;
+ aes_context tm_key_e;
+ struct key128 nv_key;
+ uuid_t uuid;
+
+ be32_t nvram_slot;
+ struct tpm_authdata nvram_auth;
+ be32_t counter_index;
+ struct tpm_authdata counter_auth;
+ be32_t counter_value;
+
+ uint64_t sequence;
+
+ int active_root;
+
+ int nr_groups;
+ struct mem_group_hdr *groups;
+
+ int root_seals_valid;
+};
+
+int vtpm_storage_init(void);
+int vtpm_load_disk(void);
+int vtpm_new_disk(void);
+
+enum vtpm_sync_depth {
+ SEQ_UPDATE, /* Just the soft sequence number */
+ CTR_UPDATE, /* Sequence and TPM counter */
+ GROUP_KEY_UPDATE, /* Group key (and TPM counter) */
+ MGR_KEY_UPDATE, /* Manager key */
+ CTR_AUTH_UPDATE, /* TPM counter authdata */
+ NV_AUTH_UPDATE /* NVRAM authdata */
+};
+
+/*
+ * For a full manager key flush, use this ordering of writes:
+ * MGR_KEY_UPDATE
+ * CTR_AUTH_UPDATE
+ * NV_AUTH_UPDATE
+ * CTR_UPDATE or GROUP_KEY_UPDATE
+ */
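+
+/*
+ * Illustrative sketch only (not part of this interface): driving the full
+ * manager key flush in the documented order, with early-out error handling.
+ *
+ *   int rc;
+ *   if ((rc = vtpm_sync_disk(g_mgr, MGR_KEY_UPDATE)))
+ *       return rc;
+ *   if ((rc = vtpm_sync_disk(g_mgr, CTR_AUTH_UPDATE)))
+ *       return rc;
+ *   if ((rc = vtpm_sync_disk(g_mgr, NV_AUTH_UPDATE)))
+ *       return rc;
+ *   return vtpm_sync_disk(g_mgr, CTR_UPDATE);
+ */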
+
+extern struct mem_tpm_mgr *g_mgr;
+
+int vtpm_sync_disk(struct mem_tpm_mgr *mgr, int depth);
+int vtpm_sync_group(struct mem_group *group, int depth);
+int vtpm_sync(struct mem_group *group, struct mem_vtpm *vtpm);
+
+int create_vtpm(struct mem_group *group, struct mem_vtpm **vtpmp, const uuid_t uuid);
+int delete_vtpm(struct mem_group *group, struct mem_vtpm *vtpm);
+int find_vtpm(struct mem_group **groupp, struct mem_vtpm **vtpmp, const uuid_t uuid);
+
+#endif
#define VTPM_MANAGER_H
#define VTPM_TAG_REQ 0x01c1
+#define VTPM_TAG_REQ2 0x01c2
#define VTPM_TAG_RSP 0x01c4
+#define VTPM_TAG_RSP2 0x01c5
#define COMMAND_BUFFER_SIZE 4096
// Header size
//************************ Command Codes ****************************
#define VTPM_ORD_BASE 0x0000
-#define VTPM_PRIV_MASK 0x01000000 // Priviledged VTPM Command
-#define VTPM_PRIV_BASE (VTPM_ORD_BASE | VTPM_PRIV_MASK)
+#define TPM_VENDOR_COMMAND 0x02000000 // TPM Main, part 2, section 17.
+#define VTPM_PRIV_BASE (VTPM_ORD_BASE | TPM_VENDOR_COMMAND)
+
+/*
+ * Non-privileged VTPM Commands:
+ *
+ * The PCRs available to read, extend, or quote may be limited to a given vTPM
+ * based on a local security policy (this is not yet implemented).
+ *
+ * vTPMs may request the following commands which will be forwarded directly to
+ * the physical TPM:
+ *
+ * TPM_ORD_GetRandom
+ * TPM_ORD_PcrRead
+ * TPM_ORD_Extend
+ *
+ * In addition, the following commands are available to all vTPMs:
+ */
+
+/**
+ * Store a persistent key blob to TPM Manager storage
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_SAVEHASHKEY
+ * BYTE[] keyblob 52 or 64 bytes of key data
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ */
+#define VTPM_ORD_SAVEHASHKEY (VTPM_ORD_BASE + 1)
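+/*
+ * Worked example (illustrative only): a SAVEHASHKEY request carrying a
+ * 52-byte blob is 62 bytes on the wire, in TPM (big-endian) byte order:
+ *   tag       = 0x01c1     (VTPM_TAG_REQ)
+ *   paramSize = 0x0000003e (2 + 4 + 4 + 52)
+ *   ordinal   = 0x00000001 (VTPM_ORD_SAVEHASHKEY)
+ *   keyblob   = 52 bytes of key data
+ */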
+/**
+ * Load the persistent key blob from TPM Manager storage
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_LOADHASHKEY
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ * BYTE[] keyblob 52 or 64 bytes of key data
+ */
+#define VTPM_ORD_LOADHASHKEY (VTPM_ORD_BASE + 2)
+/**
+ * Get a kernel hash of the control domain for this vTPM
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_GET_BOOT_HASH
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ * TPM_DIGEST digest hash for the initial extend of PCR0
+ */
+#define VTPM_ORD_GET_BOOT_HASH (VTPM_ORD_BASE + 3)
+/**
+ * Get a hardware TPM quote for this vTPM. The quote will use the AIK
+ * associated with the group this vTPM was created in. Values specific to the
+ * vTPM will be extended to certain resettable PCRs.
+ *
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_GET_QUOTE
+ * TPM_NONCE externData Data to be quoted
+ * PCR_SELECTION quoteSelect PCR selection for quote.
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ * BYTE[] signature 256 bytes of signature data
+ * TPM_PCRVALUE[] pcrValues Values of PCRs selected by the request
+ */
+#define VTPM_ORD_GET_QUOTE (VTPM_ORD_BASE + 4)
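+/*
+ * Illustrative example (not mandated by this interface): a quoteSelect
+ * choosing the resettable PCRs 17-23 described below marshals as
+ *   sizeOfSelect = 0x0003
+ *   pcrSelect    = 00 00 fe
+ * since bit i of pcrSelect byte j selects PCR 8*j + i.
+ */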
+
+/*
+ * Resettable PCR values in TPM Manager quotes (VTPM_ORD_GET_QUOTE):
+ *
+ * PCR#16:
+ * unused - debug PCR
+ *
+ * PCR#17-19: (cannot be reset by locality 2)
+ * DRTM measurements
+ *
+ * PCR#20: Remains constant over the life of the vTPM group
+ * SHA1(SAA pubkey)
+ *
+ * PCR#21: May change during the life; must be approved by SAA
+ * SHA1(TPM_MGR_CFG_LIST)
+ *
+ * PCR#22: May change during the life; must be in the cfg_list
+ * vTPM kernel build hash (truncated SHA256)
+ * Note: this is currently set to 20 zero bytes
+ *
+ * PCR#23: Remains constant over the life of the vTPM; system-specific
+ * group UUID || 00 00 00 00
+ * vTPM UUID || 00 00 00 00
+ *
+ *
+ * Group-only PCR values (VTPM_ORD_GROUP_*) are the same except:
+ *
+ * PCR#22: unused (value is zero)
+ * PCR#23:
+ * group UUID || 00 00 00 00
+ *
+ * The value of externalData for quotes using these PCRs is defined below; it is
+ * always a hash whose first 4 bytes identify the rest of the structure.
+ *
+ *
+ * The configuration list signed by a System Approval Agent (SAA) is:
+ *
+ * TPM_MGR_CFG_LIST:
+ * UINT64 sequence Monotonic sequence number
+ * UINT32 pltCfgSize Size of pltCfgs array
+ * TPM_COMPOSITE_HASH[] pltCfgs Valid platform configurations
+ * UINT32 kernSize Size of kernList array
+ * TPM_HASH[] kernList Valid vTPM kernels
+ */
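+
+/*
+ * Sizing sketch (assuming TPM_COMPOSITE_HASH and TPM_HASH are 20-byte SHA-1
+ * digests, as in TPM 1.2): a TPM_MGR_CFG_LIST with P platform configurations
+ * and K kernels marshals to 8 + 4 + 20*P + 4 + 20*K bytes; for example,
+ * 2 configurations and 3 kernels take 116 bytes.
+ */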
+
+/************************************\
+ * TPM Manager Management Interface *
+\************************************/
+
+/**
+ * List groups
+ *
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ2
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_GROUP_LIST
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ * UINT32 count number of valid groups
+ */
+#define VTPM_ORD_GROUP_LIST (VTPM_PRIV_BASE + 0x101)
+/**
+ * Create a group
+ *
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ2
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_GROUP_NEW
+ * TPM_CHOSENID_HASH labelDigest Data for the privacy CA
+ * BYTE[256] SAASigKey RSA public signature key for the SAA
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ * BYTE[16] groupUUID UUID for the group
+ * BYTE[256] aikPubKey Public key of the AIK
+ * BYTE[256] aikBinding TPM_IDENTITY_CONTENTS signature
+ */
+#define VTPM_ORD_GROUP_NEW (VTPM_PRIV_BASE + 0x102)
+/**
+ * Delete a group
+ *
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ2
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_GROUP_DEL
+ * UINT32 groupID ID of the group to delete
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ */
+#define VTPM_ORD_GROUP_DEL (VTPM_PRIV_BASE + 0x103)
+/**
+ * Activate the group's AIK (message from privacy CA)
+ *
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ2
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_GROUP_ACTIVATE
+ * UINT32 groupID ID of the group to activate
+ * UINT32 blobSize
+ * BYTE[] blob Blob from the privacy CA
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ * TPM_SYMMETRIC_KEY key Output from TPM_ActivateIdentity
+ */
+#define VTPM_ORD_GROUP_ACTIVATE (VTPM_PRIV_BASE + 0x104)
+/**
+ * Register this TPM manager slot with the SAA and provision its recovery data.
+ * The initial registration must be done with no reboots between the creation of
+ * the group and the execution of this command; it can only be done once.
+ *
+ * The ExternalData value is SHA1("REGR" || dhkx_1 || dhkx_2 || recoverBlob)
+ *
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ2
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_GROUP_REGISTER
+ * UINT32 groupID ID of the group to register
+ * BYTE[256] dhkx_1 One half of a diffie-hellman key exchange
+ * BYTE[256] SAAProof Signature (using SAASigKey) of derivData
+ * PCR_SELECTION quoteSelect PCR selection for quote.
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ * BYTE[256] dhkx_2 One half of a diffie-hellman key exchange
+ * BYTE[32] recoverBlob Encrypted blob (using key derived from DH)
+ * BYTE[256] regProof Quote using the group's AIK
+ */
+#define VTPM_ORD_GROUP_REGISTER (VTPM_PRIV_BASE + 0x105)
+/**
+ * Update the configuration list
+ *
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ2
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_GROUP_UPDATE
+ * UINT32 groupID ID of the group to update
+ * BYTE[256] cfgListSig Signature (using SAASigKey) of cfgList
+ * TPM_MGR_CFG_LIST cfgList Configurations the group is valid in
+ * PCR_SELECTION[] selForCfgs PCR selections used in the cfgList.pltCfgs
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ */
+#define VTPM_ORD_GROUP_UPDATE (VTPM_PRIV_BASE + 0x106)
+/**
+ * Get the current contents of the group structure.
+ *
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ2
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_GROUP_SHOW
+ * UINT32 groupID ID of the group to view
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ * BYTE[16] groupUUID UUID for the group
+ * BYTE[256] pubkey public key of the SAA
+ * TPM_MGR_CFG_LIST cfgList current list for this group
+ */
+#define VTPM_ORD_GROUP_SHOW (VTPM_PRIV_BASE + 0x107)
+/**
+ * Get a quote of the current status of the TMA structure. This can be used to
+ * prove that an update has been applied; it is similar to VTPM_ORD_GET_QUOTE,
+ * but does not include measurements specific to any vTPM.
+ *
+ * The ExternalData value for the quote is SHA1("SHOW" || nonce)
+ *
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ2
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_GROUP_QUOTE
+ * UINT32 groupID ID of the group to view
+ * TPM_NONCE nonce Anti-replay
+ * PCR_SELECTION quoteSelect PCR selection for quote.
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ * BYTE[] signature 256 bytes of signature data
+ * TPM_PCRVALUE[] pcrValues Values of PCRs selected by the request
+ */
+#define VTPM_ORD_GROUP_QUOTE (VTPM_PRIV_BASE + 0x108)
+/**
+ * Prepare to use recovery data to open a currently-closed group.
+ *
+ * The ExternalData value is SHA1("RCVR" || nonce || dhkx_1)
+ *
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ2
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_GROUP_RECOVER1
+ * UINT32 groupID ID of the group to recover
+ * TPM_KEY proxyAIK AIK to use for recovery quote
+ * TPM_NONCE nonce Anti-replay by challenger
+ * PCR_SELECTION quoteSelect PCR selection for quote
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ * BYTE[256] dhkx_1 One half of a diffie-hellman key exchange
+ * BYTE[256] signature quote using proxyAIK
+ */
+#define VTPM_ORD_GROUP_RECOVER1 (VTPM_PRIV_BASE + 0x109)
+/**
+ * Use recovery data to open a currently-closed group
+ *
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ2
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_GROUP_RECOVER2
+ * UINT32 groupID ID of the group to recover
+ * BYTE[256] dhkx_2 One half of a diffie-hellman key exchange
+ * BYTE[32] recoverBlob Encrypted blob (using key derived from DH)
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ */
+#define VTPM_ORD_GROUP_RECOVER2 (VTPM_PRIV_BASE + 0x10A)
+
+/**
+ * List the UUIDs of vTPMs in an group. Multiple calls may be required to list
+ * all the vTPMs in an group; if the returned list is shorter than totalCount
+ * would imply, additional requests using the offest will be required
+ * to build the full list.
+ *
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ2
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_VTPM_LIST
+ * UINT32 groupID ID of the group to list
+ * UINT32 offset Offset to start the list at
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ * UINT32 totalCount Count of all vTPMs under this group
+ * BYTE[] uuids List of UUIDs (16 bytes each)
+ */
+#define VTPM_ORD_VTPM_LIST (VTPM_PRIV_BASE + 0x201)
+#define VTPM_ORD_VTPM_SHOW (VTPM_PRIV_BASE + 0x202)
+#define VTPM_ORD_VTPM_EDIT (VTPM_PRIV_BASE + 0x203)
+/**
+ * Create a new vTPM in an existing group
+ *
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ2
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_VTPM_NEW
+ * UINT32 groupID ID of the group to modify
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ * BYTE[16] vtpmUUID UUID for the vTPM
+ */
+#define VTPM_ORD_VTPM_NEW (VTPM_PRIV_BASE + 0x204)
+/**
+ * Delete a vTPM
+ *
+ * Input:
+ * TPM_TAG tag VTPM_TAG_REQ2
+ * UINT32 paramSize total size
+ * UINT32 ordinal VTPM_ORD_VTPM_DEL
+ ## UINT32 groupID ID of the group to modify
+ * BYTE[16] vtpmUUID UUID for the vTPM to delete
+ * Output:
+ * TPM_TAG tag VTPM_TAG_RSP
+ * UINT32 paramSize total size
+ * UINT32 status return code
+ */
+#define VTPM_ORD_VTPM_DEL (VTPM_PRIV_BASE + 0x205)
+
+/**
+ * Generate an unbound AIK for the pTPM
+ *
+ * This unbound AIK can be used in the GROUP_RECOVER1 operation.
+ */
+#define VTPM_ORD_MakeIdentity (VTPM_PRIV_BASE + 0x301)
+/**
+ * Activate an unbound AIK for the pTPM
+ */
+#define VTPM_ORD_ActivateIdentity (VTPM_PRIV_BASE + 0x302)
+/**
+ * Get the EK from the pTPM
+ *
+ * Used for any AIK activation
+ */
+#define VTPM_ORD_ReadPubek (VTPM_PRIV_BASE + 0x303)
+/**
+ * Define an NVRAM slot
+ */
+#define VTPM_NV_DefineSpace (VTPM_PRIV_BASE + 0x304)
+/**
+ * Write to NVRAM
+ */
+#define VTPM_NV_WriteValue (VTPM_PRIV_BASE + 0x305)
+/**
+ * Read from NVRAM
+ */
+#define VTPM_NV_ReadValue (VTPM_PRIV_BASE + 0x306)
-// Non-priviledged VTPM Commands (From DMI's)
-#define VTPM_ORD_SAVEHASHKEY (VTPM_ORD_BASE + 1) // DMI requests encryption key for persistent storage
-#define VTPM_ORD_LOADHASHKEY (VTPM_ORD_BASE + 2) // DMI requests symkey to be regenerated
//************************ Return Codes ****************************
#define VTPM_SUCCESS 0
+++ /dev/null
-/*
- * Copyright (c) 2010-2012 United States Government, as represented by
- * the Secretary of Defense. All rights reserved.
- *
- * THIS SOFTWARE AND ITS DOCUMENTATION ARE PROVIDED AS IS AND WITHOUT
- * ANY EXPRESS OR IMPLIED WARRANTIES WHATSOEVER. ALL WARRANTIES
- * INCLUDING, BUT NOT LIMITED TO, PERFORMANCE, MERCHANTABILITY, FITNESS
- * FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT ARE HEREBY
- * DISCLAIMED. USERS ASSUME THE ENTIRE RISK AND LIABILITY OF USING THE
- * SOFTWARE.
- */
-
-/***************************************************************
- * DISK IMAGE LAYOUT
- * *************************************************************
- * All data is stored in BIG ENDIAN format
- * *************************************************************
- * Section 1: Header
- *
- * 10 bytes id ID String "VTPMMGRDOM"
- * uint32_t version Disk Image version number (current == 1)
- * uint32_t storage_key_len Length of the storage Key
- * TPM_KEY storage_key Marshalled TPM_KEY structure (See TPM spec v2)
- * RSA_BLOCK aes_crypto Encrypted aes key data (RSA_CIPHER_SIZE bytes), bound by the storage_key
- * BYTE[32] aes_key Aes key for encrypting the uuid table
- * uint32_t cipher_sz Encrypted size of the uuid table
- *
- * *************************************************************
- * Section 2: Uuid Table
- *
- * This table is encrypted by the aes_key in the header. The cipher text size is just
- * large enough to hold all of the entries plus required padding.
- *
- * Each entry is as follows
- * BYTE[16] uuid Uuid of a vtpm that is stored on this disk
- * uint32_t offset Disk offset where the vtpm data is stored
- *
- * *************************************************************
- * Section 3: Vtpm Table
- *
- * The rest of the disk stores vtpms. Each vtpm is an RSA_BLOCK encrypted
- * by the storage key. Each vtpm must exist on an RSA_BLOCK aligned boundary,
- * starting at the first RSA_BLOCK aligned offset after the uuid table.
- * As the uuid table grows, vtpms may be relocated.
- *
- * RSA_BLOCK vtpm_crypto Vtpm data encrypted by storage_key
- * BYTE[20] hash Sha1 hash of vtpm encrypted data
- * BYTE[16] vtpm_aes_key Encryption key for vtpm data
- *
- *************************************************************
- */
-#define DISKVERS 1
-#define IDSTR "VTPMMGRDOM"
-#define IDSTRLEN 10
-#define AES_BLOCK_SIZE 16
-#define AES_KEY_BITS 256
-#define AES_KEY_SIZE (AES_KEY_BITS/8)
-#define BUF_SIZE 4096
-
-#define UUID_TBL_ENT_SIZE (sizeof(uuid_t) + sizeof(uint32_t))
-
-#define HEADERSZ (10 + 4 + 4)
-
-#define TRY_READ(buf, size, msg) do {\
- int rc; \
- if((rc = read(blkfront_fd, buf, (size))) != (size)) { \
- vtpmlogerror(VTPM_LOG_VTPM, "read() failed! " msg " : rc=(%d/%d), error=(%s)\n", rc, (int)(size), strerror(errno)); \
- status = TPM_IOERROR;\
- goto abort_egress;\
- } \
-} while(0)
-
-#define TRY_WRITE(buf, size, msg) do {\
- int rc; \
- if((rc = write(blkfront_fd, buf, (size))) != (size)) { \
- vtpmlogerror(VTPM_LOG_VTPM, "write() failed! " msg " : rc=(%d/%d), error=(%s)\n", rc, (int)(size), strerror(errno)); \
- status = TPM_IOERROR;\
- goto abort_egress;\
- } \
-} while(0)
-
-#include <blkfront.h>
-#include <unistd.h>
-#include <errno.h>
-#include <string.h>
-#include <inttypes.h>
-#include <stdlib.h>
-#include <stdbool.h>
-#include <mini-os/byteorder.h>
-#include <polarssl/aes.h>
-
-#include "vtpm_manager.h"
-#include "log.h"
-#include "marshal.h"
-#include "tpm.h"
-#include "uuid.h"
-
-#include "vtpmmgr.h"
-#include "vtpm_storage.h"
-
-#define MAX(a,b) ( ((a) > (b)) ? (a) : (b) )
-#define MIN(a,b) ( ((a) < (b)) ? (a) : (b) )
-
-/* blkfront device objets */
-static struct blkfront_dev* blkdev = NULL;
-static int blkfront_fd = -1;
-
-struct Vtpm {
- uuid_t uuid;
- int offset;
-};
-struct Storage {
- int aes_offset;
- int uuid_offset;
- int end_offset;
-
- int num_vtpms;
- int num_vtpms_alloced;
- struct Vtpm* vtpms;
-};
-
-/* Global storage data */
-static struct Storage g_store = {
- .vtpms = NULL,
-};
-
-static int get_offset(void) {
- return lseek(blkfront_fd, 0, SEEK_CUR);
-}
-
-static void reset_store(void) {
- g_store.aes_offset = 0;
- g_store.uuid_offset = 0;
- g_store.end_offset = 0;
-
- g_store.num_vtpms = 0;
- g_store.num_vtpms_alloced = 0;
- free(g_store.vtpms);
- g_store.vtpms = NULL;
-}
-
-static int vtpm_get_index(const uuid_t uuid) {
- int st = 0;
- int ed = g_store.num_vtpms-1;
- while(st <= ed) {
- int mid = ((unsigned int)st + (unsigned int)ed) >> 1; //avoid overflow
- int c = memcmp(uuid, &g_store.vtpms[mid].uuid, sizeof(uuid_t));
- if(c == 0) {
- return mid;
- } else if(c > 0) {
- st = mid + 1;
- } else {
- ed = mid - 1;
- }
- }
- return -(st + 1);
-}
-
-static void vtpm_add(const uuid_t uuid, int offset, int index) {
- /* Realloc more space if needed */
- if(g_store.num_vtpms >= g_store.num_vtpms_alloced) {
- g_store.num_vtpms_alloced += 16;
- g_store.vtpms = realloc(
- g_store.vtpms,
- sizeof(struct Vtpm) * g_store.num_vtpms_alloced);
- }
-
- /* Move everybody after the new guy */
- for(int i = g_store.num_vtpms; i > index; --i) {
- g_store.vtpms[i] = g_store.vtpms[i-1];
- }
-
- vtpmloginfo(VTPM_LOG_VTPM, "Registered vtpm " UUID_FMT "\n", UUID_BYTES(uuid));
-
- /* Finally add new one */
- memcpy(g_store.vtpms[index].uuid, uuid, sizeof(uuid_t));
- g_store.vtpms[index].offset = offset;
- ++g_store.num_vtpms;
-}
-
-#if 0
-static void vtpm_remove(int index) {
- for(i = index; i < g_store.num_vtpms; ++i) {
- g_store.vtpms[i] = g_store.vtpms[i+1];
- }
- --g_store.num_vtpms;
-}
-#endif
-
-static int pack_uuid_table(uint8_t* table, int size, int* nvtpms) {
- uint8_t* ptr = table;
- while(*nvtpms < g_store.num_vtpms && size >= 0)
- {
- /* Pack the uuid */
- memcpy(ptr, (uint8_t*)g_store.vtpms[*nvtpms].uuid, sizeof(uuid_t));
- ptr+= sizeof(uuid_t);
-
-
- /* Pack the offset */
- ptr = pack_UINT32(ptr, g_store.vtpms[*nvtpms].offset);
-
- ++*nvtpms;
- size -= UUID_TBL_ENT_SIZE;
- }
- return ptr - table;
-}
-
-/* Extract the uuids */
-static int extract_uuid_table(uint8_t* table, int size) {
- uint8_t* ptr = table;
- for(;size >= UUID_TBL_ENT_SIZE; size -= UUID_TBL_ENT_SIZE) {
- int index;
- uint32_t v32;
-
- /*uuid_t is just an array of bytes, so we can do a direct cast here */
- uint8_t* uuid = ptr;
- ptr += sizeof(uuid_t);
-
- /* Get the offset of the key */
- ptr = unpack_UINT32(ptr, &v32);
-
- /* Insert the new vtpm in sorted order */
- if((index = vtpm_get_index(uuid)) >= 0) {
- vtpmlogerror(VTPM_LOG_VTPM, "Vtpm (" UUID_FMT ") exists multiple times! ignoring...\n", UUID_BYTES(uuid));
- continue;
- }
- index = -index -1;
-
- vtpm_add(uuid, v32, index);
-
- }
- return ptr - table;
-}
-
-static void vtpm_decrypt_block(aes_context* aes,
- uint8_t* iv,
- uint8_t* cipher,
- uint8_t* plain,
- int cipher_sz,
- int* overlap)
-{
- int bytes_ext;
- /* Decrypt */
- aes_crypt_cbc(aes, AES_DECRYPT,
- cipher_sz,
- iv, cipher, plain + *overlap);
-
- /* Extract */
- bytes_ext = extract_uuid_table(plain, cipher_sz + *overlap);
-
- /* Copy left overs to the beginning */
- *overlap = cipher_sz + *overlap - bytes_ext;
- memcpy(plain, plain + bytes_ext, *overlap);
-}
-
-static int vtpm_encrypt_block(aes_context* aes,
- uint8_t* iv,
- uint8_t* plain,
- uint8_t* cipher,
- int block_sz,
- int* overlap,
- int* num_vtpms)
-{
- int bytes_to_crypt;
- int bytes_packed;
-
- /* Pack the uuid table */
- bytes_packed = *overlap + pack_uuid_table(plain + *overlap, block_sz - *overlap, num_vtpms);
- bytes_to_crypt = MIN(bytes_packed, block_sz);
-
- /* Add padding if we aren't on a multiple of the block size */
- if(bytes_to_crypt & (AES_BLOCK_SIZE-1)) {
- int oldsz = bytes_to_crypt;
- //add padding
- bytes_to_crypt += AES_BLOCK_SIZE - (bytes_to_crypt & (AES_BLOCK_SIZE-1));
- //fill padding with random bytes
- vtpmmgr_rand(plain + oldsz, bytes_to_crypt - oldsz);
- *overlap = 0;
- } else {
- *overlap = bytes_packed - bytes_to_crypt;
- }
-
- /* Encrypt this chunk */
- aes_crypt_cbc(aes, AES_ENCRYPT,
- bytes_to_crypt,
- iv, plain, cipher);
-
- /* Copy the left over partials to the beginning */
- memcpy(plain, plain + bytes_to_crypt, *overlap);
-
- return bytes_to_crypt;
-}
-
-static TPM_RESULT vtpm_storage_new_vtpm(const uuid_t uuid, int index) {
- TPM_RESULT status = TPM_SUCCESS;
- uint8_t plain[BUF_SIZE + AES_BLOCK_SIZE];
- uint8_t buf[BUF_SIZE];
- uint8_t* ptr;
- int cipher_sz;
- aes_context aes;
-
- /* Add new vtpm to the table */
- vtpm_add(uuid, g_store.end_offset, index);
- g_store.end_offset += RSA_CIPHER_SIZE;
-
- /* Compute the new end location of the encrypted uuid table */
- cipher_sz = AES_BLOCK_SIZE; //IV
- cipher_sz += g_store.num_vtpms * UUID_TBL_ENT_SIZE; //uuid table
- cipher_sz += (AES_BLOCK_SIZE - (cipher_sz & (AES_BLOCK_SIZE -1))) & (AES_BLOCK_SIZE-1); //aes padding
-
- /* Does this overlap any key data? If so they need to be relocated */
- int uuid_end = (g_store.uuid_offset + cipher_sz + RSA_CIPHER_SIZE) & ~(RSA_CIPHER_SIZE -1);
- for(int i = 0; i < g_store.num_vtpms; ++i) {
- if(g_store.vtpms[i].offset < uuid_end) {
-
- vtpmloginfo(VTPM_LOG_VTPM, "Relocating vtpm data\n");
-
- //Read the hashkey cipher text
- lseek(blkfront_fd, g_store.vtpms[i].offset, SEEK_SET);
- TRY_READ(buf, RSA_CIPHER_SIZE, "vtpm hashkey relocate");
-
- //Write the cipher text to new offset
- lseek(blkfront_fd, g_store.end_offset, SEEK_SET);
- TRY_WRITE(buf, RSA_CIPHER_SIZE, "vtpm hashkey relocate");
-
- //Save new offset
- g_store.vtpms[i].offset = g_store.end_offset;
- g_store.end_offset += RSA_CIPHER_SIZE;
- }
- }
-
- vtpmloginfo(VTPM_LOG_VTPM, "Generating a new symmetric key\n");
-
- /* Generate an aes key */
- TPMTRYRETURN(vtpmmgr_rand(plain, AES_KEY_SIZE));
- aes_setkey_enc(&aes, plain, AES_KEY_BITS);
- ptr = plain + AES_KEY_SIZE;
-
- /* Pack the crypted size */
- ptr = pack_UINT32(ptr, cipher_sz);
-
- vtpmloginfo(VTPM_LOG_VTPM, "Binding encrypted key\n");
-
- /* Seal the key and size */
- TPMTRYRETURN(TPM_Bind(&vtpm_globals.storage_key,
- plain,
- ptr - plain,
- buf));
-
- /* Write the sealed key to disk */
- lseek(blkfront_fd, g_store.aes_offset, SEEK_SET);
- TRY_WRITE(buf, RSA_CIPHER_SIZE, "vtpm aes key");
-
- /* ENCRYPT AND WRITE UUID TABLE */
-
- vtpmloginfo(VTPM_LOG_VTPM, "Encrypting the uuid table\n");
-
- int num_vtpms = 0;
- int overlap = 0;
- int bytes_crypted;
- uint8_t iv[AES_BLOCK_SIZE];
-
- /* Generate the iv for the first block */
- TPMTRYRETURN(vtpmmgr_rand(iv, AES_BLOCK_SIZE));
-
- /* Copy the iv to the cipher text buffer to be written to disk */
- memcpy(buf, iv, AES_BLOCK_SIZE);
- ptr = buf + AES_BLOCK_SIZE;
-
- /* Encrypt the first block of the uuid table */
- bytes_crypted = vtpm_encrypt_block(&aes,
- iv, //iv
- plain, //plaintext
- ptr, //cipher text
- BUF_SIZE - AES_BLOCK_SIZE,
- &overlap,
- &num_vtpms);
-
- /* Write the iv followed by the crypted table*/
- TRY_WRITE(buf, bytes_crypted + AES_BLOCK_SIZE, "vtpm uuid table");
-
- /* Decrement the number of bytes encrypted */
- cipher_sz -= bytes_crypted + AES_BLOCK_SIZE;
-
- /* If there are more vtpms, encrypt and write them block by block */
- while(cipher_sz > 0) {
- /* Encrypt the next block of the uuid table */
- bytes_crypted = vtpm_encrypt_block(&aes,
- iv,
- plain,
- buf,
- BUF_SIZE,
- &overlap,
- &num_vtpms);
-
- /* Write the cipher text to disk */
- TRY_WRITE(buf, bytes_crypted, "vtpm uuid table");
-
- cipher_sz -= bytes_crypted;
- }
-
- goto egress;
-abort_egress:
-egress:
- return status;
-}
-
-
-/**************************************
- * PUBLIC FUNCTIONS
- * ***********************************/
-
-int vtpm_storage_init(void) {
- struct blkfront_info info;
- if((blkdev = init_blkfront(NULL, &info)) == NULL) {
- return -1;
- }
- if((blkfront_fd = blkfront_open(blkdev)) < 0) {
- return -1;
- }
- return 0;
-}
-
-void vtpm_storage_shutdown(void) {
- reset_store();
- close(blkfront_fd);
-}
-
-TPM_RESULT vtpm_storage_load_hashkey(const uuid_t uuid, uint8_t hashkey[HASHKEYSZ])
-{
- TPM_RESULT status = TPM_SUCCESS;
- int index;
- uint8_t cipher[RSA_CIPHER_SIZE];
- uint8_t clear[RSA_CIPHER_SIZE];
- UINT32 clear_size;
-
- /* Find the index of this uuid */
- if((index = vtpm_get_index(uuid)) < 0) {
- index = -index-1;
- vtpmlogerror(VTPM_LOG_VTPM, "LoadKey failure: Unrecognized uuid! " UUID_FMT "\n", UUID_BYTES(uuid));
- status = TPM_BAD_PARAMETER;
- goto abort_egress;
- }
-
- /* Read the table entry */
- lseek(blkfront_fd, g_store.vtpms[index].offset, SEEK_SET);
- TRY_READ(cipher, RSA_CIPHER_SIZE, "vtpm hashkey data");
-
- /* Decrypt the table entry */
- TPMTRYRETURN(TPM_UnBind(
- vtpm_globals.storage_key_handle,
- RSA_CIPHER_SIZE,
- cipher,
- &clear_size,
- clear,
- (const TPM_AUTHDATA*)&vtpm_globals.storage_key_usage_auth,
- &vtpm_globals.oiap));
-
- if(clear_size < HASHKEYSZ) {
- vtpmloginfo(VTPM_LOG_VTPM, "Decrypted Hash key size (%" PRIu32 ") was too small!\n", clear_size);
- status = TPM_RESOURCES;
- goto abort_egress;
- }
-
- memcpy(hashkey, clear, HASHKEYSZ);
-
- vtpmloginfo(VTPM_LOG_VTPM, "Loaded hash and key for vtpm " UUID_FMT "\n", UUID_BYTES(uuid));
- goto egress;
-abort_egress:
- vtpmlogerror(VTPM_LOG_VTPM, "Failed to load key\n");
-egress:
- return status;
-}
-
-TPM_RESULT vtpm_storage_save_hashkey(const uuid_t uuid, uint8_t hashkey[HASHKEYSZ])
-{
- TPM_RESULT status = TPM_SUCCESS;
- int index;
- uint8_t buf[RSA_CIPHER_SIZE];
-
- /* Find the index of this uuid */
- if((index = vtpm_get_index(uuid)) < 0) {
- index = -index-1;
- /* Create a new vtpm */
- TPMTRYRETURN( vtpm_storage_new_vtpm(uuid, index) );
- }
-
- /* Encrypt the hash and key */
- TPMTRYRETURN( TPM_Bind(&vtpm_globals.storage_key,
- hashkey,
- HASHKEYSZ,
- buf));
-
- /* Write to disk */
- lseek(blkfront_fd, g_store.vtpms[index].offset, SEEK_SET);
- TRY_WRITE(buf, RSA_CIPHER_SIZE, "vtpm hashkey data");
-
- vtpmloginfo(VTPM_LOG_VTPM, "Saved hash and key for vtpm " UUID_FMT "\n", UUID_BYTES(uuid));
- goto egress;
-abort_egress:
- vtpmlogerror(VTPM_LOG_VTPM, "Failed to save key\n");
-egress:
- return status;
-}
-
-TPM_RESULT vtpm_storage_new_header()
-{
- TPM_RESULT status = TPM_SUCCESS;
- uint8_t buf[BUF_SIZE];
- uint8_t keybuf[AES_KEY_SIZE + sizeof(uint32_t)];
- uint8_t* ptr = buf;
- uint8_t* sptr;
-
- /* Clear everything first */
- reset_store();
-
- vtpmloginfo(VTPM_LOG_VTPM, "Creating new disk image header\n");
-
- /*Copy the ID string */
- memcpy(ptr, IDSTR, IDSTRLEN);
- ptr += IDSTRLEN;
-
- /*Copy the version */
- ptr = pack_UINT32(ptr, DISKVERS);
-
- /*Save the location of the key size */
- sptr = ptr;
- ptr += sizeof(UINT32);
-
- vtpmloginfo(VTPM_LOG_VTPM, "Saving root storage key..\n");
-
- /* Copy the storage key */
- ptr = pack_TPM_KEY(ptr, &vtpm_globals.storage_key);
-
- /* Now save the size */
- pack_UINT32(sptr, ptr - (sptr + 4));
-
- /* Create a fake aes key and set cipher text size to 0 */
- memset(keybuf, 0, sizeof(keybuf));
-
- vtpmloginfo(VTPM_LOG_VTPM, "Binding uuid table symmetric key..\n");
-
- /* Save the location of the aes key */
- g_store.aes_offset = ptr - buf;
-
- /* Store the fake aes key and vtpm count */
- TPMTRYRETURN(TPM_Bind(&vtpm_globals.storage_key,
- keybuf,
- sizeof(keybuf),
- ptr));
- ptr+= RSA_CIPHER_SIZE;
-
- /* Write the header to disk */
- lseek(blkfront_fd, 0, SEEK_SET);
- TRY_WRITE(buf, ptr-buf, "vtpm header");
-
- /* Save the location of the uuid table */
- g_store.uuid_offset = get_offset();
-
- /* Save the end offset */
- g_store.end_offset = (g_store.uuid_offset + RSA_CIPHER_SIZE) & ~(RSA_CIPHER_SIZE -1);
-
- vtpmloginfo(VTPM_LOG_VTPM, "Saved new manager disk header.\n");
-
- goto egress;
-abort_egress:
-egress:
- return status;
-}
-
-
-TPM_RESULT vtpm_storage_load_header(void)
-{
- TPM_RESULT status = TPM_SUCCESS;
- uint32_t v32;
- uint8_t buf[BUF_SIZE];
- uint8_t* ptr = buf;
- aes_context aes;
-
- /* Clear everything first */
- reset_store();
-
- /* Read the header from disk */
- lseek(blkfront_fd, 0, SEEK_SET);
- TRY_READ(buf, IDSTRLEN + sizeof(UINT32) + sizeof(UINT32), "vtpm header");
-
- vtpmloginfo(VTPM_LOG_VTPM, "Loading disk image header\n");
-
- /* Verify the ID string */
- if(memcmp(ptr, IDSTR, IDSTRLEN)) {
- vtpmlogerror(VTPM_LOG_VTPM, "Invalid ID string in disk image!\n");
- status = TPM_FAIL;
- goto abort_egress;
- }
- ptr+=IDSTRLEN;
-
- /* Unpack the version */
- ptr = unpack_UINT32(ptr, &v32);
-
- /* Verify the version */
- if(v32 != DISKVERS) {
- vtpmlogerror(VTPM_LOG_VTPM, "Unsupported disk image version number %" PRIu32 "\n", v32);
- status = TPM_FAIL;
- goto abort_egress;
- }
-
- /* Size of the storage key */
- ptr = unpack_UINT32(ptr, &v32);
-
- /* Sanity check */
- if(v32 > BUF_SIZE) {
- vtpmlogerror(VTPM_LOG_VTPM, "Size of storage key (%" PRIu32 ") is too large!\n", v32);
- status = TPM_IOERROR;
- goto abort_egress;
- }
-
- /* read the storage key */
- TRY_READ(buf, v32, "storage pub key");
-
- vtpmloginfo(VTPM_LOG_VTPM, "Unpacking storage key\n");
-
- /* unpack the storage key */
- ptr = unpack_TPM_KEY(buf, &vtpm_globals.storage_key, UNPACK_ALLOC);
-
- /* Load Storage Key into the TPM */
- TPMTRYRETURN( TPM_LoadKey(
- TPM_SRK_KEYHANDLE,
- &vtpm_globals.storage_key,
- &vtpm_globals.storage_key_handle,
- (const TPM_AUTHDATA*)&vtpm_globals.srk_auth,
- &vtpm_globals.oiap));
-
- /* Initialize the storage key auth */
- memset(vtpm_globals.storage_key_usage_auth, 0, sizeof(TPM_AUTHDATA));
-
- /* Store the offset of the aes key */
- g_store.aes_offset = get_offset();
-
- /* Read the rsa cipher text for the aes key */
- TRY_READ(buf, RSA_CIPHER_SIZE, "aes key");
- ptr = buf + RSA_CIPHER_SIZE;
-
- vtpmloginfo(VTPM_LOG_VTPM, "Unbinding uuid table symmetric key\n");
-
- /* Decrypt the aes key protecting the uuid table */
- UINT32 datalen;
- TPMTRYRETURN(TPM_UnBind(
- vtpm_globals.storage_key_handle,
- RSA_CIPHER_SIZE,
- buf,
- &datalen,
- ptr,
- (const TPM_AUTHDATA*)&vtpm_globals.storage_key_usage_auth,
- &vtpm_globals.oiap));
-
- /* Validate the length of the output buffer */
- if(datalen < AES_KEY_SIZE + sizeof(UINT32)) {
- vtpmlogerror(VTPM_LOG_VTPM, "Unbound AES key size (%d) was too small! expected (%zu)\n", datalen, AES_KEY_SIZE + sizeof(UINT32));
- status = TPM_IOERROR;
- goto abort_egress;
- }
-
- /* Extract the aes key */
- aes_setkey_dec(&aes, ptr, AES_KEY_BITS);
- ptr+= AES_KEY_SIZE;
-
- /* Extract the ciphertext size */
- ptr = unpack_UINT32(ptr, &v32);
- int cipher_size = v32;
-
- /* Sanity check */
- if(cipher_size & (AES_BLOCK_SIZE-1)) {
- vtpmlogerror(VTPM_LOG_VTPM, "Cipher text size (%" PRIu32 ") is not a multiple of the aes block size! (%d)\n", v32, AES_BLOCK_SIZE);
- status = TPM_IOERROR;
- goto abort_egress;
- }
-
- /* Save the location of the uuid table */
- g_store.uuid_offset = get_offset();
-
- /* Only decrypt the table if there are vtpms to decrypt */
- if(cipher_size > 0) {
- int rbytes;
- int overlap = 0;
- uint8_t plain[BUF_SIZE + AES_BLOCK_SIZE];
- uint8_t iv[AES_BLOCK_SIZE];
-
- vtpmloginfo(VTPM_LOG_VTPM, "Decrypting uuid table\n");
-
- /* Pre allocate the vtpm array */
- g_store.num_vtpms_alloced = cipher_size / UUID_TBL_ENT_SIZE;
- g_store.vtpms = malloc(sizeof(struct Vtpm) * g_store.num_vtpms_alloced);
-
- /* Read the iv and the first chunk of cipher text */
- rbytes = MIN(cipher_size, BUF_SIZE);
- TRY_READ(buf, rbytes, "vtpm uuid table\n");
- cipher_size -= rbytes;
-
- /* Copy the iv */
- memcpy(iv, buf, AES_BLOCK_SIZE);
- ptr = buf + AES_BLOCK_SIZE;
-
- /* Remove the iv from the number of bytes to decrypt */
- rbytes -= AES_BLOCK_SIZE;
-
- /* Decrypt and extract vtpms */
- vtpm_decrypt_block(&aes,
- iv, ptr, plain,
- rbytes, &overlap);
-
- /* Read the rest of the table if there is more */
- while(cipher_size > 0) {
- /* Read next chunk of cipher text */
- rbytes = MIN(cipher_size, BUF_SIZE);
- TRY_READ(buf, rbytes, "vtpm uuid table");
- cipher_size -= rbytes;
-
- /* Decrypt a block of text */
- vtpm_decrypt_block(&aes,
- iv, buf, plain,
- rbytes, &overlap);
-
- }
- vtpmloginfo(VTPM_LOG_VTPM, "Loaded %d vtpms!\n", g_store.num_vtpms);
- }
-
- /* The end of the key table, new vtpms go here */
- int uuid_end = (get_offset() + RSA_CIPHER_SIZE) & ~(RSA_CIPHER_SIZE -1);
- g_store.end_offset = uuid_end;
-
- /* Compute the end offset while validating vtpms*/
- for(int i = 0; i < g_store.num_vtpms; ++i) {
- /* offset must not collide with previous data */
- if(g_store.vtpms[i].offset < uuid_end) {
- vtpmlogerror(VTPM_LOG_VTPM, "vtpm: " UUID_FMT
- " offset (%d) is before end of uuid table (%d)!\n",
- UUID_BYTES(g_store.vtpms[i].uuid),
- g_store.vtpms[i].offset, uuid_end);
- status = TPM_IOERROR;
- goto abort_egress;
- }
- /* offset must be at a multiple of cipher size */
- if(g_store.vtpms[i].offset & (RSA_CIPHER_SIZE-1)) {
- vtpmlogerror(VTPM_LOG_VTPM, "vtpm: " UUID_FMT
- " offset(%d) is not at a multiple of the rsa cipher text size (%d)!\n",
- UUID_BYTES(g_store.vtpms[i].uuid),
- g_store.vtpms[i].offset, RSA_CIPHER_SIZE);
- status = TPM_IOERROR;
- goto abort_egress;
- }
- /* Save the last offset */
- if(g_store.vtpms[i].offset >= g_store.end_offset) {
- g_store.end_offset = g_store.vtpms[i].offset + RSA_CIPHER_SIZE;
- }
- }
-
- goto egress;
-abort_egress:
- //An error occured somewhere
- vtpmlogerror(VTPM_LOG_VTPM, "Failed to load manager data!\n");
-
- //Clear the data store
- reset_store();
-
- //Reset the storage key structure
- free_TPM_KEY(&vtpm_globals.storage_key);
- {
- TPM_KEY key = TPM_KEY_INIT;
- vtpm_globals.storage_key = key;
- }
-
- //Reset the storage key handle
- TPM_EvictKey(vtpm_globals.storage_key_handle);
- vtpm_globals.storage_key_handle = 0;
-egress:
- return status;
-}
-
-#if 0
-/* For testing disk IO */
-void add_fake_vtpms(int num) {
- for(int i = 0; i < num; ++i) {
- uint32_t ind = cpu_to_be32(i);
-
- uuid_t uuid;
- memset(uuid, 0, sizeof(uuid_t));
- memcpy(uuid, &ind, sizeof(ind));
- int index = vtpm_get_index(uuid);
- index = -index-1;
-
- vtpm_storage_new_vtpm(uuid, index);
- }
-}
-#endif
+++ /dev/null
-/*
- * Copyright (c) 2010-2012 United States Government, as represented by
- * the Secretary of Defense. All rights reserved.
- *
- * based off of the original tools/vtpm_manager code base which is:
- * Copyright (c) 2005, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef VTPM_STORAGE_H
-#define VTPM_STORAGE_h
-
-#include "uuid.h"
-
-#define VTPM_NVMKEY_SIZE 32
-#define HASHKEYSZ (sizeof(TPM_DIGEST) + VTPM_NVMKEY_SIZE)
-
-/* Initialize the storage system and its virtual disk */
-int vtpm_storage_init(void);
-
-/* Shutdown the storage system and its virtual disk */
-void vtpm_storage_shutdown(void);
-
-/* Loads Sha1 hash and 256 bit AES key from disk and stores them
- * packed together in outbuf. outbuf must be freed
- * by the caller using buffer_free()
- */
-TPM_RESULT vtpm_storage_load_hashkey(const uuid_t uuid, uint8_t hashkey[HASHKEYSZ]);
-
-/* inbuf must contain a sha1 hash followed by a 256 bit AES key.
- * Encrypts and stores the hash and key to disk */
-TPM_RESULT vtpm_storage_save_hashkey(const uuid_t uuid, uint8_t hashkey[HASHKEYSZ]);
-
-/* Load the vtpm manager data - call this on startup */
-TPM_RESULT vtpm_storage_load_header(void);
-
-/* Saves the vtpm manager data - call this on shutdown */
-TPM_RESULT vtpm_storage_new_header(void);
-
-
-#endif
struct vtpm_globals {
int tpm_fd;
- TPM_KEY storage_key;
- TPM_HANDLE storage_key_handle; // Key used by persistent store
TPM_AUTH_SESSION oiap; // OIAP session for storageKey
- TPM_AUTHDATA storage_key_usage_auth;
TPM_AUTHDATA owner_auth;
TPM_AUTHDATA srk_auth;
ctr_drbg_context ctr_drbg;
};
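+/*
+ * Per-client state handed to vtpmmgr_handle_cmd(): the UUID, group, and vTPM
+ * (if any) bound to the requesting connection, plus a kernel hash reported
+ * for that client domain.
+ */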
+struct tpm_opaque {
+ uuid_t *uuid;
+ struct mem_group *group;
+ struct mem_vtpm *vtpm;
+ uint8_t kern_hash[32];
+};
+
// --------------------------- Global Values --------------------------
extern struct vtpm_globals vtpm_globals; // Key info and DMI states
TPM_RESULT vtpmmgr_init(int argc, char** argv);
void vtpmmgr_shutdown(void);
-TPM_RESULT vtpmmgr_handle_cmd(const uuid_t uuid, tpmcmd_t* tpmcmd);
+TPM_RESULT vtpmmgr_handle_cmd(struct tpm_opaque *opq, tpmcmd_t* tpmcmd);
inline TPM_RESULT vtpmmgr_rand(unsigned char* bytes, size_t num_bytes) {
return ctr_drbg_random(&vtpm_globals.ctr_drbg, bytes, num_bytes) == 0 ? 0 : TPM_FAIL;